hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f71c8a133ef8994968d105d86d6a4f81b0c891b8 | 3,888 | py | Python | examples/python_service/pyservice.py | laungcisin/skein | 7f023239dcdee1482774466032bd63468cc7e42f | [
"BSD-3-Clause"
] | 124 | 2018-04-21T23:26:57.000Z | 2022-01-24T14:34:26.000Z | examples/python_service/pyservice.py | laungcisin/skein | 7f023239dcdee1482774466032bd63468cc7e42f | [
"BSD-3-Clause"
] | 144 | 2018-05-21T13:57:01.000Z | 2022-03-31T13:07:42.000Z | examples/python_service/pyservice.py | laungcisin/skein | 7f023239dcdee1482774466032bd63468cc7e42f | [
"BSD-3-Clause"
] | 36 | 2018-07-01T19:09:42.000Z | 2022-03-31T16:04:47.000Z | import argparse
import os
import tempfile
from getpass import getuser
import skein
from skein.tornado import SimpleAuthMixin, KerberosAuthMixin, init_kerberos
from tornado import web, ioloop
# An argument parser for configuring the application
parser = argparse.ArgumentParser(
description="A web service for submitting python scripts to YARN."
)
parser.add_argument(
"--keytab", default=None,
help=("The location of a keytab file. If not specified, 'simple' "
"authentication will be used")
)
parser.add_argument(
"--principal", default=None,
help=("The principal to use if using kerberos. Defaults to the "
"current user name.")
)
parser.add_argument(
"--port", default=8888, type=int,
help="The port to serve from. Default is 8888."
)
args = parser.parse_args()
if args.keytab:
# Use the kerberos auth mixin, and initialize kerberos for HTTP auth
AuthMixin = KerberosAuthMixin
init_kerberos(keytab=args.keytab)
# Also create the skein client with keytab and principal specified
skein_client = skein.Client(
keytab=args.keytab,
principal=args.principal or getuser()
)
else:
# Use the simple auth mixin
AuthMixin = SimpleAuthMixin
skein_client = skein.Client()
# Read in the `index.html` source
thisdir = os.path.dirname(__file__)
with open(os.path.join(thisdir, "index.html")) as f:
INDEX_HTML = f.read()
class LaunchHandler(AuthMixin, web.RequestHandler):
@property
def client(self):
return self.settings['client']
@web.authenticated
def get(self):
# Main page just displays the web form
self.write(INDEX_HTML)
@web.authenticated
async def post(self):
# Extract request parameters
queue = self.get_argument('queue') or 'default'
memory = float(self.get_argument('memory'))
vcores = int(self.get_argument('vcores'))
try:
script = self.request.files['script'][0]
except (IndexError, KeyError):
raise web.HTTPError(400, reason="Missing script")
# Check memory and vcores are in bounds
if memory < 0.5 or memory > 8:
raise web.HTTPError("0.5 <= memory <= 8 required")
if vcores < 1 or vcores > 4:
raise web.HTTPError("1 <= vcores <= 4 required")
# We need to write the script temporarily to disk so Skein can upload it
with tempfile.NamedTemporaryFile() as f:
f.write(script['body'])
f.file.flush()
# ** Construct the application specification **
# Note that we specify the user as user logged in to the web page.
# If kerberos authentication was used, this would match the user's
# principal.
spec = skein.ApplicationSpec(
name="pyscript",
queue=queue,
user=self.current_user,
master=skein.Master(
resources=skein.Resources(
memory="%f GiB" % memory,
vcores=vcores
),
files={script['filename']: f.name},
script="python %s" % script['filename']
)
)
# Submit the application and get a report
report = await ioloop.IOLoop.current().run_in_executor(
None, self.submit_and_report, spec
)
# Redirect the user to the application's tracking url
self.redirect(report.tracking_url)
def submit_and_report(self, spec):
app_id = self.client.submit(spec)
report = self.client.application_report(app_id)
return report
# Start the application and serve on the specified port
app = web.Application([("/", LaunchHandler)], client=skein_client)
app.listen(args.port)
ioloop.IOLoop.current().start()
| 31.868852 | 80 | 0.626286 | import argparse
import os
import tempfile
from getpass import getuser
import skein
from skein.tornado import SimpleAuthMixin, KerberosAuthMixin, init_kerberos
from tornado import web, ioloop
parser = argparse.ArgumentParser(
description="A web service for submitting python scripts to YARN."
)
parser.add_argument(
"--keytab", default=None,
help=("The location of a keytab file. If not specified, 'simple' "
"authentication will be used")
)
parser.add_argument(
"--principal", default=None,
help=("The principal to use if using kerberos. Defaults to the "
"current user name.")
)
parser.add_argument(
"--port", default=8888, type=int,
help="The port to serve from. Default is 8888."
)
args = parser.parse_args()
if args.keytab:
AuthMixin = KerberosAuthMixin
init_kerberos(keytab=args.keytab)
skein_client = skein.Client(
keytab=args.keytab,
principal=args.principal or getuser()
)
else:
AuthMixin = SimpleAuthMixin
skein_client = skein.Client()
thisdir = os.path.dirname(__file__)
with open(os.path.join(thisdir, "index.html")) as f:
INDEX_HTML = f.read()
class LaunchHandler(AuthMixin, web.RequestHandler):
@property
def client(self):
return self.settings['client']
@web.authenticated
def get(self):
self.write(INDEX_HTML)
@web.authenticated
async def post(self):
queue = self.get_argument('queue') or 'default'
memory = float(self.get_argument('memory'))
vcores = int(self.get_argument('vcores'))
try:
script = self.request.files['script'][0]
except (IndexError, KeyError):
raise web.HTTPError(400, reason="Missing script")
if memory < 0.5 or memory > 8:
raise web.HTTPError("0.5 <= memory <= 8 required")
if vcores < 1 or vcores > 4:
raise web.HTTPError("1 <= vcores <= 4 required")
with tempfile.NamedTemporaryFile() as f:
f.write(script['body'])
f.file.flush()
# principal.
spec = skein.ApplicationSpec(
name="pyscript",
queue=queue,
user=self.current_user,
master=skein.Master(
resources=skein.Resources(
memory="%f GiB" % memory,
vcores=vcores
),
files={script['filename']: f.name},
script="python %s" % script['filename']
)
)
# Submit the application and get a report
report = await ioloop.IOLoop.current().run_in_executor(
None, self.submit_and_report, spec
)
# Redirect the user to the application's tracking url
self.redirect(report.tracking_url)
def submit_and_report(self, spec):
app_id = self.client.submit(spec)
report = self.client.application_report(app_id)
return report
app = web.Application([("/", LaunchHandler)], client=skein_client)
app.listen(args.port)
ioloop.IOLoop.current().start()
| true | true |
f71c8b37ee651e199c6b02d5bd122d3d43661a14 | 2,874 | py | Python | scripts/product.py | etherisc/gif-contracts | 9bc09787a19bd79a0576e46856405cff7fdee15c | [
"Apache-2.0"
] | null | null | null | scripts/product.py | etherisc/gif-contracts | 9bc09787a19bd79a0576e46856405cff7fdee15c | [
"Apache-2.0"
] | null | null | null | scripts/product.py | etherisc/gif-contracts | 9bc09787a19bd79a0576e46856405cff7fdee15c | [
"Apache-2.0"
] | null | null | null | from web3 import Web3
from brownie import Contract
from brownie.convert import to_bytes
from brownie.network import accounts
from brownie.network.account import Account
from brownie import (
Wei,
Contract,
# Registry,
# RegistryController,
License,
LicenseController,
Policy,
PolicyController,
QueryController,
ProductService,
OracleService,
ComponentOwnerService,
PolicyFlowDefault,
InstanceOperatorService,
TestOracle,
TestProduct,
)
from scripts.const import (
ORACLE_INPUT_FORMAT,
ORACLE_OUTPUT_FORMAT,
ORACLE_NAME,
PRODUCT_NAME,
)
from scripts.util import (
get_account,
encode_function_data,
# s2h,
s2b32,
deployGifModule,
deployGifService,
)
from scripts.instance import (
GifInstance,
)
class GifTestOracle(object):
def __init__(self, instance: GifInstance, oracleOwner: Account):
operatorService = instance.getInstanceOperatorService()
componentOwnerService = instance.getComponentOwnerService()
oracleService = instance.getOracleService()
# 1) add oracle provider role to owner
opRole = operatorService.oracleProviderRole()
operatorService.addRoleToAccount(oracleOwner, opRole)
# 2) oracle owner creates oracle
self.oracle = TestOracle.deploy(
s2b32(ORACLE_NAME),
instance.getRegistry(),
{'from': oracleOwner})
# 3) oracle owner proposes oracle to instance
componentOwnerService.propose(
self.oracle,
{'from': oracleOwner})
# 4) instance operator approves oracle
operatorService.approveOracle(
self.oracle.getId(),
{'from': instance.getOwner()})
def getOracleId(self) -> int:
return self.oracle.getId()
def getOracleContract(self) -> TestOracle:
return self.oracle
class GifTestProduct(object):
def __init__(self, instance: GifInstance, oracle: GifTestOracle, productOwner: Account):
self.policyController = instance.getPolicyController()
operatorService = instance.getInstanceOperatorService()
productService = instance.getProductService()
self.product = TestProduct.deploy(
productService,
s2b32(PRODUCT_NAME),
oracle.getOracleId(),
{'from': productOwner})
operatorService.approveProduct(
self.product.getId(),
{'from': instance.getOwner()})
def getProductId(self) -> int:
return self.product.getId()
def getProductContract(self) -> TestProduct:
return self.product
def getPolicy(self, policyId: str):
return self.policyController.getPolicy(policyId) | 27.113208 | 93 | 0.641267 | from web3 import Web3
from brownie import Contract
from brownie.convert import to_bytes
from brownie.network import accounts
from brownie.network.account import Account
from brownie import (
Wei,
Contract,
License,
LicenseController,
Policy,
PolicyController,
QueryController,
ProductService,
OracleService,
ComponentOwnerService,
PolicyFlowDefault,
InstanceOperatorService,
TestOracle,
TestProduct,
)
from scripts.const import (
ORACLE_INPUT_FORMAT,
ORACLE_OUTPUT_FORMAT,
ORACLE_NAME,
PRODUCT_NAME,
)
from scripts.util import (
get_account,
encode_function_data,
s2b32,
deployGifModule,
deployGifService,
)
from scripts.instance import (
GifInstance,
)
class GifTestOracle(object):
def __init__(self, instance: GifInstance, oracleOwner: Account):
operatorService = instance.getInstanceOperatorService()
componentOwnerService = instance.getComponentOwnerService()
oracleService = instance.getOracleService()
opRole = operatorService.oracleProviderRole()
operatorService.addRoleToAccount(oracleOwner, opRole)
self.oracle = TestOracle.deploy(
s2b32(ORACLE_NAME),
instance.getRegistry(),
{'from': oracleOwner})
componentOwnerService.propose(
self.oracle,
{'from': oracleOwner})
operatorService.approveOracle(
self.oracle.getId(),
{'from': instance.getOwner()})
def getOracleId(self) -> int:
return self.oracle.getId()
def getOracleContract(self) -> TestOracle:
return self.oracle
class GifTestProduct(object):
def __init__(self, instance: GifInstance, oracle: GifTestOracle, productOwner: Account):
self.policyController = instance.getPolicyController()
operatorService = instance.getInstanceOperatorService()
productService = instance.getProductService()
self.product = TestProduct.deploy(
productService,
s2b32(PRODUCT_NAME),
oracle.getOracleId(),
{'from': productOwner})
operatorService.approveProduct(
self.product.getId(),
{'from': instance.getOwner()})
def getProductId(self) -> int:
return self.product.getId()
def getProductContract(self) -> TestProduct:
return self.product
def getPolicy(self, policyId: str):
return self.policyController.getPolicy(policyId) | true | true |
f71c8bb35951957eb8062c9ab9ba757124ceaade | 1,056 | py | Python | database/creds.py | LaudateCorpus1/n-view | 8f474e40344c9a48e1d6ad43a4cfcb7de641219c | [
"Apache-2.0"
] | null | null | null | database/creds.py | LaudateCorpus1/n-view | 8f474e40344c9a48e1d6ad43a4cfcb7de641219c | [
"Apache-2.0"
] | null | null | null | database/creds.py | LaudateCorpus1/n-view | 8f474e40344c9a48e1d6ad43a4cfcb7de641219c | [
"Apache-2.0"
] | null | null | null | # (C) Copyright 2019 Hewlett Packard Enterprise Development LP.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# __author__ = "@netwookie"
# __credits__ = ["Rick Kauffman"]
# __license__ = "Apache2.0"
# __version__ = "1.0.0"
# __maintainer__ = "Rick Kauffman"
# __email__ = "rick.a.kauffman@hpe.com"
from mongoengine import signals
from application import db
class Creds(db.Document):
hostip = db.StringField(db_field="h", required=True)
username= db.StringField(db_field="u", required=True)
password = db.StringField(db_field="p", required=True)
| 35.2 | 74 | 0.749053 |
from mongoengine import signals
from application import db
class Creds(db.Document):
hostip = db.StringField(db_field="h", required=True)
username= db.StringField(db_field="u", required=True)
password = db.StringField(db_field="p", required=True)
| true | true |
f71c8c2ec884cd59a6a4294250c173594ed45b44 | 2,351 | py | Python | bin/ssa-end-to-end-testing/modules/github_service.py | adriaandens/security_content | f1f2f8370ce0f0986804ea9f89555de307a49d66 | [
"Apache-2.0"
] | 1 | 2021-06-17T05:23:19.000Z | 2021-06-17T05:23:19.000Z | bin/ssa-end-to-end-testing/modules/github_service.py | adriaandens/security_content | f1f2f8370ce0f0986804ea9f89555de307a49d66 | [
"Apache-2.0"
] | null | null | null | bin/ssa-end-to-end-testing/modules/github_service.py | adriaandens/security_content | f1f2f8370ce0f0986804ea9f89555de307a49d66 | [
"Apache-2.0"
] | null | null | null |
import git
import os
import logging
import glob
# Logger
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
LOGGER = logging.getLogger(__name__)
SECURITY_CONTENT_URL = "https://github.com/splunk/security_content"
class GithubService:
def __init__(self, security_content_branch):
self.security_content_branch = security_content_branch
self.security_content_repo_obj = self.clone_project(SECURITY_CONTENT_URL, f"security_content", f"develop")
self.security_content_repo_obj.git.checkout(security_content_branch)
def clone_project(self, url, project, branch):
LOGGER.info(f"Clone Security Content Project")
repo_obj = git.Repo.clone_from(url, project, branch=branch)
return repo_obj
def get_changed_test_files_ssa(self):
branch1 = self.security_content_branch
branch2 = 'develop'
g = git.Git('security_content')
changed_ssa_test_files = []
if branch1 != 'develop':
differ = g.diff('--name-only', branch1, branch2)
changed_files = differ.splitlines()
for file_path in changed_files:
# added or changed test files
if file_path.startswith('tests'):
if os.path.basename(file_path).startswith('ssa'):
if file_path not in changed_ssa_test_files:
changed_ssa_test_files.append(file_path)
# changed detections
if file_path.startswith('detections'):
if os.path.basename(file_path).startswith('ssa'):
file_path_base = os.path.splitext(file_path)[0].replace('detections', 'tests') + '.test'
file_path_new = file_path_base + '.yml'
if file_path_new not in changed_ssa_test_files:
changed_ssa_test_files.append(file_path_new)
# all SSA test files for nightly build
else:
changed_files = sorted(glob.glob('security_content/tests/*/*.yml'))
for file_path in changed_files:
file_path = file_path.replace('security_content/','')
if os.path.basename(file_path).startswith('ssa'):
changed_ssa_test_files.append(file_path)
return changed_ssa_test_files
| 36.169231 | 114 | 0.632071 |
import git
import os
import logging
import glob
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))
LOGGER = logging.getLogger(__name__)
SECURITY_CONTENT_URL = "https://github.com/splunk/security_content"
class GithubService:
def __init__(self, security_content_branch):
self.security_content_branch = security_content_branch
self.security_content_repo_obj = self.clone_project(SECURITY_CONTENT_URL, f"security_content", f"develop")
self.security_content_repo_obj.git.checkout(security_content_branch)
def clone_project(self, url, project, branch):
LOGGER.info(f"Clone Security Content Project")
repo_obj = git.Repo.clone_from(url, project, branch=branch)
return repo_obj
def get_changed_test_files_ssa(self):
branch1 = self.security_content_branch
branch2 = 'develop'
g = git.Git('security_content')
changed_ssa_test_files = []
if branch1 != 'develop':
differ = g.diff('--name-only', branch1, branch2)
changed_files = differ.splitlines()
for file_path in changed_files:
if file_path.startswith('tests'):
if os.path.basename(file_path).startswith('ssa'):
if file_path not in changed_ssa_test_files:
changed_ssa_test_files.append(file_path)
if file_path.startswith('detections'):
if os.path.basename(file_path).startswith('ssa'):
file_path_base = os.path.splitext(file_path)[0].replace('detections', 'tests') + '.test'
file_path_new = file_path_base + '.yml'
if file_path_new not in changed_ssa_test_files:
changed_ssa_test_files.append(file_path_new)
else:
changed_files = sorted(glob.glob('security_content/tests/*/*.yml'))
for file_path in changed_files:
file_path = file_path.replace('security_content/','')
if os.path.basename(file_path).startswith('ssa'):
changed_ssa_test_files.append(file_path)
return changed_ssa_test_files
| true | true |
f71c8ce9b3d8ee3617835b4bd38ad01e0b6f17d2 | 2,725 | py | Python | gumpy/split.py | gumpy-bci/gumpy | abd8230dc50bd8b0a2348c6e08a1bba1c0ed3146 | [
"MIT"
] | 55 | 2018-02-20T14:17:06.000Z | 2022-03-22T06:33:31.000Z | gumpy/gumpy/split.py | Tizzio/gumpy-project | c51ee75ddf1eaa58813b493282014da6f31f5591 | [
"MIT"
] | 5 | 2018-02-17T06:54:55.000Z | 2019-07-16T15:18:25.000Z | gumpy/gumpy/split.py | Tizzio/gumpy-project | c51ee75ddf1eaa58813b493282014da6f31f5591 | [
"MIT"
] | 23 | 2018-02-17T06:45:56.000Z | 2022-03-04T06:01:07.000Z | import sklearn.model_selection
import numpy as np
from sklearn.model_selection import ShuffleSplit, StratifiedShuffleSplit, cross_val_score, StratifiedKFold
def normal(X, labels, test_size):
"""Split a dataset into training and test parts.
Args:
X (numpy.ndarray): 2D features matrix
labels: labels vector
test_size: size of the split
Returns:
A 2D CSP features matrix
"""
Y = labels
X_train, X_test, Y_train, Y_test = \
sklearn.model_selection.train_test_split(X, Y,
test_size=test_size,
random_state=0)
return X_train, X_test, Y_train, Y_test
def time_series_split(features, labels, n_splits):
"""Split a dataset into n splits.
"""
xx = sklearn.model_selection.TimeSeriesSplit(n_splits)
for train_index, test_index in xx.split(features):
X_train, X_test = features[train_index], features[test_index]
y_train, y_test = labels[train_index], labels[test_index]
return X_train, X_test, y_train, y_test
def stratified_KFold(features, labels, n_splits):
"""Stratified K-Folds cross-validator
Stratification is the process of rearranging the data as to ensure each fold is a good representative of the whole
and by also keeping the balance of classes
"""
skf = StratifiedKFold(n_splits)
skf.get_n_splits(features, labels)
for train_index, test_index in skf.split(features, labels):
X_train, X_test = features[train_index], features[test_index]
Y_train, Y_test = labels[train_index], labels[test_index]
return X_train, X_test, Y_train, Y_test
#Stratified ShuffleSplit cross-validator
def stratified_shuffle_Split(features, labels, n_splits,test_size,random_state):
"""Stratified ShuffleSplit cross-validator
"""
cv = StratifiedShuffleSplit(n_splits, test_size, random_state=random_state)
for train_index, test_index in cv.split(features,labels):
X_train = features[train_index]
X_test = features[test_index]
Y_train = labels[train_index]
Y_test = labels[test_index]
return X_train, X_test, Y_train, Y_test
#Random permutation cross-validator
def shuffle_Split(features, labels, n_splits,test_size,random_state):
"""ShuffleSplit: Random permutation cross-validator
"""
cv = ShuffleSplit(n_splits, test_size, random_state=random_state)
for train_index, test_index in cv.split(features):
X_train = features[train_index]
X_test = features[test_index]
Y_train = labels[train_index]
Y_test = labels[test_index]
return X_train, X_test, Y_train, Y_test
| 36.333333 | 119 | 0.693945 | import sklearn.model_selection
import numpy as np
from sklearn.model_selection import ShuffleSplit, StratifiedShuffleSplit, cross_val_score, StratifiedKFold
def normal(X, labels, test_size):
Y = labels
X_train, X_test, Y_train, Y_test = \
sklearn.model_selection.train_test_split(X, Y,
test_size=test_size,
random_state=0)
return X_train, X_test, Y_train, Y_test
def time_series_split(features, labels, n_splits):
xx = sklearn.model_selection.TimeSeriesSplit(n_splits)
for train_index, test_index in xx.split(features):
X_train, X_test = features[train_index], features[test_index]
y_train, y_test = labels[train_index], labels[test_index]
return X_train, X_test, y_train, y_test
def stratified_KFold(features, labels, n_splits):
skf = StratifiedKFold(n_splits)
skf.get_n_splits(features, labels)
for train_index, test_index in skf.split(features, labels):
X_train, X_test = features[train_index], features[test_index]
Y_train, Y_test = labels[train_index], labels[test_index]
return X_train, X_test, Y_train, Y_test
def stratified_shuffle_Split(features, labels, n_splits,test_size,random_state):
cv = StratifiedShuffleSplit(n_splits, test_size, random_state=random_state)
for train_index, test_index in cv.split(features,labels):
X_train = features[train_index]
X_test = features[test_index]
Y_train = labels[train_index]
Y_test = labels[test_index]
return X_train, X_test, Y_train, Y_test
def shuffle_Split(features, labels, n_splits,test_size,random_state):
cv = ShuffleSplit(n_splits, test_size, random_state=random_state)
for train_index, test_index in cv.split(features):
X_train = features[train_index]
X_test = features[test_index]
Y_train = labels[train_index]
Y_test = labels[test_index]
return X_train, X_test, Y_train, Y_test
| true | true |
f71c8d37ae326e29cdf957282fbbe1c51cf54ac4 | 1,004 | py | Python | src/slack.py | villoro/airflow_tasks | 81bd892744a9bbbf6e01903649b6c3786a955a5a | [
"MIT"
] | null | null | null | src/slack.py | villoro/airflow_tasks | 81bd892744a9bbbf6e01903649b6c3786a955a5a | [
"MIT"
] | 4 | 2020-10-09T15:59:09.000Z | 2020-11-18T08:34:44.000Z | src/slack.py | villoro/airflow_tasks | 81bd892744a9bbbf6e01903649b6c3786a955a5a | [
"MIT"
] | null | null | null | import json
import requests
from utils import get_secret
from utils import is_pro
def send_slack(text="", channel="test", blocks=None):
assert channel in ["test", "events", "general"]
webhook = get_secret(f"SLACK_WEBHOOK_{channel.upper()}")
data = {"text": text}
if blocks:
data["blocks"] = blocks
res = requests.post(
webhook, data=json.dumps(data), headers={"Content-Type": "application/json"}
)
res.raise_for_status()
def slack_state_handler(task, old_state, new_state):
if not new_state.is_finished():
return new_state
failure = new_state.is_failed()
# Prepare message
if failure:
msg = f"*{task.name}:* :x:"
else:
msg = f"*{task.name}:* {task.duration} :heavy_check_mark:"
# Notify result
send_slack(msg, channel="events" if is_pro() else "test")
# In pro notify about failures in general
if failure and is_pro():
send_slack(msg, channel="general")
return new_state
| 21.361702 | 84 | 0.644422 | import json
import requests
from utils import get_secret
from utils import is_pro
def send_slack(text="", channel="test", blocks=None):
assert channel in ["test", "events", "general"]
webhook = get_secret(f"SLACK_WEBHOOK_{channel.upper()}")
data = {"text": text}
if blocks:
data["blocks"] = blocks
res = requests.post(
webhook, data=json.dumps(data), headers={"Content-Type": "application/json"}
)
res.raise_for_status()
def slack_state_handler(task, old_state, new_state):
if not new_state.is_finished():
return new_state
failure = new_state.is_failed()
if failure:
msg = f"*{task.name}:* :x:"
else:
msg = f"*{task.name}:* {task.duration} :heavy_check_mark:"
send_slack(msg, channel="events" if is_pro() else "test")
if failure and is_pro():
send_slack(msg, channel="general")
return new_state
| true | true |
f71c8d87b4e0910142ebc974a5c242cbc32868ab | 798 | py | Python | tree/b_my_solution.py | master-cim/algorithm | a57f473ceb32b96240989e31ac33154e55c00724 | [
"MIT"
] | 1 | 2022-03-31T07:30:53.000Z | 2022-03-31T07:30:53.000Z | tree/b_my_solution.py | master-cim/algorithm | a57f473ceb32b96240989e31ac33154e55c00724 | [
"MIT"
] | null | null | null | tree/b_my_solution.py | master-cim/algorithm | a57f473ceb32b96240989e31ac33154e55c00724 | [
"MIT"
] | 2 | 2022-03-04T09:42:03.000Z | 2022-03-30T14:51:32.000Z | # B. Сбалансированное дерево
# ID успешной посылки 66593272
class Node:
def __init__(self, value, left=None, right=None):
self.value = value
self.right = right
self.left = left
def height(root):
if root is None:
return 0
return max(height(root.left), height(root.right)) + 1
def solution(root):
if root is None:
return True
left_height = height(root.left)
right_height = height(root.right)
if ((abs(left_height - right_height) <= 1)
and solution(root.left) is True
and solution(root.right) is True):
return True
return False
def test():
node1 = Node(1)
node2 = Node(-5)
node3 = Node(3, node1, node2)
node4 = Node(10)
node5 = Node(2, node3, node4)
assert solution(node5)
| 21.567568 | 57 | 0.616541 |
class Node:
def __init__(self, value, left=None, right=None):
self.value = value
self.right = right
self.left = left
def height(root):
if root is None:
return 0
return max(height(root.left), height(root.right)) + 1
def solution(root):
if root is None:
return True
left_height = height(root.left)
right_height = height(root.right)
if ((abs(left_height - right_height) <= 1)
and solution(root.left) is True
and solution(root.right) is True):
return True
return False
def test():
node1 = Node(1)
node2 = Node(-5)
node3 = Node(3, node1, node2)
node4 = Node(10)
node5 = Node(2, node3, node4)
assert solution(node5)
| true | true |
f71c8d946e5ae29a441cb944deb2a30473a80d7d | 21,205 | py | Python | py/desispec/scripts/stdstars.py | segasai/desispec | 4786347a8ad44effa4985671423f7ba0129ba6c3 | [
"BSD-3-Clause"
] | null | null | null | py/desispec/scripts/stdstars.py | segasai/desispec | 4786347a8ad44effa4985671423f7ba0129ba6c3 | [
"BSD-3-Clause"
] | null | null | null | py/desispec/scripts/stdstars.py | segasai/desispec | 4786347a8ad44effa4985671423f7ba0129ba6c3 | [
"BSD-3-Clause"
] | null | null | null |
"""
Get the normalized best template to do flux calibration.
"""
#- TODO: refactor algorithmic code into a separate module/function
import argparse
import sys
import numpy as np
from astropy.io import fits
from astropy import units
from astropy.table import Table
from desispec import io
from desispec.fluxcalibration import match_templates,normalize_templates,isStdStar
from desispec.interpolation import resample_flux
from desiutil.log import get_logger
from desispec.parallel import default_nproc
from desispec.io.filters import load_legacy_survey_filter
from desiutil.dust import ext_odonnell,extinction_total_to_selective_ratio
from desispec.fiberbitmasking import get_fiberbitmasked_frame
def parse(options=None):
parser = argparse.ArgumentParser(description="Fit of standard star spectra in frames.")
parser.add_argument('--frames', type = str, default = None, required=True, nargs='*',
help = 'list of path to DESI frame fits files (needs to be same exposure, spectro)')
parser.add_argument('--skymodels', type = str, default = None, required=True, nargs='*',
help = 'list of path to DESI sky model fits files (needs to be same exposure, spectro)')
parser.add_argument('--fiberflats', type = str, default = None, required=True, nargs='*',
help = 'list of path to DESI fiberflats fits files (needs to be same exposure, spectro)')
parser.add_argument('--starmodels', type = str, help = 'path of spectro-photometric stellar spectra fits')
parser.add_argument('-o','--outfile', type = str, help = 'output file for normalized stdstar model flux')
parser.add_argument('--ncpu', type = int, default = default_nproc, required = False, help = 'use ncpu for multiprocessing')
parser.add_argument('--delta-color', type = float, default = 0.2, required = False, help = 'max delta-color for the selection of standard stars (on top of meas. errors)')
parser.add_argument('--color', type = str, default = "G-R", choices=['G-R', 'R-Z'], required = False, help = 'color for selection of standard stars')
parser.add_argument('--z-max', type = float, default = 0.008, required = False, help = 'max peculiar velocity (blue/red)shift range')
parser.add_argument('--z-res', type = float, default = 0.00002, required = False, help = 'dz grid resolution')
parser.add_argument('--template-error', type = float, default = 0.1, required = False, help = 'fractional template error used in chi2 computation (about 0.1 for BOSS b1)')
parser.add_argument('--maxstdstars', type=int, default=30, \
help='Maximum number of stdstars to include')
log = get_logger()
args = None
if options is None:
args = parser.parse_args()
cmd = ' '.join(sys.argv)
else:
args = parser.parse_args(options)
cmd = 'desi_fit_stdstars ' + ' '.join(options)
log.info('RUNNING {}'.format(cmd))
return args
def safe_read_key(header,key) :
value = None
try :
value=header[key]
except KeyError :
value = None
pass
if value is None : # second try
value=header[key.ljust(8).upper()]
return value
def dust_transmission(wave,ebv) :
Rv = 3.1
extinction = ext_odonnell(wave,Rv=Rv)
return 10**(-Rv*extinction*ebv/2.5)
def main(args) :
""" finds the best models of all standard stars in the frame
and normlize the model flux. Output is written to a file and will be called for calibration.
"""
log = get_logger()
log.info("mag delta %s = %f (for the pre-selection of stellar models)"%(args.color,args.delta_color))
log.info('multiprocess parallelizing with {} processes'.format(args.ncpu))
# READ DATA
############################################
# First loop through and group by exposure and spectrograph
frames_by_expid = {}
for filename in args.frames :
log.info("reading %s"%filename)
frame=io.read_frame(filename)
expid = safe_read_key(frame.meta,"EXPID")
camera = safe_read_key(frame.meta,"CAMERA").strip().lower()
spec = camera[1]
uniq_key = (expid,spec)
if uniq_key in frames_by_expid.keys():
frames_by_expid[uniq_key][camera] = frame
else:
frames_by_expid[uniq_key] = {camera: frame}
frames={}
flats={}
skies={}
spectrograph=None
starfibers=None
starindices=None
fibermap=None
# For each unique expid,spec pair, get the logical OR of the FIBERSTATUS for all
# cameras and then proceed with extracting the frame information
# once we modify the fibermap FIBERSTATUS
for (expid,spec),camdict in frames_by_expid.items():
fiberstatus = None
for frame in camdict.values():
if fiberstatus is None:
fiberstatus = frame.fibermap['FIBERSTATUS'].data.copy()
else:
fiberstatus |= frame.fibermap['FIBERSTATUS']
for camera,frame in camdict.items():
frame.fibermap['FIBERSTATUS'] |= fiberstatus
# Set fibermask flagged spectra to have 0 flux and variance
frame = get_fiberbitmasked_frame(frame,bitmask='stdstars',ivar_framemask=True)
frame_fibermap = frame.fibermap
frame_starindices = np.where(isStdStar(frame_fibermap))[0]
#- Confirm that all fluxes have entries but trust targeting bits
#- to get basic magnitude range correct
keep = np.ones(len(frame_starindices), dtype=bool)
for colname in ['FLUX_G', 'FLUX_R', 'FLUX_Z']: #- and W1 and W2?
keep &= frame_fibermap[colname][frame_starindices] > 10**((22.5-30)/2.5)
keep &= frame_fibermap[colname][frame_starindices] < 10**((22.5-0)/2.5)
frame_starindices = frame_starindices[keep]
if spectrograph is None :
spectrograph = frame.spectrograph
fibermap = frame_fibermap
starindices=frame_starindices
starfibers=fibermap["FIBER"][starindices]
elif spectrograph != frame.spectrograph :
log.error("incompatible spectrographs %d != %d"%(spectrograph,frame.spectrograph))
raise ValueError("incompatible spectrographs %d != %d"%(spectrograph,frame.spectrograph))
elif starindices.size != frame_starindices.size or np.sum(starindices!=frame_starindices)>0 :
log.error("incompatible fibermap")
raise ValueError("incompatible fibermap")
if not camera in frames :
frames[camera]=[]
frames[camera].append(frame)
# possibly cleanup memory
del frames_by_expid
for filename in args.skymodels :
log.info("reading %s"%filename)
sky=io.read_sky(filename)
camera=safe_read_key(sky.header,"CAMERA").strip().lower()
if not camera in skies :
skies[camera]=[]
skies[camera].append(sky)
for filename in args.fiberflats :
log.info("reading %s"%filename)
flat=io.read_fiberflat(filename)
camera=safe_read_key(flat.header,"CAMERA").strip().lower()
# NEED TO ADD MORE CHECKS
if camera in flats:
log.warning("cannot handle several flats of same camera (%s), will use only the first one"%camera)
#raise ValueError("cannot handle several flats of same camera (%s)"%camera)
else :
flats[camera]=flat
if starindices.size == 0 :
log.error("no STD star found in fibermap")
raise ValueError("no STD star found in fibermap")
log.info("found %d STD stars"%starindices.size)
# log.warning("Not using flux errors for Standard Star fits!")
# DIVIDE FLAT AND SUBTRACT SKY , TRIM DATA
############################################
# since poping dict, we need to copy keys to iterate over to avoid
# RuntimeError due to changing dict
frame_cams = list(frames.keys())
for cam in frame_cams:
if not cam in skies:
log.warning("Missing sky for %s"%cam)
frames.pop(cam)
continue
if not cam in flats:
log.warning("Missing flat for %s"%cam)
frames.pop(cam)
continue
flat=flats[cam]
for frame,sky in zip(frames[cam],skies[cam]) :
frame.flux = frame.flux[starindices]
frame.ivar = frame.ivar[starindices]
frame.ivar *= (frame.mask[starindices] == 0)
frame.ivar *= (sky.ivar[starindices] != 0)
frame.ivar *= (sky.mask[starindices] == 0)
frame.ivar *= (flat.ivar[starindices] != 0)
frame.ivar *= (flat.mask[starindices] == 0)
frame.flux *= ( frame.ivar > 0) # just for clean plots
for star in range(frame.flux.shape[0]) :
ok=np.where((frame.ivar[star]>0)&(flat.fiberflat[star]!=0))[0]
if ok.size > 0 :
frame.flux[star] = frame.flux[star]/flat.fiberflat[star] - sky.flux[star]
frame.resolution_data = frame.resolution_data[starindices]
nframes=len(frames[cam])
if nframes>1 :
# optimal weights for the coaddition = ivar*throughput, not directly ivar,
# we estimate the relative throughput with median fluxes at this stage
medflux=np.zeros(nframes)
for i,frame in enumerate(frames[cam]) :
if np.sum(frame.ivar>0) == 0 :
log.error("ivar=0 for all std star spectra in frame {}-{:08d}".format(cam,frame.meta["EXPID"]))
else :
medflux[i] = np.median(frame.flux[frame.ivar>0])
log.debug("medflux = {}".format(medflux))
medflux *= (medflux>0)
if np.sum(medflux>0)==0 :
log.error("mean median flux = 0, for all stars in fibers {}".format(list(frames[cam][0].fibermap["FIBER"][starindices])))
sys.exit(12)
mmedflux = np.mean(medflux[medflux>0])
weights=medflux/mmedflux
log.info("coadding {} exposures in cam {}, w={}".format(nframes,cam,weights))
sw=np.zeros(frames[cam][0].flux.shape)
swf=np.zeros(frames[cam][0].flux.shape)
swr=np.zeros(frames[cam][0].resolution_data.shape)
for i,frame in enumerate(frames[cam]) :
sw += weights[i]*frame.ivar
swf += weights[i]*frame.ivar*frame.flux
swr += weights[i]*frame.ivar[:,None,:]*frame.resolution_data
coadded_frame = frames[cam][0]
coadded_frame.ivar = sw
coadded_frame.flux = swf/(sw+(sw==0))
coadded_frame.resolution_data = swr/((sw+(sw==0))[:,None,:])
frames[cam] = [ coadded_frame ]
# CHECK S/N
############################################
# for each band in 'brz', record quadratic sum of median S/N across wavelength
snr=dict()
for band in ['b','r','z'] :
snr[band]=np.zeros(starindices.size)
for cam in frames :
band=cam[0].lower()
for frame in frames[cam] :
msnr = np.median( frame.flux * np.sqrt( frame.ivar ) / np.sqrt(np.gradient(frame.wave)) , axis=1 ) # median SNR per sqrt(A.)
msnr *= (msnr>0)
snr[band] = np.sqrt( snr[band]**2 + msnr**2 )
log.info("SNR(B) = {}".format(snr['b']))
###############################
max_number_of_stars = 50
min_blue_snr = 4.
###############################
indices=np.argsort(snr['b'])[::-1][:max_number_of_stars]
validstars = np.where(snr['b'][indices]>min_blue_snr)[0]
#- TODO: later we filter on models based upon color, thus throwing
#- away very blue stars for which we don't have good models.
log.info("Number of stars with median stacked blue S/N > {} /sqrt(A) = {}".format(min_blue_snr,validstars.size))
if validstars.size == 0 :
log.error("No valid star")
sys.exit(12)
validstars = indices[validstars]
for band in ['b','r','z'] :
snr[band]=snr[band][validstars]
log.info("BLUE SNR of selected stars={}".format(snr['b']))
for cam in frames :
for frame in frames[cam] :
frame.flux = frame.flux[validstars]
frame.ivar = frame.ivar[validstars]
frame.resolution_data = frame.resolution_data[validstars]
starindices = starindices[validstars]
starfibers = starfibers[validstars]
nstars = starindices.size
fibermap = Table(fibermap[starindices])
# MASK OUT THROUGHPUT DIP REGION
############################################
mask_throughput_dip_region = True
if mask_throughput_dip_region :
wmin=4300.
wmax=4500.
log.warning("Masking out the wavelength region [{},{}]A in the standard star fit".format(wmin,wmax))
for cam in frames :
for frame in frames[cam] :
ii=np.where( (frame.wave>=wmin)&(frame.wave<=wmax) )[0]
if ii.size>0 :
frame.ivar[:,ii] = 0
# READ MODELS
############################################
log.info("reading star models in %s"%args.starmodels)
stdwave,stdflux,templateid,teff,logg,feh=io.read_stdstar_templates(args.starmodels)
# COMPUTE MAGS OF MODELS FOR EACH STD STAR MAG
############################################
#- Support older fibermaps
if 'PHOTSYS' not in fibermap.colnames:
log.warning('Old fibermap format; using defaults for missing columns')
log.warning(" PHOTSYS = 'S'")
log.warning(" EBV = 0.0")
fibermap['PHOTSYS'] = 'S'
fibermap['EBV'] = 0.0
model_filters = dict()
for band in ["G","R","Z"] :
for photsys in np.unique(fibermap['PHOTSYS']) :
model_filters[band+photsys] = load_legacy_survey_filter(band=band,photsys=photsys)
log.info("computing model mags for %s"%sorted(model_filters.keys()))
model_mags = dict()
fluxunits = 1e-17 * units.erg / units.s / units.cm**2 / units.Angstrom
for filter_name, filter_response in model_filters.items():
model_mags[filter_name] = filter_response.get_ab_magnitude(stdflux*fluxunits,stdwave)
log.info("done computing model mags")
# LOOP ON STARS TO FIND BEST MODEL
############################################
linear_coefficients=np.zeros((nstars,stdflux.shape[0]))
chi2dof=np.zeros((nstars))
redshift=np.zeros((nstars))
normflux=[]
star_mags = dict()
star_unextincted_mags = dict()
photometric_systems = np.unique(fibermap['PHOTSYS'])
for band in ['G', 'R', 'Z']:
star_mags[band] = 22.5 - 2.5 * np.log10(fibermap['FLUX_'+band])
star_unextincted_mags[band] = np.zeros(star_mags[band].shape)
for photsys in photometric_systems :
r_band = extinction_total_to_selective_ratio(band , photsys) # dimensionless
# r_band = a_band / E(B-V)
# E(B-V) is a difference of magnitudes (dimensionless)
# a_band = -2.5*log10(effective dust transmission) , dimensionless
# effective dust transmission =
# integral( SED(lambda) * filter_transmission(lambda,band) * milkyway_dust_transmission(lambda,E(B-V)) dlamdba)
# / integral( SED(lambda) * filter_transmission(lambda,band) dlamdba)
selection = (fibermap['PHOTSYS'] == photsys)
a_band = r_band * fibermap['EBV'][selection] # dimensionless
star_unextincted_mags[band][selection] = 22.5 - 2.5 * np.log10(fibermap['FLUX_'+band][selection]) - a_band
star_colors = dict()
star_colors['G-R'] = star_mags['G'] - star_mags['R']
star_colors['R-Z'] = star_mags['R'] - star_mags['Z']
star_unextincted_colors = dict()
star_unextincted_colors['G-R'] = star_unextincted_mags['G'] - star_unextincted_mags['R']
star_unextincted_colors['R-Z'] = star_unextincted_mags['R'] - star_unextincted_mags['Z']
fitted_model_colors = np.zeros(nstars)
for star in range(nstars) :
log.info("finding best model for observed star #%d"%star)
# np.array of wave,flux,ivar,resol
wave = {}
flux = {}
ivar = {}
resolution_data = {}
for camera in frames :
for i,frame in enumerate(frames[camera]) :
identifier="%s-%d"%(camera,i)
wave[identifier]=frame.wave
flux[identifier]=frame.flux[star]
ivar[identifier]=frame.ivar[star]
resolution_data[identifier]=frame.resolution_data[star]
# preselect models based on magnitudes
photsys=fibermap['PHOTSYS'][star]
if not args.color in ['G-R','R-Z'] :
raise ValueError('Unknown color {}'.format(args.color))
bands=args.color.split("-")
model_colors = model_mags[bands[0]+photsys] - model_mags[bands[1]+photsys]
color_diff = model_colors - star_unextincted_colors[args.color][star]
selection = np.abs(color_diff) < args.delta_color
if np.sum(selection) == 0 :
log.warning("no model in the selected color range for this star")
continue
# smallest cube in parameter space including this selection (needed for interpolation)
new_selection = (teff>=np.min(teff[selection]))&(teff<=np.max(teff[selection]))
new_selection &= (logg>=np.min(logg[selection]))&(logg<=np.max(logg[selection]))
new_selection &= (feh>=np.min(feh[selection]))&(feh<=np.max(feh[selection]))
selection = np.where(new_selection)[0]
log.info("star#%d fiber #%d, %s = %f, number of pre-selected models = %d/%d"%(
star, starfibers[star], args.color, star_unextincted_colors[args.color][star],
selection.size, stdflux.shape[0]))
# Match unextincted standard stars to data
coefficients, redshift[star], chi2dof[star] = match_templates(
wave, flux, ivar, resolution_data,
stdwave, stdflux[selection],
teff[selection], logg[selection], feh[selection],
ncpu=args.ncpu, z_max=args.z_max, z_res=args.z_res,
template_error=args.template_error
)
linear_coefficients[star,selection] = coefficients
log.info('Star Fiber: {}; TEFF: {:.3f}; LOGG: {:.3f}; FEH: {:.3f}; Redshift: {:g}; Chisq/dof: {:.3f}'.format(
starfibers[star],
np.inner(teff,linear_coefficients[star]),
np.inner(logg,linear_coefficients[star]),
np.inner(feh,linear_coefficients[star]),
redshift[star],
chi2dof[star])
)
# Apply redshift to original spectrum at full resolution
model=np.zeros(stdwave.size)
redshifted_stdwave = stdwave*(1+redshift[star])
for i,c in enumerate(linear_coefficients[star]) :
if c != 0 :
model += c*np.interp(stdwave,redshifted_stdwave,stdflux[i])
# Apply dust extinction to the model
log.info("Applying MW dust extinction to star {} with EBV = {}".format(star,fibermap['EBV'][star]))
model *= dust_transmission(stdwave, fibermap['EBV'][star])
# Compute final color of dust-extincted model
photsys=fibermap['PHOTSYS'][star]
if not args.color in ['G-R','R-Z'] :
raise ValueError('Unknown color {}'.format(args.color))
bands=args.color.split("-")
model_mag1 = model_filters[bands[0]+photsys].get_ab_magnitude(model*fluxunits, stdwave)
model_mag2 = model_filters[bands[1]+photsys].get_ab_magnitude(model*fluxunits, stdwave)
fitted_model_colors[star] = model_mag1 - model_mag2
if bands[0]=="R" :
model_magr = model_mag1
elif bands[1]=="R" :
model_magr = model_mag2
#- TODO: move this back into normalize_templates, at the cost of
#- recalculating a model magnitude?
# Normalize the best model using reported magnitude
scalefac=10**((model_magr - star_mags['R'][star])/2.5)
log.info('scaling R mag {:.3f} to {:.3f} using scale {}'.format(model_magr, star_mags['R'][star], scalefac))
normflux.append(model*scalefac)
# Now write the normalized flux for all best models to a file
normflux=np.array(normflux)
fitted_stars = np.where(chi2dof != 0)[0]
if fitted_stars.size == 0 :
log.error("No star has been fit.")
sys.exit(12)
data={}
data['LOGG']=linear_coefficients[fitted_stars,:].dot(logg)
data['TEFF']= linear_coefficients[fitted_stars,:].dot(teff)
data['FEH']= linear_coefficients[fitted_stars,:].dot(feh)
data['CHI2DOF']=chi2dof[fitted_stars]
data['REDSHIFT']=redshift[fitted_stars]
data['COEFF']=linear_coefficients[fitted_stars,:]
data['DATA_%s'%args.color]=star_colors[args.color][fitted_stars]
data['MODEL_%s'%args.color]=fitted_model_colors[fitted_stars]
data['BLUE_SNR'] = snr['b'][fitted_stars]
data['RED_SNR'] = snr['r'][fitted_stars]
data['NIR_SNR'] = snr['z'][fitted_stars]
io.write_stdstar_models(args.outfile,normflux,stdwave,starfibers[fitted_stars],data)
| 43.01217 | 175 | 0.614902 |
import argparse
import sys
import numpy as np
from astropy.io import fits
from astropy import units
from astropy.table import Table
from desispec import io
from desispec.fluxcalibration import match_templates,normalize_templates,isStdStar
from desispec.interpolation import resample_flux
from desiutil.log import get_logger
from desispec.parallel import default_nproc
from desispec.io.filters import load_legacy_survey_filter
from desiutil.dust import ext_odonnell,extinction_total_to_selective_ratio
from desispec.fiberbitmasking import get_fiberbitmasked_frame
def parse(options=None):
parser = argparse.ArgumentParser(description="Fit of standard star spectra in frames.")
parser.add_argument('--frames', type = str, default = None, required=True, nargs='*',
help = 'list of path to DESI frame fits files (needs to be same exposure, spectro)')
parser.add_argument('--skymodels', type = str, default = None, required=True, nargs='*',
help = 'list of path to DESI sky model fits files (needs to be same exposure, spectro)')
parser.add_argument('--fiberflats', type = str, default = None, required=True, nargs='*',
help = 'list of path to DESI fiberflats fits files (needs to be same exposure, spectro)')
parser.add_argument('--starmodels', type = str, help = 'path of spectro-photometric stellar spectra fits')
parser.add_argument('-o','--outfile', type = str, help = 'output file for normalized stdstar model flux')
parser.add_argument('--ncpu', type = int, default = default_nproc, required = False, help = 'use ncpu for multiprocessing')
parser.add_argument('--delta-color', type = float, default = 0.2, required = False, help = 'max delta-color for the selection of standard stars (on top of meas. errors)')
parser.add_argument('--color', type = str, default = "G-R", choices=['G-R', 'R-Z'], required = False, help = 'color for selection of standard stars')
parser.add_argument('--z-max', type = float, default = 0.008, required = False, help = 'max peculiar velocity (blue/red)shift range')
parser.add_argument('--z-res', type = float, default = 0.00002, required = False, help = 'dz grid resolution')
parser.add_argument('--template-error', type = float, default = 0.1, required = False, help = 'fractional template error used in chi2 computation (about 0.1 for BOSS b1)')
parser.add_argument('--maxstdstars', type=int, default=30, \
help='Maximum number of stdstars to include')
log = get_logger()
args = None
if options is None:
args = parser.parse_args()
cmd = ' '.join(sys.argv)
else:
args = parser.parse_args(options)
cmd = 'desi_fit_stdstars ' + ' '.join(options)
log.info('RUNNING {}'.format(cmd))
return args
def safe_read_key(header,key) :
value = None
try :
value=header[key]
except KeyError :
value = None
pass
if value is None :
value=header[key.ljust(8).upper()]
return value
def dust_transmission(wave,ebv) :
Rv = 3.1
extinction = ext_odonnell(wave,Rv=Rv)
return 10**(-Rv*extinction*ebv/2.5)
def main(args) :
log = get_logger()
log.info("mag delta %s = %f (for the pre-selection of stellar models)"%(args.color,args.delta_color))
log.info('multiprocess parallelizing with {} processes'.format(args.ncpu))
for camera,frame in camdict.items():
frame.fibermap['FIBERSTATUS'] |= fiberstatus
frame = get_fiberbitmasked_frame(frame,bitmask='stdstars',ivar_framemask=True)
frame_fibermap = frame.fibermap
frame_starindices = np.where(isStdStar(frame_fibermap))[0]
keep = np.ones(len(frame_starindices), dtype=bool)
for colname in ['FLUX_G', 'FLUX_R', 'FLUX_Z']:
keep &= frame_fibermap[colname][frame_starindices] > 10**((22.5-30)/2.5)
keep &= frame_fibermap[colname][frame_starindices] < 10**((22.5-0)/2.5)
frame_starindices = frame_starindices[keep]
if spectrograph is None :
spectrograph = frame.spectrograph
fibermap = frame_fibermap
starindices=frame_starindices
starfibers=fibermap["FIBER"][starindices]
elif spectrograph != frame.spectrograph :
log.error("incompatible spectrographs %d != %d"%(spectrograph,frame.spectrograph))
raise ValueError("incompatible spectrographs %d != %d"%(spectrograph,frame.spectrograph))
elif starindices.size != frame_starindices.size or np.sum(starindices!=frame_starindices)>0 :
log.error("incompatible fibermap")
raise ValueError("incompatible fibermap")
if not camera in frames :
frames[camera]=[]
frames[camera].append(frame)
del frames_by_expid
for filename in args.skymodels :
log.info("reading %s"%filename)
sky=io.read_sky(filename)
camera=safe_read_key(sky.header,"CAMERA").strip().lower()
if not camera in skies :
skies[camera]=[]
skies[camera].append(sky)
for filename in args.fiberflats :
log.info("reading %s"%filename)
flat=io.read_fiberflat(filename)
camera=safe_read_key(flat.header,"CAMERA").strip().lower()
if camera in flats:
log.warning("cannot handle several flats of same camera (%s), will use only the first one"%camera)
else :
flats[camera]=flat
if starindices.size == 0 :
log.error("no STD star found in fibermap")
raise ValueError("no STD star found in fibermap")
log.info("found %d STD stars"%starindices.size)
erflat[star]!=0))[0]
if ok.size > 0 :
frame.flux[star] = frame.flux[star]/flat.fiberflat[star] - sky.flux[star]
frame.resolution_data = frame.resolution_data[starindices]
nframes=len(frames[cam])
if nframes>1 :
medflux=np.zeros(nframes)
for i,frame in enumerate(frames[cam]) :
if np.sum(frame.ivar>0) == 0 :
log.error("ivar=0 for all std star spectra in frame {}-{:08d}".format(cam,frame.meta["EXPID"]))
else :
medflux[i] = np.median(frame.flux[frame.ivar>0])
log.debug("medflux = {}".format(medflux))
medflux *= (medflux>0)
if np.sum(medflux>0)==0 :
log.error("mean median flux = 0, for all stars in fibers {}".format(list(frames[cam][0].fibermap["FIBER"][starindices])))
sys.exit(12)
mmedflux = np.mean(medflux[medflux>0])
weights=medflux/mmedflux
log.info("coadding {} exposures in cam {}, w={}".format(nframes,cam,weights))
sw=np.zeros(frames[cam][0].flux.shape)
swf=np.zeros(frames[cam][0].flux.shape)
swr=np.zeros(frames[cam][0].resolution_data.shape)
for i,frame in enumerate(frames[cam]) :
sw += weights[i]*frame.ivar
swf += weights[i]*frame.ivar*frame.flux
swr += weights[i]*frame.ivar[:,None,:]*frame.resolution_data
coadded_frame = frames[cam][0]
coadded_frame.ivar = sw
coadded_frame.flux = swf/(sw+(sw==0))
coadded_frame.resolution_data = swr/((sw+(sw==0))[:,None,:])
frames[cam] = [ coadded_frame ]
:,ii] = 0
# READ MODELS
############################################
log.info("reading star models in %s"%args.starmodels)
stdwave,stdflux,templateid,teff,logg,feh=io.read_stdstar_templates(args.starmodels)
# COMPUTE MAGS OF MODELS FOR EACH STD STAR MAG
############################################
#- Support older fibermaps
if 'PHOTSYS' not in fibermap.colnames:
log.warning('Old fibermap format; using defaults for missing columns')
log.warning(" PHOTSYS = 'S'")
log.warning(" EBV = 0.0")
fibermap['PHOTSYS'] = 'S'
fibermap['EBV'] = 0.0
model_filters = dict()
for band in ["G","R","Z"] :
for photsys in np.unique(fibermap['PHOTSYS']) :
model_filters[band+photsys] = load_legacy_survey_filter(band=band,photsys=photsys)
log.info("computing model mags for %s"%sorted(model_filters.keys()))
model_mags = dict()
fluxunits = 1e-17 * units.erg / units.s / units.cm**2 / units.Angstrom
for filter_name, filter_response in model_filters.items():
model_mags[filter_name] = filter_response.get_ab_magnitude(stdflux*fluxunits,stdwave)
log.info("done computing model mags")
# LOOP ON STARS TO FIND BEST MODEL
############################################
linear_coefficients=np.zeros((nstars,stdflux.shape[0]))
chi2dof=np.zeros((nstars))
redshift=np.zeros((nstars))
normflux=[]
star_mags = dict()
star_unextincted_mags = dict()
photometric_systems = np.unique(fibermap['PHOTSYS'])
for band in ['G', 'R', 'Z']:
star_mags[band] = 22.5 - 2.5 * np.log10(fibermap['FLUX_'+band])
star_unextincted_mags[band] = np.zeros(star_mags[band].shape)
for photsys in photometric_systems :
r_band = extinction_total_to_selective_ratio(band , photsys) # dimensionless
# r_band = a_band / E(B-V)
# E(B-V) is a difference of magnitudes (dimensionless)
# a_band = -2.5*log10(effective dust transmission) , dimensionless
# effective dust transmission =
# integral( SED(lambda) * filter_transmission(lambda,band) * milkyway_dust_transmission(lambda,E(B-V)) dlamdba)
# / integral( SED(lambda) * filter_transmission(lambda,band) dlamdba)
selection = (fibermap['PHOTSYS'] == photsys)
a_band = r_band * fibermap['EBV'][selection] # dimensionless
star_unextincted_mags[band][selection] = 22.5 - 2.5 * np.log10(fibermap['FLUX_'+band][selection]) - a_band
star_colors = dict()
star_colors['G-R'] = star_mags['G'] - star_mags['R']
star_colors['R-Z'] = star_mags['R'] - star_mags['Z']
star_unextincted_colors = dict()
star_unextincted_colors['G-R'] = star_unextincted_mags['G'] - star_unextincted_mags['R']
star_unextincted_colors['R-Z'] = star_unextincted_mags['R'] - star_unextincted_mags['Z']
fitted_model_colors = np.zeros(nstars)
for star in range(nstars) :
log.info("finding best model for observed star #%d"%star)
# np.array of wave,flux,ivar,resol
wave = {}
flux = {}
ivar = {}
resolution_data = {}
for camera in frames :
for i,frame in enumerate(frames[camera]) :
identifier="%s-%d"%(camera,i)
wave[identifier]=frame.wave
flux[identifier]=frame.flux[star]
ivar[identifier]=frame.ivar[star]
resolution_data[identifier]=frame.resolution_data[star]
# preselect models based on magnitudes
photsys=fibermap['PHOTSYS'][star]
if not args.color in ['G-R','R-Z'] :
raise ValueError('Unknown color {}'.format(args.color))
bands=args.color.split("-")
model_colors = model_mags[bands[0]+photsys] - model_mags[bands[1]+photsys]
color_diff = model_colors - star_unextincted_colors[args.color][star]
selection = np.abs(color_diff) < args.delta_color
if np.sum(selection) == 0 :
log.warning("no model in the selected color range for this star")
continue
# smallest cube in parameter space including this selection (needed for interpolation)
new_selection = (teff>=np.min(teff[selection]))&(teff<=np.max(teff[selection]))
new_selection &= (logg>=np.min(logg[selection]))&(logg<=np.max(logg[selection]))
new_selection &= (feh>=np.min(feh[selection]))&(feh<=np.max(feh[selection]))
selection = np.where(new_selection)[0]
log.info("star#%d fiber #%d, %s = %f, number of pre-selected models = %d/%d"%(
star, starfibers[star], args.color, star_unextincted_colors[args.color][star],
selection.size, stdflux.shape[0]))
# Match unextincted standard stars to data
coefficients, redshift[star], chi2dof[star] = match_templates(
wave, flux, ivar, resolution_data,
stdwave, stdflux[selection],
teff[selection], logg[selection], feh[selection],
ncpu=args.ncpu, z_max=args.z_max, z_res=args.z_res,
template_error=args.template_error
)
linear_coefficients[star,selection] = coefficients
log.info('Star Fiber: {}; TEFF: {:.3f}; LOGG: {:.3f}; FEH: {:.3f}; Redshift: {:g}; Chisq/dof: {:.3f}'.format(
starfibers[star],
np.inner(teff,linear_coefficients[star]),
np.inner(logg,linear_coefficients[star]),
np.inner(feh,linear_coefficients[star]),
redshift[star],
chi2dof[star])
)
# Apply redshift to original spectrum at full resolution
model=np.zeros(stdwave.size)
redshifted_stdwave = stdwave*(1+redshift[star])
for i,c in enumerate(linear_coefficients[star]) :
if c != 0 :
model += c*np.interp(stdwave,redshifted_stdwave,stdflux[i])
# Apply dust extinction to the model
log.info("Applying MW dust extinction to star {} with EBV = {}".format(star,fibermap['EBV'][star]))
model *= dust_transmission(stdwave, fibermap['EBV'][star])
# Compute final color of dust-extincted model
photsys=fibermap['PHOTSYS'][star]
if not args.color in ['G-R','R-Z'] :
raise ValueError('Unknown color {}'.format(args.color))
bands=args.color.split("-")
model_mag1 = model_filters[bands[0]+photsys].get_ab_magnitude(model*fluxunits, stdwave)
model_mag2 = model_filters[bands[1]+photsys].get_ab_magnitude(model*fluxunits, stdwave)
fitted_model_colors[star] = model_mag1 - model_mag2
if bands[0]=="R" :
model_magr = model_mag1
elif bands[1]=="R" :
model_magr = model_mag2
#- TODO: move this back into normalize_templates, at the cost of
#- recalculating a model magnitude?
# Normalize the best model using reported magnitude
scalefac=10**((model_magr - star_mags['R'][star])/2.5)
log.info('scaling R mag {:.3f} to {:.3f} using scale {}'.format(model_magr, star_mags['R'][star], scalefac))
normflux.append(model*scalefac)
# Now write the normalized flux for all best models to a file
normflux=np.array(normflux)
fitted_stars = np.where(chi2dof != 0)[0]
if fitted_stars.size == 0 :
log.error("No star has been fit.")
sys.exit(12)
data={}
data['LOGG']=linear_coefficients[fitted_stars,:].dot(logg)
data['TEFF']= linear_coefficients[fitted_stars,:].dot(teff)
data['FEH']= linear_coefficients[fitted_stars,:].dot(feh)
data['CHI2DOF']=chi2dof[fitted_stars]
data['REDSHIFT']=redshift[fitted_stars]
data['COEFF']=linear_coefficients[fitted_stars,:]
data['DATA_%s'%args.color]=star_colors[args.color][fitted_stars]
data['MODEL_%s'%args.color]=fitted_model_colors[fitted_stars]
data['BLUE_SNR'] = snr['b'][fitted_stars]
data['RED_SNR'] = snr['r'][fitted_stars]
data['NIR_SNR'] = snr['z'][fitted_stars]
io.write_stdstar_models(args.outfile,normflux,stdwave,starfibers[fitted_stars],data)
| true | true |
f71c8ece5ac79d8215cf3897a8f8aec003849358 | 33,179 | py | Python | ros/src/tl_detector/light_classification/protos/box_predictor_pb2.py | allaydesai/SDCND_system_integration | 078c1f77ea0c5f09af42f7974d9b49a4000f10d7 | [
"MIT"
] | 13 | 2020-03-04T10:16:28.000Z | 2022-01-06T11:14:29.000Z | ros/src/tl_detector/light_classification/protos/box_predictor_pb2.py | allaydesai/SDCND_system_integration | 078c1f77ea0c5f09af42f7974d9b49a4000f10d7 | [
"MIT"
] | 5 | 2020-01-28T23:04:54.000Z | 2022-02-10T00:23:36.000Z | ros/src/tl_detector/light_classification/protos/box_predictor_pb2.py | allaydesai/SDCND_system_integration | 078c1f77ea0c5f09af42f7974d9b49a4000f10d7 | [
"MIT"
] | 6 | 2019-10-22T12:43:40.000Z | 2021-09-18T08:10:31.000Z | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: object_detection/protos/box_predictor.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from object_detection.protos import hyperparams_pb2 as object__detection_dot_protos_dot_hyperparams__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='object_detection/protos/box_predictor.proto',
package='object_detection.protos',
syntax='proto2',
serialized_pb=_b('\n+object_detection/protos/box_predictor.proto\x12\x17object_detection.protos\x1a)object_detection/protos/hyperparams.proto\"\x90\x03\n\x0c\x42oxPredictor\x12Y\n\x1b\x63onvolutional_box_predictor\x18\x01 \x01(\x0b\x32\x32.object_detection.protos.ConvolutionalBoxPredictorH\x00\x12P\n\x17mask_rcnn_box_predictor\x18\x02 \x01(\x0b\x32-.object_detection.protos.MaskRCNNBoxPredictorH\x00\x12G\n\x12rfcn_box_predictor\x18\x03 \x01(\x0b\x32).object_detection.protos.RfcnBoxPredictorH\x00\x12s\n)weight_shared_convolutional_box_predictor\x18\x04 \x01(\x0b\x32>.object_detection.protos.WeightSharedConvolutionalBoxPredictorH\x00\x42\x15\n\x13\x62ox_predictor_oneof\"\x90\x03\n\x19\x43onvolutionalBoxPredictor\x12>\n\x10\x63onv_hyperparams\x18\x01 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12\x14\n\tmin_depth\x18\x02 \x01(\x05:\x01\x30\x12\x14\n\tmax_depth\x18\x03 \x01(\x05:\x01\x30\x12&\n\x1bnum_layers_before_predictor\x18\x04 \x01(\x05:\x01\x30\x12\x19\n\x0buse_dropout\x18\x05 \x01(\x08:\x04true\x12%\n\x18\x64ropout_keep_probability\x18\x06 \x01(\x02:\x03\x30.8\x12\x16\n\x0bkernel_size\x18\x07 \x01(\x05:\x01\x31\x12\x18\n\rbox_code_size\x18\x08 \x01(\x05:\x01\x34\x12&\n\x17\x61pply_sigmoid_to_scores\x18\t \x01(\x08:\x05\x66\x61lse\x12%\n\x1a\x63lass_prediction_bias_init\x18\n \x01(\x02:\x01\x30\x12\x1c\n\ruse_depthwise\x18\x0b \x01(\x08:\x05\x66\x61lse\"\xcc\x05\n%WeightSharedConvolutionalBoxPredictor\x12>\n\x10\x63onv_hyperparams\x18\x01 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12&\n\x1bnum_layers_before_predictor\x18\x04 \x01(\x05:\x01\x30\x12\x10\n\x05\x64\x65pth\x18\x02 \x01(\x05:\x01\x30\x12\x16\n\x0bkernel_size\x18\x07 \x01(\x05:\x01\x33\x12\x18\n\rbox_code_size\x18\x08 \x01(\x05:\x01\x34\x12%\n\x1a\x63lass_prediction_bias_init\x18\n \x01(\x02:\x01\x30\x12\x1a\n\x0buse_dropout\x18\x0b \x01(\x08:\x05\x66\x61lse\x12%\n\x18\x64ropout_keep_probability\x18\x0c \x01(\x02:\x03\x30.8\x12%\n\x16share_prediction_tower\x18\r 
\x01(\x08:\x05\x66\x61lse\x12\x1c\n\ruse_depthwise\x18\x0e \x01(\x08:\x05\x66\x61lse\x12p\n\x0fscore_converter\x18\x10 \x01(\x0e\x32M.object_detection.protos.WeightSharedConvolutionalBoxPredictor.ScoreConverter:\x08IDENTITY\x12v\n\x18\x62ox_encodings_clip_range\x18\x11 \x01(\x0b\x32T.object_detection.protos.WeightSharedConvolutionalBoxPredictor.BoxEncodingsClipRange\x1a\x31\n\x15\x42oxEncodingsClipRange\x12\x0b\n\x03min\x18\x01 \x01(\x02\x12\x0b\n\x03max\x18\x02 \x01(\x02\"+\n\x0eScoreConverter\x12\x0c\n\x08IDENTITY\x10\x00\x12\x0b\n\x07SIGMOID\x10\x01\"\xbf\x04\n\x14MaskRCNNBoxPredictor\x12<\n\x0e\x66\x63_hyperparams\x18\x01 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12\x1a\n\x0buse_dropout\x18\x02 \x01(\x08:\x05\x66\x61lse\x12%\n\x18\x64ropout_keep_probability\x18\x03 \x01(\x02:\x03\x30.5\x12\x18\n\rbox_code_size\x18\x04 \x01(\x05:\x01\x34\x12>\n\x10\x63onv_hyperparams\x18\x05 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12%\n\x16predict_instance_masks\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\'\n\x1amask_prediction_conv_depth\x18\x07 \x01(\x05:\x03\x32\x35\x36\x12 \n\x11predict_keypoints\x18\x08 \x01(\x08:\x05\x66\x61lse\x12\x17\n\x0bmask_height\x18\t \x01(\x05:\x02\x31\x35\x12\x16\n\nmask_width\x18\n \x01(\x05:\x02\x31\x35\x12*\n\x1fmask_prediction_num_conv_layers\x18\x0b \x01(\x05:\x01\x32\x12\'\n\x18masks_are_class_agnostic\x18\x0c \x01(\x08:\x05\x66\x61lse\x12\'\n\x18share_box_across_classes\x18\r \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x63onvolve_then_upsample_masks\x18\x0e \x01(\x08:\x05\x66\x61lse\"\xf9\x01\n\x10RfcnBoxPredictor\x12>\n\x10\x63onv_hyperparams\x18\x01 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12\"\n\x17num_spatial_bins_height\x18\x02 \x01(\x05:\x01\x33\x12!\n\x16num_spatial_bins_width\x18\x03 \x01(\x05:\x01\x33\x12\x13\n\x05\x64\x65pth\x18\x04 \x01(\x05:\x04\x31\x30\x32\x34\x12\x18\n\rbox_code_size\x18\x05 \x01(\x05:\x01\x34\x12\x17\n\x0b\x63rop_height\x18\x06 \x01(\x05:\x02\x31\x32\x12\x16\n\ncrop_width\x18\x07 
\x01(\x05:\x02\x31\x32')
,
dependencies=[object__detection_dot_protos_dot_hyperparams__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_SCORECONVERTER = _descriptor.EnumDescriptor(
name='ScoreConverter',
full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.ScoreConverter',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='IDENTITY', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SIGMOID', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1595,
serialized_end=1638,
)
_sym_db.RegisterEnumDescriptor(_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_SCORECONVERTER)
_BOXPREDICTOR = _descriptor.Descriptor(
name='BoxPredictor',
full_name='object_detection.protos.BoxPredictor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='convolutional_box_predictor', full_name='object_detection.protos.BoxPredictor.convolutional_box_predictor', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mask_rcnn_box_predictor', full_name='object_detection.protos.BoxPredictor.mask_rcnn_box_predictor', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rfcn_box_predictor', full_name='object_detection.protos.BoxPredictor.rfcn_box_predictor', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='weight_shared_convolutional_box_predictor', full_name='object_detection.protos.BoxPredictor.weight_shared_convolutional_box_predictor', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='box_predictor_oneof', full_name='object_detection.protos.BoxPredictor.box_predictor_oneof',
index=0, containing_type=None, fields=[]),
],
serialized_start=116,
serialized_end=516,
)
_CONVOLUTIONALBOXPREDICTOR = _descriptor.Descriptor(
name='ConvolutionalBoxPredictor',
full_name='object_detection.protos.ConvolutionalBoxPredictor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='conv_hyperparams', full_name='object_detection.protos.ConvolutionalBoxPredictor.conv_hyperparams', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='min_depth', full_name='object_detection.protos.ConvolutionalBoxPredictor.min_depth', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max_depth', full_name='object_detection.protos.ConvolutionalBoxPredictor.max_depth', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_layers_before_predictor', full_name='object_detection.protos.ConvolutionalBoxPredictor.num_layers_before_predictor', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_dropout', full_name='object_detection.protos.ConvolutionalBoxPredictor.use_dropout', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dropout_keep_probability', full_name='object_detection.protos.ConvolutionalBoxPredictor.dropout_keep_probability', index=5,
number=6, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.8),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='kernel_size', full_name='object_detection.protos.ConvolutionalBoxPredictor.kernel_size', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='box_code_size', full_name='object_detection.protos.ConvolutionalBoxPredictor.box_code_size', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=4,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='apply_sigmoid_to_scores', full_name='object_detection.protos.ConvolutionalBoxPredictor.apply_sigmoid_to_scores', index=8,
number=9, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='class_prediction_bias_init', full_name='object_detection.protos.ConvolutionalBoxPredictor.class_prediction_bias_init', index=9,
number=10, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_depthwise', full_name='object_detection.protos.ConvolutionalBoxPredictor.use_depthwise', index=10,
number=11, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=519,
serialized_end=919,
)
_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE = _descriptor.Descriptor(
name='BoxEncodingsClipRange',
full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.BoxEncodingsClipRange',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='min', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.BoxEncodingsClipRange.min', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.BoxEncodingsClipRange.max', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1544,
serialized_end=1593,
)
_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR = _descriptor.Descriptor(
name='WeightSharedConvolutionalBoxPredictor',
full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='conv_hyperparams', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.conv_hyperparams', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_layers_before_predictor', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.num_layers_before_predictor', index=1,
number=4, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='depth', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.depth', index=2,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='kernel_size', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.kernel_size', index=3,
number=7, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=3,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='box_code_size', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.box_code_size', index=4,
number=8, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=4,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='class_prediction_bias_init', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.class_prediction_bias_init', index=5,
number=10, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_dropout', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.use_dropout', index=6,
number=11, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dropout_keep_probability', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.dropout_keep_probability', index=7,
number=12, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.8),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='share_prediction_tower', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.share_prediction_tower', index=8,
number=13, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_depthwise', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.use_depthwise', index=9,
number=14, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='score_converter', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.score_converter', index=10,
number=16, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='box_encodings_clip_range', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.box_encodings_clip_range', index=11,
number=17, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE, ],
enum_types=[
_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_SCORECONVERTER,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=922,
serialized_end=1638,
)
_MASKRCNNBOXPREDICTOR = _descriptor.Descriptor(
name='MaskRCNNBoxPredictor',
full_name='object_detection.protos.MaskRCNNBoxPredictor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='fc_hyperparams', full_name='object_detection.protos.MaskRCNNBoxPredictor.fc_hyperparams', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_dropout', full_name='object_detection.protos.MaskRCNNBoxPredictor.use_dropout', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dropout_keep_probability', full_name='object_detection.protos.MaskRCNNBoxPredictor.dropout_keep_probability', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.5),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='box_code_size', full_name='object_detection.protos.MaskRCNNBoxPredictor.box_code_size', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=4,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='conv_hyperparams', full_name='object_detection.protos.MaskRCNNBoxPredictor.conv_hyperparams', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='predict_instance_masks', full_name='object_detection.protos.MaskRCNNBoxPredictor.predict_instance_masks', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mask_prediction_conv_depth', full_name='object_detection.protos.MaskRCNNBoxPredictor.mask_prediction_conv_depth', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=256,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='predict_keypoints', full_name='object_detection.protos.MaskRCNNBoxPredictor.predict_keypoints', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mask_height', full_name='object_detection.protos.MaskRCNNBoxPredictor.mask_height', index=8,
number=9, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=15,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mask_width', full_name='object_detection.protos.MaskRCNNBoxPredictor.mask_width', index=9,
number=10, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=15,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mask_prediction_num_conv_layers', full_name='object_detection.protos.MaskRCNNBoxPredictor.mask_prediction_num_conv_layers', index=10,
number=11, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=2,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='masks_are_class_agnostic', full_name='object_detection.protos.MaskRCNNBoxPredictor.masks_are_class_agnostic', index=11,
number=12, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='share_box_across_classes', full_name='object_detection.protos.MaskRCNNBoxPredictor.share_box_across_classes', index=12,
number=13, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='convolve_then_upsample_masks', full_name='object_detection.protos.MaskRCNNBoxPredictor.convolve_then_upsample_masks', index=13,
number=14, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1641,
serialized_end=2216,
)
_RFCNBOXPREDICTOR = _descriptor.Descriptor(
name='RfcnBoxPredictor',
full_name='object_detection.protos.RfcnBoxPredictor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='conv_hyperparams', full_name='object_detection.protos.RfcnBoxPredictor.conv_hyperparams', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_spatial_bins_height', full_name='object_detection.protos.RfcnBoxPredictor.num_spatial_bins_height', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=3,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_spatial_bins_width', full_name='object_detection.protos.RfcnBoxPredictor.num_spatial_bins_width', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=3,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='depth', full_name='object_detection.protos.RfcnBoxPredictor.depth', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1024,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='box_code_size', full_name='object_detection.protos.RfcnBoxPredictor.box_code_size', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=4,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='crop_height', full_name='object_detection.protos.RfcnBoxPredictor.crop_height', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=12,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='crop_width', full_name='object_detection.protos.RfcnBoxPredictor.crop_width', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=12,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2219,
serialized_end=2468,
)
_BOXPREDICTOR.fields_by_name['convolutional_box_predictor'].message_type = _CONVOLUTIONALBOXPREDICTOR
_BOXPREDICTOR.fields_by_name['mask_rcnn_box_predictor'].message_type = _MASKRCNNBOXPREDICTOR
_BOXPREDICTOR.fields_by_name['rfcn_box_predictor'].message_type = _RFCNBOXPREDICTOR
_BOXPREDICTOR.fields_by_name['weight_shared_convolutional_box_predictor'].message_type = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR
_BOXPREDICTOR.oneofs_by_name['box_predictor_oneof'].fields.append(
_BOXPREDICTOR.fields_by_name['convolutional_box_predictor'])
_BOXPREDICTOR.fields_by_name['convolutional_box_predictor'].containing_oneof = _BOXPREDICTOR.oneofs_by_name['box_predictor_oneof']
_BOXPREDICTOR.oneofs_by_name['box_predictor_oneof'].fields.append(
_BOXPREDICTOR.fields_by_name['mask_rcnn_box_predictor'])
_BOXPREDICTOR.fields_by_name['mask_rcnn_box_predictor'].containing_oneof = _BOXPREDICTOR.oneofs_by_name['box_predictor_oneof']
_BOXPREDICTOR.oneofs_by_name['box_predictor_oneof'].fields.append(
_BOXPREDICTOR.fields_by_name['rfcn_box_predictor'])
_BOXPREDICTOR.fields_by_name['rfcn_box_predictor'].containing_oneof = _BOXPREDICTOR.oneofs_by_name['box_predictor_oneof']
_BOXPREDICTOR.oneofs_by_name['box_predictor_oneof'].fields.append(
_BOXPREDICTOR.fields_by_name['weight_shared_convolutional_box_predictor'])
_BOXPREDICTOR.fields_by_name['weight_shared_convolutional_box_predictor'].containing_oneof = _BOXPREDICTOR.oneofs_by_name['box_predictor_oneof']
_CONVOLUTIONALBOXPREDICTOR.fields_by_name['conv_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS
_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE.containing_type = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR
_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR.fields_by_name['conv_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS
_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR.fields_by_name['score_converter'].enum_type = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_SCORECONVERTER
_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR.fields_by_name['box_encodings_clip_range'].message_type = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE
_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_SCORECONVERTER.containing_type = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR
_MASKRCNNBOXPREDICTOR.fields_by_name['fc_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS
_MASKRCNNBOXPREDICTOR.fields_by_name['conv_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS
_RFCNBOXPREDICTOR.fields_by_name['conv_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS
DESCRIPTOR.message_types_by_name['BoxPredictor'] = _BOXPREDICTOR
DESCRIPTOR.message_types_by_name['ConvolutionalBoxPredictor'] = _CONVOLUTIONALBOXPREDICTOR
DESCRIPTOR.message_types_by_name['WeightSharedConvolutionalBoxPredictor'] = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR
DESCRIPTOR.message_types_by_name['MaskRCNNBoxPredictor'] = _MASKRCNNBOXPREDICTOR
DESCRIPTOR.message_types_by_name['RfcnBoxPredictor'] = _RFCNBOXPREDICTOR
BoxPredictor = _reflection.GeneratedProtocolMessageType('BoxPredictor', (_message.Message,), dict(
DESCRIPTOR = _BOXPREDICTOR,
__module__ = 'object_detection.protos.box_predictor_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.BoxPredictor)
))
_sym_db.RegisterMessage(BoxPredictor)
ConvolutionalBoxPredictor = _reflection.GeneratedProtocolMessageType('ConvolutionalBoxPredictor', (_message.Message,), dict(
DESCRIPTOR = _CONVOLUTIONALBOXPREDICTOR,
__module__ = 'object_detection.protos.box_predictor_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.ConvolutionalBoxPredictor)
))
_sym_db.RegisterMessage(ConvolutionalBoxPredictor)
WeightSharedConvolutionalBoxPredictor = _reflection.GeneratedProtocolMessageType('WeightSharedConvolutionalBoxPredictor', (_message.Message,), dict(
BoxEncodingsClipRange = _reflection.GeneratedProtocolMessageType('BoxEncodingsClipRange', (_message.Message,), dict(
DESCRIPTOR = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE,
__module__ = 'object_detection.protos.box_predictor_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.WeightSharedConvolutionalBoxPredictor.BoxEncodingsClipRange)
))
,
DESCRIPTOR = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR,
__module__ = 'object_detection.protos.box_predictor_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.WeightSharedConvolutionalBoxPredictor)
))
_sym_db.RegisterMessage(WeightSharedConvolutionalBoxPredictor)
_sym_db.RegisterMessage(WeightSharedConvolutionalBoxPredictor.BoxEncodingsClipRange)
MaskRCNNBoxPredictor = _reflection.GeneratedProtocolMessageType('MaskRCNNBoxPredictor', (_message.Message,), dict(
DESCRIPTOR = _MASKRCNNBOXPREDICTOR,
__module__ = 'object_detection.protos.box_predictor_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.MaskRCNNBoxPredictor)
))
_sym_db.RegisterMessage(MaskRCNNBoxPredictor)
RfcnBoxPredictor = _reflection.GeneratedProtocolMessageType('RfcnBoxPredictor', (_message.Message,), dict(
DESCRIPTOR = _RFCNBOXPREDICTOR,
__module__ = 'object_detection.protos.box_predictor_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.RfcnBoxPredictor)
))
_sym_db.RegisterMessage(RfcnBoxPredictor)
# @@protoc_insertion_point(module_scope)
| 53.0864 | 3,991 | 0.777811 |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
_sym_db = _symbol_database.Default()
from object_detection.protos import hyperparams_pb2 as object__detection_dot_protos_dot_hyperparams__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='object_detection/protos/box_predictor.proto',
package='object_detection.protos',
syntax='proto2',
serialized_pb=_b('\n+object_detection/protos/box_predictor.proto\x12\x17object_detection.protos\x1a)object_detection/protos/hyperparams.proto\"\x90\x03\n\x0c\x42oxPredictor\x12Y\n\x1b\x63onvolutional_box_predictor\x18\x01 \x01(\x0b\x32\x32.object_detection.protos.ConvolutionalBoxPredictorH\x00\x12P\n\x17mask_rcnn_box_predictor\x18\x02 \x01(\x0b\x32-.object_detection.protos.MaskRCNNBoxPredictorH\x00\x12G\n\x12rfcn_box_predictor\x18\x03 \x01(\x0b\x32).object_detection.protos.RfcnBoxPredictorH\x00\x12s\n)weight_shared_convolutional_box_predictor\x18\x04 \x01(\x0b\x32>.object_detection.protos.WeightSharedConvolutionalBoxPredictorH\x00\x42\x15\n\x13\x62ox_predictor_oneof\"\x90\x03\n\x19\x43onvolutionalBoxPredictor\x12>\n\x10\x63onv_hyperparams\x18\x01 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12\x14\n\tmin_depth\x18\x02 \x01(\x05:\x01\x30\x12\x14\n\tmax_depth\x18\x03 \x01(\x05:\x01\x30\x12&\n\x1bnum_layers_before_predictor\x18\x04 \x01(\x05:\x01\x30\x12\x19\n\x0buse_dropout\x18\x05 \x01(\x08:\x04true\x12%\n\x18\x64ropout_keep_probability\x18\x06 \x01(\x02:\x03\x30.8\x12\x16\n\x0bkernel_size\x18\x07 \x01(\x05:\x01\x31\x12\x18\n\rbox_code_size\x18\x08 \x01(\x05:\x01\x34\x12&\n\x17\x61pply_sigmoid_to_scores\x18\t \x01(\x08:\x05\x66\x61lse\x12%\n\x1a\x63lass_prediction_bias_init\x18\n \x01(\x02:\x01\x30\x12\x1c\n\ruse_depthwise\x18\x0b \x01(\x08:\x05\x66\x61lse\"\xcc\x05\n%WeightSharedConvolutionalBoxPredictor\x12>\n\x10\x63onv_hyperparams\x18\x01 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12&\n\x1bnum_layers_before_predictor\x18\x04 \x01(\x05:\x01\x30\x12\x10\n\x05\x64\x65pth\x18\x02 \x01(\x05:\x01\x30\x12\x16\n\x0bkernel_size\x18\x07 \x01(\x05:\x01\x33\x12\x18\n\rbox_code_size\x18\x08 \x01(\x05:\x01\x34\x12%\n\x1a\x63lass_prediction_bias_init\x18\n \x01(\x02:\x01\x30\x12\x1a\n\x0buse_dropout\x18\x0b \x01(\x08:\x05\x66\x61lse\x12%\n\x18\x64ropout_keep_probability\x18\x0c \x01(\x02:\x03\x30.8\x12%\n\x16share_prediction_tower\x18\r 
\x01(\x08:\x05\x66\x61lse\x12\x1c\n\ruse_depthwise\x18\x0e \x01(\x08:\x05\x66\x61lse\x12p\n\x0fscore_converter\x18\x10 \x01(\x0e\x32M.object_detection.protos.WeightSharedConvolutionalBoxPredictor.ScoreConverter:\x08IDENTITY\x12v\n\x18\x62ox_encodings_clip_range\x18\x11 \x01(\x0b\x32T.object_detection.protos.WeightSharedConvolutionalBoxPredictor.BoxEncodingsClipRange\x1a\x31\n\x15\x42oxEncodingsClipRange\x12\x0b\n\x03min\x18\x01 \x01(\x02\x12\x0b\n\x03max\x18\x02 \x01(\x02\"+\n\x0eScoreConverter\x12\x0c\n\x08IDENTITY\x10\x00\x12\x0b\n\x07SIGMOID\x10\x01\"\xbf\x04\n\x14MaskRCNNBoxPredictor\x12<\n\x0e\x66\x63_hyperparams\x18\x01 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12\x1a\n\x0buse_dropout\x18\x02 \x01(\x08:\x05\x66\x61lse\x12%\n\x18\x64ropout_keep_probability\x18\x03 \x01(\x02:\x03\x30.5\x12\x18\n\rbox_code_size\x18\x04 \x01(\x05:\x01\x34\x12>\n\x10\x63onv_hyperparams\x18\x05 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12%\n\x16predict_instance_masks\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\'\n\x1amask_prediction_conv_depth\x18\x07 \x01(\x05:\x03\x32\x35\x36\x12 \n\x11predict_keypoints\x18\x08 \x01(\x08:\x05\x66\x61lse\x12\x17\n\x0bmask_height\x18\t \x01(\x05:\x02\x31\x35\x12\x16\n\nmask_width\x18\n \x01(\x05:\x02\x31\x35\x12*\n\x1fmask_prediction_num_conv_layers\x18\x0b \x01(\x05:\x01\x32\x12\'\n\x18masks_are_class_agnostic\x18\x0c \x01(\x08:\x05\x66\x61lse\x12\'\n\x18share_box_across_classes\x18\r \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x63onvolve_then_upsample_masks\x18\x0e \x01(\x08:\x05\x66\x61lse\"\xf9\x01\n\x10RfcnBoxPredictor\x12>\n\x10\x63onv_hyperparams\x18\x01 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12\"\n\x17num_spatial_bins_height\x18\x02 \x01(\x05:\x01\x33\x12!\n\x16num_spatial_bins_width\x18\x03 \x01(\x05:\x01\x33\x12\x13\n\x05\x64\x65pth\x18\x04 \x01(\x05:\x04\x31\x30\x32\x34\x12\x18\n\rbox_code_size\x18\x05 \x01(\x05:\x01\x34\x12\x17\n\x0b\x63rop_height\x18\x06 \x01(\x05:\x02\x31\x32\x12\x16\n\ncrop_width\x18\x07 
\x01(\x05:\x02\x31\x32')
,
dependencies=[object__detection_dot_protos_dot_hyperparams__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_SCORECONVERTER = _descriptor.EnumDescriptor(
name='ScoreConverter',
full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.ScoreConverter',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='IDENTITY', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SIGMOID', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1595,
serialized_end=1638,
)
_sym_db.RegisterEnumDescriptor(_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_SCORECONVERTER)
_BOXPREDICTOR = _descriptor.Descriptor(
name='BoxPredictor',
full_name='object_detection.protos.BoxPredictor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='convolutional_box_predictor', full_name='object_detection.protos.BoxPredictor.convolutional_box_predictor', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mask_rcnn_box_predictor', full_name='object_detection.protos.BoxPredictor.mask_rcnn_box_predictor', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rfcn_box_predictor', full_name='object_detection.protos.BoxPredictor.rfcn_box_predictor', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='weight_shared_convolutional_box_predictor', full_name='object_detection.protos.BoxPredictor.weight_shared_convolutional_box_predictor', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='box_predictor_oneof', full_name='object_detection.protos.BoxPredictor.box_predictor_oneof',
index=0, containing_type=None, fields=[]),
],
serialized_start=116,
serialized_end=516,
)
_CONVOLUTIONALBOXPREDICTOR = _descriptor.Descriptor(
name='ConvolutionalBoxPredictor',
full_name='object_detection.protos.ConvolutionalBoxPredictor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='conv_hyperparams', full_name='object_detection.protos.ConvolutionalBoxPredictor.conv_hyperparams', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='min_depth', full_name='object_detection.protos.ConvolutionalBoxPredictor.min_depth', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max_depth', full_name='object_detection.protos.ConvolutionalBoxPredictor.max_depth', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_layers_before_predictor', full_name='object_detection.protos.ConvolutionalBoxPredictor.num_layers_before_predictor', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_dropout', full_name='object_detection.protos.ConvolutionalBoxPredictor.use_dropout', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dropout_keep_probability', full_name='object_detection.protos.ConvolutionalBoxPredictor.dropout_keep_probability', index=5,
number=6, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.8),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='kernel_size', full_name='object_detection.protos.ConvolutionalBoxPredictor.kernel_size', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='box_code_size', full_name='object_detection.protos.ConvolutionalBoxPredictor.box_code_size', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=4,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='apply_sigmoid_to_scores', full_name='object_detection.protos.ConvolutionalBoxPredictor.apply_sigmoid_to_scores', index=8,
number=9, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='class_prediction_bias_init', full_name='object_detection.protos.ConvolutionalBoxPredictor.class_prediction_bias_init', index=9,
number=10, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_depthwise', full_name='object_detection.protos.ConvolutionalBoxPredictor.use_depthwise', index=10,
number=11, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=519,
serialized_end=919,
)
_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE = _descriptor.Descriptor(
name='BoxEncodingsClipRange',
full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.BoxEncodingsClipRange',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='min', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.BoxEncodingsClipRange.min', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='max', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.BoxEncodingsClipRange.max', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1544,
serialized_end=1593,
)
_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR = _descriptor.Descriptor(
name='WeightSharedConvolutionalBoxPredictor',
full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='conv_hyperparams', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.conv_hyperparams', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_layers_before_predictor', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.num_layers_before_predictor', index=1,
number=4, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='depth', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.depth', index=2,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='kernel_size', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.kernel_size', index=3,
number=7, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=3,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='box_code_size', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.box_code_size', index=4,
number=8, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=4,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='class_prediction_bias_init', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.class_prediction_bias_init', index=5,
number=10, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_dropout', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.use_dropout', index=6,
number=11, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dropout_keep_probability', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.dropout_keep_probability', index=7,
number=12, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.8),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='share_prediction_tower', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.share_prediction_tower', index=8,
number=13, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_depthwise', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.use_depthwise', index=9,
number=14, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='score_converter', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.score_converter', index=10,
number=16, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='box_encodings_clip_range', full_name='object_detection.protos.WeightSharedConvolutionalBoxPredictor.box_encodings_clip_range', index=11,
number=17, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE, ],
enum_types=[
_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_SCORECONVERTER,
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=922,
serialized_end=1638,
)
_MASKRCNNBOXPREDICTOR = _descriptor.Descriptor(
name='MaskRCNNBoxPredictor',
full_name='object_detection.protos.MaskRCNNBoxPredictor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='fc_hyperparams', full_name='object_detection.protos.MaskRCNNBoxPredictor.fc_hyperparams', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_dropout', full_name='object_detection.protos.MaskRCNNBoxPredictor.use_dropout', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dropout_keep_probability', full_name='object_detection.protos.MaskRCNNBoxPredictor.dropout_keep_probability', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.5),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='box_code_size', full_name='object_detection.protos.MaskRCNNBoxPredictor.box_code_size', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=4,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='conv_hyperparams', full_name='object_detection.protos.MaskRCNNBoxPredictor.conv_hyperparams', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='predict_instance_masks', full_name='object_detection.protos.MaskRCNNBoxPredictor.predict_instance_masks', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mask_prediction_conv_depth', full_name='object_detection.protos.MaskRCNNBoxPredictor.mask_prediction_conv_depth', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=256,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='predict_keypoints', full_name='object_detection.protos.MaskRCNNBoxPredictor.predict_keypoints', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mask_height', full_name='object_detection.protos.MaskRCNNBoxPredictor.mask_height', index=8,
number=9, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=15,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mask_width', full_name='object_detection.protos.MaskRCNNBoxPredictor.mask_width', index=9,
number=10, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=15,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='mask_prediction_num_conv_layers', full_name='object_detection.protos.MaskRCNNBoxPredictor.mask_prediction_num_conv_layers', index=10,
number=11, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=2,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='masks_are_class_agnostic', full_name='object_detection.protos.MaskRCNNBoxPredictor.masks_are_class_agnostic', index=11,
number=12, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='share_box_across_classes', full_name='object_detection.protos.MaskRCNNBoxPredictor.share_box_across_classes', index=12,
number=13, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='convolve_then_upsample_masks', full_name='object_detection.protos.MaskRCNNBoxPredictor.convolve_then_upsample_masks', index=13,
number=14, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1641,
serialized_end=2216,
)
_RFCNBOXPREDICTOR = _descriptor.Descriptor(
name='RfcnBoxPredictor',
full_name='object_detection.protos.RfcnBoxPredictor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='conv_hyperparams', full_name='object_detection.protos.RfcnBoxPredictor.conv_hyperparams', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_spatial_bins_height', full_name='object_detection.protos.RfcnBoxPredictor.num_spatial_bins_height', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=3,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='num_spatial_bins_width', full_name='object_detection.protos.RfcnBoxPredictor.num_spatial_bins_width', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=3,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='depth', full_name='object_detection.protos.RfcnBoxPredictor.depth', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1024,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='box_code_size', full_name='object_detection.protos.RfcnBoxPredictor.box_code_size', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=4,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='crop_height', full_name='object_detection.protos.RfcnBoxPredictor.crop_height', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=12,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='crop_width', full_name='object_detection.protos.RfcnBoxPredictor.crop_width', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=12,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=2219,
serialized_end=2468,
)
_BOXPREDICTOR.fields_by_name['convolutional_box_predictor'].message_type = _CONVOLUTIONALBOXPREDICTOR
_BOXPREDICTOR.fields_by_name['mask_rcnn_box_predictor'].message_type = _MASKRCNNBOXPREDICTOR
_BOXPREDICTOR.fields_by_name['rfcn_box_predictor'].message_type = _RFCNBOXPREDICTOR
_BOXPREDICTOR.fields_by_name['weight_shared_convolutional_box_predictor'].message_type = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR
_BOXPREDICTOR.oneofs_by_name['box_predictor_oneof'].fields.append(
_BOXPREDICTOR.fields_by_name['convolutional_box_predictor'])
_BOXPREDICTOR.fields_by_name['convolutional_box_predictor'].containing_oneof = _BOXPREDICTOR.oneofs_by_name['box_predictor_oneof']
_BOXPREDICTOR.oneofs_by_name['box_predictor_oneof'].fields.append(
_BOXPREDICTOR.fields_by_name['mask_rcnn_box_predictor'])
_BOXPREDICTOR.fields_by_name['mask_rcnn_box_predictor'].containing_oneof = _BOXPREDICTOR.oneofs_by_name['box_predictor_oneof']
_BOXPREDICTOR.oneofs_by_name['box_predictor_oneof'].fields.append(
_BOXPREDICTOR.fields_by_name['rfcn_box_predictor'])
_BOXPREDICTOR.fields_by_name['rfcn_box_predictor'].containing_oneof = _BOXPREDICTOR.oneofs_by_name['box_predictor_oneof']
_BOXPREDICTOR.oneofs_by_name['box_predictor_oneof'].fields.append(
_BOXPREDICTOR.fields_by_name['weight_shared_convolutional_box_predictor'])
_BOXPREDICTOR.fields_by_name['weight_shared_convolutional_box_predictor'].containing_oneof = _BOXPREDICTOR.oneofs_by_name['box_predictor_oneof']
_CONVOLUTIONALBOXPREDICTOR.fields_by_name['conv_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS
_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE.containing_type = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR
_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR.fields_by_name['conv_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS
_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR.fields_by_name['score_converter'].enum_type = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_SCORECONVERTER
_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR.fields_by_name['box_encodings_clip_range'].message_type = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE
_WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_SCORECONVERTER.containing_type = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR
_MASKRCNNBOXPREDICTOR.fields_by_name['fc_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS
_MASKRCNNBOXPREDICTOR.fields_by_name['conv_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS
_RFCNBOXPREDICTOR.fields_by_name['conv_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS
DESCRIPTOR.message_types_by_name['BoxPredictor'] = _BOXPREDICTOR
DESCRIPTOR.message_types_by_name['ConvolutionalBoxPredictor'] = _CONVOLUTIONALBOXPREDICTOR
DESCRIPTOR.message_types_by_name['WeightSharedConvolutionalBoxPredictor'] = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR
DESCRIPTOR.message_types_by_name['MaskRCNNBoxPredictor'] = _MASKRCNNBOXPREDICTOR
DESCRIPTOR.message_types_by_name['RfcnBoxPredictor'] = _RFCNBOXPREDICTOR
BoxPredictor = _reflection.GeneratedProtocolMessageType('BoxPredictor', (_message.Message,), dict(
DESCRIPTOR = _BOXPREDICTOR,
__module__ = 'object_detection.protos.box_predictor_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.BoxPredictor)
))
_sym_db.RegisterMessage(BoxPredictor)
ConvolutionalBoxPredictor = _reflection.GeneratedProtocolMessageType('ConvolutionalBoxPredictor', (_message.Message,), dict(
DESCRIPTOR = _CONVOLUTIONALBOXPREDICTOR,
__module__ = 'object_detection.protos.box_predictor_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.ConvolutionalBoxPredictor)
))
_sym_db.RegisterMessage(ConvolutionalBoxPredictor)
WeightSharedConvolutionalBoxPredictor = _reflection.GeneratedProtocolMessageType('WeightSharedConvolutionalBoxPredictor', (_message.Message,), dict(
BoxEncodingsClipRange = _reflection.GeneratedProtocolMessageType('BoxEncodingsClipRange', (_message.Message,), dict(
DESCRIPTOR = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR_BOXENCODINGSCLIPRANGE,
__module__ = 'object_detection.protos.box_predictor_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.WeightSharedConvolutionalBoxPredictor.BoxEncodingsClipRange)
))
,
DESCRIPTOR = _WEIGHTSHAREDCONVOLUTIONALBOXPREDICTOR,
__module__ = 'object_detection.protos.box_predictor_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.WeightSharedConvolutionalBoxPredictor)
))
_sym_db.RegisterMessage(WeightSharedConvolutionalBoxPredictor)
_sym_db.RegisterMessage(WeightSharedConvolutionalBoxPredictor.BoxEncodingsClipRange)
MaskRCNNBoxPredictor = _reflection.GeneratedProtocolMessageType('MaskRCNNBoxPredictor', (_message.Message,), dict(
DESCRIPTOR = _MASKRCNNBOXPREDICTOR,
__module__ = 'object_detection.protos.box_predictor_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.MaskRCNNBoxPredictor)
))
_sym_db.RegisterMessage(MaskRCNNBoxPredictor)
RfcnBoxPredictor = _reflection.GeneratedProtocolMessageType('RfcnBoxPredictor', (_message.Message,), dict(
DESCRIPTOR = _RFCNBOXPREDICTOR,
__module__ = 'object_detection.protos.box_predictor_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.RfcnBoxPredictor)
))
_sym_db.RegisterMessage(RfcnBoxPredictor)
# @@protoc_insertion_point(module_scope)
| true | true |
f71c8eebfa69486d737645f22b74bf824a91eeee | 1,479 | py | Python | beerxml/picobrew_parser.py | rryanburton/PicobrewServerDjango | 24e616677a8543638204889bfe19062b9d16c7ae | [
"MIT"
] | 5 | 2017-07-25T04:32:47.000Z | 2020-10-10T14:27:16.000Z | beerxml/picobrew_parser.py | rryanburton/PicobrewServerDjango | 24e616677a8543638204889bfe19062b9d16c7ae | [
"MIT"
] | 3 | 2020-02-11T23:53:22.000Z | 2021-06-10T19:29:52.000Z | beerxml/picobrew_parser.py | rryanburton/PicobrewServerDjango | 24e616677a8543638204889bfe19062b9d16c7ae | [
"MIT"
] | 1 | 2018-12-23T08:57:34.000Z | 2018-12-23T08:57:34.000Z | from pybeerxml.parser import Parser
from .picobrew_recipe import PicoBrewRecipe
from .picobrew_program_step import PicoBrewProgramStep
from xml.etree import ElementTree
class PicoBrewParser(Parser):
def parse(self, xml_file):
# Parse the BeerXML file
recipes = super(PicoBrewParser, self).parse(xml_file)
# include the recipe filename in the parsed recipes for id creation
for recipe in recipes:
recipe.filename = xml_file
# Cast all recipes to PicoBrewRcipes
recipes = [PicoBrewRecipe(recipe) for recipe in recipes]
# Parse the PicoBrew Program Steps
programs = self.parse_program_steps(xml_file)
# merge the parsed recipes with the PicoBrew program steps
for (recipe, steps) in zip(recipes, programs):
recipe.steps = steps
return recipes
def parse_program_steps(self, xml_file):
programs = []
with open(xml_file, "rt") as f:
tree = ElementTree.parse(f)
for programNode in tree.iterfind(".//PROGRAM"):
steps = []
for stepNode in list(programNode):
tag_name = self.to_lower(stepNode.tag)
if tag_name == "step":
step = PicoBrewProgramStep()
self.nodes_to_object(stepNode, step)
steps.append(step)
programs.append(steps)
return programs
| 29.58 | 75 | 0.610548 | from pybeerxml.parser import Parser
from .picobrew_recipe import PicoBrewRecipe
from .picobrew_program_step import PicoBrewProgramStep
from xml.etree import ElementTree
class PicoBrewParser(Parser):
    """BeerXML parser extended with PicoBrew-specific PROGRAM parsing."""

    def parse(self, xml_file):
        """Parse *xml_file*; return PicoBrewRecipe objects with steps attached."""
        # Standard BeerXML parsing via the pybeerxml base class.
        recipes = super(PicoBrewParser, self).parse(xml_file)
        # Record the source filename on each recipe (used for id creation).
        for recipe in recipes:
            recipe.filename = xml_file
        # Wrap the plain recipes in the PicoBrew-aware type.
        recipes = [PicoBrewRecipe(recipe) for recipe in recipes]
        # Pair recipes positionally with the parsed program steps
        # (presumably one PROGRAM per RECIPE, in order -- verify against data).
        programs = self.parse_program_steps(xml_file)
        for (recipe, steps) in zip(recipes, programs):
            recipe.steps = steps
        return recipes

    def parse_program_steps(self, xml_file):
        """Return a list of step lists, one per PROGRAM element in the file."""
        programs = []
        with open(xml_file, "rt") as f:
            tree = ElementTree.parse(f)
            for programNode in tree.iterfind(".//PROGRAM"):
                steps = []
                for stepNode in list(programNode):
                    # to_lower / nodes_to_object are inherited pybeerxml helpers.
                    tag_name = self.to_lower(stepNode.tag)
                    if tag_name == "step":
                        step = PicoBrewProgramStep()
                        self.nodes_to_object(stepNode, step)
                        steps.append(step)
                programs.append(steps)
        return programs
| true | true |
f71c8f6aa2a62ab271f35e5e3080e58ef457c6cb | 782 | py | Python | examples/kmeansHeightWeight.py | Duane321/pyprobml | 6d0ba29f22dc7fec9dfc73788bc5520e97663bdb | [
"MIT"
] | null | null | null | examples/kmeansHeightWeight.py | Duane321/pyprobml | 6d0ba29f22dc7fec9dfc73788bc5520e97663bdb | [
"MIT"
] | null | null | null | examples/kmeansHeightWeight.py | Duane321/pyprobml | 6d0ba29f22dc7fec9dfc73788bc5520e97663bdb | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import matplotlib.pyplot as pl
import numpy as np
from utils import util
from sklearn.cluster import KMeans
from utils.util import save_fig
# Cluster the height/weight data with k-means for k = 1..3 and save a
# scatter plot of the clustering for each k.
data = util.load_mat('heightWeight/heightWeight')
data = data['heightWeightData']  # assumes columns [label, height, weight] -- TODO confirm
markers = 'Dox'  # one marker per cluster index (supports up to 3 clusters)
colors = 'rgb'   # one color per cluster index
for i in range(3):
    KM_model = KMeans(init='k-means++', n_clusters=i+1)
    labels = KM_model.fit_predict(data[:, [1, 2]])
    labels_unique = np.unique(labels)
    # Fix: the figure handle was bound to an unused local; keep only the
    # side effect of selecting figure *i*.
    pl.figure(i)
    for j in range(len(labels_unique)):
        data_chosen = data[labels == labels_unique[j]]
        pl.scatter(data_chosen[:, 1], data_chosen[:, 2],
                   marker=markers[j],
                   color=colors[j])
    pl.title('k = %s' % (i+1))
    save_fig('kmeansHeightWeight_%s.png' % (i+1))
    pl.show()
| 28.962963 | 56 | 0.644501 |
import matplotlib.pyplot as pl
import numpy as np
from utils import util
from sklearn.cluster import KMeans
from utils.util import save_fig
# Cluster the height/weight data with k-means for k = 1..3 and save a
# scatter plot of the clustering for each k.
data = util.load_mat('heightWeight/heightWeight')
data = data['heightWeightData']  # assumes columns [label, height, weight] -- TODO confirm
markers = 'Dox'  # one marker per cluster index (supports up to 3 clusters)
colors = 'rgb'   # one color per cluster index
for i in range(3):
    KM_model = KMeans(init='k-means++', n_clusters=i+1)
    labels = KM_model.fit_predict(data[:, [1, 2]])
    labels_unique = np.unique(labels)
    fig = pl.figure(i)  # NOTE(review): handle is unused; only the figure switch matters
    for j in range(len(labels_unique)):
        data_chosen = data[labels == labels_unique[j]]
        pl.scatter(data_chosen[:, 1], data_chosen[:, 2],
                   marker=markers[j],
                   color=colors[j])
    pl.title('k = %s' % (i+1))
    save_fig('kmeansHeightWeight_%s.png' % (i+1))
    pl.show()
| true | true |
f71c8fc259c0697f53a0ebace9290263e205e66d | 2,867 | py | Python | testsuite/N806.py | ramnes/pep8-naming | 9d2004fcd28d2434bcceeed843cd353a2e8808e2 | [
"MIT"
] | null | null | null | testsuite/N806.py | ramnes/pep8-naming | 9d2004fcd28d2434bcceeed843cd353a2e8808e2 | [
"MIT"
] | null | null | null | testsuite/N806.py | ramnes/pep8-naming | 9d2004fcd28d2434bcceeed843cd353a2e8808e2 | [
"MIT"
] | null | null | null | #: Okay
# pep8-naming test fixture: each "#:" marker states the expectation for the
# snippet below it ("Okay" = no error, "N806" = that error anywhere in the
# case). The naming here is intentionally bad -- do not "fix" these snippets.
def test():
    good = 1
#: Okay
def test():
    def test2():
        good = 1
#: Okay
GOOD = 1
#: Okay
class Test(object):
    GOOD = 1
#: N806
def test():
    Bad = 1
#: N806
def test():
    VERY = 2
#: N806
def test():
    def test2():
        class Foo(object):
            def test3(self):
                Bad = 3
#: Okay(--ignore-names=Bad)
def test():
    Bad = 1
#: Okay
def good():
    global Bad
    Bad = 1
#: N806
def bad():
    global Bad
    def foo():
        Bad = 1
#: Okay
def test():
    # namedtuples are often CamelCased since we treat them a bit like classes
    import collections
    Thing = collections.namedtuple('Thing', 'a b c')
    from collections import namedtuple
    ThingTwo = namedtuple('ThingTwo', 'a b c')
#: N806
def bad():
    # Currently don't support aliased imports of namedtuple
    from collections import namedtuple as nt
    Thing = nt('Thing', 'a b c')
#: N806
def unpacking_into_tuple():
    Var1, Var2 = range(2)
#: Okay
def unpacking_into_tuple():
    var1, var2 = range(2)
#: N806
def unpacking_into_list():
    [Var1, Var2] = range(2)
#: Okay
def unpacking_into_list():
    [var1, var2] = range(2)
#: Okay
a, [b, c] = [1, [2, 3]]
#: N806
def recursive_unpack():
    a, [bB, c] = [1, [2, 3]]
#: Okay
def assingnment_to_attribute():
    a.b = 1
#: N806
def f():
    with Foo(), Bar() as Bad:
        pass
#: Okay
def f():
    with FOO() as foo, bar() as bar:
        pass
#: Okay
def f():
    with suppress(E):
        pass
    with contextlib.suppress(E):
        pass
#: Okay
with Test() as bar:
    pass
#: N806
def f():
    with Test() as BAD:
        pass
#: Okay
def f():
    with C() as [a, b, c]:
        pass
#: N806
def f():
    with C() as [a, Bad, c]:
        pass
#: N806
def f():
    with C() as (a, b, baD):
        pass
#: Okay
# NOTE: several cases from here on use positional expectations
# ("N806:LINE:COL"); inserting or removing lines inside those cases
# would shift the expected line/column and break the fixture.
def f():
    for i in iterator:
        pass
#: N806:2:9
def f():
    for Bad in iterator:
        pass
#: Okay
def f():
    for a, b in enumerate(iterator):
        pass
#: N806
def f():
    for index, ITEM in enumerate(iterator):
        pass
#: N806
def f():
    try:
        f()
    except Exception as Bad:
        pass
#: Okay
def f():
    try:
        f()
    except Exception as good:
        pass
#: Okay
def f():
    try:
        f()
    except:
        pass
#: Okay
def f():
    try:
        f()
    except good:
        pass
#: N806
def f():
    try:
        f()
    except RuntimeError as good:
        pass
    except IndexError as BAD:
        pass
#: Okay
def f():
    return [i for i in range(3)]
#: N806:2:22
def t():
    return [ITEM for ITEM in range(3)]
#: N806:2:24
def d():
    return {AA: BB for AA, BB in {}}
#: N806:2:22
def s():
    return {Item for Item in range(3)}
#: N806:2:57
def n():
    return (good + BAD for good in range(3) if good for BAD in range(3) if BAD)
#: N806:2:26
def e():
    return tuple(BaD for BaD in range(2))
| 16.668605 | 79 | 0.536798 |
# NOTE(review): this looks like a comment-stripped copy of the pep8-naming
# N806 fixture -- the "#:" expectation markers for the cases above the first
# surviving marker were removed, so this copy is no longer a usable fixture.
# Code kept byte-identical; restore the markers from the original file.
def test():
    good = 1
def test():
    def test2():
        good = 1
GOOD = 1
class Test(object):
    GOOD = 1
def test():
    Bad = 1
def test():
    VERY = 2
def test():
    def test2():
        class Foo(object):
            def test3(self):
                Bad = 3
def test():
    Bad = 1
def good():
    global Bad
    Bad = 1
def bad():
    global Bad
    def foo():
        Bad = 1
def test():
    import collections
    Thing = collections.namedtuple('Thing', 'a b c')
    from collections import namedtuple
    ThingTwo = namedtuple('ThingTwo', 'a b c')
def bad():
    from collections import namedtuple as nt
    Thing = nt('Thing', 'a b c')
#: N806
def unpacking_into_tuple():
    Var1, Var2 = range(2)
#: Okay
def unpacking_into_tuple():
    var1, var2 = range(2)
#: N806
def unpacking_into_list():
    [Var1, Var2] = range(2)
#: Okay
def unpacking_into_list():
    [var1, var2] = range(2)
#: Okay
a, [b, c] = [1, [2, 3]]
#: N806
def recursive_unpack():
    a, [bB, c] = [1, [2, 3]]
#: Okay
def assingnment_to_attribute():
    a.b = 1
#: N806
def f():
    with Foo(), Bar() as Bad:
        pass
#: Okay
def f():
    with FOO() as foo, bar() as bar:
        pass
#: Okay
def f():
    with suppress(E):
        pass
    with contextlib.suppress(E):
        pass
#: Okay
with Test() as bar:
    pass
#: N806
def f():
    with Test() as BAD:
        pass
#: Okay
def f():
    with C() as [a, b, c]:
        pass
#: N806
def f():
    with C() as [a, Bad, c]:
        pass
#: N806
def f():
    with C() as (a, b, baD):
        pass
#: Okay
# NOTE: cases below use positional expectations ("N806:LINE:COL");
# do not insert or remove lines inside them.
def f():
    for i in iterator:
        pass
#: N806:2:9
def f():
    for Bad in iterator:
        pass
#: Okay
def f():
    for a, b in enumerate(iterator):
        pass
#: N806
def f():
    for index, ITEM in enumerate(iterator):
        pass
#: N806
def f():
    try:
        f()
    except Exception as Bad:
        pass
#: Okay
def f():
    try:
        f()
    except Exception as good:
        pass
#: Okay
def f():
    try:
        f()
    except:
        pass
#: Okay
def f():
    try:
        f()
    except good:
        pass
#: N806
def f():
    try:
        f()
    except RuntimeError as good:
        pass
    except IndexError as BAD:
        pass
#: Okay
def f():
    return [i for i in range(3)]
#: N806:2:22
def t():
    return [ITEM for ITEM in range(3)]
#: N806:2:24
def d():
    return {AA: BB for AA, BB in {}}
#: N806:2:22
def s():
    return {Item for Item in range(3)}
#: N806:2:57
def n():
    return (good + BAD for good in range(3) if good for BAD in range(3) if BAD)
#: N806:2:26
def e():
    return tuple(BaD for BaD in range(2))
| true | true |
f71c91a3f54d9b713dc013f6441b683eae4ab3e6 | 6,683 | py | Python | graphnas_variants/micro_graphnas/micro_search_space.py | mhnnunes/nas_gnn | 91092acfee9fdbbef3e22252040b80aa96143311 | [
"Apache-2.0"
] | 13 | 2020-07-29T12:45:22.000Z | 2022-03-07T06:26:02.000Z | graphnas_variants/micro_graphnas/micro_search_space.py | mhnnunes/nas_gnn | 91092acfee9fdbbef3e22252040b80aa96143311 | [
"Apache-2.0"
] | null | null | null | graphnas_variants/micro_graphnas/micro_search_space.py | mhnnunes/nas_gnn | 91092acfee9fdbbef3e22252040b80aa96143311 | [
"Apache-2.0"
] | 3 | 2020-09-27T06:43:17.000Z | 2020-11-26T08:43:35.000Z | import torch
import torch.nn.functional as F
from torch.nn import Module
from torch_geometric.nn.conv import *
# Candidate message-passing (GNN) operators for the micro search space.
gnn_list = [
    "gat_8",  # GAT with 8 heads
    "gat_6",  # GAT with 6 heads
    "gat_4",  # GAT with 4 heads
    "gat_2",  # GAT with 2 heads
    "gat_1",  # GAT with 1 head
    "gcn",  # GCN
    "cheb",  # chebnet
    "sage",  # sage
    "arma",  # ARMA convolution
    "sg",  # simplifying gcn
    "linear",  # skip connection
    "zero",  # skip connection
]
# Candidate activation names; act_map() supports a few more (relu6,
# softplus, leaky_relu) that are currently commented out of the search.
act_list = [
    # "sigmoid", "tanh", "relu", "linear",
    #  "softplus", "leaky_relu", "relu6", "elu"
    "sigmoid", "tanh", "relu", "linear", "elu"
]
def act_map(act):
    """Map an activation name to the corresponding callable.

    "linear" yields the identity function; every other supported name
    resolves to the matching torch activation.

    :param act: activation name (see ``act_list`` for the searched subset)
    :return: a callable taking and returning a tensor
    :raises ValueError: for an unknown name (ValueError subclasses the
        previously raised bare ``Exception``, so existing handlers still work)
    """
    if act == "linear":
        return lambda x: x
    activations = {
        "elu": F.elu,
        "sigmoid": torch.sigmoid,
        "tanh": torch.tanh,
        "relu": torch.nn.functional.relu,
        "relu6": torch.nn.functional.relu6,
        "softplus": torch.nn.functional.softplus,
        "leaky_relu": torch.nn.functional.leaky_relu,
    }
    try:
        return activations[act]
    except KeyError:
        raise ValueError("wrong activate function: %r" % act)
def gnn_map(gnn_name, in_dim, out_dim, concat=False, bias=True) -> Module:
    '''
    Build the graph-convolution layer named *gnn_name*.

    :param gnn_name: operator name from ``gnn_list`` ("gat"/"gated" also accepted)
    :param in_dim: input feature dimension
    :param out_dim: output feature dimension
    :param concat: for gat, concat multi-head output or not
    :param bias: whether the layer uses a bias term
    :return: GNN model
    :raises ValueError: for an unrecognized name (previously the function
        fell through and silently returned ``None``, which only failed later)
    '''
    if gnn_name == "gat_8":
        return GATConv(in_dim, out_dim, 8, concat=concat, bias=bias)
    elif gnn_name == "gat_6":
        return GATConv(in_dim, out_dim, 6, concat=concat, bias=bias)
    elif gnn_name == "gat_4":
        return GATConv(in_dim, out_dim, 4, concat=concat, bias=bias)
    elif gnn_name == "gat_2":
        return GATConv(in_dim, out_dim, 2, concat=concat, bias=bias)
    elif gnn_name in ["gat_1", "gat"]:
        return GATConv(in_dim, out_dim, 1, concat=concat, bias=bias)
    elif gnn_name == "gcn":
        return GCNConv(in_dim, out_dim)
    elif gnn_name == "cheb":
        return ChebConv(in_dim, out_dim, K=2, bias=bias)
    elif gnn_name == "sage":
        return SAGEConv(in_dim, out_dim, bias=bias)
    elif gnn_name == "gated":
        return GatedGraphConv(in_dim, out_dim, bias=bias)
    elif gnn_name == "arma":
        return ARMAConv(in_dim, out_dim, bias=bias)
    elif gnn_name == "sg":
        return SGConv(in_dim, out_dim, bias=bias)
    elif gnn_name == "linear":
        return LinearConv(in_dim, out_dim, bias=bias)
    elif gnn_name == "zero":
        return ZeroConv(in_dim, out_dim, bias=bias)
    raise ValueError("unsupported gnn type: %r" % gnn_name)
class LinearConv(Module):
    """Dense linear layer with a GNN-style ``forward`` signature.

    The graph arguments (``edge_index``/``edge_weight``) are accepted but
    ignored, so this layer can stand in for a graph convolution as the
    "linear" (skip-connection-like) operator of the search space.
    """

    def __init__(self, in_channels, out_channels, bias=True):
        super(LinearConv, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.linear = torch.nn.Linear(in_channels, out_channels, bias)

    def forward(self, x, edge_index, edge_weight=None):
        # Graph structure is intentionally ignored.
        return self.linear(x)

    def __repr__(self):
        return '{}({}, {})'.format(
            self.__class__.__name__, self.in_channels, self.out_channels)
class ZeroConv(Module):
    """Zero operator: returns an all-zero feature matrix of width
    ``out_channels``, representing the "no connection" search choice."""

    def __init__(self,
                 in_channels,
                 out_channels,
                 bias=True):
        super(ZeroConv, self).__init__()
        # Bug fix: in_channels/out_channels were never stored, so __repr__
        # raised AttributeError. out_dim is kept for backward compatibility.
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.out_dim = out_channels

    def forward(self, x, edge_index, edge_weight=None):
        # One all-zero row per input node, placed on the input's device.
        return torch.zeros([x.size(0), self.out_dim]).to(x.device)

    def __repr__(self):
        return '{}({}, {})'.format(self.__class__.__name__, self.in_channels,
                                   self.out_channels)
class SearchSpace(object):
    """Fixed-size search-space description for the micro GraphNAS controller."""

    def __init__(self, search_space=None):
        """Use *search_space* if given (truthy), otherwise build the default."""
        if search_space:
            self.search_space = search_space
            return
        # Default space: architecture choices followed by hyper-parameters.
        self.search_space = {
            "act": act_list,  # activation function
            "gnn": gnn_list,  # message-passing operator
            # 0 selects the history state, 1 the current one; every layer
            # consumes two inputs.
            "self_index": [0, 1],
            "concat_type": ["add", "product", "concat"],
            "learning_rate": [1e-2, 1e-3, 1e-4, 5e-3, 5e-4],
            "dropout": [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
            "weight_decay": [0, 1e-3, 1e-4, 1e-5, 5e-5, 5e-4],
            "hidden_unit": [8, 16, 32, 64, 128, 256, 512],
        }

    def get_search_space(self):
        return self.search_space

    @staticmethod
    def generate_action_list(cell=4):
        """Return the controller's action sequence for *cell* cells."""
        choices = []
        for _ in range(cell):
            choices.extend(["self_index", "gnn"])
        choices.extend(["act", "concat_type"])
        return choices
class IncrementSearchSpace(object):
    """Search space whose per-layer input choices grow with layer depth."""

    def __init__(self, search_space=None, max_cell=10):
        """Use *search_space* if given (truthy), otherwise build the default."""
        if search_space:
            self.search_space = search_space
            return
        space = {
            "act": act_list,  # activation function
            "gnn": gnn_list,  # message-passing operator
        }
        # Layer i may read any of 2 + i earlier states: the history state,
        # the current state, and the outputs of the preceding layers.
        for layer in range(max_cell):
            space["self_index_%d" % layer] = list(range(2 + layer))
        space["concat_type"] = ["add", "product", "concat"]
        space["learning_rate"] = [1e-2, 1e-3, 1e-4, 5e-3, 5e-4]
        space["dropout"] = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
        space["weight_decay"] = [0, 1e-3, 1e-4, 1e-5, 5e-5, 5e-4]
        space["hidden_unit"] = [8, 16, 32, 64, 128, 256, 512]
        self.search_space = space

    def get_search_space(self):
        return self.search_space

    @staticmethod
    def generate_action_list(cell=4):
        """Return the controller's action sequence for *cell* cells."""
        choices = []
        for layer in range(cell):
            choices.extend(["self_index_%d" % layer, "gnn"])
        choices.extend(["act", "concat_type"])
        return choices
if __name__ == "__main__":
    # Smoke test: print the default incremental action list and search space.
    obj = IncrementSearchSpace()
    print(obj.generate_action_list())
    print(obj.get_search_space())
| 34.448454 | 79 | 0.5511 | import torch
import torch.nn.functional as F
from torch.nn import Module
from torch_geometric.nn.conv import *
# Candidate message-passing (GNN) operators for the micro search space.
gnn_list = [
    "gat_8",  # GAT with 8 heads
    "gat_6",  # GAT with 6 heads
    "gat_4",  # GAT with 4 heads
    "gat_2",  # GAT with 2 heads
    "gat_1",  # GAT with 1 head
    "gcn",
    "cheb",  # Chebyshev convolution
    "sage",  # GraphSAGE
    "arma",  # ARMA convolution
    "sg",  # simplified GCN
    "linear",  # dense skip connection
    "zero",  # no-op / zero output
]
# Candidate activation names; act_map() supports a few more (relu6,
# softplus, leaky_relu) that are not searched here.
act_list = [
    "sigmoid", "tanh", "relu", "linear", "elu"
]
def act_map(act):
    """Return the activation callable named *act*; identity for "linear".

    Raises a bare ``Exception`` for names outside the supported set.
    """
    if act == "linear":
        return lambda x: x
    elif act == "elu":
        return F.elu
    elif act == "sigmoid":
        return torch.sigmoid
    elif act == "tanh":
        return torch.tanh
    elif act == "relu":
        return torch.nn.functional.relu
    elif act == "relu6":
        return torch.nn.functional.relu6
    elif act == "softplus":
        return torch.nn.functional.softplus
    elif act == "leaky_relu":
        return torch.nn.functional.leaky_relu
    else:
        raise Exception("wrong activate function")
def gnn_map(gnn_name, in_dim, out_dim, concat=False, bias=True) -> Module:
    """Build the graph-convolution layer named *gnn_name*.

    :param gnn_name: operator name (see ``gnn_list``; "gat"/"gated" also accepted)
    :param in_dim: input feature dimension
    :param out_dim: output feature dimension
    :param concat: for GAT, whether to concatenate the multi-head outputs
    :param bias: whether the layer uses a bias term
    :return: the layer instance, or ``None`` for an unrecognized name
        (NOTE(review): no else branch -- unknown names fall through silently)
    """
    if gnn_name == "gat_8":
        return GATConv(in_dim, out_dim, 8, concat=concat, bias=bias)
    elif gnn_name == "gat_6":
        return GATConv(in_dim, out_dim, 6, concat=concat, bias=bias)
    elif gnn_name == "gat_4":
        return GATConv(in_dim, out_dim, 4, concat=concat, bias=bias)
    elif gnn_name == "gat_2":
        return GATConv(in_dim, out_dim, 2, concat=concat, bias=bias)
    elif gnn_name in ["gat_1", "gat"]:
        return GATConv(in_dim, out_dim, 1, concat=concat, bias=bias)
    elif gnn_name == "gcn":
        return GCNConv(in_dim, out_dim)
    elif gnn_name == "cheb":
        return ChebConv(in_dim, out_dim, K=2, bias=bias)
    elif gnn_name == "sage":
        return SAGEConv(in_dim, out_dim, bias=bias)
    elif gnn_name == "gated":
        return GatedGraphConv(in_dim, out_dim, bias=bias)
    elif gnn_name == "arma":
        return ARMAConv(in_dim, out_dim, bias=bias)
    elif gnn_name == "sg":
        return SGConv(in_dim, out_dim, bias=bias)
    elif gnn_name == "linear":
        return LinearConv(in_dim, out_dim, bias=bias)
    elif gnn_name == "zero":
        return ZeroConv(in_dim, out_dim, bias=bias)
class LinearConv(Module):
    """Dense linear layer with a GNN-style forward signature; the graph
    arguments are accepted but ignored (the "linear" skip operator)."""
    def __init__(self,
                 in_channels,
                 out_channels,
                 bias=True):
        super(LinearConv, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.linear = torch.nn.Linear(in_channels, out_channels, bias)
    def forward(self, x, edge_index, edge_weight=None):
        # Graph structure is intentionally unused.
        return self.linear(x)
    def __repr__(self):
        return '{}({}, {})'.format(self.__class__.__name__, self.in_channels,
                                   self.out_channels)
class ZeroConv(Module):
    """Zero operator: returns an all-zero matrix of width out_channels."""
    def __init__(self,
                 in_channels,
                 out_channels,
                 bias=True):
        super(ZeroConv, self).__init__()
        self.out_dim = out_channels
    def forward(self, x, edge_index, edge_weight=None):
        # One all-zero row per input node, on the input's device.
        return torch.zeros([x.size(0), self.out_dim]).to(x.device)
    def __repr__(self):
        # NOTE(review): in_channels/out_channels are never assigned in
        # __init__, so calling repr() on this class raises AttributeError.
        return '{}({}, {})'.format(self.__class__.__name__, self.in_channels,
                                   self.out_channels)
class SearchSpace(object):
    """Fixed-size search-space description for the micro GraphNAS controller."""
    def __init__(self, search_space=None):
        # A truthy argument replaces the default space entirely.
        if search_space:
            self.search_space = search_space
        else:
            self.search_space = {}
            self.search_space["act"] = act_list  # activation function
            self.search_space["gnn"] = gnn_list  # message-passing operator
            # 0 selects the history state, 1 the current one; each layer
            # consumes two inputs.
            self.search_space["self_index"] = [0, 1]
            self.search_space["concat_type"] = ["add",
                                                "product",
                                                "concat"]
            self.search_space['learning_rate'] = [1e-2, 1e-3, 1e-4, 5e-3, 5e-4]
            self.search_space['dropout'] = [0.0, 0.1, 0.2, 0.3, 0.4,
                                            0.5, 0.6, 0.7, 0.8, 0.9]
            self.search_space['weight_decay'] = [0, 1e-3, 1e-4,
                                                 1e-5, 5e-5, 5e-4]
            self.search_space['hidden_unit'] = [8, 16, 32, 64, 128, 256, 512]
        pass
    def get_search_space(self):
        return self.search_space
    @staticmethod
    def generate_action_list(cell=4):
        """Return the controller's action sequence for *cell* cells."""
        action_list = []
        for i in range(cell):
            action_list += ["self_index", "gnn"]
        action_list += ["act", "concat_type"]
        return action_list
class IncrementSearchSpace(object):
    """Search space whose per-layer input choices grow with layer depth."""
    def __init__(self, search_space=None, max_cell=10):
        # A truthy argument replaces the default space entirely.
        if search_space:
            self.search_space = search_space
        else:
            self.search_space = {}
            self.search_space["act"] = act_list  # activation function
            self.search_space["gnn"] = gnn_list  # message-passing operator
            # Layer i may read any of 2 + i earlier states (history/current
            # plus the outputs of the preceding layers).
            for i in range(max_cell):
                self.search_space[f"self_index_{i}"] = list(range(2 + i))
            self.search_space["concat_type"] = ["add",
                                                "product",
                                                "concat"]
            self.search_space['learning_rate'] = [1e-2, 1e-3, 1e-4, 5e-3, 5e-4]
            self.search_space['dropout'] = [0.0, 0.1, 0.2, 0.3, 0.4,
                                            0.5, 0.6, 0.7, 0.8, 0.9]
            self.search_space['weight_decay'] = [0, 1e-3, 1e-4,
                                                 1e-5, 5e-5, 5e-4]
            self.search_space['hidden_unit'] = [8, 16, 32, 64, 128, 256, 512]
        pass
    def get_search_space(self):
        return self.search_space
    @staticmethod
    def generate_action_list(cell=4):
        """Return the controller's action sequence for *cell* cells."""
        action_list = []
        for i in range(cell):
            action_list += [f"self_index_{i}", "gnn"]
        action_list += ["act", "concat_type"]
        return action_list
if __name__ == "__main__":
    # Smoke test: print the default incremental action list and search space.
    obj = IncrementSearchSpace()
    print(obj.generate_action_list())
    print(obj.get_search_space())
| true | true |
f71c93ab4c1c3f762719bbced5d27f4ff685c6fe | 25,712 | py | Python | ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_client.py | runningt/ambari | 7b91469e68a80bfcabe4b79d855caa684bf51378 | [
"Apache-2.0"
] | null | null | null | ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_client.py | runningt/ambari | 7b91469e68a80bfcabe4b79d855caa684bf51378 | [
"Apache-2.0"
] | null | null | null | ambari-server/src/test/python/stacks/2.0.6/YARN/test_yarn_client.py | runningt/ambari | 7b91469e68a80bfcabe4b79d855caa684bf51378 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
import os
from mock.mock import MagicMock, call, patch
from stacks.utils.RMFTestCase import *
from resource_management.libraries.functions import version
from resource_management.libraries.script.script import Script
from resource_management.libraries import functions
origin_exists = os.path.exists
@patch("platform.linux_distribution", new = MagicMock(return_value="Linux"))
@patch.object(os.path, "exists", new=MagicMock(
side_effect=lambda *args: origin_exists(args[0])
if args[0][-2:] == "j2" else True))
class TestYarnClient(RMFTestCase):
COMMON_SERVICES_PACKAGE_DIR = "YARN/2.1.0.2.0/package"
STACK_VERSION = "2.0.6"
  def test_configure_default(self):
    """Run YarnClient "configure" against default.json (unsecured cluster)
    and verify, in order, every resource the scripts are expected to manage.

    NOTE: octal literals like 0644 mean this module targets Python 2.
    """
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/yarn_client.py",
                       classname = "YarnClient",
                       command = "configure",
                       config_file="default.json",
                       stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    # PID and log directories for the yarn and mapred service users.
    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
                              owner = 'yarn',
                              group = 'hadoop',
                              create_parents = True,
                              cd_access = 'a',
                              )
    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn/yarn',
                              owner = 'yarn',
                              group = 'hadoop',
                              create_parents = True,
                              cd_access = 'a',
                              )
    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/yarn',
                              owner = 'yarn',
                              group = 'hadoop',
                              create_parents = True,
                              cd_access = 'a',
                              )
    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce',
                              owner = 'mapred',
                              group = 'hadoop',
                              create_parents = True,
                              cd_access = 'a',
                              )
    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce/mapred',
                              owner = 'mapred',
                              group = 'hadoop',
                              create_parents = True,
                              cd_access = 'a',
                              )
    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce',
                              owner = 'mapred',
                              group = 'hadoop',
                              create_parents = True,
                              cd_access = 'a',
                              )
    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce/mapred',
                              owner = 'mapred',
                              group = 'hadoop',
                              create_parents = True,
                              cd_access = 'a',
                              )
    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
                              owner = 'yarn',
                              group = 'hadoop',
                              create_parents = True,
                              ignore_failures = True,
                              cd_access = 'a',
                              )
    # Hadoop/YARN configuration files rendered from the command JSON.
    self.assertResourceCalled('XmlConfig', 'core-site.xml',
                              owner = 'hdfs',
                              group = 'hadoop',
                              mode = 0644,
                              conf_dir = '/etc/hadoop/conf',
                              configurations = self.getConfig()['configurations']['core-site'],
                              configuration_attributes = self.getConfig()['configuration_attributes']['core-site']
                              )
    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
                              owner = 'hdfs',
                              group = 'hadoop',
                              mode = 0644,
                              conf_dir = '/etc/hadoop/conf',
                              configurations = self.getConfig()['configurations']['hdfs-site'],
                              configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
                              )
    self.assertResourceCalled('XmlConfig', 'mapred-site.xml',
                              owner = 'yarn',
                              group = 'hadoop',
                              mode = 0644,
                              conf_dir = '/etc/hadoop/conf',
                              configurations = self.getConfig()['configurations']['mapred-site'],
                              configuration_attributes = self.getConfig()['configuration_attributes']['mapred-site']
                              )
    self.assertResourceCalled('XmlConfig', 'yarn-site.xml',
                              owner = 'yarn',
                              group = 'hadoop',
                              mode = 0644,
                              conf_dir = '/etc/hadoop/conf',
                              configurations = self.getConfig()['configurations']['yarn-site'],
                              configuration_attributes = self.getConfig()['configuration_attributes']['yarn-site']
                              )
    self.assertResourceCalled('XmlConfig', 'capacity-scheduler.xml',
                              owner = 'yarn',
                              group = 'hadoop',
                              mode = 0644,
                              conf_dir = '/etc/hadoop/conf',
                              configurations = self.getConfig()['configurations']['capacity-scheduler'],
                              configuration_attributes = self.getConfig()['configuration_attributes']['capacity-scheduler']
                              )
    # ulimit and env files rendered from Jinja2 templates / config content.
    self.assertResourceCalled('File', '/etc/security/limits.d/yarn.conf',
                              content = Template('yarn.conf.j2'),
                              mode = 0644,
                              )
    self.assertResourceCalled('File', '/etc/security/limits.d/mapreduce.conf',
                              content = Template('mapreduce.conf.j2'),
                              mode = 0644,
                              )
    self.assertResourceCalled('File', '/etc/hadoop/conf/yarn-env.sh',
                              content = InlineTemplate(self.getConfig()['configurations']['yarn-env']['content']),
                              owner = 'yarn',
                              group = 'hadoop',
                              mode = 0755,
                              )
    # 02050 = setgid only; the secured variant expects 06050 (setuid too).
    self.assertResourceCalled('File', '/usr/lib/hadoop-yarn/bin/container-executor',
                              group = 'hadoop',
                              mode = 02050,
                              )
    self.assertResourceCalled('File', '/etc/hadoop/conf/container-executor.cfg',
                              content = Template('container-executor.cfg.j2'),
                              group = 'hadoop',
                              mode = 0644,
                              )
    self.assertResourceCalled('Directory', '/cgroups_test/cpu',
                              group = 'hadoop',
                              create_parents = True,
                              mode = 0755,
                              cd_access="a"
                              )
    # On an unsecured cluster mapred-env.sh/taskcontroller.cfg are owned by hdfs.
    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred-env.sh',
                              content = InlineTemplate(self.getConfig()['configurations']['mapred-env']['content']),
                              mode = 0755,
                              owner = 'hdfs',
                              )
    self.assertResourceCalled('File', '/etc/hadoop/conf/taskcontroller.cfg',
                              content = Template('taskcontroller.cfg.j2'),
                              owner = 'hdfs',
                              )
    self.assertResourceCalled('XmlConfig', 'mapred-site.xml',
                              owner = 'mapred',
                              group = 'hadoop',
                              conf_dir = '/etc/hadoop/conf',
                              configurations = self.getConfig()['configurations']['mapred-site'],
                              configuration_attributes = self.getConfig()['configuration_attributes']['mapred-site']
                              )
    self.assertResourceCalled('XmlConfig', 'capacity-scheduler.xml',
                              owner = 'hdfs',
                              group = 'hadoop',
                              conf_dir = '/etc/hadoop/conf',
                              configurations = self.getConfig()['configurations']['capacity-scheduler'],
                              configuration_attributes = self.getConfig()['configuration_attributes']['capacity-scheduler']
                              )
    self.assertResourceCalled('File', '/etc/hadoop/conf/fair-scheduler.xml',
                              owner = 'mapred',
                              group = 'hadoop',
                              )
    self.assertResourceCalled('File', '/etc/hadoop/conf/ssl-client.xml.example',
                              owner = 'mapred',
                              group = 'hadoop',
                              )
    self.assertResourceCalled('File', '/etc/hadoop/conf/ssl-server.xml.example',
                              owner = 'mapred',
                              group = 'hadoop',
                              )
    # No further resources may have been created by the scripts.
    self.assertNoMoreResources()
  def test_configure_secured(self):
    """Run YarnClient "configure" against secured.json (Kerberized cluster).

    Same resource sequence as the default test, plus the security-specific
    differences: container-executor gets 06050 (setuid+setgid), the task
    controller files are root-owned, and JAAS login configs are written.
    """
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/yarn_client.py",
                       classname = "YarnClient",
                       command = "configure",
                       config_file="secured.json",
                       stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    # PID and log directories for the yarn and mapred service users.
    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
                              owner = 'yarn',
                              group = 'hadoop',
                              create_parents = True,
                              cd_access = 'a',
                              )
    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn/yarn',
                              owner = 'yarn',
                              group = 'hadoop',
                              create_parents = True,
                              cd_access = 'a',
                              )
    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/yarn',
                              owner = 'yarn',
                              group = 'hadoop',
                              create_parents = True,
                              cd_access = 'a',
                              )
    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce',
                              owner = 'mapred',
                              group = 'hadoop',
                              create_parents = True,
                              cd_access = 'a',
                              )
    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce/mapred',
                              owner = 'mapred',
                              group = 'hadoop',
                              create_parents = True,
                              cd_access = 'a',
                              )
    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce',
                              owner = 'mapred',
                              group = 'hadoop',
                              create_parents = True,
                              cd_access = 'a',
                              )
    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce/mapred',
                              owner = 'mapred',
                              group = 'hadoop',
                              create_parents = True,
                              cd_access = 'a',
                              )
    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
                              owner = 'yarn',
                              group = 'hadoop',
                              create_parents = True,
                              ignore_failures = True,
                              cd_access = 'a',
                              )
    # Hadoop/YARN configuration files rendered from the command JSON.
    self.assertResourceCalled('XmlConfig', 'core-site.xml',
                              owner = 'hdfs',
                              group = 'hadoop',
                              mode = 0644,
                              conf_dir = '/etc/hadoop/conf',
                              configurations = self.getConfig()['configurations']['core-site'],
                              configuration_attributes = self.getConfig()['configuration_attributes']['core-site']
                              )
    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
                              owner = 'hdfs',
                              group = 'hadoop',
                              mode = 0644,
                              conf_dir = '/etc/hadoop/conf',
                              configurations = self.getConfig()['configurations']['hdfs-site'],
                              configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
                              )
    self.assertResourceCalled('XmlConfig', 'mapred-site.xml',
                              owner = 'yarn',
                              group = 'hadoop',
                              mode = 0644,
                              conf_dir = '/etc/hadoop/conf',
                              configurations = self.getConfig()['configurations']['mapred-site'],
                              configuration_attributes = self.getConfig()['configuration_attributes']['mapred-site']
                              )
    self.assertResourceCalled('XmlConfig', 'yarn-site.xml',
                              owner = 'yarn',
                              group = 'hadoop',
                              mode = 0644,
                              conf_dir = '/etc/hadoop/conf',
                              configurations = self.getConfig()['configurations']['yarn-site'],
                              configuration_attributes = self.getConfig()['configuration_attributes']['yarn-site']
                              )
    self.assertResourceCalled('XmlConfig', 'capacity-scheduler.xml',
                              owner = 'yarn',
                              group = 'hadoop',
                              mode = 0644,
                              conf_dir = '/etc/hadoop/conf',
                              configurations = self.getConfig()['configurations']['capacity-scheduler'],
                              configuration_attributes = self.getConfig()['configuration_attributes']['capacity-scheduler']
                              )
    self.assertResourceCalled('File', '/etc/security/limits.d/yarn.conf',
                              content = Template('yarn.conf.j2'),
                              mode = 0644,
                              )
    self.assertResourceCalled('File', '/etc/security/limits.d/mapreduce.conf',
                              content = Template('mapreduce.conf.j2'),
                              mode = 0644,
                              )
    self.assertResourceCalled('File', '/etc/hadoop/conf/yarn-env.sh',
                              content = InlineTemplate(self.getConfig()['configurations']['yarn-env']['content']),
                              owner = 'yarn',
                              group = 'hadoop',
                              mode = 0755,
                              )
    # Secured cluster: container-executor is setuid+setgid (06050 vs 02050).
    self.assertResourceCalled('File', '/usr/lib/hadoop-yarn/bin/container-executor',
                              group = 'hadoop',
                              mode = 06050,
                              )
    self.assertResourceCalled('File', '/etc/hadoop/conf/container-executor.cfg',
                              content = Template('container-executor.cfg.j2'),
                              group = 'hadoop',
                              mode = 0644,
                              )
    self.assertResourceCalled('Directory', '/cgroups_test/cpu',
                              group = 'hadoop',
                              create_parents = True,
                              mode = 0755,
                              cd_access="a"
                              )
    # Secured cluster: task controller files are root-owned.
    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred-env.sh',
                              content = InlineTemplate(self.getConfig()['configurations']['mapred-env']['content']),
                              mode = 0755,
                              owner = 'root',
                              )
    self.assertResourceCalled('File', '/usr/lib/hadoop/sbin/task-controller',
                              owner = 'root',
                              group = 'hadoop',
                              mode = 06050,
                              )
    self.assertResourceCalled('File', '/etc/hadoop/conf/taskcontroller.cfg',
                              content = Template('taskcontroller.cfg.j2'),
                              owner = 'root',
                              group = 'hadoop',
                              mode = 0644,
                              )
    # JAAS login configuration files, only written on secured clusters.
    self.assertResourceCalled('File', '/etc/hadoop/conf/yarn_jaas.conf',
                              content = Template('yarn_jaas.conf.j2'),
                              owner = 'yarn',
                              group = 'hadoop',
                              )
    self.assertResourceCalled('File', '/etc/hadoop/conf/yarn_nm_jaas.conf',
                              content = Template('yarn_nm_jaas.conf.j2'),
                              owner = 'yarn',
                              group = 'hadoop',
                              )
    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred_jaas.conf',
                              content = Template('mapred_jaas.conf.j2'),
                              owner = 'mapred',
                              group = 'hadoop',
                              )
    self.assertResourceCalled('XmlConfig', 'mapred-site.xml',
                              owner = 'mapred',
                              group = 'hadoop',
                              conf_dir = '/etc/hadoop/conf',
                              configurations = self.getConfig()['configurations']['mapred-site'],
                              configuration_attributes = self.getConfig()['configuration_attributes']['mapred-site']
                              )
    self.assertResourceCalled('XmlConfig', 'capacity-scheduler.xml',
                              owner = 'hdfs',
                              group = 'hadoop',
                              conf_dir = '/etc/hadoop/conf',
                              configurations = self.getConfig()['configurations']['capacity-scheduler'],
                              configuration_attributes = self.getConfig()['configuration_attributes']['capacity-scheduler']
                              )
    self.assertResourceCalled('File', '/etc/hadoop/conf/fair-scheduler.xml',
                              owner = 'mapred',
                              group = 'hadoop',
                              )
    self.assertResourceCalled('File', '/etc/hadoop/conf/ssl-client.xml.example',
                              owner = 'mapred',
                              group = 'hadoop',
                              )
    self.assertResourceCalled('File', '/etc/hadoop/conf/ssl-server.xml.example',
                              owner = 'mapred',
                              group = 'hadoop',
                              )
    # No further resources may have been created by the scripts.
    self.assertNoMoreResources()
  def test_restart_client(self):
    """Restart of the YARN client must (re)write the full client config set.

    Drives yarn_client.py with command="restart" on default.json and then
    asserts, in order, every resource the restart is expected to manage:
    run/log directories, the *-site.xml configs, limits files, env scripts,
    the container-executor binary/config and the scheduler/SSL example files.
    """
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/yarn_client.py",
                       classname = "YarnClient",
                       command = "restart",
                       config_file="default.json",
                       config_overrides = { 'roleParams' : { "component_category": "CLIENT" } },
                       stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    # Runtime and log directories for the yarn and mapred users.
    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
                              owner = 'yarn',
                              group = 'hadoop',
                              create_parents = True,
                              cd_access = 'a',
                              )
    self.assertResourceCalled('Directory', '/var/run/hadoop-yarn/yarn',
                              owner = 'yarn',
                              group = 'hadoop',
                              create_parents = True,
                              cd_access = 'a',
                              )
    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/yarn',
                              owner = 'yarn',
                              group = 'hadoop',
                              create_parents = True,
                              cd_access = 'a',
                              )
    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce',
                              owner = 'mapred',
                              group = 'hadoop',
                              create_parents = True,
                              cd_access = 'a',
                              )
    self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce/mapred',
                              owner = 'mapred',
                              group = 'hadoop',
                              create_parents = True,
                              cd_access = 'a',
                              )
    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce',
                              owner = 'mapred',
                              group = 'hadoop',
                              create_parents = True,
                              cd_access = 'a',
                              )
    self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce/mapred',
                              owner = 'mapred',
                              group = 'hadoop',
                              create_parents = True,
                              cd_access = 'a',
                              )
    self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
                              owner = 'yarn',
                              group = 'hadoop',
                              create_parents = True,
                              ignore_failures = True,
                              cd_access = 'a',
                              )
    # Hadoop client XML configurations, world-readable (0644).
    self.assertResourceCalled('XmlConfig', 'core-site.xml',
                              owner = 'hdfs',
                              group = 'hadoop',
                              mode = 0644,
                              conf_dir = '/etc/hadoop/conf',
                              configurations = self.getConfig()['configurations']['core-site'],
                              configuration_attributes = self.getConfig()['configuration_attributes']['core-site']
    )
    self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
                              owner = 'hdfs',
                              group = 'hadoop',
                              mode = 0644,
                              conf_dir = '/etc/hadoop/conf',
                              configurations = self.getConfig()['configurations']['hdfs-site'],
                              configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
    )
    self.assertResourceCalled('XmlConfig', 'mapred-site.xml',
                              owner = 'yarn',
                              group = 'hadoop',
                              mode = 0644,
                              conf_dir = '/etc/hadoop/conf',
                              configurations = self.getConfig()['configurations']['mapred-site'],
                              configuration_attributes = self.getConfig()['configuration_attributes']['mapred-site']
    )
    self.assertResourceCalled('XmlConfig', 'yarn-site.xml',
                              owner = 'yarn',
                              group = 'hadoop',
                              mode = 0644,
                              conf_dir = '/etc/hadoop/conf',
                              configurations = self.getConfig()['configurations']['yarn-site'],
                              configuration_attributes = self.getConfig()['configuration_attributes']['yarn-site']
    )
    self.assertResourceCalled('XmlConfig', 'capacity-scheduler.xml',
                              owner = 'yarn',
                              group = 'hadoop',
                              mode = 0644,
                              conf_dir = '/etc/hadoop/conf',
                              configurations = self.getConfig()['configurations']['capacity-scheduler'],
                              configuration_attributes = self.getConfig()['configuration_attributes']['capacity-scheduler']
    )
    # ulimit and environment files.
    self.assertResourceCalled('File', '/etc/security/limits.d/yarn.conf',
                              content = Template('yarn.conf.j2'),
                              mode = 0644,
                              )
    self.assertResourceCalled('File', '/etc/security/limits.d/mapreduce.conf',
                              content = Template('mapreduce.conf.j2'),
                              mode = 0644,
                              )
    self.assertResourceCalled('File', '/etc/hadoop/conf/yarn-env.sh',
                              content = InlineTemplate(self.getConfig()['configurations']['yarn-env']['content']),
                              owner = 'yarn',
                              group = 'hadoop',
                              mode = 0755,
                              )
    # Non-secured cluster: container-executor is setgid (02050), not setuid.
    self.assertResourceCalled('File', '/usr/lib/hadoop-yarn/bin/container-executor',
                              group = 'hadoop',
                              mode = 02050,
                              )
    self.assertResourceCalled('File', '/etc/hadoop/conf/container-executor.cfg',
                              content = Template('container-executor.cfg.j2'),
                              group = 'hadoop',
                              mode = 0644,
                              )
    self.assertResourceCalled('Directory', '/cgroups_test/cpu',
                              group = 'hadoop',
                              create_parents = True,
                              mode = 0755,
                              cd_access="a"
    )
    self.assertResourceCalled('File', '/etc/hadoop/conf/mapred-env.sh',
                              content = InlineTemplate(self.getConfig()['configurations']['mapred-env']['content']),
                              mode = 0755,
                              owner = 'hdfs',
                              )
    self.assertResourceCalled('File', '/etc/hadoop/conf/taskcontroller.cfg',
                              content = Template('taskcontroller.cfg.j2'),
                              owner = 'hdfs',
                              )
    # MapReduce client view of mapred-site/capacity-scheduler.
    self.assertResourceCalled('XmlConfig', 'mapred-site.xml',
                              owner = 'mapred',
                              group = 'hadoop',
                              conf_dir = '/etc/hadoop/conf',
                              configurations = self.getConfig()['configurations']['mapred-site'],
                              configuration_attributes = self.getConfig()['configuration_attributes']['mapred-site']
    )
    self.assertResourceCalled('XmlConfig', 'capacity-scheduler.xml',
                              owner = 'hdfs',
                              group = 'hadoop',
                              conf_dir = '/etc/hadoop/conf',
                              configurations = self.getConfig()['configurations']['capacity-scheduler'],
                              configuration_attributes = self.getConfig()['configuration_attributes']['capacity-scheduler']
    )
    self.assertResourceCalled('File', '/etc/hadoop/conf/fair-scheduler.xml',
                              owner = 'mapred',
                              group = 'hadoop',
                              )
    self.assertResourceCalled('File', '/etc/hadoop/conf/ssl-client.xml.example',
                              owner = 'mapred',
                              group = 'hadoop',
                              )
    self.assertResourceCalled('File', '/etc/hadoop/conf/ssl-server.xml.example',
                              owner = 'mapred',
                              group = 'hadoop',
                              )
    self.assertNoMoreResources()
  @patch.object(functions, "get_stack_version", new=MagicMock(return_value="2.2.0.0-2041"))
  def test_upgrade(self):
    """A restart during rolling upgrade must re-point hdp-select at the new version.

    The target version 2.2.1.0-2067 comes from client-upgrade.json; the mocked
    get_stack_version returns the *old* version (2.2.0.0-2041).
    """
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/yarn_client.py",
                       classname = "YarnClient",
                       command = "restart",
                       config_file="client-upgrade.json",
                       stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES
    )
    # Only the stack-selector call is asserted here; the rest of the restart
    # resources are covered by test_restart_client.
    self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
# for now, it's enough that <stack-selector-tool> is confirmed
  @patch.object(functions, "get_stack_version", new = MagicMock(return_value='2.3.0.0-1234'))
  def test_pre_upgrade_restart_23(self):
    """pre_upgrade_restart on a 2.3 stack must run hdp-select and conf-select.

    Loads default.json, injects commandParams/version = 2.3.0.0-1234, and
    verifies the stack-selector Execute plus the exact conf-select
    set-conf-dir / create-conf-dir shell invocations captured by the mocks.
    """
    config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
    with open(config_file, "r") as f:
      json_content = json.load(f)
    version = '2.3.0.0-1234'
    json_content['commandParams']['version'] = version
    # call_mocks feeds return values to the patched call/checked_call helpers.
    mocks_dict = {}
    self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/yarn_client.py",
                       classname = "YarnClient",
                       command = "pre_upgrade_restart",
                       config_dict = json_content,
                       stack_version = self.STACK_VERSION,
                       target = RMFTestCase.TARGET_COMMON_SERVICES,
                       call_mocks = [(0, None, ''), (0, None)],
                       mocks_dict = mocks_dict)
    self.assertResourceCalledIgnoreEarlier('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-client', version), sudo=True)
    self.assertNoMoreResources()
    # Exactly one conf-select lookup and one conf-dir creation are expected.
    self.assertEquals(1, mocks_dict['call'].call_count)
    self.assertEquals(1, mocks_dict['checked_call'].call_count)
    self.assertEquals(
      ('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
       mocks_dict['checked_call'].call_args_list[0][0][0])
    self.assertEquals(
      ('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
       mocks_dict['call'].call_args_list[0][0][0])
| 43.14094 | 153 | 0.552427 |
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
import os
from mock.mock import MagicMock, call, patch
from stacks.utils.RMFTestCase import *
from resource_management.libraries.functions import version
from resource_management.libraries.script.script import Script
from resource_management.libraries import functions
origin_exists = os.path.exists
@patch("platform.linux_distribution", new = MagicMock(return_value="Linux"))
@patch.object(os.path, "exists", new=MagicMock(
side_effect=lambda *args: origin_exists(args[0])
if args[0][-2:] == "j2" else True))
class TestYarnClient(RMFTestCase):
COMMON_SERVICES_PACKAGE_DIR = "YARN/2.1.0.2.0/package"
STACK_VERSION = "2.0.6"
def test_configure_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/yarn_client.py",
classname = "YarnClient",
command = "configure",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
owner = 'yarn',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/run/hadoop-yarn/yarn',
owner = 'yarn',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/yarn',
owner = 'yarn',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce',
owner = 'mapred',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce/mapred',
owner = 'mapred',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce',
owner = 'mapred',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce/mapred',
owner = 'mapred',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
owner = 'yarn',
group = 'hadoop',
create_parents = True,
ignore_failures = True,
cd_access = 'a',
)
self.assertResourceCalled('XmlConfig', 'core-site.xml',
owner = 'hdfs',
group = 'hadoop',
mode = 0644,
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['core-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['core-site']
)
self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
owner = 'hdfs',
group = 'hadoop',
mode = 0644,
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['hdfs-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
)
self.assertResourceCalled('XmlConfig', 'mapred-site.xml',
owner = 'yarn',
group = 'hadoop',
mode = 0644,
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['mapred-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['mapred-site']
)
self.assertResourceCalled('XmlConfig', 'yarn-site.xml',
owner = 'yarn',
group = 'hadoop',
mode = 0644,
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['yarn-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['yarn-site']
)
self.assertResourceCalled('XmlConfig', 'capacity-scheduler.xml',
owner = 'yarn',
group = 'hadoop',
mode = 0644,
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['capacity-scheduler'],
configuration_attributes = self.getConfig()['configuration_attributes']['capacity-scheduler']
)
self.assertResourceCalled('File', '/etc/security/limits.d/yarn.conf',
content = Template('yarn.conf.j2'),
mode = 0644,
)
self.assertResourceCalled('File', '/etc/security/limits.d/mapreduce.conf',
content = Template('mapreduce.conf.j2'),
mode = 0644,
)
self.assertResourceCalled('File', '/etc/hadoop/conf/yarn-env.sh',
content = InlineTemplate(self.getConfig()['configurations']['yarn-env']['content']),
owner = 'yarn',
group = 'hadoop',
mode = 0755,
)
self.assertResourceCalled('File', '/usr/lib/hadoop-yarn/bin/container-executor',
group = 'hadoop',
mode = 02050,
)
self.assertResourceCalled('File', '/etc/hadoop/conf/container-executor.cfg',
content = Template('container-executor.cfg.j2'),
group = 'hadoop',
mode = 0644,
)
self.assertResourceCalled('Directory', '/cgroups_test/cpu',
group = 'hadoop',
create_parents = True,
mode = 0755,
cd_access="a"
)
self.assertResourceCalled('File', '/etc/hadoop/conf/mapred-env.sh',
content = InlineTemplate(self.getConfig()['configurations']['mapred-env']['content']),
mode = 0755,
owner = 'hdfs',
)
self.assertResourceCalled('File', '/etc/hadoop/conf/taskcontroller.cfg',
content = Template('taskcontroller.cfg.j2'),
owner = 'hdfs',
)
self.assertResourceCalled('XmlConfig', 'mapred-site.xml',
owner = 'mapred',
group = 'hadoop',
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['mapred-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['mapred-site']
)
self.assertResourceCalled('XmlConfig', 'capacity-scheduler.xml',
owner = 'hdfs',
group = 'hadoop',
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['capacity-scheduler'],
configuration_attributes = self.getConfig()['configuration_attributes']['capacity-scheduler']
)
self.assertResourceCalled('File', '/etc/hadoop/conf/fair-scheduler.xml',
owner = 'mapred',
group = 'hadoop',
)
self.assertResourceCalled('File', '/etc/hadoop/conf/ssl-client.xml.example',
owner = 'mapred',
group = 'hadoop',
)
self.assertResourceCalled('File', '/etc/hadoop/conf/ssl-server.xml.example',
owner = 'mapred',
group = 'hadoop',
)
self.assertNoMoreResources()
def test_configure_secured(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/yarn_client.py",
classname = "YarnClient",
command = "configure",
config_file="secured.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
owner = 'yarn',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/run/hadoop-yarn/yarn',
owner = 'yarn',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/yarn',
owner = 'yarn',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce',
owner = 'mapred',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce/mapred',
owner = 'mapred',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce',
owner = 'mapred',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce/mapred',
owner = 'mapred',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
owner = 'yarn',
group = 'hadoop',
create_parents = True,
ignore_failures = True,
cd_access = 'a',
)
self.assertResourceCalled('XmlConfig', 'core-site.xml',
owner = 'hdfs',
group = 'hadoop',
mode = 0644,
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['core-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['core-site']
)
self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
owner = 'hdfs',
group = 'hadoop',
mode = 0644,
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['hdfs-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
)
self.assertResourceCalled('XmlConfig', 'mapred-site.xml',
owner = 'yarn',
group = 'hadoop',
mode = 0644,
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['mapred-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['mapred-site']
)
self.assertResourceCalled('XmlConfig', 'yarn-site.xml',
owner = 'yarn',
group = 'hadoop',
mode = 0644,
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['yarn-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['yarn-site']
)
self.assertResourceCalled('XmlConfig', 'capacity-scheduler.xml',
owner = 'yarn',
group = 'hadoop',
mode = 0644,
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['capacity-scheduler'],
configuration_attributes = self.getConfig()['configuration_attributes']['capacity-scheduler']
)
self.assertResourceCalled('File', '/etc/security/limits.d/yarn.conf',
content = Template('yarn.conf.j2'),
mode = 0644,
)
self.assertResourceCalled('File', '/etc/security/limits.d/mapreduce.conf',
content = Template('mapreduce.conf.j2'),
mode = 0644,
)
self.assertResourceCalled('File', '/etc/hadoop/conf/yarn-env.sh',
content = InlineTemplate(self.getConfig()['configurations']['yarn-env']['content']),
owner = 'yarn',
group = 'hadoop',
mode = 0755,
)
self.assertResourceCalled('File', '/usr/lib/hadoop-yarn/bin/container-executor',
group = 'hadoop',
mode = 06050,
)
self.assertResourceCalled('File', '/etc/hadoop/conf/container-executor.cfg',
content = Template('container-executor.cfg.j2'),
group = 'hadoop',
mode = 0644,
)
self.assertResourceCalled('Directory', '/cgroups_test/cpu',
group = 'hadoop',
create_parents = True,
mode = 0755,
cd_access="a"
)
self.assertResourceCalled('File', '/etc/hadoop/conf/mapred-env.sh',
content = InlineTemplate(self.getConfig()['configurations']['mapred-env']['content']),
mode = 0755,
owner = 'root',
)
self.assertResourceCalled('File', '/usr/lib/hadoop/sbin/task-controller',
owner = 'root',
group = 'hadoop',
mode = 06050,
)
self.assertResourceCalled('File', '/etc/hadoop/conf/taskcontroller.cfg',
content = Template('taskcontroller.cfg.j2'),
owner = 'root',
group = 'hadoop',
mode = 0644,
)
self.assertResourceCalled('File', '/etc/hadoop/conf/yarn_jaas.conf',
content = Template('yarn_jaas.conf.j2'),
owner = 'yarn',
group = 'hadoop',
)
self.assertResourceCalled('File', '/etc/hadoop/conf/yarn_nm_jaas.conf',
content = Template('yarn_nm_jaas.conf.j2'),
owner = 'yarn',
group = 'hadoop',
)
self.assertResourceCalled('File', '/etc/hadoop/conf/mapred_jaas.conf',
content = Template('mapred_jaas.conf.j2'),
owner = 'mapred',
group = 'hadoop',
)
self.assertResourceCalled('XmlConfig', 'mapred-site.xml',
owner = 'mapred',
group = 'hadoop',
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['mapred-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['mapred-site']
)
self.assertResourceCalled('XmlConfig', 'capacity-scheduler.xml',
owner = 'hdfs',
group = 'hadoop',
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['capacity-scheduler'],
configuration_attributes = self.getConfig()['configuration_attributes']['capacity-scheduler']
)
self.assertResourceCalled('File', '/etc/hadoop/conf/fair-scheduler.xml',
owner = 'mapred',
group = 'hadoop',
)
self.assertResourceCalled('File', '/etc/hadoop/conf/ssl-client.xml.example',
owner = 'mapred',
group = 'hadoop',
)
self.assertResourceCalled('File', '/etc/hadoop/conf/ssl-server.xml.example',
owner = 'mapred',
group = 'hadoop',
)
self.assertNoMoreResources()
def test_restart_client(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/yarn_client.py",
classname = "YarnClient",
command = "restart",
config_file="default.json",
config_overrides = { 'roleParams' : { "component_category": "CLIENT" } },
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Directory', '/var/run/hadoop-yarn',
owner = 'yarn',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/run/hadoop-yarn/yarn',
owner = 'yarn',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/log/hadoop-yarn/yarn',
owner = 'yarn',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce',
owner = 'mapred',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/run/hadoop-mapreduce/mapred',
owner = 'mapred',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce',
owner = 'mapred',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/log/hadoop-mapreduce/mapred',
owner = 'mapred',
group = 'hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/log/hadoop-yarn',
owner = 'yarn',
group = 'hadoop',
create_parents = True,
ignore_failures = True,
cd_access = 'a',
)
self.assertResourceCalled('XmlConfig', 'core-site.xml',
owner = 'hdfs',
group = 'hadoop',
mode = 0644,
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['core-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['core-site']
)
self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
owner = 'hdfs',
group = 'hadoop',
mode = 0644,
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['hdfs-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['hdfs-site']
)
self.assertResourceCalled('XmlConfig', 'mapred-site.xml',
owner = 'yarn',
group = 'hadoop',
mode = 0644,
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['mapred-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['mapred-site']
)
self.assertResourceCalled('XmlConfig', 'yarn-site.xml',
owner = 'yarn',
group = 'hadoop',
mode = 0644,
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['yarn-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['yarn-site']
)
self.assertResourceCalled('XmlConfig', 'capacity-scheduler.xml',
owner = 'yarn',
group = 'hadoop',
mode = 0644,
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['capacity-scheduler'],
configuration_attributes = self.getConfig()['configuration_attributes']['capacity-scheduler']
)
self.assertResourceCalled('File', '/etc/security/limits.d/yarn.conf',
content = Template('yarn.conf.j2'),
mode = 0644,
)
self.assertResourceCalled('File', '/etc/security/limits.d/mapreduce.conf',
content = Template('mapreduce.conf.j2'),
mode = 0644,
)
self.assertResourceCalled('File', '/etc/hadoop/conf/yarn-env.sh',
content = InlineTemplate(self.getConfig()['configurations']['yarn-env']['content']),
owner = 'yarn',
group = 'hadoop',
mode = 0755,
)
self.assertResourceCalled('File', '/usr/lib/hadoop-yarn/bin/container-executor',
group = 'hadoop',
mode = 02050,
)
self.assertResourceCalled('File', '/etc/hadoop/conf/container-executor.cfg',
content = Template('container-executor.cfg.j2'),
group = 'hadoop',
mode = 0644,
)
self.assertResourceCalled('Directory', '/cgroups_test/cpu',
group = 'hadoop',
create_parents = True,
mode = 0755,
cd_access="a"
)
self.assertResourceCalled('File', '/etc/hadoop/conf/mapred-env.sh',
content = InlineTemplate(self.getConfig()['configurations']['mapred-env']['content']),
mode = 0755,
owner = 'hdfs',
)
self.assertResourceCalled('File', '/etc/hadoop/conf/taskcontroller.cfg',
content = Template('taskcontroller.cfg.j2'),
owner = 'hdfs',
)
self.assertResourceCalled('XmlConfig', 'mapred-site.xml',
owner = 'mapred',
group = 'hadoop',
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['mapred-site'],
configuration_attributes = self.getConfig()['configuration_attributes']['mapred-site']
)
self.assertResourceCalled('XmlConfig', 'capacity-scheduler.xml',
owner = 'hdfs',
group = 'hadoop',
conf_dir = '/etc/hadoop/conf',
configurations = self.getConfig()['configurations']['capacity-scheduler'],
configuration_attributes = self.getConfig()['configuration_attributes']['capacity-scheduler']
)
self.assertResourceCalled('File', '/etc/hadoop/conf/fair-scheduler.xml',
owner = 'mapred',
group = 'hadoop',
)
self.assertResourceCalled('File', '/etc/hadoop/conf/ssl-client.xml.example',
owner = 'mapred',
group = 'hadoop',
)
self.assertResourceCalled('File', '/etc/hadoop/conf/ssl-server.xml.example',
owner = 'mapred',
group = 'hadoop',
)
self.assertNoMoreResources()
@patch.object(functions, "get_stack_version", new=MagicMock(return_value="2.2.0.0-2041"))
def test_upgrade(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/yarn_client.py",
classname = "YarnClient",
command = "restart",
config_file="client-upgrade.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-client', '2.2.1.0-2067'), sudo=True)
@patch.object(functions, "get_stack_version", new = MagicMock(return_value='2.3.0.0-1234'))
def test_pre_upgrade_restart_23(self):
config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
with open(config_file, "r") as f:
json_content = json.load(f)
version = '2.3.0.0-1234'
json_content['commandParams']['version'] = version
mocks_dict = {}
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/yarn_client.py",
classname = "YarnClient",
command = "pre_upgrade_restart",
config_dict = json_content,
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES,
call_mocks = [(0, None, ''), (0, None)],
mocks_dict = mocks_dict)
self.assertResourceCalledIgnoreEarlier('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hadoop-client', version), sudo=True)
self.assertNoMoreResources()
self.assertEquals(1, mocks_dict['call'].call_count)
self.assertEquals(1, mocks_dict['checked_call'].call_count)
self.assertEquals(
('ambari-python-wrap', '/usr/bin/conf-select', 'set-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
mocks_dict['checked_call'].call_args_list[0][0][0])
self.assertEquals(
('ambari-python-wrap', '/usr/bin/conf-select', 'create-conf-dir', '--package', 'hadoop', '--stack-version', '2.3.0.0-1234', '--conf-version', '0'),
mocks_dict['call'].call_args_list[0][0][0])
| false | true |
f71c945e6058577857c0b8a5868cd8a7b234044b | 2,412 | py | Python | jupyter_server_mathjax/app.py | minrk/jupyter_server_mathjax | 4dfbcf70ee00de3776cd2acf1debdc790e56f64e | [
"BSD-3-Clause"
] | null | null | null | jupyter_server_mathjax/app.py | minrk/jupyter_server_mathjax | 4dfbcf70ee00de3776cd2acf1debdc790e56f64e | [
"BSD-3-Clause"
] | null | null | null | jupyter_server_mathjax/app.py | minrk/jupyter_server_mathjax | 4dfbcf70ee00de3776cd2acf1debdc790e56f64e | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from pathlib import Path
from traitlets import default, observe, Unicode
from tornado.web import RedirectHandler
from jupyter_server.extension.application import ExtensionApp
from jupyter_server.utils import url_path_join
from jupyter_server.transutils import _
STATIC_ASSETS_PATH = Path(__file__).parent / "static"
class DeprecatedRedirectHandler(RedirectHandler):
    """RedirectHandler that warns about the deprecated classic-notebook URL.

    Behaves exactly like tornado's RedirectHandler, but emits a
    PendingDeprecationWarning each time the old endpoint is hit.
    """

    def get(self, *args, **kwargs):
        # Imported lazily so the warnings machinery is only touched when the
        # deprecated endpoint is actually requested.
        import warnings

        message = (
            "Redirecting old Notebook MathJax URL to new one. "
            "This will be removed in a future release."
        )
        warnings.warn(message, PendingDeprecationWarning)
        super().get(*args, **kwargs)
class MathJaxExtension(ExtensionApp):
    """Jupyter Server extension that serves the bundled MathJax assets.

    Registers a static handler for the MathJax files shipped with this
    package and a deprecated redirect from the classic notebook's
    ``/static/components/MathJax/...`` path.
    """

    name = "jupyter_server_mathjax"

    # By listing the path to the assets here, jupyter_server
    # automatically creates a static file handler at
    # /static/jupyter_server_mathjax/...
    static_paths = [str(STATIC_ASSETS_PATH)]

    mathjax_config = Unicode(
        "TeX-AMS-MML_HTMLorMML-full,Safe",
        config=True,
        help=_("""The MathJax.js configuration file that is to be used."""),
    )

    @observe("mathjax_config")
    def _update_mathjax_config(self, change):
        # Traitlets observer: log whenever the configured MathJax config changes.
        self.log.info(_("Using MathJax configuration file: %s"), change["new"])

    def initialize_settings(self):
        """Expose mathjax_config and mathjax_url in the tornado app settings."""
        # Add settings specific to this extension to the
        # tornado webapp settings.
        self.settings.update({
            "mathjax_config": self.mathjax_config,
            "mathjax_url": "/static/jupyter_server_mathjax/MathJax.js"
        })

    def initialize_handlers(self):
        """Register the deprecated classic-notebook MathJax redirect."""
        webapp = self.serverapp.web_app
        base_url = self.serverapp.base_url
        host_pattern = ".*$"

        # Add a deprecated redirect for all MathJax paths from the classic
        # notebook to the static endpoint created for this extension.
        webapp.add_handlers(
            host_pattern,
            [
                (
                    url_path_join(base_url, "/static/components/MathJax/(.*)"),
                    DeprecatedRedirectHandler,
                    {
                        "url": url_path_join(
                            self.static_url_prefix, "/{0}"  # {0} = group 0 in url path
                        )
                    },
                )
            ],
        )
| 31.736842 | 105 | 0.625622 |
from pathlib import Path
from traitlets import default, observe, Unicode
from tornado.web import RedirectHandler
from jupyter_server.extension.application import ExtensionApp
from jupyter_server.utils import url_path_join
from jupyter_server.transutils import _
STATIC_ASSETS_PATH = Path(__file__).parent / "static"
class DeprecatedRedirectHandler(RedirectHandler):
def get(self, *args, **kwargs):
import warnings
warnings.warn(
"Redirecting old Notebook MathJax URL to new one. This will be removed in a future release.",
PendingDeprecationWarning,
)
super().get(*args, **kwargs)
class MathJaxExtension(ExtensionApp):
name = "jupyter_server_mathjax"
static_paths = [str(STATIC_ASSETS_PATH)]
mathjax_config = Unicode(
"TeX-AMS-MML_HTMLorMML-full,Safe",
config=True,
help=_("""The MathJax.js configuration file that is to be used."""),
)
@observe("mathjax_config")
def _update_mathjax_config(self, change):
self.log.info(_("Using MathJax configuration file: %s"), change["new"])
def initialize_settings(self):
self.settings.update({
"mathjax_config": self.mathjax_config,
"mathjax_url": "/static/jupyter_server_mathjax/MathJax.js"
})
def initialize_handlers(self):
webapp = self.serverapp.web_app
base_url = self.serverapp.base_url
host_pattern = ".*$"
webapp.add_handlers(
host_pattern,
[
(
url_path_join(base_url, "/static/components/MathJax/(.*)"),
DeprecatedRedirectHandler,
{
"url": url_path_join(
self.static_url_prefix, "/{0}"
)
},
)
],
)
| true | true |
f71c94ef510848605c979ad6aae3be1a96a86bcd | 5,538 | py | Python | src/movies/management/commands/add_kp_movie.py | Little-Pogchamp-Team/kinopoisk_on_django | 06e1b5ee14c7e77dd5b69140732461a02bf44566 | [
"MIT"
] | 10 | 2021-01-10T09:39:16.000Z | 2022-02-05T06:40:47.000Z | src/movies/management/commands/add_kp_movie.py | Little-Pogchamp-Team/kinopoisk_on_django | 06e1b5ee14c7e77dd5b69140732461a02bf44566 | [
"MIT"
] | null | null | null | src/movies/management/commands/add_kp_movie.py | Little-Pogchamp-Team/kinopoisk_on_django | 06e1b5ee14c7e77dd5b69140732461a02bf44566 | [
"MIT"
] | 1 | 2021-01-11T17:04:06.000Z | 2021-01-11T17:04:06.000Z | import asyncio
import os
from datetime import date
from os import getenv
from django.core.files.images import ImageFile
from django.core.management.base import BaseCommand
from movies.models import Poster, Movie, Genre
from person.models import Person, Photo, PersonRole
from parser.formatter import get_formatted_movie_fields, get_formatted_person_fields, get_formatted_role_fields
from parser.kinopoisk_api import KP
from argparse import ArgumentParser
class Command(BaseCommand):
help = 'Get full film info from kinopoisk and add to database'
    def add_arguments(self, parser: ArgumentParser):
        """Register CLI options: positional kinopoisk movie id plus optional API key.

        The API key defaults to the KP_API_KEY environment variable when
        -k/--api-key is not given on the command line.
        """
        parser.add_argument('movie_id', type=int)
        parser.add_argument('-k', '--api-key', default=getenv('KP_API_KEY'))
    async def _get_movie_info(self, kp: KP, movie_id: int):
        """Fetch movie details, posters and one photo per credited person.

        Returns a dict with keys 'movie', 'posters', 'persons' and 'photos';
        'photos' is ordered like 'persons' (asyncio.gather preserves order).
        """
        movie, persons = await kp.get_full_film_info(movie_id)
        posters = await kp.get_film_photo(movie_id)
        # Raise the client's request budget before fanning out one photo
        # request per person.  NOTE(review): mutates shared client state.
        kp.REQUESTS_LIMIT = 50
        photos_tasks = [asyncio.create_task(kp.get_person_photo(person["kp_id"])) for person in persons]
        photos = await asyncio.gather(*photos_tasks)
        return {
            'movie': movie,
            'posters': posters,
            'persons': persons,
            'photos': photos
        }
def _get_kp_id_from_image_data(self, image_data: dict):
filename: str = next(iter(image_data))
return int(filename.removesuffix('.jpg').removeprefix('person_').removeprefix('movie_'))
@staticmethod
def safe_mkdir(dirname):
if not os.path.exists(dirname):
os.mkdir(dirname)
def add_person(self, raw_person_data: dict, photos) -> tuple[int, Person]:
kp_id = int(raw_person_data.get('kp_id'))
person_data = get_formatted_person_fields(raw_person_data)
person_data['birth_date'] = date(*map(int, birth_date.split('-'))) \
if (birth_date := person_data['birth_date']) else None
person_data['death'] = date(*map(int, birth_date.split('-'))) \
if (birth_date := person_data['death']) else None
person: Person = Person.objects.get_or_create(**person_data)[0]
if not person.photos.exists() and (image_bin := next(iter(photos[kp_id].values()))):
self.safe_mkdir('temp')
file_path = os.path.join('temp', next(iter(photos[kp_id])))
with open(file_path, 'wb') as f:
f.write(image_bin)
try:
Photo(image=ImageFile(open(file_path, 'rb')),
person=person,
orientation=Photo.OrientationType.VERTICAL.name,
format=Photo.FormatType.MEDIUM.name).save()
finally:
os.remove(file_path)
return kp_id, person
def handle(self, *args, **options):
movie_id = options['movie_id']
self.main(movie_id, options['api_key'])
def main(self, movie_id, api_key):
print(api_key)
kinopoisk = KP(api_key)
self.stdout.write("Collect data")
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
future = asyncio.ensure_future(self._get_movie_info(kinopoisk, movie_id))
loop.run_until_complete(future)
full_movie_info: dict = future.result()
self.stdout.write(self.style.SUCCESS("Data received"))
movie_info: dict = full_movie_info['movie']
genres = [Genre.objects.get_or_create(title=genre)[0] for genre in movie_info['genres']]
formatted_movie_info = get_formatted_movie_fields(movie_info)
# movie = Movie.objects.filter(**formatted_movie_info).first()
if Movie.objects.filter(**formatted_movie_info).exists():
self.stdout.write(self.style.WARNING(f"Movie {movie_id} exists in this database"))
return
formatted_movie_info['movie_type_id'] = formatted_movie_info.pop('movie_type')
movie: Movie = Movie(**formatted_movie_info)
movie.save()
self.stdout.write(f"Movie {movie} created")
for genre in genres:
movie.genres.add(genre)
self.stdout.write(self.style.SUCCESS("Movie saved"))
photos = {self._get_kp_id_from_image_data(image_data): image_data for image_data in full_movie_info['photos']}
persons_kp_id_map = {}
raw_person_data: dict
for raw_person_data in full_movie_info['persons']:
kp_id, person = self.add_person(raw_person_data, photos)
persons_kp_id_map[kp_id] = person
self.stdout.write(self.style.SUCCESS("Persons saved"))
for role in movie_info['roles']:
PersonRole(**get_formatted_role_fields(role, movie, persons_kp_id_map[int(role['kp_id'])])).save()
self.stdout.write(self.style.SUCCESS("Roles saved"))
for filename, image_bin in full_movie_info['posters'].items():
if not image_bin:
continue
self.safe_mkdir('temp')
file_path = os.path.join('temp', filename)
with open(file_path, 'wb') as f:
f.write(image_bin)
try:
Poster(movie=movie,
image=ImageFile(open(file_path, 'rb')),
orientation=Poster.OrientationType.VERTICAL.name,
format=Poster.FormatType.LARGE.name if '_small' in filename else Poster.FormatType.LARGE.name). \
save()
finally:
os.remove(file_path)
os.rmdir('temp')
self.stdout.write(self.style.SUCCESS("Posters saved"))
| 43.606299 | 120 | 0.639581 | import asyncio
import os
from datetime import date
from os import getenv
from django.core.files.images import ImageFile
from django.core.management.base import BaseCommand
from movies.models import Poster, Movie, Genre
from person.models import Person, Photo, PersonRole
from parser.formatter import get_formatted_movie_fields, get_formatted_person_fields, get_formatted_role_fields
from parser.kinopoisk_api import KP
from argparse import ArgumentParser
class Command(BaseCommand):
help = 'Get full film info from kinopoisk and add to database'
def add_arguments(self, parser: ArgumentParser):
parser.add_argument('movie_id', type=int)
parser.add_argument('-k', '--api-key', default=getenv('KP_API_KEY'))
async def _get_movie_info(self, kp: KP, movie_id: int):
movie, persons = await kp.get_full_film_info(movie_id)
posters = await kp.get_film_photo(movie_id)
kp.REQUESTS_LIMIT = 50
photos_tasks = [asyncio.create_task(kp.get_person_photo(person["kp_id"])) for person in persons]
photos = await asyncio.gather(*photos_tasks)
return {
'movie': movie,
'posters': posters,
'persons': persons,
'photos': photos
}
def _get_kp_id_from_image_data(self, image_data: dict):
filename: str = next(iter(image_data))
return int(filename.removesuffix('.jpg').removeprefix('person_').removeprefix('movie_'))
@staticmethod
def safe_mkdir(dirname):
if not os.path.exists(dirname):
os.mkdir(dirname)
def add_person(self, raw_person_data: dict, photos) -> tuple[int, Person]:
kp_id = int(raw_person_data.get('kp_id'))
person_data = get_formatted_person_fields(raw_person_data)
person_data['birth_date'] = date(*map(int, birth_date.split('-'))) \
if (birth_date := person_data['birth_date']) else None
person_data['death'] = date(*map(int, birth_date.split('-'))) \
if (birth_date := person_data['death']) else None
person: Person = Person.objects.get_or_create(**person_data)[0]
if not person.photos.exists() and (image_bin := next(iter(photos[kp_id].values()))):
self.safe_mkdir('temp')
file_path = os.path.join('temp', next(iter(photos[kp_id])))
with open(file_path, 'wb') as f:
f.write(image_bin)
try:
Photo(image=ImageFile(open(file_path, 'rb')),
person=person,
orientation=Photo.OrientationType.VERTICAL.name,
format=Photo.FormatType.MEDIUM.name).save()
finally:
os.remove(file_path)
return kp_id, person
def handle(self, *args, **options):
movie_id = options['movie_id']
self.main(movie_id, options['api_key'])
def main(self, movie_id, api_key):
print(api_key)
kinopoisk = KP(api_key)
self.stdout.write("Collect data")
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
future = asyncio.ensure_future(self._get_movie_info(kinopoisk, movie_id))
loop.run_until_complete(future)
full_movie_info: dict = future.result()
self.stdout.write(self.style.SUCCESS("Data received"))
movie_info: dict = full_movie_info['movie']
genres = [Genre.objects.get_or_create(title=genre)[0] for genre in movie_info['genres']]
formatted_movie_info = get_formatted_movie_fields(movie_info)
if Movie.objects.filter(**formatted_movie_info).exists():
self.stdout.write(self.style.WARNING(f"Movie {movie_id} exists in this database"))
return
formatted_movie_info['movie_type_id'] = formatted_movie_info.pop('movie_type')
movie: Movie = Movie(**formatted_movie_info)
movie.save()
self.stdout.write(f"Movie {movie} created")
for genre in genres:
movie.genres.add(genre)
self.stdout.write(self.style.SUCCESS("Movie saved"))
photos = {self._get_kp_id_from_image_data(image_data): image_data for image_data in full_movie_info['photos']}
persons_kp_id_map = {}
raw_person_data: dict
for raw_person_data in full_movie_info['persons']:
kp_id, person = self.add_person(raw_person_data, photos)
persons_kp_id_map[kp_id] = person
self.stdout.write(self.style.SUCCESS("Persons saved"))
for role in movie_info['roles']:
PersonRole(**get_formatted_role_fields(role, movie, persons_kp_id_map[int(role['kp_id'])])).save()
self.stdout.write(self.style.SUCCESS("Roles saved"))
for filename, image_bin in full_movie_info['posters'].items():
if not image_bin:
continue
self.safe_mkdir('temp')
file_path = os.path.join('temp', filename)
with open(file_path, 'wb') as f:
f.write(image_bin)
try:
Poster(movie=movie,
image=ImageFile(open(file_path, 'rb')),
orientation=Poster.OrientationType.VERTICAL.name,
format=Poster.FormatType.LARGE.name if '_small' in filename else Poster.FormatType.LARGE.name). \
save()
finally:
os.remove(file_path)
os.rmdir('temp')
self.stdout.write(self.style.SUCCESS("Posters saved"))
| true | true |
f71c9666f42e0445cb30a86089bfe762d8443e53 | 1,292 | py | Python | archspee/presenters/log.py | wangpy/archspee | 97855f903106fba567ffda8cdc25b061cd8bdf5e | [
"MIT"
] | 8 | 2019-01-22T13:03:40.000Z | 2021-12-30T22:11:12.000Z | archspee/presenters/log.py | wangpy/archspee | 97855f903106fba567ffda8cdc25b061cd8bdf5e | [
"MIT"
] | null | null | null | archspee/presenters/log.py | wangpy/archspee | 97855f903106fba567ffda8cdc25b061cd8bdf5e | [
"MIT"
] | null | null | null | from archspee.presenters import PresenterBase
from archspee.listeners import ListenerStatus
_LOG_LEVEL = None
class LogPresenter(PresenterBase):
    """Presenter that simply writes every event to the logger."""

    def __init__(self, action_callback, **kwargs):
        self.__log_level = _LOG_LEVEL  # reserved for future use, currently unused
        super().__init__(action_callback)
        # Remember the last reported state so unchanged reports are not logged.
        self.status = ListenerStatus.standby
        self.disabled = False

    def on_listener_status(self, trigger_id, status, is_disabled):
        status_changed = status != self.status
        disabled_changed = is_disabled != self.disabled
        if status_changed or disabled_changed:
            self.logger.info(f'Status changed: status={status!r}, disabled={is_disabled:d}')
            self.status = status
            self.disabled = is_disabled

    def on_recognization_started(self, trigger_id):
        self.logger.info('Recognization started')

    def on_intent_handled(self, trigger_id, spoken_text, intent, entities, summary, body, level):
        self.logger.info(f'Intent handled: {summary}, {body} ({level})')

    def on_error_handled(self, trigger_id, status_code, response_text, summary, body, level):
        self.logger.info(f'Error handled: {summary}, {body} ({level})')

    def start(self):
        self.logger.info('Log presenter started.')

    def terminate(self):
        self.logger.info('Log presenter terminated.')
| 39.151515 | 100 | 0.687307 | from archspee.presenters import PresenterBase
from archspee.listeners import ListenerStatus
_LOG_LEVEL = None
class LogPresenter(PresenterBase):
def __init__(self, action_callback, **kwargs):
self.__log_level = _LOG_LEVEL
super(LogPresenter, self).__init__(action_callback)
self.status = ListenerStatus.standby
self.disabled = False
def on_listener_status(self, trigger_id, status, is_disabled):
if status != self.status or is_disabled != self.disabled:
self.logger.info('Status changed: status=%s, disabled=%d' % (repr(status), is_disabled))
self.status = status
self.disabled = is_disabled
def on_recognization_started(self, trigger_id):
self.logger.info('Recognization started')
def on_intent_handled(self, trigger_id, spoken_text, intent, entities, summary, body, level):
self.logger.info('Intent handled: %s, %s (%s)' % (summary, body, level))
def on_error_handled(self, trigger_id, status_code, response_text, summary, body, level):
self.logger.info('Error handled: %s, %s (%s)' % (summary, body, level))
def start(self):
self.logger.info('Log presenter started.');
def terminate(self):
self.logger.info('Log presenter terminated.');
| true | true |
f71c96af05ee8e95f66b314c7abe60dd75cb2846 | 14,146 | py | Python | python/oneflow/nn/optimizer/optimizer.py | butterluo/oneflow | cf2ce575d80f89642b71bee2248e69b09213007d | [
"Apache-2.0"
] | null | null | null | python/oneflow/nn/optimizer/optimizer.py | butterluo/oneflow | cf2ce575d80f89642b71bee2248e69b09213007d | [
"Apache-2.0"
] | null | null | null | python/oneflow/nn/optimizer/optimizer.py | butterluo/oneflow | cf2ce575d80f89642b71bee2248e69b09213007d | [
"Apache-2.0"
] | null | null | null | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import collections
import functools
import warnings
from copy import deepcopy
from itertools import chain
from typing import Any, Callable, Dict, Union

import oneflow as flow
from oneflow.framework.tensor import Tensor
from oneflow.nn.graph.block import TensorBlock
from oneflow.nn.parameter import Parameter
from oneflow.nn.utils.clip_grad import clip_grad_norm_
class ParamGroup(object):
    """One parameter group: a list of tensors plus per-group hyper-parameters.

    Built from ``{"params": [...], <option>: <value>, ...}``; options missing
    from the dict fall back to the optimizer-wide defaults.
    """

    def __init__(
        self, parameters: Dict[str, Any], default_options: Dict,
    ):
        # The group must come in as a dict carrying a "params" list, never as
        # a bare Parameter/Tensor.
        assert isinstance(parameters, dict) and "params" in parameters
        assert not isinstance(parameters["params"], (Parameter, Tensor))
        self._parameters = list()
        for candidate in parameters["params"]:
            if isinstance(candidate, (Parameter, Tensor)):
                self._parameters.append(candidate)
            elif isinstance(candidate, TensorBlock):
                # nn.Graph wraps its parameters; unwrap to the origin tensor.
                self._parameters.append(candidate.origin)
            else:
                raise ValueError(
                    "parameters in ParamGroup must be Tensor or TensorBlock."
                )
        self._options = deepcopy(default_options)
        for option_name in self._options:
            if option_name in parameters:
                self._options[option_name] = parameters[option_name]
        # Gradient clipping is active only when both settings are supplied.
        self._enable_clip_grad = (
            "clip_grad_max_norm" in parameters and "clip_grad_norm_type" in parameters
        )
        if self._enable_clip_grad:
            self._options["clip_grad_max_norm"] = parameters["clip_grad_max_norm"]
            self._options["clip_grad_norm_type"] = parameters["clip_grad_norm_type"]

    def __getitem__(self, key):
        return self._options[key]

    def __setitem__(self, key, value):
        self._options[key] = value

    def __contains__(self, key):
        return key in self._options

    def setdefault(self, key, value):
        # Only fill in the option when it is not present yet; returns None.
        self._options.setdefault(key, value)

    def items(self):
        # NOTE: iterates the instance attributes (not the options) -- the
        # optimizer's state_dict relies on this exact behavior.
        return self.__dict__.items()

    @property
    def options(self):
        return self._options

    @property
    def parameters(self):
        return self._parameters
class _SourceOpOnlyResourceDependenceMode:
def __init__(self):
self.guard_ = None
def __enter__(self):
self.guard = (
flow._oneflow_internal.eager.multi_client.SourceOpOnlyResourceDependenceModeGuard()
)
def __exit__(self, *args, **kwargs):
del self.guard
def _decorate_step(step):
def decorated_step(*args, **kwargs):
with _SourceOpOnlyResourceDependenceMode():
return step(*args, **kwargs)
return decorated_step
class Optimizer(object):
def __init__(self, parameters, options):
self.param_groups = list()
self._default_options = options
self._state = dict()
self._state["step"] = 0
self._parse_input_parameters(parameters)
self.step = _decorate_step(self.step)
def add_param_group(self, param_group) -> None:
raise NotImplementedError()
def load_state_dict(self, state_dict) -> None:
r"""
Load the state of the optimizer which is created by `state_dict` function.
It almost copied from: https://pytorch.org/docs/stable/_modules/torch/optim/optimizer.html#Optimizer.load_state_dict
"""
# Validate the state_dict
groups = self.param_groups
saved_groups = state_dict["param_groups"]
if len(groups) != len(saved_groups):
raise ValueError(
"loaded state dict has a different number of parameter groups"
)
param_lens = (len(g._parameters) for g in groups)
saved_lens = (len(g["params"]) for g in saved_groups)
if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens)):
raise ValueError(
"loaded state dict contains a parameter group "
"that doesn't match the size of optimizer's group"
)
# Update the state
id_map = {
old_id: p
for old_id, p in zip(
chain.from_iterable((g["params"] for g in saved_groups)),
chain.from_iterable((g._parameters for g in groups)),
)
}
def cast(param, value):
r"""Make a deep copy of value, casting all tensors to device or placement of param."""
if isinstance(value, Tensor):
if value.is_local:
value = value.to(param.device)
else:
value = value.to_consistent(
placement=param.placement, sbp=param.sbp
)
return value
elif isinstance(value, dict):
return {k: cast(param, v) for k, v in value.items()}
elif isinstance(value, collections.Iterable):
return type(value)(cast(param, v) for v in value)
else:
return value
# Copy state assigned to params (and cast tensors to appropriate types).
# State that is not assigned to params is copied as is (needed for
# backward compatibility).
state = dict()
for k, v in state_dict["state"].items():
if k in id_map:
param = id_map[k]
state[param] = cast(param, v)
else:
state[k] = v
self._state = state
# Update parameter groups, setting their 'params' value
def update_group(group, new_group):
group._options = deepcopy(new_group["_options"])
group._enable_clip_grad = new_group["_enable_clip_grad"]
return group
param_groups = [update_group(g, ng) for g, ng in zip(groups, saved_groups)]
self.param_groups = param_groups
def state_dict(self):
r"""
Returns the state of the optimizer as a :class:`dict`.
It contains two entries:
* state - a dict holding current optimization state. Its content
differs between optimizer classes.
* param_group - a dict containing all parameter groups.
It almost copied from: https://pytorch.org/docs/stable/_modules/torch/optim/optimizer.html#Optimizer.state_dict
"""
# Save order indices instead of Tensors
param_mappings = {}
start_index = 0
def pack_group(group):
nonlocal start_index
packed = {k: v for k, v in group.items() if k != "_parameters"}
param_mappings.update(
{
id(p): i
for i, p in enumerate(group._parameters, start_index)
if id(p) not in param_mappings
}
)
packed["params"] = [param_mappings[id(p)] for p in group._parameters]
start_index += len(packed["params"])
return packed
param_groups = [pack_group(g) for g in self.param_groups]
# Remap state to use order indices as keys
packed_state = {
(param_mappings[id(k)] if isinstance(k, Tensor) else k): v
for k, v in self._state.items()
}
return {
"state": packed_state,
"param_groups": param_groups,
}
def step(self, closure: Union[Callable, None] = None) -> Union[Tensor, None]:
raise NotImplementedError()
def clip_grad(self):
r"""Clips gradient norm of an iterable of parameters.
The norm is computed over all gradients together, as if they were concatenated into a single vector.
You can set the max_norm and norm_type.
For more details, you can refer to the documentation of each optimizer(like Adam, SGD and so on).
You can also refer the code in :func:`oneflow.nn.utils.clip_grad_norm_`
"""
for param_group in self.param_groups:
if param_group._enable_clip_grad:
clip_grad_norm_(
param_group.parameters,
param_group["clip_grad_max_norm"],
param_group["clip_grad_norm_type"],
True,
)
else:
warnings.warn(
"To enable clip_grad, passing the `clip_grad_max_norm` and `clip_grad_norm_type` parameters when instantializing the Optimizer."
)
def zero_grad(self, set_to_none: bool = False):
"""Sets the gradients of all optimized torch.Tensor s to zero.
Args:
set_to_none (bool): instead of setting to zero, set the grads to None.
This will in general have lower memory footprint, and can modestly
improve performance. However, it changes certain behaviors.
For example:
1. When the user tries to access a gradient and perform manual ops on
it, a None attribute or a Tensor full of 0s will behave differently.
2. If the user requests zero_grad(set_to_none=True) followed by a
backward pass, grads are guaranteed to be None for params that did not
receive a gradient.
3. Optimizers have a different behavior if the gradient is 0 or None
(in one case it does the step with a gradient of 0 and in the other
it skips the step altogether).
"""
for param_group in self.param_groups:
for param in param_group.parameters:
if param.grad is not None:
if set_to_none:
param.grad = None
else:
param.grad.zeros_()
def _parse_input_parameters(self, parameters):
"""
Supports such parameters:
1. Iterator: flow.optim.SGD(module.parameters(), lr=0.1)
2. List[Dict]: flow.optim.SGD([{"params": module1.parameters()}, {"params": module2.parameters()}])
3. List[Parameter or Tensor]: flow.optim.SGD([module.weight, module.bias])
"""
if isinstance(parameters, collections.abc.Iterator):
# Iterator
self.param_groups.append(
ParamGroup({"params": list(parameters)}, self._default_options)
)
elif isinstance(parameters, collections.abc.Iterable):
# List[Dict]
if isinstance(parameters[0], dict):
for param in parameters:
assert isinstance(param, dict)
self.param_groups.append(ParamGroup(param, self._default_options))
# List[Parameter or Tensor]
else:
self.param_groups.append(
ParamGroup({"params": parameters}, self._default_options)
)
else:
raise TypeError(
f"params argument given to the optimizer should be an iterable of Tensors or dicts, but got {type(parameters)}"
)
def _generate_grad_clip_conf_for_optim_conf(self, param_group, optimizer_conf):
if param_group._enable_clip_grad:
if (
param_group["clip_grad_max_norm"] == 1.0
and param_group["clip_grad_norm_type"] == 2.0
):
optimizer_conf.mutable_clip_conf().mutable_clip_by_global_norm().set_clip_norm(
param_group["clip_grad_max_norm"]
)
else:
warnings.warn(
"For now, nn.Graph only support clip grad with `clip_grad_max_norm == 1.0` and `clip_grad_norm_type == 2.0`."
)
@property
def support_sparse(self):
return False
def _check_variables_in_graph(self, vars_conf):
for param_group in self.param_groups:
for param in param_group.parameters:
if not param.requires_grad:
continue
if param not in vars_conf:
raise ValueError(
f"Parameter <{param}> is not in the corresponding nn.Graph/nn.Module."
" Please make sure you call the module's to(..)/to_consistent(...) method first,"
" then add the module's parameters into an optimizer."
)
def _check_variables_optimizer_bound(self, vars_conf):
for param_group in self.param_groups:
for param in param_group.parameters:
if not param.requires_grad:
continue
if vars_conf[param].bound_optimizer is None:
vars_conf[param].bound_optimizer = self
elif vars_conf[param].bound_optimizer is not self:
raise ValueError(
f"<{vars_conf[param].name}> is already bound to another optimizer."
)
def _generate_indexed_slices_optimizer_conf(self, job_conf, vars_conf):
if not self.support_sparse:
raise ValueError(f"{self.__class__} does not support sparse updating.")
for param_group in self.param_groups:
for param in param_group.parameters:
if not param.requires_grad:
continue
sparse_opt_conf = job_conf.mutable_indexed_slices_optimizer_conf()
sparse_variable_op_names = sparse_opt_conf.mutable_include_op_names()
sparse_variable_op_names.add_op_name(vars_conf[param].name)
| 38.336043 | 148 | 0.600028 | import collections
import warnings
from copy import deepcopy
from itertools import chain
from typing import Any, Callable, Dict, Union
from oneflow.framework.tensor import Tensor
from oneflow.nn.graph.block import TensorBlock
from oneflow.nn.parameter import Parameter
from oneflow.nn.utils.clip_grad import clip_grad_norm_
import oneflow as flow
class ParamGroup(object):
def __init__(
self, parameters: Dict[str, Any], default_options: Dict,
):
assert isinstance(parameters, dict) and "params" in parameters
assert not isinstance(parameters["params"], (Parameter, Tensor))
self._parameters = list()
for p in parameters["params"]:
if isinstance(p, (Parameter, Tensor)):
self._parameters.append(p)
elif isinstance(p, TensorBlock):
self._parameters.append(p.origin)
else:
raise ValueError(
"parameters in ParamGroup must be Tensor or TensorBlock."
)
self._options = deepcopy(default_options)
for key in self._options:
if key in parameters:
self._options[key] = parameters[key]
self._enable_clip_grad = False
if "clip_grad_max_norm" in parameters and "clip_grad_norm_type" in parameters:
self._enable_clip_grad = True
self._options["clip_grad_max_norm"] = parameters["clip_grad_max_norm"]
self._options["clip_grad_norm_type"] = parameters["clip_grad_norm_type"]
def __getitem__(self, key):
return self._options[key]
def __setitem__(self, key, value):
self._options[key] = value
def __contains__(self, key):
return self._options.__contains__(key)
def setdefault(self, key, value):
if key not in self._options:
self._options[key] = value
def items(self):
return self.__dict__.items()
@property
def options(self):
return self._options
@property
def parameters(self):
return self._parameters
class _SourceOpOnlyResourceDependenceMode:
def __init__(self):
self.guard_ = None
def __enter__(self):
self.guard = (
flow._oneflow_internal.eager.multi_client.SourceOpOnlyResourceDependenceModeGuard()
)
def __exit__(self, *args, **kwargs):
del self.guard
def _decorate_step(step):
def decorated_step(*args, **kwargs):
with _SourceOpOnlyResourceDependenceMode():
return step(*args, **kwargs)
return decorated_step
class Optimizer(object):
def __init__(self, parameters, options):
self.param_groups = list()
self._default_options = options
self._state = dict()
self._state["step"] = 0
self._parse_input_parameters(parameters)
self.step = _decorate_step(self.step)
def add_param_group(self, param_group) -> None:
raise NotImplementedError()
def load_state_dict(self, state_dict) -> None:
groups = self.param_groups
saved_groups = state_dict["param_groups"]
if len(groups) != len(saved_groups):
raise ValueError(
"loaded state dict has a different number of parameter groups"
)
param_lens = (len(g._parameters) for g in groups)
saved_lens = (len(g["params"]) for g in saved_groups)
if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens)):
raise ValueError(
"loaded state dict contains a parameter group "
"that doesn't match the size of optimizer's group"
)
id_map = {
old_id: p
for old_id, p in zip(
chain.from_iterable((g["params"] for g in saved_groups)),
chain.from_iterable((g._parameters for g in groups)),
)
}
def cast(param, value):
if isinstance(value, Tensor):
if value.is_local:
value = value.to(param.device)
else:
value = value.to_consistent(
placement=param.placement, sbp=param.sbp
)
return value
elif isinstance(value, dict):
return {k: cast(param, v) for k, v in value.items()}
elif isinstance(value, collections.Iterable):
return type(value)(cast(param, v) for v in value)
else:
return value
state = dict()
for k, v in state_dict["state"].items():
if k in id_map:
param = id_map[k]
state[param] = cast(param, v)
else:
state[k] = v
self._state = state
def update_group(group, new_group):
group._options = deepcopy(new_group["_options"])
group._enable_clip_grad = new_group["_enable_clip_grad"]
return group
param_groups = [update_group(g, ng) for g, ng in zip(groups, saved_groups)]
self.param_groups = param_groups
def state_dict(self):
param_mappings = {}
start_index = 0
def pack_group(group):
nonlocal start_index
packed = {k: v for k, v in group.items() if k != "_parameters"}
param_mappings.update(
{
id(p): i
for i, p in enumerate(group._parameters, start_index)
if id(p) not in param_mappings
}
)
packed["params"] = [param_mappings[id(p)] for p in group._parameters]
start_index += len(packed["params"])
return packed
param_groups = [pack_group(g) for g in self.param_groups]
packed_state = {
(param_mappings[id(k)] if isinstance(k, Tensor) else k): v
for k, v in self._state.items()
}
return {
"state": packed_state,
"param_groups": param_groups,
}
def step(self, closure: Union[Callable, None] = None) -> Union[Tensor, None]:
raise NotImplementedError()
def clip_grad(self):
for param_group in self.param_groups:
if param_group._enable_clip_grad:
clip_grad_norm_(
param_group.parameters,
param_group["clip_grad_max_norm"],
param_group["clip_grad_norm_type"],
True,
)
else:
warnings.warn(
"To enable clip_grad, passing the `clip_grad_max_norm` and `clip_grad_norm_type` parameters when instantializing the Optimizer."
)
def zero_grad(self, set_to_none: bool = False):
for param_group in self.param_groups:
for param in param_group.parameters:
if param.grad is not None:
if set_to_none:
param.grad = None
else:
param.grad.zeros_()
def _parse_input_parameters(self, parameters):
if isinstance(parameters, collections.abc.Iterator):
self.param_groups.append(
ParamGroup({"params": list(parameters)}, self._default_options)
)
elif isinstance(parameters, collections.abc.Iterable):
if isinstance(parameters[0], dict):
for param in parameters:
assert isinstance(param, dict)
self.param_groups.append(ParamGroup(param, self._default_options))
else:
self.param_groups.append(
ParamGroup({"params": parameters}, self._default_options)
)
else:
raise TypeError(
f"params argument given to the optimizer should be an iterable of Tensors or dicts, but got {type(parameters)}"
)
def _generate_grad_clip_conf_for_optim_conf(self, param_group, optimizer_conf):
if param_group._enable_clip_grad:
if (
param_group["clip_grad_max_norm"] == 1.0
and param_group["clip_grad_norm_type"] == 2.0
):
optimizer_conf.mutable_clip_conf().mutable_clip_by_global_norm().set_clip_norm(
param_group["clip_grad_max_norm"]
)
else:
warnings.warn(
"For now, nn.Graph only support clip grad with `clip_grad_max_norm == 1.0` and `clip_grad_norm_type == 2.0`."
)
@property
def support_sparse(self):
return False
def _check_variables_in_graph(self, vars_conf):
for param_group in self.param_groups:
for param in param_group.parameters:
if not param.requires_grad:
continue
if param not in vars_conf:
raise ValueError(
f"Parameter <{param}> is not in the corresponding nn.Graph/nn.Module."
" Please make sure you call the module's to(..)/to_consistent(...) method first,"
" then add the module's parameters into an optimizer."
)
def _check_variables_optimizer_bound(self, vars_conf):
for param_group in self.param_groups:
for param in param_group.parameters:
if not param.requires_grad:
continue
if vars_conf[param].bound_optimizer is None:
vars_conf[param].bound_optimizer = self
elif vars_conf[param].bound_optimizer is not self:
raise ValueError(
f"<{vars_conf[param].name}> is already bound to another optimizer."
)
def _generate_indexed_slices_optimizer_conf(self, job_conf, vars_conf):
if not self.support_sparse:
raise ValueError(f"{self.__class__} does not support sparse updating.")
for param_group in self.param_groups:
for param in param_group.parameters:
if not param.requires_grad:
continue
sparse_opt_conf = job_conf.mutable_indexed_slices_optimizer_conf()
sparse_variable_op_names = sparse_opt_conf.mutable_include_op_names()
sparse_variable_op_names.add_op_name(vars_conf[param].name)
| true | true |
f71c971bf4dd805103974078d53aae515b91c0a1 | 1,361 | py | Python | petastorm/cache.py | cclauss/petastorm | 12fc6542005c6dc7c99997604b939536cca79fa9 | [
"Apache-2.0"
] | 1 | 2018-09-25T10:59:29.000Z | 2018-09-25T10:59:29.000Z | petastorm/cache.py | cclauss/petastorm | 12fc6542005c6dc7c99997604b939536cca79fa9 | [
"Apache-2.0"
] | null | null | null | petastorm/cache.py | cclauss/petastorm | 12fc6542005c6dc7c99997604b939536cca79fa9 | [
"Apache-2.0"
] | 1 | 2018-09-25T10:59:32.000Z | 2018-09-25T10:59:32.000Z | # Copyright (c) 2017-2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class CacheBase(object):
    """Abstract interface for a key/value cache of row data."""

    @abc.abstractmethod
    def get(self, key, fill_cache_func):
        """Look up ``key``; on a cache miss, call ``fill_cache_func()`` for the value.

        :param key: A key identifying cache entry
        :param fill_cache_func: Zero-argument callable whose result populates
            the cache when no value is present for ``key``.
        :return: A value from cache
        """
class NullCache(CacheBase):
    """A pass-through cache: every lookup misses, so the value function runs each call."""

    def get(self, key, fill_cache_func):
        # Nothing is ever stored -- just evaluate and return.
        return fill_cache_func()
| 33.195122 | 112 | 0.709772 |
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class CacheBase(object):
@abc.abstractmethod
def get(self, key, fill_cache_func):
pass
class NullCache(CacheBase):
def get(self, key, fill_cache_func):
return fill_cache_func()
| true | true |
f71c98221a39db59c80de17a016146f0be85cd00 | 6,266 | py | Python | nicos_mlz/mira/devices/stargate.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | nicos_mlz/mira/devices/stargate.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | nicos_mlz/mira/devices/stargate.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | # -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2022 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Tobias Weber <tweber@frm2.tum.de>
#
# *****************************************************************************
"""Mira-Stargate.
This is the shielding of the analyzer with 11 blocks. The att axis does not
move any elements under the blocks, so we can move to a new block state at
any time (in this implementation, before starting the axis).
Only 0, 1 or 2 blocks may be opened at a time. The first and last block should
not be opened since they are stationary.
The blocks are controlled via a Festo valve arrangement of 11 stable valves
represented by two bits that can be moved into open (01) or closed (10)
positions.
Festo uses Modbus, and the 22 needed output bits are distributed in the lower 8
bits of three consecutive 16-bit holding registers (offset_out). Readback is
done in three different holding registers with addresses n, n+2, n+4.
"""
from time import monotonic
from nicos.core import SIMULATION, Attach, InvalidValueError, Param, listof, \
status
from nicos.devices import entangle
from nicos_mlz.mira.devices.axis import HoveringAxis
class Stargate(entangle.DigitalOutput):
    """Device for controlling the MIRA-Stargate blocks.

    The 11 shielding blocks ("chevrons") are driven by Festo valves; each
    block is a two-bit field where 01 = open and 10 = closed.  Four blocks
    are packed per 16-bit holding register, three registers in total.
    """
    # Device values are lists of 11 ints (0 = closed, 1 = open).
    valuetype = listof(int)
    parameters = {
        'offset_in': Param('Offset of digital input values',
                           type=int, mandatory=True),
        'offset_out': Param('Offset of digital output values',
                            type=int, mandatory=True),
        'chevron_att_angles': Param('att angle for shielding elements',
                                    type=listof(listof(int)),
                                    mandatory=True),
    }
    # monotonic() timestamp of the last doStart(); 0 means "never started".
    _started = 0
    def doRead(self, maxage=0):
        # Readback registers sit at offsets n, n+2, n+4 (see module
        # docstring), so only words 0, 2 and 4 of the 5 read are used.
        words = self._dev.ReadOutputWords([self.offset_in, 5])
        bitvals = [words[0], words[2], words[4]]
        chevrons = []
        for bitval in bitvals:
            # Each register packs 4 chevrons, 2 bits each; 01 means "open".
            for _ in range(4):
                chevrons.append(int(bitval & 0b11 == 0b01))
                bitval >>= 2
        # Three registers decode to 12 entries but only 11 chevrons exist.
        return chevrons[:11]
    def doStatus(self, maxage=0):
        # Report BUSY for a fixed 3 seconds after a start; the valves give
        # no explicit "movement finished" feedback here.
        if self._started and self._started + 3 > monotonic():
            return status.BUSY, 'moving/waiting'
        return status.OK, ''
    def doStart(self, target):
        # Encode the 11-element open/closed list into three registers:
        # bit pair (1 << bitidx) = open, (1 << bitidx+1) = closed.
        bitvals = [0, 0, 0]
        for curidx in range(len(target)):
            curval = target[curidx]
            byteidx = curidx // 4
            bitidx = (curidx % 4) * 2
            if curval:
                bitvals[byteidx] |= (1 << bitidx)
            else:
                bitvals[byteidx] |= (1 << (bitidx+1))
        self._dev.WriteOutputWords([self.offset_out] + bitvals)
        self._started = monotonic()
    def doIsAllowed(self, value):
        """Permit all-open, or at most 2 open chevrons excluding the fixed ends."""
        if len(value) != 11:
            raise InvalidValueError(self, 'list must have 11 entries')
        # map everything to 0 or 1
        value = [bool(v) for v in value]
        # check allowed positions
        if value == [True] * 11:
            # open everything is allowed
            return True, ''
        if sum(value) > 2:
            return False, 'cannot open more than 2 chevrons'
        if value[0] or value[10]:
            return False, 'cannot open first or last chevron'
        return True, ''
    def doReadFmtstr(self):
        # Display format: 11 comma-separated integers in brackets.
        return '[' + ', '.join(['%d'] * 11) + ']'
    def get_chevrons_for_att(self, att):
        """Return the 0/1 open pattern required for the given att angle.

        A chevron opens when att lies strictly between its configured
        (max, min) pair; entries with fewer than two values stay closed.
        """
        chevrons = []
        for curidx in range(len(self.chevron_att_angles)):
            maxmin = self.chevron_att_angles[curidx]
            if len(maxmin) < 2:
                chevrons.append(0)
                continue
            if maxmin[1] < att < maxmin[0]:
                chevrons.append(1)
            else:
                chevrons.append(0)
        return chevrons
class ATT(HoveringAxis):
    """Analyzer two-theta axis that opens the matching Stargate blocks."""
    attached_devices = {
        'stargate': Attach('stargate switch device', Stargate),
    }
    parameters = {
        'movestargate': Param('Whether to move the stargate with the axis',
                              type=bool, settable=True, default=True),
    }
    def _move_stargate(self):
        # Open exactly the chevrons required for the axis target position.
        if self.movestargate:
            self._attached_stargate.start(
                self._attached_stargate.get_chevrons_for_att(self.target))
        else:
            self.log.warning('moving stargate blocks is disabled')
    def _preMoveAction(self):
        # Reposition the shielding before the axis itself starts to move.
        self._move_stargate()
        HoveringAxis._preMoveAction(self)
    def doStart(self, target):
        # Since the _preMoveAction is not executed in simulation mode,
        # we have to move the stargate here too.
        if self._mode == SIMULATION:
            self._move_stargate()
        HoveringAxis.doStart(self, target)
    def doStatus(self, maxage=0):
        # With stargate coupling disabled, status is just the axis status.
        if not self.movestargate:
            return HoveringAxis.doStatus(self, maxage)
        sgstat = self._attached_stargate.status(maxage)
        if sgstat[0] == status.BUSY:
            return status.BUSY, 'stargate moving'
        axstat = HoveringAxis.doStatus(self, maxage)
        if axstat[0] == status.BUSY:
            return axstat
        # Both idle: cross-check that the open chevrons match the actual
        # axis position, otherwise flag an inconsistent shielding state.
        axvalue = HoveringAxis.doRead(self, maxage)
        chevrons = list(self._attached_stargate.read(maxage))
        if chevrons != self._attached_stargate.get_chevrons_for_att(axvalue):
            return status.ERROR, 'invalid stargate position for att angle'
        return axstat
| 34.811111 | 79 | 0.608682 |
from time import monotonic
from nicos.core import SIMULATION, Attach, InvalidValueError, Param, listof, \
status
from nicos.devices import entangle
from nicos_mlz.mira.devices.axis import HoveringAxis
class Stargate(entangle.DigitalOutput):
valuetype = listof(int)
parameters = {
'offset_in': Param('Offset of digital input values',
type=int, mandatory=True),
'offset_out': Param('Offset of digital output values',
type=int, mandatory=True),
'chevron_att_angles': Param('att angle for shielding elements',
type=listof(listof(int)),
mandatory=True),
}
_started = 0
def doRead(self, maxage=0):
words = self._dev.ReadOutputWords([self.offset_in, 5])
bitvals = [words[0], words[2], words[4]]
chevrons = []
for bitval in bitvals:
for _ in range(4):
chevrons.append(int(bitval & 0b11 == 0b01))
bitval >>= 2
return chevrons[:11]
def doStatus(self, maxage=0):
if self._started and self._started + 3 > monotonic():
return status.BUSY, 'moving/waiting'
return status.OK, ''
def doStart(self, target):
bitvals = [0, 0, 0]
for curidx in range(len(target)):
curval = target[curidx]
byteidx = curidx // 4
bitidx = (curidx % 4) * 2
if curval:
bitvals[byteidx] |= (1 << bitidx)
else:
bitvals[byteidx] |= (1 << (bitidx+1))
self._dev.WriteOutputWords([self.offset_out] + bitvals)
self._started = monotonic()
def doIsAllowed(self, value):
if len(value) != 11:
raise InvalidValueError(self, 'list must have 11 entries')
value = [bool(v) for v in value]
if value == [True] * 11:
return True, ''
if sum(value) > 2:
return False, 'cannot open more than 2 chevrons'
if value[0] or value[10]:
return False, 'cannot open first or last chevron'
return True, ''
def doReadFmtstr(self):
return '[' + ', '.join(['%d'] * 11) + ']'
def get_chevrons_for_att(self, att):
chevrons = []
for curidx in range(len(self.chevron_att_angles)):
maxmin = self.chevron_att_angles[curidx]
if len(maxmin) < 2:
chevrons.append(0)
continue
if maxmin[1] < att < maxmin[0]:
chevrons.append(1)
else:
chevrons.append(0)
return chevrons
class ATT(HoveringAxis):
attached_devices = {
'stargate': Attach('stargate switch device', Stargate),
}
parameters = {
'movestargate': Param('Whether to move the stargate with the axis',
type=bool, settable=True, default=True),
}
def _move_stargate(self):
if self.movestargate:
self._attached_stargate.start(
self._attached_stargate.get_chevrons_for_att(self.target))
else:
self.log.warning('moving stargate blocks is disabled')
def _preMoveAction(self):
self._move_stargate()
HoveringAxis._preMoveAction(self)
def doStart(self, target):
if self._mode == SIMULATION:
self._move_stargate()
HoveringAxis.doStart(self, target)
def doStatus(self, maxage=0):
if not self.movestargate:
return HoveringAxis.doStatus(self, maxage)
sgstat = self._attached_stargate.status(maxage)
if sgstat[0] == status.BUSY:
return status.BUSY, 'stargate moving'
axstat = HoveringAxis.doStatus(self, maxage)
if axstat[0] == status.BUSY:
return axstat
axvalue = HoveringAxis.doRead(self, maxage)
chevrons = list(self._attached_stargate.read(maxage))
if chevrons != self._attached_stargate.get_chevrons_for_att(axvalue):
return status.ERROR, 'invalid stargate position for att angle'
return axstat
| true | true |
f71c98c738d67bea14753699412d0bb3f45ce1c4 | 237 | py | Python | jina/types/arrays/__init__.py | slettner/jina | 4140961c62359e3acd540a6d88931665c6313824 | [
"Apache-2.0"
] | null | null | null | jina/types/arrays/__init__.py | slettner/jina | 4140961c62359e3acd540a6d88931665c6313824 | [
"Apache-2.0"
] | null | null | null | jina/types/arrays/__init__.py | slettner/jina | 4140961c62359e3acd540a6d88931665c6313824 | [
"Apache-2.0"
] | null | null | null | __copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from .document import DocumentArray
from .querylang import QueryLangArray
from .chunk import ChunkArray
from .match import MatchArray
| 29.625 | 74 | 0.801688 | __copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
from .document import DocumentArray
from .querylang import QueryLangArray
from .chunk import ChunkArray
from .match import MatchArray
| true | true |
f71c9a76519602baf175d90363655dc76c65ea28 | 512 | py | Python | MobileRevelator/python/postbank_finanzassistent_decrypt.py | ohunecker/MR | b0c93436c7964d87a0b8154f8b7662b1731124b9 | [
"MIT"
] | 98 | 2019-02-03T22:50:24.000Z | 2022-03-17T12:50:56.000Z | MobileRevelator/python/postbank_finanzassistent_decrypt.py | cewatkins/MR | 5ba553fd0eb4c1d80842074a553119486f005822 | [
"MIT"
] | 10 | 2019-03-14T20:12:10.000Z | 2020-05-23T10:37:54.000Z | MobileRevelator/python/postbank_finanzassistent_decrypt.py | cewatkins/MR | 5ba553fd0eb4c1d80842074a553119486f005822 | [
"MIT"
] | 30 | 2019-02-03T22:50:27.000Z | 2022-03-30T12:37:30.000Z | #Filename="finanzassistent"
#Type=Prerun
import os
def main():
    """Decrypt the Postbank Finanzassistent database using its fixed key."""
    ctx.gui_setMainLabel("Postbank Finanzassistent: Extracting key")
    # Hard-coded sqlcipher key used by the app for this database.
    dbkey = "73839EC3A528910B235859947CC8424543D7B686"
    ctx.gui_setMainLabel("Postbank: Key extracted: " + dbkey)
    if ctx.fs_sqlcipher_decrypt(filename, filename + ".dec", dbkey):
        return "Postbank Finanzassistent: Decryption of database successful."
    # Wrong key / failed decryption: report nothing to the caller.
    return ""
import os
def main():
ctx.gui_setMainLabel("Postbank Finanzassistent: Extracting key");
error=""
dbkey="73839EC3A528910B235859947CC8424543D7B686"
ctx.gui_setMainLabel("Postbank: Key extracted: " + dbkey)
if not (ctx.fs_sqlcipher_decrypt(filename, filename + ".dec", dbkey)):
error="Error: Wrong key for decryption."
if (error==""):
return "Postbank Finanzassistent: Decryption of database successful."
return "" | true | true |
f71c9ac104ae461bd523cc38b814d19111b44e47 | 1,166 | py | Python | google/ads/googleads/v10/enums/types/feed_item_target_device.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v10/enums/types/feed_item_target_device.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v10/enums/types/feed_item_target_device.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v10.enums",
marshal="google.ads.googleads.v10",
manifest={"FeedItemTargetDeviceEnum",},
)
class FeedItemTargetDeviceEnum(proto.Message):
    r"""Container for enum describing possible data types for a feed
    item target device.
    """
    class FeedItemTargetDevice(proto.Enum):
        r"""Possible data types for a feed item target device.

        0 is the proto default (unspecified), 1 is the catch-all
        unknown value, 2 targets mobile devices.
        """
        UNSPECIFIED = 0
        UNKNOWN = 1
        MOBILE = 2
# Export exactly the names declared in the generated proto manifest.
__all__ = tuple(sorted(__protobuf__.manifest))
| 29.15 | 74 | 0.716123 |
import proto
__protobuf__ = proto.module(
package="google.ads.googleads.v10.enums",
marshal="google.ads.googleads.v10",
manifest={"FeedItemTargetDeviceEnum",},
)
class FeedItemTargetDeviceEnum(proto.Message):
class FeedItemTargetDevice(proto.Enum):
UNSPECIFIED = 0
UNKNOWN = 1
MOBILE = 2
__all__ = tuple(sorted(__protobuf__.manifest))
| true | true |
f71c9b79db447996719fed63c8fac35684923c7b | 3,915 | py | Python | nova/scheduler/ironic_host_manager.py | gabriel-samfira/nova | 5ef07cc04dbf0216452ae358e57d9ddac51f1803 | [
"Apache-2.0"
] | null | null | null | nova/scheduler/ironic_host_manager.py | gabriel-samfira/nova | 5ef07cc04dbf0216452ae358e57d9ddac51f1803 | [
"Apache-2.0"
] | null | null | null | nova/scheduler/ironic_host_manager.py | gabriel-samfira/nova | 5ef07cc04dbf0216452ae358e57d9ddac51f1803 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright (c) 2011-2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Ironic host manager.
This host manager will consume all cpu's, disk space, and
ram from a host / node as it is supporting Baremetal hosts, which can not be
subdivided into multiple instances.
"""
from oslo.config import cfg
from oslo.serialization import jsonutils
from oslo.utils import timeutils
from nova.openstack.common import log as logging
import nova.scheduler.base_baremetal_host_manager as bbhm
from nova.scheduler import host_manager
host_manager_opts = [
cfg.ListOpt('baremetal_scheduler_default_filters',
default=[
'RetryFilter',
'AvailabilityZoneFilter',
'ComputeFilter',
'ComputeCapabilitiesFilter',
'ImagePropertiesFilter',
'ExactRamFilter',
'ExactDiskFilter',
'ExactCoreFilter',
],
help='Which filter class names to use for filtering '
'baremetal hosts when not specified in the request.'),
cfg.BoolOpt('scheduler_use_baremetal_filters',
default=False,
help='Flag to decide whether to use '
'baremetal_scheduler_default_filters or not.'),
]
CONF = cfg.CONF
CONF.register_opts(host_manager_opts)
LOG = logging.getLogger(__name__)
class IronicNodeState(bbhm.BaseBaremetalNodeState):
    """Mutable and immutable information tracked for a host.

    Extends the baremetal node state with hypervisor details pulled
    straight from the compute_node record.
    """
    def update_from_compute_node(self, compute):
        """Refresh this node's view from its compute_node record."""
        super(IronicNodeState, self).update_from_compute_node(compute)
        self.total_usable_disk_gb = compute['local_gb']
        # Optional hypervisor metadata; absent keys simply become None.
        for attr in ('hypervisor_type', 'hypervisor_version',
                     'hypervisor_hostname', 'cpu_info'):
            setattr(self, attr, compute.get(attr))
        supported = compute.get('supported_instances')
        if supported:
            self.supported_instances = jsonutils.loads(supported)
        self.updated = compute['updated_at']
    def consume_from_instance(self, instance):
        """Consume the node's entire resources regardless of instance request."""
        super(IronicNodeState, self).consume_from_instance(instance)
        self.updated = timeutils.utcnow()
class IronicHostManager(bbhm.BaseBaremetalHostManager):
    """Ironic HostManager class."""
    def __init__(self):
        super(IronicHostManager, self).__init__()
        # When baremetal filtering is enabled, install the baremetal
        # filter list as the scheduler's default filters.
        if CONF.scheduler_use_baremetal_filters:
            CONF.scheduler_default_filters = (
                CONF.baremetal_scheduler_default_filters)
    def host_state_cls(self, host, node, **kwargs):
        """Factory for host states: Ironic nodes get an IronicNodeState."""
        compute = kwargs.get('compute')
        if compute and compute.get('cpu_info') == 'baremetal cpu':
            state_cls = IronicNodeState
        else:
            state_cls = host_manager.HostState
        return state_cls(host, node, **kwargs)
| 38.382353 | 78 | 0.676373 |
from oslo.config import cfg
from oslo.serialization import jsonutils
from oslo.utils import timeutils
from nova.openstack.common import log as logging
import nova.scheduler.base_baremetal_host_manager as bbhm
from nova.scheduler import host_manager
host_manager_opts = [
cfg.ListOpt('baremetal_scheduler_default_filters',
default=[
'RetryFilter',
'AvailabilityZoneFilter',
'ComputeFilter',
'ComputeCapabilitiesFilter',
'ImagePropertiesFilter',
'ExactRamFilter',
'ExactDiskFilter',
'ExactCoreFilter',
],
help='Which filter class names to use for filtering '
'baremetal hosts when not specified in the request.'),
cfg.BoolOpt('scheduler_use_baremetal_filters',
default=False,
help='Flag to decide whether to use '
'baremetal_scheduler_default_filters or not.'),
]
CONF = cfg.CONF
CONF.register_opts(host_manager_opts)
LOG = logging.getLogger(__name__)
class IronicNodeState(bbhm.BaseBaremetalNodeState):
def update_from_compute_node(self, compute):
super(IronicNodeState, self).update_from_compute_node(compute)
self.total_usable_disk_gb = compute['local_gb']
self.hypervisor_type = compute.get('hypervisor_type')
self.hypervisor_version = compute.get('hypervisor_version')
self.hypervisor_hostname = compute.get('hypervisor_hostname')
self.cpu_info = compute.get('cpu_info')
if compute.get('supported_instances'):
self.supported_instances = jsonutils.loads(
compute.get('supported_instances'))
self.updated = compute['updated_at']
def consume_from_instance(self, instance):
super(IronicNodeState, self).consume_from_instance(instance)
self.updated = timeutils.utcnow()
class IronicHostManager(bbhm.BaseBaremetalHostManager):
def __init__(self):
super(IronicHostManager, self).__init__()
if CONF.scheduler_use_baremetal_filters:
baremetal_default = CONF.baremetal_scheduler_default_filters
CONF.scheduler_default_filters = baremetal_default
def host_state_cls(self, host, node, **kwargs):
compute = kwargs.get('compute')
if compute and compute.get('cpu_info') == 'baremetal cpu':
return IronicNodeState(host, node, **kwargs)
else:
return host_manager.HostState(host, node, **kwargs)
| true | true |
f71c9c06c32c6896b3720e78b40f52d65c87972e | 2,059 | py | Python | keystone_reports.py | mheler/copystack | 11a30e1ca968208a96fa80f3afad8dc067342e44 | [
"Apache-2.0"
] | 4 | 2017-07-26T23:01:10.000Z | 2019-04-25T18:37:19.000Z | keystone_reports.py | mheler/copystack | 11a30e1ca968208a96fa80f3afad8dc067342e44 | [
"Apache-2.0"
] | 2 | 2017-11-02T10:50:04.000Z | 2021-12-13T19:43:54.000Z | keystone_reports.py | mheler/copystack | 11a30e1ca968208a96fa80f3afad8dc067342e44 | [
"Apache-2.0"
] | 6 | 2017-02-15T17:58:35.000Z | 2018-12-03T18:00:47.000Z | #!/usr/bin/env python
# Copyright (c) 2019 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import argparse
import optparse
import keystone_common
import neutron_common
import nova_common
import glance_common
import cinder_common
from auth_stack2 import AuthStack
def main(opts, args):
    # NOTE: this module uses Python 2 print statements throughout.
    # Show which cloud we copy from and which we copy to.
    auth = AuthStack()
    print "From:", auth.from_auth_ip, " Username:", auth.from_username, " Project:", auth.from_tenant_name
    print "To: ", auth.to_auth_ip, " Username:", auth.to_username, " Project:", auth.to_tenant_name
    # Each report is opt-in via the command-line flags parsed below.
    if opts.fromusers:
        print "\n--------------- From Users: ------------------------"
        keystone_common.print_user_names('from')
    if opts.tousers:
        print "\n--------------- To Users: ------------------------"
        keystone_common.print_user_names('to')
    if opts.fromprojects:
        keystone_common.print_projects('from')
    if opts.toprojects:
        keystone_common.print_projects('to')
if __name__ == "__main__":
    # CLI entry point.  optparse is deprecated in Python 3 (argparse is
    # also imported above) but is kept here for this Python 2 script.
    parser = optparse.OptionParser()
    parser.add_option("-f", "--fromusers", action='store_true', dest='fromusers', help='Print FROM users')
    parser.add_option("-t", "--tousers", action='store_true', dest='tousers', help='Print TO users')
    parser.add_option("-p", "--fromprojects", action='store_true', dest='fromprojects', help='Print FROM projects (or tenants)')
    parser.add_option("-r", "--toprojects", action='store_true', dest='toprojects', help='Print TO projects (or tenants)')
    (opts, args) = parser.parse_args()
    main(opts, args)
| 37.436364 | 128 | 0.687227 |
import json
import argparse
import optparse
import keystone_common
import neutron_common
import nova_common
import glance_common
import cinder_common
from auth_stack2 import AuthStack
def main(opts, args):
auth = AuthStack()
print "From:", auth.from_auth_ip, " Username:", auth.from_username, " Project:", auth.from_tenant_name
print "To: ", auth.to_auth_ip, " Username:", auth.to_username, " Project:", auth.to_tenant_name
if opts.fromusers:
print "\n--------------- From Users: ------------------------"
keystone_common.print_user_names('from')
if opts.tousers:
print "\n--------------- To Users: ------------------------"
keystone_common.print_user_names('to')
if opts.fromprojects:
keystone_common.print_projects('from')
if opts.toprojects:
keystone_common.print_projects('to')
if __name__ == "__main__":
parser = optparse.OptionParser()
parser.add_option("-f", "--fromusers", action='store_true', dest='fromusers', help='Print FROM users')
parser.add_option("-t", "--tousers", action='store_true', dest='tousers', help='Print TO users')
parser.add_option("-p", "--fromprojects", action='store_true', dest='fromprojects', help='Print FROM projects (or tenants)')
parser.add_option("-r", "--toprojects", action='store_true', dest='toprojects', help='Print TO projects (or tenants)')
(opts, args) = parser.parse_args()
main(opts, args)
| false | true |
f71c9cd673a863c06787408e99e849774b777b45 | 931 | py | Python | main.py | flatman123/device_auto_config_v0.0.1 | b6335e07735f937089c528130c4b50a6bd32641d | [
"MIT"
] | null | null | null | main.py | flatman123/device_auto_config_v0.0.1 | b6335e07735f937089c528130c4b50a6bd32641d | [
"MIT"
] | null | null | null | main.py | flatman123/device_auto_config_v0.0.1 | b6335e07735f937089c528130c4b50a6bd32641d | [
"MIT"
] | 1 | 2020-10-09T14:43:21.000Z | 2020-10-09T14:43:21.000Z | from decrypt_file import decrypt
from get_commands import fetch_commands
import netmiko
import os
import concurrent.futures
hosts = decrypt(f'{os.getcwd()}/device_json.gpg')
def send_commands(connection, host, commands):
    """Push *commands* as a configuration set over the open *connection*.

    *host* is accepted for call-site symmetry but is not used here.
    """
    connection.send_config_set(commands)
def run(ip_address):
    """Apply every configured device's command set over SSH to *ip_address*."""
    # NOTE(review): every entry in `hosts` is replayed against the single
    # `ip_address` argument -- only credentials, device_type and command
    # file vary per entry.  Verify this is intended rather than using each
    # host's own address (hosts[device][2]).
    for device in hosts:
        # hosts[device] layout (from the decrypted JSON): [0] username,
        # [1] password, [2] address, [-2] netmiko device_type,
        # [-1] command-file identifier -- presumed; confirm against the
        # encrypted device_json contents.
        device_info = {
            "username": hosts[device][0],
            "port": 22,
            "device_type": hosts[device][-2],
            "host": ip_address,
            "verbose": True,
            "password": hosts[device][1]
        }
        connect = netmiko.ConnectHandler(**device_info)
        commands = fetch_commands(hosts[device][-1])
        send_commands(connect, device_info['host'], commands)
    return
if __name__ == '__main__':
    # Fan the per-host runs out over a thread pool; the with-block waits
    # for all submitted work to finish before the script exits.
    with concurrent.futures.ThreadPoolExecutor() as executor:
        # Index 2 of each record holds the device's address.
        host_addresses = [hosts[ip][2] for ip in hosts]
        executor.map(run, host_addresses)
| 26.6 | 61 | 0.654135 | from decrypt_file import decrypt
from get_commands import fetch_commands
import netmiko
import os
import concurrent.futures
hosts = decrypt(f'{os.getcwd()}/device_json.gpg')
def send_commands(connection, host, commands):
connection.send_config_set(commands)
return
def run(ip_address):
for device in hosts:
device_info = {
"username": hosts[device][0],
"port": 22,
"device_type": hosts[device][-2],
"host": ip_address,
"verbose": True,
"password": hosts[device][1]
}
connect = netmiko.ConnectHandler(**device_info)
commands = fetch_commands(hosts[device][-1])
send_commands(connect, device_info['host'], commands)
return
if __name__ == '__main__':
with concurrent.futures.ThreadPoolExecutor() as executor:
host_addresses = [hosts[ip][2] for ip in hosts]
executor.map(run, host_addresses)
| true | true |
f71c9dde7d847171940268a4386ef04e1c81c1ea | 20,567 | py | Python | tmmPCECalc.py | NREL/PVwindow | df7091c9d1ebd280aca53c50015e3b1ee7a3183e | [
"BSD-2-Clause"
] | null | null | null | tmmPCECalc.py | NREL/PVwindow | df7091c9d1ebd280aca53c50015e3b1ee7a3183e | [
"BSD-2-Clause"
] | null | null | null | tmmPCECalc.py | NREL/PVwindow | df7091c9d1ebd280aca53c50015e3b1ee7a3183e | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 4 12:29:21 2021
@author: aduell
"""
#import numpy as np
from numpy import pi, linspace, array, exp
#import tmm
from tmm import inc_tmm, inc_absorp_in_each_layer, inf
#import pandas as pd
#import tmm_vw as tmm
#import matplotlib.pyplot as plt
from matplotlib.pyplot import plot,figure,xlabel,ylabel,show,ylim,legend
from wpv import Layer, Stack
#import scipy.interpolate, scipy.integrate, pandas, sys
from scipy.interpolate import interp1d
from scipy.integrate import quad, trapz
from scipy.optimize import fsolve#, Bounds
import scipy.optimize
from pandas import read_excel
import sys
#import scipy
#from numericalunits import W, K, nm, m, cm, s, eV, meV, V, mA, c0, hPlanck, kB, e, A, ohm
#import sympy
#import sympy.solvers.solvers
assert sys.version_info >= (3,6), 'Requires Python 3.6+'
from pvlib.pvsystem import singlediode
import tmmPVColor as pvc
import CalculateVLTFromSpectrum as cvs
from CalculateVLTFromSpectrum import AM15G, cieplf
import vegas
# This whole thing uses microns for length
'''We determine the incident angle of the sun shining on the cell. Input is in degrees'''
def giveincangle(angle):
    """Convert an incidence angle given in degrees to radians."""
    return angle * (pi / 180)
inc_angle = giveincangle(0)  # default: normal incidence (radians)
'''We determine the size and scaling of the photon wavelength scale. Units are um'''
num_lams = 500
lams = linspace(0.3,2.5,num=num_lams) #um
'''We are constants and help control units'''
q = 1.602176634e-19 #coulombs. elementary charge
c0 = 299792458 #m/s #Speed of light
hPlanck = 6.62607015e-34 #J*s 4.135667516e-15 #eV*s
kB = 1.380649e-23 #J/K 8.61733034e-5 #eV/K
'''Some units and terms'''
'''Tcell, Ti, To are cell temperature, inside temp and outside temp. Always in kelvin'''
'''Ui and Uo are overall heat-transfer coefficient ofr in side and outside. W/(m**2 *K)'''
'''AbsorberLayer is a number indicating the photoactive layer. If the fourth layer is the PV layer, input is 4'''
''''Rs is series resistance, Rsh is shunt resistance in ohms. See pveducation.org for more info'''
'''eta is the electron-hole pair extraction efficiency term. eta times all absorbed light in the PV layer gives the EQE'''
'''n = diode ideality factor. Used in singlediode equation
Ns = number of cells in series. Used in singlediode equation'''
'''Rtot is total thermal resistance of the window'''
'''We are all the different materials currently available
Thickness is in microns'''
# --- Layer factories -------------------------------------------------------
# Each factory returns a wpv.Layer for one material; Thickness is in microns
# and the string names the n/k data set.  The last argument is the coherence
# flag passed through to tmm.inc_tmm: 'i' = incoherent (thick) layer,
# 'c' = coherent (thin film).
def Glass(Thickness = 6000):
    return Layer(Thickness,'nkLowFeGlass','i')
def TiO2(Thickness = 0.050):
    return Layer(Thickness,'nkTiO2','c')
def FTO(Thickness = 0.250):
    return Layer(Thickness,'nkFTO','c')
def MAPI(Thickness = 0.130):
    return Layer(Thickness,'nkMAPI','c')
def AZO(Thickness = 0.200):
    return Layer(Thickness,'nkAZO','c')
def ITO(Thickness = 0.200):
    return Layer(Thickness,'nkITO','c')
def ITOlowE(Thickness = 0.075):
    return Layer(Thickness,'nkITO','c')
def SnO2(Thickness = 0.05):
    return Layer(Thickness,'nkSnO2','c')
def SnO2lowE(Thickness = 0.030):
    return Layer(Thickness,'nkSnO2','c')
def SnO2lowEfat(Thickness = 0.050):
    return Layer(Thickness,'nkSnO2','c')
def SiO2(Thickness = 0.024):
    return Layer(Thickness,'nkSiO2','c')
def NiO(Thickness = 0.050):
    return Layer(Thickness,'nkNiO','c')
def Ag(Thickness = 0.015):
    return Layer(Thickness,'nkAg','c')
def TiO2lowE(Thickness = 0.030):
    return Layer(Thickness,'nkTiO2','c')
def TiO2lowEfat(Thickness = 0.060):
    return Layer(Thickness,'nkTiO2','c')
def Bleach(Thickness = 0.370):
    return Layer(Thickness,'nkBleach','c')
def ClAlPc(Thickness = 0.300):
    return Layer(Thickness,'nkClAlPc','c')
def C60(Thickness = 0.200):
    return Layer(Thickness,'nkC60','c')
def IR(Thickness = 0.060):
    return Layer(Thickness,'nkPTB7_ThIEICO_4F','c')
def MAPBr(Thickness = 0.500):
    return Layer(Thickness,'nkMAPbBr3','c')
def EVA(Thickness = 3000):
    return Layer(Thickness,'nkEVA','i')
'''We are boundary conditions corresponding to each material type
Can be changed to tune optimization range'''
GlassBound = (5999,6001)
TiO2Bound = (0.025,.1)
FTOBound = (0.1,0.5)
MAPIBound = (.06,.260)
AZOBound = (.1,.4)
ITOBound = (.1,.4)
ITOlowEBound = (0.03,.15)
SnO2Bound = (.025,.1)
SnO2lowEBound = (.015,.06)
SnO2lowEfatBound = (0.025,.1)
SiO2Bound = (.012,.05)
NiOBound = (.025,.1)
AgBound = (.0149, .0151)
TiO2lowEBound = (.015, .070)
TiO2lowEfatBound = (.03,.12)
BleachBound = (.180, .500)
ClAlPcBound = (.150, .600)
C60Bound = (.100,.400)
IRBound = (.030, .12)
MAPBrBound = (.250,1)
EVABound = (2999,3001)
'''I assemble a list of layer objects using Thicknesses and Materials'''
def GiveLayers(Thickness,Materials):
    """Pair each material factory with its thickness and build the Layer list.

    Raises ValueError when the two lists differ in length.
    """
    if len(Materials) != len(Thickness):
        raise ValueError ('layers and Thickness lengths do not match')
    return [material(thick) for material, thick in zip(Materials, Thickness)]
'''I give a list of boundaries from a list of materials. Dict is a dictionary containing the boundary conditions
All items in the dicitonary are labelled as 'Material'+'Bound' '''
'''
def GiveBounds(Materials, DictBound):
x = len(Materials)
Bounds = []
for i in range(x):
Bounds.append(DictBound[Materials[i].__name__ + 'Bound'])
Bounds = array(Bounds)
return Bounds
'''
'''I produce a Bounds object that defines the boundary conditions for optimization
The version above can be used to produce a list of bounds rather than an object'''
def GiveBounds(Materials, DictBound):
    """Build a scipy.optimize.Bounds object for optimization.

    Each material's (lower, upper) thickness pair is looked up in
    *DictBound* under the key '<MaterialName>Bound'.
    """
    lb = [DictBound[mat.__name__ + 'Bound'][0] for mat in Materials]
    ub = [DictBound[mat.__name__ + 'Bound'][1] for mat in Materials]
    return scipy.optimize.Bounds(lb, ub)
'''I give a list of thicknesses from a list of materials. Dict is a dictionary containing the thickness values
All items in the dicitonary are labelled as 'Material'+'Th' '''
def GiveThicks(Materials, DictTh):
    """Look up each material's thickness in *DictTh* under '<MaterialName>Th'."""
    return [DictTh[mat.__name__ + 'Th'] for mat in Materials]
'''Calculates Spectra Based on the layers of the cell
AbsorberLayer is an integer giving the position of the PV layer in the stack. Currently supports 1 PV layer'''
def Spectra(layers, AbsorberLayer):
    """Compute spectral T/R/A of the stack via incoherent TMM.

    Parameters
    ----------
    layers : list of Layer
        The full window stack, outermost layer first.
    AbsorberLayer : int
        Index of the PV layer in tmm's per-layer absorption list.  Index 0
        is the semi-infinite incidence medium, so the first physical layer
        is 1 — confirm against callers.

    Returns
    -------
    dict of arrays over the module-level wavelength grid ``lams``:
        'AbsByAbsorbers' (PV-layer absorptance), 'Ts' (transmittance),
        'Rfs' (front reflectance), 'Rbs' (back reflectance),
        'As' (total absorptance = 1 - T - Rf),
        'Total' (sanity check Ts + Rfs + As; should be ~1 everywhere).
    """
    thicks = [inf]
    iorcs = ['i']
    for layer in layers:
        thicks.append(layer.d)
        iorcs.append(layer.i_or_c)
    thicks.append(inf)
    iorcs.append('i')
    # Reversed copies of the stack are used for the back-side reflectance.
    thicks_bw = thicks[::-1]
    iorcs_bw = iorcs[::-1]
    Ts = []
    Rfs = []
    Rbs = []
    AbsByAbsorbers = []
    #EQEs2 = []
    #IREQEs = []
    layerchoice = AbsorberLayer
    #layerchoice2 = 5
    for lam in lams:
        nks = [1]
        for layer in layers:
            nks.append(layer.nk(lam))
        nks.append(1)
        nks_bw = nks[::-1]
        # s- and p-polarization are computed separately and averaged below
        # (unpolarized incident light).
        front_spol = inc_tmm('s',nks,thicks,iorcs,inc_angle,lam)
        front_ppol = inc_tmm('p',nks,thicks,iorcs,inc_angle,lam)
        back_spol = inc_tmm('s',nks_bw,thicks_bw,iorcs_bw,inc_angle,lam)
        back_ppol = inc_tmm('p',nks_bw,thicks_bw,iorcs_bw,inc_angle,lam)
        AbsByAbsorber_spol = inc_absorp_in_each_layer(front_spol)[layerchoice]
        AbsByAbsorber_ppol = inc_absorp_in_each_layer(front_ppol)[layerchoice]
        AbsByAbsorbers.append( (AbsByAbsorber_spol + AbsByAbsorber_ppol) / 2. )
        # EQE_spol2 = tmm.inc_absorp_in_each_layer(front_spol)[layerchoice2]
        # EQE_ppol2 = tmm.inc_absorp_in_each_layer(front_ppol)[layerchoice2]
        # EQEs2.append( (EQE_spol2 + EQE_ppol2) / 2. )
        Rfs.append( (front_spol['R']+front_ppol['R']) / 2.)
        Rbs.append( (back_spol['R']+back_ppol['R']) / 2.)
        Ts.append( (front_spol['T']+front_ppol['T']) / 2. )
    Ts = array(Ts)
    Rfs = array(Rfs)
    Rbs = array(Rbs)
    As = 1-Ts-Rfs
    sanities = Ts+Rfs+As
    AbsByAbsorbers = array(AbsByAbsorbers)
    Spectra = {'AbsByAbsorbers':AbsByAbsorbers, 'Ts':Ts,'Rfs':Rfs,'Rbs':Rbs,'As':As,'Total':sanities}
    return Spectra
''' Here I calculate VLT and spit it out to the screen'''
'''Gives a spectrum of VLT. Used for plotting'''
def VLTSpectrum(layers):
    """Return the wpv Stack built from *layers*; used for plotting VLT spectra."""
    return Stack(layers)
'''Gives VLT as a single number'''
def VLT(layers):
    """Return the visible light transmission of the stack as a single number."""
    return Stack(layers).get_visible_light_transmission(lams, inc_angle)
'''This gives VLT as a single number. eliminates
need to recalculate AM15G and cieplf every iteration. Unclear if this will work for
optimization'''
def getFancyVLT(layers):#,lamrange,inc_angle):
    """Monte-Carlo (vegas) estimate of VLT for the stack.

    Integrates AM15G * photopic-luminosity * transmittance over ``lams`` and
    normalizes by AM15G * photopic-luminosity.  Avoids re-evaluating AM15G and
    cieplf on a fixed grid each iteration.  NOTE(review): original author was
    unsure this is safe inside the optimizer — confirm before relying on it.
    """
    integ = vegas.Integrator([lams])
    Trans=Stack(layers)
    # get_RAT(...)[2] is taken to be the transmittance component — confirm
    # against the wpv Stack API.
    numerator = integ(lambda lam: AM15G(lam)*cieplf(lam)*Trans.get_RAT(lam,inc_angle)[2], nitn=10, neval=100)[0]
    denominator = integ(lambda lam: AM15G(lam)*cieplf(lam), nitn=10, neval=100)[0]
    VLT = numerator/denominator
    # vegas returns a random variable; .mean extracts the point estimate.
    return VLT.mean
'''Gives minimum and maximum VLT based exclusively on the PV layer.
Only useful for judging VLT constraint for a given PV material
Requires input of single absorber layer with a tuple of (lb,ub)'''
def GiveMinMaxVLT(AbsorberType, Bounds):
    """Min and max VLT attainable from the PV layer alone.

    A thicker absorber transmits less light, so the minimum VLT comes from
    the upper thickness bound and the maximum from the lower bound.

    Parameters
    ----------
    AbsorberType : layer constructor for the PV material.
    Bounds : (lower, upper) thickness bounds for that layer.

    Returns
    -------
    dict with 'Material', 'minVLT', 'maxVLT', 'minThick', 'maxThick'.
    """
    lowTh, highTh = Bounds[0], Bounds[1]
    thinnest = GiveLayers([lowTh], [AbsorberType])
    thickest = GiveLayers([highTh], [AbsorberType])
    return {
        'Material': AbsorberType.__name__,
        'minVLT': VLT(thickest),
        'maxVLT': VLT(thinnest),
        'minThick': lowTh,
        'maxThick': highTh,
    }
'''Gives minimum and maximum VLT based exclusively on the PV layer.
Requires list of materials, absorbing layer, and absorber bounds'''
def GiveMinMaxVLTFromMaterials(Materials, AbsorberLayer, Bounds):
    """Min and max VLT based exclusively on the PV layer of a full stack.

    Parameters
    ----------
    Materials : list of layer constructors for the whole stack.
    AbsorberLayer : int
        1-based position of the PV layer within Materials.
    Bounds : (lower, upper) thickness bounds for the absorber layer.

    Returns
    -------
    dict with 'Material', 'minVLT', 'maxVLT', 'minThick', 'maxThick'.
    """
    # Delegate to GiveMinMaxVLT instead of duplicating its thin/thick logic;
    # the two bodies were previously identical copies.
    AbsorberType = Materials[AbsorberLayer - 1]
    return GiveMinMaxVLT(AbsorberType, Bounds)
# ******************** Here I add PCE calculation *********************#
'''This stuff imports a spreadsheet of the solar spectrum'''
#worksheet = pandas.read_excel('https://www.nrel.gov/grid/solar-resource/assets/data/astmg173.xls')
# Load the ASTM G173 reference spectrum shipped with the repo.
worksheet = read_excel('./Data/ASTMG173.xls')#('https://www.nrel.gov/grid/solar-resource/assets/data/astmg173.xls')
#worksheet = pandas.read_excel('/Users/lwheeler/Code/pv-window-bem/Data/astmg173.xls')
downloaded_array = array(worksheet)
# Wavelength is in column 0, AM1.5G data is column 2
AM15 = downloaded_array[1:, [0,2]]
# The first line should be 280.0 , 4.7309E-23
# The last line should be 4000.0, 7.1043E-03
# print(AM15)
# Interpolate to get a continuous function which I will be able to do integrals on:
'''Interpolated solar spectrum
when using, inputs must be within 300-2500 nm'''
# Wavelength axis divided by 1000 converts nm -> um so it matches ``lams``.
AM15interp = interp1d(AM15[:,0]/1000, AM15[:,1])
# Here’s the plot, it looks correct:
'''Plot of the solar spectrum for verification'''
'''
y_values = np.array([AM15interp(x) for x in lams])
figure()
plot(lams , y_values)
xlabel("Wavelength (nm)")
ylabel("Spectral intensity (W/m$^2$/nm)")
title("Light from the sun");
show()
'''
'''I convert wavelength to energy. E_min and max are used for integration limits '''
# Photon energies [J] corresponding to lams (E = h*c/lambda; *1e6 converts um
# to m in the denominator).  Descending in wavelength order.
Ephoton = hPlanck * c0 / lams *1e6 #J
E_min = min(Ephoton) #J energy units from hPlanck
E_max = max(Ephoton) #J energy units from hPlanck
'''I give the number of photons per......'''
def SPhotonsPerTEA(Ephoton):
    """Solar photon flux per unit time/energy/area at photon energy Ephoton [J]."""
    lam_um = hPlanck * c0 / Ephoton * 1e6  # wavelength in um, for AM15interp
    return AM15interp(lam_um) * (1 / Ephoton) * (hPlanck * c0 / Ephoton**2) * 1e9
'''I give the power for each......'''
def PowerPerTEA(Ephoton):
    """Spectral power density: photon energy [J] times photon flux at that energy."""
    return Ephoton * SPhotonsPerTEA(Ephoton)
'''I give the solar constant which is the W/m*2 emitted by the sun. Should be ~1000'''
def Solar_Constant(Ephoton):
    """Total solar irradiance [W/m^2]: PowerPerTEA integrated over [E_min, E_max]."""
    #PowerPerTEA = lambda E : E * SPhotonsPerTEA(E)
    return quad(PowerPerTEA,E_min,E_max, full_output=1)[0]
# quad() is ordinary integration; full_output=1 is (surprisingly) how you hide
# the messages warning about poor accuracy in integrating.
'''This is the solar constant value. It is called by optimization and used in a variety of functions here
Should always be ~1000'''
solar_constant = Solar_Constant(Ephoton)
'''I return an interpolated function of a spectrum relative to photon wavelength. Used for plotting'''
def GivelamsInterp(Parameter):
    """Interpolate a spectrum (array over ``lams``) vs wavelength; for plotting."""
    return interp1d(lams, Parameter.round(8))
'''I return an interpolated function of a spectrum relative to photon energy'''
def GiveEInterp(Parameter):
    """Interpolate a spectrum (array over ``lams``) vs photon energy ``Ephoton``."""
    return interp1d(Ephoton, Parameter.round(8))
'''I give Q based on a given spectrum. Units are W/m^2
Input is a spectrum interpolated with respect to energy, E
eta should only be used if looking at a PV layer. Otherwise it is set to 1'''
def GiveQ(Spectra, eta = 1):#Spectra must be an interpolated function
    """Integrate eta * Spectra(E) * PowerPerTEA(E) over the photon energy range.

    Spectra must be callable in photon energy [J].  Returns W/m^2.
    eta should only differ from 1 when Spectra is a PV-layer absorptance.
    """
    integrand = lambda E: eta * Spectra(E) * PowerPerTEA(E)
    return quad(integrand, E_min, E_max, full_output=1)[0]
'''
#trapz calcs
def GiveQ(Spectra, eta = 1):#Spectra must be an array
integrand = eta*Spectra*PowerPerTEA(Ephoton)
return -np.trapz(integrand, Ephoton)
'''
'''
def GivePhotons(Spectra, eta):#Spectra must be an interpolated function
def integrand(E):
return eta * Spectra(E) * SPhotonsPerTEA(E)
return quad(integrand, E_min, E_max)[0]
'''
# Here I input the spectrum of photons absorbed by the absorber material (Absorbed)
# and the electron-hole pair extraction efficiency (eta). EQE = eta * Absorbed
'''I give the rate of recombination for the solar cell, Units are photons/(s*m**2)'''
def RR0(eta,Absorbed,Tcell):
    """Equilibrium radiative recombination rate, photons/(s*m**2).

    Planck-law integral weighted by the cell absorptance (Absorbed, an
    interpolated function of photon energy in J) and carrier-extraction
    efficiency eta, at cell temperature Tcell [K].
    """
    integrand = lambda E : eta * Absorbed(E) * (E)**2 / (exp(E / (kB * Tcell)) - 1)
    integral = quad(integrand, E_min, E_max, full_output=1)[0]
    return ((2 * pi) / (c0**2 * hPlanck**3)) * integral# / 1.60218e-19 #J/eV
    #units = photons/(s*m**2)
'''I give the amount of energy converted to electricity in terms of photons, units are photons(s/m**2)'''
def Generated(eta,Absorbed):
    """Photogenerated carrier flux, photons/(s*m**2).

    Integrates eta * Absorbed(E) * solar photon flux over the photon
    energy range [E_min, E_max].
    """
    def integrand(E):
        return eta * Absorbed(E) * SPhotonsPerTEA(E)
    return quad(integrand, E_min, E_max, full_output=1)[0]
'''
#Using trapezoidal rule for integration instaed of quad
#AbsByAbsorbers is an aray of intensities, not an interpolated function.
def RR0(eta,Absorbed,Tcell):
AbsByAbsorbers = AbsByAbsorbers.round(8)
integrand = eta * AbsByAbsorbers * (Ephoton)**2 / (np.exp(Ephoton / (kB * Tcell)) - 1)
integral = trapz(integrand, Ephoton)
return ((2 * np.pi) / (c0**2 * hPlanck**3)) * integral
def Generated(eta,Absorbed):
Absorbed = Absorbed.round(8)
integrand = eta * Absorbed * SPhotonsPerTEA(Ephoton)
# integral = quad(integrand, E_min, E_max, full_output=1)[0]
return np.trapz(integrand, Ephoton)
'''
'''I use the single diode equation to return the max power of the cell in watts
Check PVlib documentation for details'''
def Give_Pmp(eta, Absorbed, Rs, Rsh, Tcell, n = 1, Ns = 1):
    """Maximum power point [W] from the pvlib single-diode model.

    Photocurrent = q * Generated(...), saturation current = q * RR0(...),
    thermal voltage nNsVth = n*Ns*kB*Tcell/q.  See pvlib.pvsystem.singlediode.
    """
    data = singlediode(Generated(eta, Absorbed)*q, RR0(eta, Absorbed,Tcell)*q, Rs, Rsh, n*Ns*kB*Tcell/q, ivcurve_pnts = 500)
    return data['p_mp']
'''I calculate equilibrium tmperature of the cell assuming the cell is infinitely thin
TotalAbs is the full absorptance of the stack as an array of intensities, uninterpolated.
Absorbed is PV layer absorptance interpolated
Temperature calculation is implicit so the numerical solver fsolve is used.
This equation is derived from Wheeler and Wheeler Detailed Balance Analysis of Photovoltaic Windows'''
def TcellCalc(TotalAbs, eta, Ti,To, Absorbed, Ui, Uo, Rs, Rsh):
    """Equilibrium cell temperature [K], assuming an infinitely thin cell.

    TotalAbs is the full-stack absorptance as an (uninterpolated) array;
    Absorbed is the PV-layer absorptance interpolated vs energy.  Ti/To are
    inside/outside temperatures, Ui/Uo the corresponding heat-transfer
    coefficients.  The balance is implicit, so fsolve is used (start 300 K).
    """
    AbsTotal = GiveEInterp(TotalAbs)
    Qabs = GiveQ(AbsTotal)
    # Energy balance: absorbed power minus extracted electrical power equals
    # convective exchange with both sides; root of Temp(Tcell) = 0.
    Temp = lambda Tcell: (Qabs - Give_Pmp(eta,Absorbed,Rs,Rsh, Tcell) + Ui*Ti + Uo*To)/(Ui + Uo)-Tcell
    return fsolve(Temp, 300)[0]
'''I use the single diode equation to produce an IV curve and power plot
I also return related values such as Voc, Isc, and Pmp in units volts, amps, and watts
See pvlib singlediode equation for more information'''
def GiveIVData(eta, Absorbed, Rs, Rsh,Tcell, n = 1, Ns = 1):
    """Produce an IV curve plot and return the single-diode results.

    Plots current and power vs voltage, then returns the pvlib singlediode
    output dict (keys include 'i_sc', 'v_oc', 'i_mp', 'v_mp', 'p_mp',
    'v', 'i').  See pvlib.pvsystem.singlediode for details.
    """
    data = singlediode(Generated(eta, Absorbed)*q, RR0(eta, Absorbed, Tcell)*q, Rs, Rsh, n*Ns*kB*Tcell/q, ivcurve_pnts = 500)
    Vvalues = array(data['v'])
    Ivalues = array(data['i'])
    figure()
    plot(Vvalues, Ivalues, label = 'IV')
    xlabel('Voltage, (V)')
    # BUG FIX: ylabel() was called twice; the second call clobbered the first,
    # leaving a label that ignored the current curve.  Keep the combined label.
    ylabel('Current (A) or Power (W/m^2)')
    P_values = array([Ivalues * Vvalues])
    plot(Vvalues, P_values.T, label = 'Power')
    ylim(-1, 150)
    legend(loc = 'upper right')
    show()
    return data
'''I give the solar heat gain coefficient. unitless numebr between 0 and 1
Ts is the transmission spectra. Must be a list of intensities, not an interpolated function
This equation comes form a combination of Wheeler and Wheeler Detailed Balance Analysis of Photovoltaic Windows
and equation 3.18 from Fundamentals of Heat and Mass Transfer 6ed Incropera'''
def SHGC(Ts, Ti, To, Tcell, Ui):
    """Solar heat gain coefficient, a unitless number between 0 and 1.

    Ts is the transmission spectrum as an array of intensities (not an
    interpolated function).  Combines transmitted solar power, heat released
    from the cell to the interior, and conduction driven by the indoor/outdoor
    temperature difference, normalized by the solar constant.
    """
    #Tcell = TcellCalc(As,Ti,To,eta,Absorbed)
    Rtot = 1/Ui #This is approximate because Ui is assumed
    #Included in GiveQ for simplicity but should not be used for calculating SHGC
    TransTotal = GiveEInterp(Ts)
    Qtrans = GiveQ(TransTotal,1)
    return (Qtrans + Ui*(Tcell-Ti) - ((To-Ti)/Rtot))/solar_constant
'''I give max efficiency also called PCE'''
'''Absorbed must be an interpolated function of the absorption spectrum of the PV layer'''
def max_efficiency(eta,Absorbed,Tcell, Rs, Rsh):
    """Power conversion efficiency (PCE): max power over the solar constant.

    Absorbed must be an interpolated function of the PV-layer absorption
    spectrum vs photon energy.
    """
    Pmp = Give_Pmp(eta, Absorbed, Rs, Rsh, Tcell)
    return Pmp / solar_constant
'''I give important info about a solar cell such as PCE, SHGC, Temperature, etc'''
def GiveImportantInfo(Thickness, Materials,eta,Ti,To,Ui,Uo,Rs,Rsh,AbsorberLayer,Angle=0):
    """Run the full window-PV analysis and plot/print a summary.

    Computes spectra, cell temperature, IV data, SHGC and PCE for the stack
    described by Thickness/Materials, then draws spectral plots and color
    swatches.  Side effect: rebinds the module-global ``inc_angle``.

    Returns
    -------
    dict with keys 'PCE', 'VLT', 'SHGC', 'Tcell', 'Isc', 'Voc', 'Imp',
    'Vmp', 'Pmp'.
    """
    global inc_angle
    inc_angle = giveincangle(Angle)
    layers = GiveLayers(Thickness,Materials)
    spectra = Spectra(layers ,AbsorberLayer)
    AbsByAbsorbers = spectra['AbsByAbsorbers']
    Ts = spectra['Ts']
    Rfs = spectra['Rfs']
    Rbs = spectra['Rbs']
    As = spectra['As']
    sanities = spectra['Total']
    Absorbed = GiveEInterp(AbsByAbsorbers)
    VLTcalc = cvs.getVLT(Ts,lams)#VLT(layers)
    Tcell = TcellCalc(As,eta, Ti,To, Absorbed, Ui, Uo, Rs, Rsh)
    #Absorbed = tpc.GiveEInterp(tpc.Spectra(tpc.GiveLayers(Thickness, Materials),4)['AbsByAbsorbers'])
    data = GiveIVData(eta, Absorbed, Rs, Rsh,Tcell, n = 1, Ns = 1)
    Isc = data['i_sc']
    Voc = data['v_oc']
    Imp = data['i_mp']
    Vmp = data['v_mp']
    Pmp = data['p_mp']
    SHGCcalc = SHGC(Ts, Ti, To, Tcell, Ui)
    PCE = max_efficiency(eta,Absorbed,Tcell, Rs, Rsh)
    #Spectral Curves
    figure()
    plot(lams,Rfs,color='magenta',marker=None,label="$R_f$")
    plot(lams,Ts,color='green',marker=None,label="$T$")
    plot(lams,Rbs,color='purple',marker=None,label="$R_b$")
    plot(lams,As,color='black',marker=None,label="A")
    plot(lams,AbsByAbsorbers,color='black',linestyle='--',marker=None,label="AbsByAbsorber")
    plot(lams,sanities,color='gold',marker=None,label="R+A+T")
    plot(lams,VLTSpectrum(layers).cieplf(lams),color='red',marker=None,label="photopic")
    xlabel('wavelength, $\mu$m')
    ylabel('Intensity')
    legend(loc = 'upper right')
    show()
    # Same spectra replotted against photon energy in eV (J -> eV factor).
    EphotoneV = Ephoton*6.241509e+18
    figure()
    plot(EphotoneV, Ts, color='magenta',marker=None,label="$T$")
    plot(EphotoneV, Rfs,color='green',marker=None,label="$R_f$")
    plot(EphotoneV, Rbs,color='orange',marker=None,label="$R_b$")
    plot(EphotoneV, AbsByAbsorbers,color='black',marker=None,label="Abs")
    #plot(Ephoton,tpc.VLTSpectrum(layers).cieplf(lams),color='red',marker=None,label="photopic")
    legend(loc = 'upper right')
    xlabel('Energy, eV')
    ylabel('Intensity')
    show()
    pvc.GiveColorSwatch(Ts, Rfs)
    pvc.plot_xy_on_fin(Ts, Rfs)
    print('PCE = ',PCE,'VLT = ', VLTcalc, 'SHGC = ',SHGCcalc, 'Tcell = ',Tcell)#,'time to calculate PCE from scratch in seconds = ', TimePCE, 'Time to run optimizer in minutes = ',TimeOptimize/60)
    return {'PCE':PCE, 'VLT':VLTcalc, 'SHGC':SHGCcalc, 'Tcell':Tcell,'Isc':Isc, 'Voc': Voc, 'Imp': Imp, 'Vmp': Vmp,'Pmp': Pmp}
| 37.876611 | 196 | 0.685516 |
from numpy import pi, linspace, array, exp
from tmm import inc_tmm, inc_absorp_in_each_layer, inf
from matplotlib.pyplot import plot,figure,xlabel,ylabel,show,ylim,legend
from wpv import Layer, Stack
from scipy.interpolate import interp1d
from scipy.integrate import quad, trapz
from scipy.optimize import fsolve
import scipy.optimize
from pandas import read_excel
import sys
assert sys.version_info >= (3,6), 'Requires Python 3.6+'
from pvlib.pvsystem import singlediode
import tmmPVColor as pvc
import CalculateVLTFromSpectrum as cvs
from CalculateVLTFromSpectrum import AM15G, cieplf
import vegas
def giveincangle(angle):
degree = pi/180
return angle*degree
inc_angle = giveincangle(0)
num_lams = 500
lams = linspace(0.3,2.5,num=num_lams)
q = 1.602176634e-19
c0 = 299792458 607015e-34 ef Glass(Thickness = 6000):
return Layer(Thickness,'nkLowFeGlass','i')
def TiO2(Thickness = 0.050):
return Layer(Thickness,'nkTiO2','c')
def FTO(Thickness = 0.250):
return Layer(Thickness,'nkFTO','c')
def MAPI(Thickness = 0.130):
return Layer(Thickness,'nkMAPI','c')
def AZO(Thickness = 0.200):
return Layer(Thickness,'nkAZO','c')
def ITO(Thickness = 0.200):
return Layer(Thickness,'nkITO','c')
def ITOlowE(Thickness = 0.075):
return Layer(Thickness,'nkITO','c')
def SnO2(Thickness = 0.05):
return Layer(Thickness,'nkSnO2','c')
def SnO2lowE(Thickness = 0.030):
return Layer(Thickness,'nkSnO2','c')
def SnO2lowEfat(Thickness = 0.050):
return Layer(Thickness,'nkSnO2','c')
def SiO2(Thickness = 0.024):
return Layer(Thickness,'nkSiO2','c')
def NiO(Thickness = 0.050):
return Layer(Thickness,'nkNiO','c')
def Ag(Thickness = 0.015):
return Layer(Thickness,'nkAg','c')
def TiO2lowE(Thickness = 0.030):
return Layer(Thickness,'nkTiO2','c')
def TiO2lowEfat(Thickness = 0.060):
return Layer(Thickness,'nkTiO2','c')
def Bleach(Thickness = 0.370):
return Layer(Thickness,'nkBleach','c')
def ClAlPc(Thickness = 0.300):
return Layer(Thickness,'nkClAlPc','c')
def C60(Thickness = 0.200):
return Layer(Thickness,'nkC60','c')
def IR(Thickness = 0.060):
return Layer(Thickness,'nkPTB7_ThIEICO_4F','c')
def MAPBr(Thickness = 0.500):
return Layer(Thickness,'nkMAPbBr3','c')
def EVA(Thickness = 3000):
return Layer(Thickness,'nkEVA','i')
GlassBound = (5999,6001)
TiO2Bound = (0.025,.1)
FTOBound = (0.1,0.5)
MAPIBound = (.06,.260)
AZOBound = (.1,.4)
ITOBound = (.1,.4)
ITOlowEBound = (0.03,.15)
SnO2Bound = (.025,.1)
SnO2lowEBound = (.015,.06)
SnO2lowEfatBound = (0.025,.1)
SiO2Bound = (.012,.05)
NiOBound = (.025,.1)
AgBound = (.0149, .0151)
TiO2lowEBound = (.015, .070)
TiO2lowEfatBound = (.03,.12)
BleachBound = (.180, .500)
ClAlPcBound = (.150, .600)
C60Bound = (.100,.400)
IRBound = (.030, .12)
MAPBrBound = (.250,1)
EVABound = (2999,3001)
def GiveLayers(Thickness,Materials):
x = len(Materials)
if x == len(Thickness):
Layers = []
for i in range(x):
Layers.append(Materials[i](Thickness[i]))
return Layers
else:
raise ValueError ('layers and Thickness lengths do not match')
def GiveBounds(Materials, DictBound):
x = len(Materials)
lb = []
ub = []
for i in range(x):
lb.append(DictBound[Materials[i].__name__ + 'Bound'][0])
for i in range(x):
ub.append(DictBound[Materials[i].__name__ + 'Bound'][1])
bounds = scipy.optimize.Bounds(lb,ub)
return bounds
def GiveThicks(Materials, DictTh):
x = len(Materials)
Th = []
for i in range(x):
Th.append(DictTh[Materials[i].__name__ + 'Th'])
return Th
def Spectra(layers, AbsorberLayer):
thicks = [inf]
iorcs = ['i']
for layer in layers:
thicks.append(layer.d)
iorcs.append(layer.i_or_c)
thicks.append(inf)
iorcs.append('i')
thicks_bw = thicks[::-1]
iorcs_bw = iorcs[::-1]
Ts = []
Rfs = []
Rbs = []
AbsByAbsorbers = []
layerchoice = AbsorberLayer
for lam in lams:
nks = [1]
for layer in layers:
nks.append(layer.nk(lam))
nks.append(1)
nks_bw = nks[::-1]
front_spol = inc_tmm('s',nks,thicks,iorcs,inc_angle,lam)
front_ppol = inc_tmm('p',nks,thicks,iorcs,inc_angle,lam)
back_spol = inc_tmm('s',nks_bw,thicks_bw,iorcs_bw,inc_angle,lam)
back_ppol = inc_tmm('p',nks_bw,thicks_bw,iorcs_bw,inc_angle,lam)
AbsByAbsorber_spol = inc_absorp_in_each_layer(front_spol)[layerchoice]
AbsByAbsorber_ppol = inc_absorp_in_each_layer(front_ppol)[layerchoice]
AbsByAbsorbers.append( (AbsByAbsorber_spol + AbsByAbsorber_ppol) / 2. )
Rfs.append( (front_spol['R']+front_ppol['R']) / 2.)
Rbs.append( (back_spol['R']+back_ppol['R']) / 2.)
Ts.append( (front_spol['T']+front_ppol['T']) / 2. )
Ts = array(Ts)
Rfs = array(Rfs)
Rbs = array(Rbs)
As = 1-Ts-Rfs
sanities = Ts+Rfs+As
AbsByAbsorbers = array(AbsByAbsorbers)
Spectra = {'AbsByAbsorbers':AbsByAbsorbers, 'Ts':Ts,'Rfs':Rfs,'Rbs':Rbs,'As':As,'Total':sanities}
return Spectra
def VLTSpectrum(layers):
return Stack(layers)
def VLT(layers):
VLTstack=Stack(layers)
return VLTstack.get_visible_light_transmission(lams,inc_angle)
def getFancyVLT(layers):
integ = vegas.Integrator([lams])
Trans=Stack(layers)
numerator = integ(lambda lam: AM15G(lam)*cieplf(lam)*Trans.get_RAT(lam,inc_angle)[2], nitn=10, neval=100)[0]
denominator = integ(lambda lam: AM15G(lam)*cieplf(lam), nitn=10, neval=100)[0]
VLT = numerator/denominator
return VLT.mean
def GiveMinMaxVLT(AbsorberType, Bounds):
minThick = GiveLayers([Bounds[0]], [AbsorberType])
maxThick = GiveLayers([Bounds[1]], [AbsorberType])
minimum = VLT(maxThick)
maximum = VLT(minThick)
return {'Material':AbsorberType.__name__,'minVLT':minimum, 'maxVLT':maximum, 'minThick':Bounds[0],
'maxThick':Bounds[1]}
def GiveMinMaxVLTFromMaterials(Materials, AbsorberLayer, Bounds):
AbsorberType = Materials[AbsorberLayer-1]
minThick = GiveLayers([Bounds[0]], [AbsorberType])
maxThick = GiveLayers([Bounds[1]], [AbsorberType])
minimum = VLT(maxThick)
maximum = VLT(minThick)
return {'Material':AbsorberType.__name__,'minVLT':minimum, 'maxVLT':maximum, 'minThick':Bounds[0],
'maxThick':Bounds[1]}
worksheet = read_excel('./Data/ASTMG173.xls')
downloaded_array = array(worksheet)
AM15 = downloaded_array[1:, [0,2]]
AM15interp = interp1d(AM15[:,0]/1000, AM15[:,1])
Ephoton = hPlanck * c0 / lams *1e6
E_min = min(Ephoton)
E_max = max(Ephoton)
def SPhotonsPerTEA(Ephoton):
λ = hPlanck * c0 / Ephoton *1e6
return AM15interp(λ) * (1 / Ephoton) * (hPlanck * c0 / Ephoton**2) * 1e9
def PowerPerTEA(Ephoton):
return Ephoton * SPhotonsPerTEA(Ephoton)
def Solar_Constant(Ephoton):
return quad(PowerPerTEA,E_min,E_max, full_output=1)[0]
solar_constant = Solar_Constant(Ephoton)
def GivelamsInterp(Parameter):
Curve = Parameter.round(8)
return interp1d(lams, Curve)
def GiveEInterp(Parameter):
Curve = Parameter.round(8)
return interp1d(Ephoton, Curve)
def GiveQ(Spectra, eta = 1):
def integrand(E):
return eta * Spectra(E) * PowerPerTEA(E)
return quad(integrand, E_min, E_max, full_output=1)[0]
def RR0(eta,Absorbed,Tcell):
integrand = lambda E : eta * Absorbed(E) * (E)**2 / (exp(E / (kB * Tcell)) - 1)
integral = quad(integrand, E_min, E_max, full_output=1)[0]
return ((2 * pi) / (c0**2 * hPlanck**3)) * integralf Generated(eta,Absorbed):
integrand = lambda E : eta * Absorbed(E) * SPhotonsPerTEA(E)
return quad(integrand, E_min, E_max, full_output=1)[0]
def Give_Pmp(eta, Absorbed, Rs, Rsh, Tcell, n = 1, Ns = 1):
data = singlediode(Generated(eta, Absorbed)*q, RR0(eta, Absorbed,Tcell)*q, Rs, Rsh, n*Ns*kB*Tcell/q, ivcurve_pnts = 500)
return data['p_mp']
def TcellCalc(TotalAbs, eta, Ti,To, Absorbed, Ui, Uo, Rs, Rsh):
AbsTotal = GiveEInterp(TotalAbs)
Qabs = GiveQ(AbsTotal)
Temp = lambda Tcell: (Qabs - Give_Pmp(eta,Absorbed,Rs,Rsh, Tcell) + Ui*Ti + Uo*To)/(Ui + Uo)-Tcell
return fsolve(Temp, 300)[0]
def GiveIVData(eta, Absorbed, Rs, Rsh,Tcell, n = 1, Ns = 1):
data = singlediode(Generated(eta, Absorbed)*q, RR0(eta, Absorbed, Tcell)*q, Rs, Rsh, n*Ns*kB*Tcell/q, ivcurve_pnts = 500)
Isc = data['i_sc']
Voc = data['v_oc']
Imp = data['i_mp']
Vmp = data['v_mp']
Pmp = data['p_mp']
Vvalues = array(data['v'])
Ivalues = array(data['i'])
figure()
plot(Vvalues,Ivalues, label = 'IV')
xlabel('Voltage, (V)')
ylabel('Current (A) or Power (W/m^2)')
ylabel('Power (W/m^2)')
P_values = array([Ivalues * Vvalues])
plot(Vvalues , P_values.T, label = 'Power')
ylim(-1, 150)
legend(loc = 'upper right')
show()
return data
def SHGC(Ts, Ti, To, Tcell, Ui):
Rtot = 1/Ui
TransTotal = GiveEInterp(Ts)
Qtrans = GiveQ(TransTotal,1)
return (Qtrans + Ui*(Tcell-Ti) - ((To-Ti)/Rtot))/solar_constant
def max_efficiency(eta,Absorbed,Tcell, Rs, Rsh):
return Give_Pmp(eta, Absorbed, Rs, Rsh, Tcell) / solar_constant
def GiveImportantInfo(Thickness, Materials,eta,Ti,To,Ui,Uo,Rs,Rsh,AbsorberLayer,Angle=0):
global inc_angle
inc_angle = giveincangle(Angle)
layers = GiveLayers(Thickness,Materials)
spectra = Spectra(layers ,AbsorberLayer)
AbsByAbsorbers = spectra['AbsByAbsorbers']
Ts = spectra['Ts']
Rfs = spectra['Rfs']
Rbs = spectra['Rbs']
As = spectra['As']
sanities = spectra['Total']
Absorbed = GiveEInterp(AbsByAbsorbers)
VLTcalc = cvs.getVLT(Ts,lams)
Tcell = TcellCalc(As,eta, Ti,To, Absorbed, Ui, Uo, Rs, Rsh)
data = GiveIVData(eta, Absorbed, Rs, Rsh,Tcell, n = 1, Ns = 1)
Isc = data['i_sc']
Voc = data['v_oc']
Imp = data['i_mp']
Vmp = data['v_mp']
Pmp = data['p_mp']
SHGCcalc = SHGC(Ts, Ti, To, Tcell, Ui)
PCE = max_efficiency(eta,Absorbed,Tcell, Rs, Rsh)
figure()
plot(lams,Rfs,color='magenta',marker=None,label="$R_f$")
plot(lams,Ts,color='green',marker=None,label="$T$")
plot(lams,Rbs,color='purple',marker=None,label="$R_b$")
plot(lams,As,color='black',marker=None,label="A")
plot(lams,AbsByAbsorbers,color='black',linestyle='--',marker=None,label="AbsByAbsorber")
plot(lams,sanities,color='gold',marker=None,label="R+A+T")
plot(lams,VLTSpectrum(layers).cieplf(lams),color='red',marker=None,label="photopic")
xlabel('wavelength, $\mu$m')
ylabel('Intensity')
legend(loc = 'upper right')
show()
EphotoneV = Ephoton*6.241509e+18
figure()
plot(EphotoneV, Ts, color='magenta',marker=None,label="$T$")
plot(EphotoneV, Rfs,color='green',marker=None,label="$R_f$")
plot(EphotoneV, Rbs,color='orange',marker=None,label="$R_b$")
plot(EphotoneV, AbsByAbsorbers,color='black',marker=None,label="Abs")
legend(loc = 'upper right')
xlabel('Energy, eV')
ylabel('Intensity')
show()
pvc.GiveColorSwatch(Ts, Rfs)
pvc.plot_xy_on_fin(Ts, Rfs)
print('PCE = ',PCE,'VLT = ', VLTcalc, 'SHGC = ',SHGCcalc, 'Tcell = ',Tcell)
return {'PCE':PCE, 'VLT':VLTcalc, 'SHGC':SHGCcalc, 'Tcell':Tcell,'Isc':Isc, 'Voc': Voc, 'Imp': Imp, 'Vmp': Vmp,'Pmp': Pmp}
| true | true |
f71c9f822c037dcf590935a9d553531d88e41ced | 15,021 | py | Python | tests/parallel/topics/TestFiniteTopicModel.py | co2meal/-bnpy-dev | 74f69afde6c9dac8de4c074842df53ae87a15ac1 | [
"BSD-3-Clause"
] | null | null | null | tests/parallel/topics/TestFiniteTopicModel.py | co2meal/-bnpy-dev | 74f69afde6c9dac8de4c074842df53ae87a15ac1 | [
"BSD-3-Clause"
] | null | null | null | tests/parallel/topics/TestFiniteTopicModel.py | co2meal/-bnpy-dev | 74f69afde6c9dac8de4c074842df53ae87a15ac1 | [
"BSD-3-Clause"
] | null | null | null | """
Collection of functions and classes for testing a naive version
of parallel execution for topic model variational inference.
Usage
-----
From a shell/terminal, use like a standard Python script.
$ python TestFiniteTopicModel.py --N 200 --nDoc 500 --K 50 --nWorkers 2
For some keyword args (like N, nDoc, K, nWorkers),
can use range syntax to easily compare performance as params change.
$ python TestFiniteTopicModel.py --nWorkers 2-4 --nDoc 1000
will repeat the test with 1000 documents for 2, 3, and 4 workers
Keyword Args
------------
* N : int or range
number of words per document for toy data
* nDoc : int or range
total number of documents to generate
* K : int or range
number of topics
* nWorkers : int or range
number of worker processes for parallel execution
* method : {'all', 'baseline', 'parallel', 'serial'}. Default = 'all'.
identifies which style of computation to perform
* nRepeat : int
number of times to repeat each called method
Methods
-----------
* runBenchmarkAcrossProblemSizes
Executable function that parses cmd-line args and runs benchmark tests.
* calcLocalParamsAndSummarize
Execute local and summary step on slice of data.
This is the function that we wish to call from each parallel worker.
"""
import os
import multiprocessing
from multiprocessing import sharedctypes
import warnings
import numpy as np
import unittest
import ctypes
import time
import itertools
import bnpy
def runBenchmarkAcrossProblemSizes(TestClass):
    """ Execute speed benchmark across several problem sizes.
    This is main function executed by running this file as script.
    Parameters
    --------
    TestClass : constructor for a TestCase instance
        Must offer a run_speed_benchmark method.
    Post Condition
    --------
    Speed tests are executed, and results are printed to std out.
    """
    import argparse
    # Pin BLAS threading to 1 unless the caller overrode it, so timing
    # comparisons are not confounded by implicit multithreading.
    if 'OMP_NUM_THREADS' not in os.environ:
        os.environ['OMP_NUM_THREADS'] = '1'
    parser = argparse.ArgumentParser()
    parser.add_argument('--nDoc', type=str, default='100')
    parser.add_argument('--N', type=str, default='200')
    parser.add_argument('--K', type=str, default='50')
    parser.add_argument('--nWorkers', type=str, default='2')
    parser.add_argument('--vocab_size', type=int, default=1000)
    parser.add_argument('--nCoordAscentItersLP', type=int, default=100)
    parser.add_argument('--convThrLP', type=float, default=0.001)
    parser.add_argument('--method', type=str, default='all')
    parser.add_argument('--nRepeat', type=int, default=1)
    parser.add_argument('--verbose', type=int, default=0)
    args = parser.parse_args()
    # NOTE(review): rangeFromString is not defined or imported in the visible
    # portion of this module — confirm it is provided elsewhere in the file.
    NKDiterator = itertools.product(
        rangeFromString(args.N),
        rangeFromString(args.K),
        rangeFromString(args.nDoc),
        rangeFromString(args.nWorkers))
    kwargs = dict(**args.__dict__)
    print "Speed Test."
    print " OMP_NUM_THREADS=%s" % (os.environ['OMP_NUM_THREADS'])
    print " vocab_size=%d" % (args.vocab_size)
    print " nCoordAscentItersLP=%d" % (args.nCoordAscentItersLP)
    print " convThrLP=%.2e" % (args.convThrLP)
    for (N, K, nDoc, nWorkers) in NKDiterator:
        print '========================= nDoc %d N %d K=%d | nWorkers %d' \
            % (nDoc, N, K, nWorkers)
        kwargs['N'] = N
        kwargs['K'] = K
        kwargs['nDoc'] = nDoc
        kwargs['nWorkers'] = nWorkers
        # Create test instance with desired keyword args.
        # Required first arg is string name of test we'll execute
        myTest = TestClass('run_speed_benchmark', **kwargs)
        myTest.setUp()
        TimeInfo = myTest.run_speed_benchmark(method=args.method,
                                              nRepeat=args.nRepeat)
        myTest.tearDown() # closes all processes
def calcLocalParamsAndSummarize(Data, hmodel, start=None, stop=None, **kwargs):
    ''' Execute local step and summary step on slice of data.
    Args
    ----
    Data : bnpy Data object
    hmodel : bnpy HModel
    start : int or None
        id of data item that starts desired slice
    stop : int or None
        id of data item that ends desired slice
    Returns
    -----------
    SS : bnpy SuffStatBag
    '''
    # The (start, stop) pair is forwarded to bnpy as the cslice keyword,
    # which selects the document slice for both local and summary steps.
    sliceArgs = dict(cslice=(start, stop))
    kwargs.update(sliceArgs)
    LP = hmodel.obsModel.calc_local_params(Data, dict(), **kwargs)
    LP = hmodel.allocModel.calc_local_params(Data, LP, **kwargs)
    SS = hmodel.allocModel.get_global_suff_stats(Data, LP, **sliceArgs)
    SS = hmodel.obsModel.get_global_suff_stats(Data, SS, LP, **sliceArgs)
    return SS
def sliceGenerator(nDoc=0, nWorkers=0):
    """Yield (start, stop) index pairs splitting nDoc items over nWorkers.

    Every worker gets floor(nDoc / nWorkers) documents except the last,
    which absorbs the remainder so the slices cover all nDoc documents.

    Yields
    --------
    (start, stop) : tuple of int
    """
    perWorker = int(np.floor(nDoc / nWorkers))
    for w in range(nWorkers):
        begin = w * perWorker
        end = nDoc if w == nWorkers - 1 else begin + perWorker
        yield begin, end
class Worker(multiprocessing.Process):
    """ Single "worker" process that processes tasks delivered via queues

    Pulls (start, stop) tasks from JobQueue, runs the local+summary step on
    that document slice, and pushes the resulting SuffStatBag to ResultQueue.
    A None task is the shutdown sentinel.
    """
    def __init__(self, uid, JobQueue, ResultQueue,
                 Data=None,
                 hmodel=None,
                 LPkwargs=None,
                 verbose=0):
        # uid: integer id used only for log messages.
        super(Worker, self).__init__()
        self.uid = uid
        self.Data = Data
        self.hmodel = hmodel
        if LPkwargs is None:
            LPkwargs = dict()
        self.LPkwargs = LPkwargs
        self.JobQueue = JobQueue
        self.ResultQueue = ResultQueue
        self.verbose = verbose
    def printMsg(self, msg):
        # Prefix each line with this worker's id; silent unless verbose.
        if self.verbose:
            for line in msg.split("\n"):
                print "#%d: %s" % (self.uid, line)
    def run(self):
        self.printMsg("process SetUp! pid=%d" % (os.getpid()))
        # Construct iterator with sentinel value of None (for termination)
        jobIterator = iter(self.JobQueue.get, None)
        for jobArgs in jobIterator:
            start, stop = jobArgs
            SS = calcLocalParamsAndSummarize(
                self.Data, self.hmodel,
                start=start, stop=stop, **self.LPkwargs)
            self.ResultQueue.put(SS)
            self.JobQueue.task_done()
        # Clean up
        self.printMsg("process CleanUp! pid=%d" % (os.getpid()))
class Test(unittest.TestCase):
    def shortDescription(self):
        # Return None so unittest/nose prints the test name, not the docstring.
        return None
    def __init__(self, testname, seed=0, vocab_size=100,
                 nCoordAscentItersLP=100, convThrLP=0.01,
                 N=1000, nDoc=25, K=10, nWorkers=1, verbose=1,
                 **kwargs):
        ''' Create a new test harness for parallel topic model inference.

        Args
        ----
        testname : str name of the test method to run (required by unittest).
        seed : int random seed for topic generation and toy data.
        vocab_size : int vocabulary size of the toy corpus.
        N : int words per document; nDoc : total documents; K : topics.
        nWorkers : int number of parallel worker processes.

        Post Condition Attributes
        --------------
        Data : bnpy DataObj dataset
        hmodel : bnpy HModel
        '''
        # NOTE(review): super(type(self), self) recurses forever if this class
        # is subclassed; harmless here since the class is used as-is.
        super(type(self), self).__init__(testname)
        self.nWorkers = nWorkers
        self.verbose = verbose
        self.N = N
        self.K = K
        self.nDoc = nDoc
        self.LPkwargs = dict(nCoordAscentItersLP=nCoordAscentItersLP,
                             convThrLP=convThrLP)
        # Draw random topics from a Gamma prior, floor at 1e-30, and
        # normalize rows so each topic is a distribution over the vocab.
        PRNG = np.random.RandomState(seed)
        topics = PRNG.gamma(1.0, 1.0, size=(K, vocab_size))
        np.maximum(topics, 1e-30, out=topics)
        topics /= topics.sum(axis=1)[:, np.newaxis]
        topic_prior = 1.0 / K * np.ones(K)
        self.Data = bnpy.data.WordsData.CreateToyDataFromLDAModel(
            nWordsPerDoc=N, nDocTotal=nDoc, K=K, topics=topics,
            seed=seed, topic_prior=topic_prior)
        self.hmodel = bnpy.HModel.CreateEntireModel(
            'VB', 'FiniteTopicModel', 'Mult',
            dict(alpha=0.1, gamma=5),
            dict(lam=0.1),
            self.Data)
        self.hmodel.init_global_params(self.Data, initname='randexamples', K=K)
    def setUp(self, **kwargs):
        ''' Launch pool of worker processes, with queues to communicate with.
        '''
        # Create a JobQ (to hold tasks to be done)
        # and a ResultsQ (to hold results of completed tasks)
        manager = multiprocessing.Manager()
        self.JobQ = manager.Queue()
        self.ResultQ = manager.Queue()
        # Launch desired number of worker processes
        # We don't need to store references to these processes,
        # We can get everything we need from JobQ and ResultsQ
        for uid in range(self.nWorkers):
            Worker(uid, self.JobQ, self.ResultQ,
                   Data=self.Data,
                   hmodel=self.hmodel,
                   LPkwargs=self.LPkwargs,
                   verbose=self.verbose).start()
    def tearDown(self):
        """ Shut down all the workers.
        """
        self.shutdownWorkers()
    def shutdownWorkers(self):
        """ Shut down all worker processes.

        One None sentinel per worker; each worker exits after consuming one.
        """
        for workerID in range(self.nWorkers):
            # Passing None to JobQ is shutdown signal
            self.JobQ.put(None)
    def run_baseline(self):
        """ Execute on entire matrix (no slices) in master process.

        Reference answer for the correctness tests: serial and parallel
        runs must aggregate to this same SuffStatBag.
        """
        SSall = calcLocalParamsAndSummarize(self.Data, self.hmodel,
                                            **self.LPkwargs)
        return SSall
def run_serial(self):
""" Execute on slices processed in serial by master process.
"""
SSagg = None
for start, stop in sliceGenerator(self.nDoc, self.nWorkers):
SSslice = calcLocalParamsAndSummarize(
self.Data, self.hmodel, start, stop,
**self.LPkwargs)
if start == 0:
SSagg = SSslice
else:
SSagg += SSslice
return SSagg
def run_parallel(self):
""" Execute on slices processed by workers in parallel.
"""
# MAP!
# Create several tasks (one per worker) and add to job queue
for start, stop in sliceGenerator(self.nDoc, self.nWorkers):
self.JobQ.put((start, stop))
# REDUCE!
# Aggregate results across across all workers
# Avoids JobQueue.join() call (which blocks execution)
# Instead lets main process aggregate all results as they come in.
nTaskDone = 0
while nTaskDone < self.nWorkers:
if not self.ResultQ.empty():
SSchunk = self.ResultQ.get()
if nTaskDone == 0:
SS = SSchunk
else:
SS += SSchunk
nTaskDone += 1
# At this point all jobs are marked complete.
return SS
    def test_correctness_serial(self):
        ''' Verify that the local step works as expected.
        No parallelization here.
        Just verifying that we can split computation up into >1 slice,
        add up results from all slices and still get the same answer.
        '''
        print ''
        SSbase = self.run_baseline()
        SSserial = self.run_serial()
        # allcloseSS raises AssertionError on any mismatched field.
        allcloseSS(SSbase, SSserial)
    def test_correctness_parallel(self):
        """ Verify that we can execute local step across several processes
        Each process does the following:
        * grab its chunk of data from a shared jobQueue
        * performs computations on this chunk
        * load the resulting suff statistics object into resultsQueue
        """
        print ''
        SSparallel = self.run_parallel()
        SSbase = self.run_baseline()
        # allcloseSS raises AssertionError on any mismatched field.
        allcloseSS(SSparallel, SSbase)
    def test_speed(self, nRepeat=5):
        """ Compare speed of different algorithms.

        nRepeat controls how many timing repetitions each method gets.
        Output is informational only; there is no numeric assertion.
        """
        print ''
        Results = self.run_all_with_timer(nRepeat=nRepeat)
        assert True
    def run_speed_benchmark(self, method='all', nRepeat=3):
        """ Compare speed of different algorithms.

        Parameters
        ----------
        method : {'all', 'parallel', 'serial', anything else = baseline}
        nRepeat : int, number of repetitions per timed method

        Returns
        -------
        Results : dict of *_time (and, for 'all', *_speedup) entries.
            Timing lines are also printed to stdout.
        """
        if method == 'all':
            Results = self.run_all_with_timer(nRepeat=nRepeat)
        elif method == 'parallel':
            ptime = self.run_with_timer('run_parallel', nRepeat=nRepeat)
            Results = dict(parallel_time=ptime)
        elif method == 'serial':
            ptime = self.run_with_timer('run_serial', nRepeat=nRepeat)
            Results = dict(serial_time=ptime)
        else:
            ptime = self.run_with_timer('run_baseline', nRepeat=nRepeat)
            Results = dict(base_time=ptime)
        for key in ['base_time', 'serial_time', 'parallel_time']:
            if key in Results:
                try:
                    # Speedup entries only exist when method == 'all'.
                    speedupval = Results[key.replace('time', 'speedup')]
                    speedupmsg = "| %8.3f speedup" % (speedupval)
                except KeyError:
                    speedupmsg = ""
                print "%18s | %8.3f sec %s" % (
                    key,
                    Results[key],
                    speedupmsg
                )
        return Results
def run_with_timer(self, funcToCall, nRepeat=3):
""" Timing experiment specified by funcToCall.
"""
starttime = time.time()
for r in xrange(nRepeat):
getattr(self, funcToCall)()
return (time.time() - starttime) / nRepeat
    def run_all_with_timer(self, nRepeat=3):
        """ Timing experiments with baseline, serial, and parallel versions.

        Returns
        -------
        dict with *_time (avg seconds per call) and *_speedup (relative
        to the no-slice baseline) entries for all three strategies.
        """
        serial_time = self.run_with_timer('run_serial', nRepeat)
        parallel_time = self.run_with_timer('run_parallel', nRepeat)
        base_time = self.run_with_timer('run_baseline', nRepeat)
        return dict(
            base_time=base_time,
            base_speedup=1.0,
            serial_time=serial_time,
            serial_speedup=base_time / serial_time,
            parallel_time=parallel_time,
            parallel_speedup=base_time / parallel_time,
        )
def rangeFromString(commaString):
    """ Convert a comma string like "1,5-7" into a list [1,5,6,7]

    Returns
    --------
    myList : list of integers

    Reference
    -------
    http://stackoverflow.com/questions/6405208/\
    how-to-convert-numeric-string-ranges-to-a-list-in-python
    """
    listOfLists = [rangeFromHyphen(r) for r in commaString.split(',')]
    # Materialize into a real list: the docstring promises a list, and the
    # previous itertools.chain iterator could only be consumed once, which
    # silently breaks callers that iterate the result more than once.
    return list(itertools.chain(*listOfLists))
def rangeFromHyphen(hyphenString):
    """ Convert a hyphen string like "5-7" into a list [5,6,7]

    A single number with no hyphen (e.g. "4") yields the one-element
    sequence [4]; the range is inclusive of both endpoints.

    Returns
    --------
    myList : list of integers
    """
    bounds = [int(tok) for tok in hyphenString.split('-')]
    return range(bounds[0], bounds[-1] + 1)
def allcloseSS(SS1, SS2):
    """ Verify that two suff stat bags have indistinguishable data.

    Prints a short preview of each field from both bags (for eyeballing
    when a test fails), then asserts numerical closeness field-by-field.

    Raises
    ------
    AssertionError : if any field differs between SS1 and SS2.
    """
    # Both A and B better give the same answer
    for key in SS1._FieldDims.keys():
        arr1 = getattr(SS1, key)
        arr2 = getattr(SS2, key)
        # Single-argument print() calls behave identically on py2 and py3.
        print(key)
        if isinstance(arr1, float):
            print(arr1)
            # Bug fix: this previously printed arr1 twice, so the two bags
            # could never actually be compared by eye for scalar fields.
            print(arr2)
        elif arr1.ndim == 1:
            print(arr1[:3])
            print(arr2[:3])
        else:
            print(arr1[:2, :3])
            print(arr2[:2, :3])
        assert np.allclose(arr1, arr2)
# Script entry point: run the full speed benchmark over all problem sizes
# parsed from the command line.
if __name__ == "__main__":
    runBenchmarkAcrossProblemSizes(Test)
| 32.868709 | 79 | 0.601092 | """
Collection of functions and classes for testing a naive version
of parallel execution for topic model variational inference.
Usage
-----
From a shell/terminal, use like a standard Python script.
$ python TestFiniteTopicModel.py --N 200 --nDoc 500 --K 50 --nWorkers 2
For some keyword args (like N, nDoc, K, nWorkers),
can use range syntax to easily compare performance as params change.
$ python TestFiniteTopicModel.py --nWorkers 2-4 --nDoc 1000
will repeat the test with 1000 documents for 2, 3, and 4 workers
Keyword Args
------------
* N : int or range
number of words per document for toy data
* nDoc : int or range
total number of documents to generate
* K : int or range
number of topics
* nWorkers : int or range
number of worker processes for parallel execution
* method : {'all', 'baseline', 'parallel', 'serial'}. Default = 'all'.
identifies which style of computation to perform
* nRepeat : int
number of times to repeat each called method
Methods
-----------
* runBenchmarkAcrossProblemSizes
Executable function that parses cmd-line args and runs benchmark tests.
* calcLocalParamsAndSummarize
Execute local and summary step on slice of data.
This is the function that we wish to call from each parallel worker.
"""
import os
import multiprocessing
from multiprocessing import sharedctypes
import warnings
import numpy as np
import unittest
import ctypes
import time
import itertools
import bnpy
def runBenchmarkAcrossProblemSizes(TestClass):
""" Execute speed benchmark across several problem sizes.
This is main function executed by running this file as script.
Parameters
--------
TestClass : constructor for a TestCase instance
Must offer a run_speed_benchmark method.
Post Condition
--------
Speed tests are executed, and results are printed to std out.
"""
import argparse
if 'OMP_NUM_THREADS' not in os.environ:
os.environ['OMP_NUM_THREADS'] = '1'
parser = argparse.ArgumentParser()
parser.add_argument('--nDoc', type=str, default='100')
parser.add_argument('--N', type=str, default='200')
parser.add_argument('--K', type=str, default='50')
parser.add_argument('--nWorkers', type=str, default='2')
parser.add_argument('--vocab_size', type=int, default=1000)
parser.add_argument('--nCoordAscentItersLP', type=int, default=100)
parser.add_argument('--convThrLP', type=float, default=0.001)
parser.add_argument('--method', type=str, default='all')
parser.add_argument('--nRepeat', type=int, default=1)
parser.add_argument('--verbose', type=int, default=0)
args = parser.parse_args()
NKDiterator = itertools.product(
rangeFromString(args.N),
rangeFromString(args.K),
rangeFromString(args.nDoc),
rangeFromString(args.nWorkers))
kwargs = dict(**args.__dict__)
print "Speed Test."
print " OMP_NUM_THREADS=%s" % (os.environ['OMP_NUM_THREADS'])
print " vocab_size=%d" % (args.vocab_size)
print " nCoordAscentItersLP=%d" % (args.nCoordAscentItersLP)
print " convThrLP=%.2e" % (args.convThrLP)
for (N, K, nDoc, nWorkers) in NKDiterator:
print '========================= nDoc %d N %d K=%d | nWorkers %d' \
% (nDoc, N, K, nWorkers)
kwargs['N'] = N
kwargs['K'] = K
kwargs['nDoc'] = nDoc
kwargs['nWorkers'] = nWorkers
myTest = TestClass('run_speed_benchmark', **kwargs)
myTest.setUp()
TimeInfo = myTest.run_speed_benchmark(method=args.method,
nRepeat=args.nRepeat)
myTest.tearDown() # closes all processes
def calcLocalParamsAndSummarize(Data, hmodel, start=None, stop=None, **kwargs):
    ''' Execute local step and summary step on slice of data.
    Args
    ----
    Data : bnpy Data object
    hmodel : bnpy HModel
    start : int or None
        id of data item that starts desired slice
    stop : int or None
        id of data item that ends desired slice
    Returns
    -----------
    SS : bnpy SuffStatBag
    '''
    # cslice restricts every step below to the [start, stop) slice.
    sliceArgs = dict(cslice=(start, stop))
    kwargs.update(sliceArgs)
    # Local step: per-document parameters, observation model first,
    # then the allocation model refines the same LP dict.
    LP = hmodel.obsModel.calc_local_params(Data, dict(), **kwargs)
    LP = hmodel.allocModel.calc_local_params(Data, LP, **kwargs)
    # Summary step: aggregate sufficient statistics for this slice only.
    SS = hmodel.allocModel.get_global_suff_stats(Data, LP, **sliceArgs)
    SS = hmodel.obsModel.get_global_suff_stats(Data, SS, LP, **sliceArgs)
    return SS
def sliceGenerator(nDoc=0, nWorkers=0):
    """ Iterate over slices given problem size and num workers

    Every worker receives batchSize documents, except the last one,
    whose slice is extended to cover any leftover remainder.

    Yields
    --------
    (start,stop) : tuple
    """
    batchSize = int(np.floor(nDoc / nWorkers))
    lastID = nWorkers - 1
    for workerID in range(nWorkers):
        start = batchSize * workerID
        if workerID == lastID:
            stop = nDoc
        else:
            stop = batchSize * (workerID + 1)
        yield start, stop
class Worker(multiprocessing.Process):
    """ Single "worker" process that processes tasks delivered via queues

    Jobs arrive on JobQueue as (start, stop) document-slice tuples; the
    resulting suff stat bag for each slice is pushed onto ResultQueue.
    A None job is the sentinel telling the worker to exit its run loop.
    """
    def __init__(self, uid, JobQueue, ResultQueue,
                 Data=None,
                 hmodel=None,
                 LPkwargs=None,
                 verbose=0):
        super(Worker, self).__init__()
        # uid only identifies this worker in verbose log output.
        self.uid = uid
        self.Data = Data
        self.hmodel = hmodel
        if LPkwargs is None:
            LPkwargs = dict()
        self.LPkwargs = LPkwargs
        self.JobQueue = JobQueue
        self.ResultQueue = ResultQueue
        self.verbose = verbose
    def printMsg(self, msg):
        # Prefix every line with this worker's id; silent unless verbose.
        if self.verbose:
            for line in msg.split("\n"):
                print "#%d: %s" % (self.uid, line)
    def run(self):
        self.printMsg("process SetUp! pid=%d" % (os.getpid()))
        # Construct iterator with sentinel value of None (for termination)
        jobIterator = iter(self.JobQueue.get, None)
        for jobArgs in jobIterator:
            start, stop = jobArgs
            SS = calcLocalParamsAndSummarize(
                self.Data, self.hmodel,
                start=start, stop=stop, **self.LPkwargs)
            self.ResultQueue.put(SS)
            self.JobQueue.task_done()
        # Clean up
        self.printMsg("process CleanUp! pid=%d" % (os.getpid()))
class Test(unittest.TestCase):
def shortDescription(self):
return None
def __init__(self, testname, seed=0, vocab_size=100,
nCoordAscentItersLP=100, convThrLP=0.01,
N=1000, nDoc=25, K=10, nWorkers=1, verbose=1,
**kwargs):
''' Create a new test harness for parallel topic model inference.
Post Condition Attributes
--------------
Data : bnpy DataObj dataset
hmodel : bnpy HModel
'''
super(type(self), self).__init__(testname)
self.nWorkers = nWorkers
self.verbose = verbose
self.N = N
self.K = K
self.nDoc = nDoc
self.LPkwargs = dict(nCoordAscentItersLP=nCoordAscentItersLP,
convThrLP=convThrLP)
PRNG = np.random.RandomState(seed)
topics = PRNG.gamma(1.0, 1.0, size=(K, vocab_size))
np.maximum(topics, 1e-30, out=topics)
topics /= topics.sum(axis=1)[:, np.newaxis]
topic_prior = 1.0 / K * np.ones(K)
self.Data = bnpy.data.WordsData.CreateToyDataFromLDAModel(
nWordsPerDoc=N, nDocTotal=nDoc, K=K, topics=topics,
seed=seed, topic_prior=topic_prior)
self.hmodel = bnpy.HModel.CreateEntireModel(
'VB', 'FiniteTopicModel', 'Mult',
dict(alpha=0.1, gamma=5),
dict(lam=0.1),
self.Data)
self.hmodel.init_global_params(self.Data, initname='randexamples', K=K)
def setUp(self, **kwargs):
''' Launch pool of worker processes, with queues to communicate with.
'''
# Create a JobQ (to hold tasks to be done)
# and a ResultsQ (to hold results of completed tasks)
manager = multiprocessing.Manager()
self.JobQ = manager.Queue()
self.ResultQ = manager.Queue()
# Launch desired number of worker processes
# We don't need to store references to these processes,
for uid in range(self.nWorkers):
Worker(uid, self.JobQ, self.ResultQ,
Data=self.Data,
hmodel=self.hmodel,
LPkwargs=self.LPkwargs,
verbose=self.verbose).start()
def tearDown(self):
""" Shut down all the workers.
"""
self.shutdownWorkers()
def shutdownWorkers(self):
""" Shut down all worker processes.
"""
for workerID in range(self.nWorkers):
self.JobQ.put(None)
def run_baseline(self):
""" Execute on entire matrix (no slices) in master process.
"""
SSall = calcLocalParamsAndSummarize(self.Data, self.hmodel,
**self.LPkwargs)
return SSall
def run_serial(self):
""" Execute on slices processed in serial by master process.
"""
SSagg = None
for start, stop in sliceGenerator(self.nDoc, self.nWorkers):
SSslice = calcLocalParamsAndSummarize(
self.Data, self.hmodel, start, stop,
**self.LPkwargs)
if start == 0:
SSagg = SSslice
else:
SSagg += SSslice
return SSagg
def run_parallel(self):
""" Execute on slices processed by workers in parallel.
"""
for start, stop in sliceGenerator(self.nDoc, self.nWorkers):
self.JobQ.put((start, stop))
nTaskDone = 0
while nTaskDone < self.nWorkers:
if not self.ResultQ.empty():
SSchunk = self.ResultQ.get()
if nTaskDone == 0:
SS = SSchunk
else:
SS += SSchunk
nTaskDone += 1
return SS
def test_correctness_serial(self):
''' Verify that the local step works as expected.
No parallelization here.
Just verifying that we can split computation up into >1 slice,
add up results from all slices and still get the same answer.
'''
print ''
SSbase = self.run_baseline()
SSserial = self.run_serial()
allcloseSS(SSbase, SSserial)
def test_correctness_parallel(self):
""" Verify that we can execute local step across several processes
Each process does the following:
* grab its chunk of data from a shared jobQueue
* performs computations on this chunk
* load the resulting suff statistics object into resultsQueue
"""
print ''
SSparallel = self.run_parallel()
SSbase = self.run_baseline()
allcloseSS(SSparallel, SSbase)
def test_speed(self, nRepeat=5):
""" Compare speed of different algorithms.
"""
print ''
Results = self.run_all_with_timer(nRepeat=nRepeat)
assert True
def run_speed_benchmark(self, method='all', nRepeat=3):
""" Compare speed of different algorithms.
"""
if method == 'all':
Results = self.run_all_with_timer(nRepeat=nRepeat)
elif method == 'parallel':
ptime = self.run_with_timer('run_parallel', nRepeat=nRepeat)
Results = dict(parallel_time=ptime)
elif method == 'serial':
ptime = self.run_with_timer('run_serial', nRepeat=nRepeat)
Results = dict(serial_time=ptime)
else:
ptime = self.run_with_timer('run_baseline', nRepeat=nRepeat)
Results = dict(base_time=ptime)
for key in ['base_time', 'serial_time', 'parallel_time']:
if key in Results:
try:
speedupval = Results[key.replace('time', 'speedup')]
speedupmsg = "| %8.3f speedup" % (speedupval)
except KeyError:
speedupmsg = ""
print "%18s | %8.3f sec %s" % (
key,
Results[key],
speedupmsg
)
return Results
def run_with_timer(self, funcToCall, nRepeat=3):
""" Timing experiment specified by funcToCall.
"""
starttime = time.time()
for r in xrange(nRepeat):
getattr(self, funcToCall)()
return (time.time() - starttime) / nRepeat
def run_all_with_timer(self, nRepeat=3):
""" Timing experiments with baseline, serial, and parallel versions.
"""
serial_time = self.run_with_timer('run_serial', nRepeat)
parallel_time = self.run_with_timer('run_parallel', nRepeat)
base_time = self.run_with_timer('run_baseline', nRepeat)
return dict(
base_time=base_time,
base_speedup=1.0,
serial_time=serial_time,
serial_speedup=base_time / serial_time,
parallel_time=parallel_time,
parallel_speedup=base_time / parallel_time,
)
def rangeFromString(commaString):
""" Convert a comma string like "1,5-7" into a list [1,5,6,7]
Returns
--------
myList : list of integers
Reference
-------
http://stackoverflow.com/questions/6405208/\
how-to-convert-numeric-string-ranges-to-a-list-in-python
"""
listOfLists = [rangeFromHyphen(r) for r in commaString.split(',')]
flatList = itertools.chain(*listOfLists)
return flatList
def rangeFromHyphen(hyphenString):
""" Convert a hyphen string like "5-7" into a list [5,6,7]
Returns
--------
myList : list of integers
"""
x = [int(x) for x in hyphenString.split('-')]
return range(x[0], x[-1] + 1)
def allcloseSS(SS1, SS2):
""" Verify that two suff stat bags have indistinguishable data.
"""
for key in SS1._FieldDims.keys():
arr1 = getattr(SS1, key)
arr2 = getattr(SS2, key)
print key
if isinstance(arr1, float):
print arr1
print arr1
elif arr1.ndim == 1:
print arr1[:3]
print arr2[:3]
else:
print arr1[:2, :3]
print arr2[:2, :3]
assert np.allclose(arr1, arr2)
if __name__ == "__main__":
runBenchmarkAcrossProblemSizes(Test)
| false | true |
f71c9f9d367cb8155ed384c51b60c4ecac3f16c3 | 447 | py | Python | plan_marker/migrations/0003_auto_20150829_1529.py | oskgeek/tdl_fitness | e61da8b4b216147ba1e5d9b64db75f2cf8568759 | [
"Apache-2.0"
] | null | null | null | plan_marker/migrations/0003_auto_20150829_1529.py | oskgeek/tdl_fitness | e61da8b4b216147ba1e5d9b64db75f2cf8568759 | [
"Apache-2.0"
] | null | null | null | plan_marker/migrations/0003_auto_20150829_1529.py | oskgeek/tdl_fitness | e61da8b4b216147ba1e5d9b64db75f2cf8568759 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('plan_marker', '0002_userprofile_plan_created'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='plan_created',
field=models.CharField(max_length=255, null=True, blank=True),
),
]
| 22.35 | 74 | 0.630872 |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('plan_marker', '0002_userprofile_plan_created'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='plan_created',
field=models.CharField(max_length=255, null=True, blank=True),
),
]
| true | true |
f71ca0e23cd8fb822e78350418aeea8241322271 | 1,142 | py | Python | sequenceplot/__init__.py | kickingvegas/SequencePlot | 82514e0dc1a3e670ea727041219dc7a69fd9e96b | [
"Apache-2.0"
] | 3 | 2017-07-23T22:32:22.000Z | 2020-05-03T20:16:36.000Z | sequenceplot/__init__.py | kickingvegas/SequencePlot | 82514e0dc1a3e670ea727041219dc7a69fd9e96b | [
"Apache-2.0"
] | null | null | null | sequenceplot/__init__.py | kickingvegas/SequencePlot | 82514e0dc1a3e670ea727041219dc7a69fd9e96b | [
"Apache-2.0"
] | 1 | 2021-09-10T08:45:39.000Z | 2021-09-10T08:45:39.000Z | #!/usr/bin/env python
# Copyright 2012 Yummy Melon Software LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Charles Y. Choi
#
"""
sequenceplot is a module that generates UML sequence diagrams using the UMLGraph package.
"""
__version__ = '0.4'
class SyntaxError(Exception):
    """Error raised for malformed sequence-plot input.

    NOTE(review): this name shadows the builtin SyntaxError; renaming it
    would change the package's public API, so the name is kept as-is.
    """
    def __init__(self, value):
        # The offending object or message, echoed via repr() below.
        self.value = value
    def __str__(self):
        return "%r" % (self.value,)
def picEscapeString(buf):
    """Escape double quotes so *buf* is safe inside a pic string literal."""
    return buf.replace('"', '\\"')
from SequenceObject import SequenceObject
from Placeholder import Placeholder
from Actor import Actor
from SequenceDiagram import SequenceDiagram
| 26.55814 | 89 | 0.738179 |
__version__ = '0.4'
class SyntaxError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def picEscapeString(buf):
result = buf.replace('"', '\\"')
return result
from SequenceObject import SequenceObject
from Placeholder import Placeholder
from Actor import Actor
from SequenceDiagram import SequenceDiagram
| true | true |
f71ca16a0d7d9c01229a650639558eb2857cf6b5 | 681 | py | Python | python/discord.py/example-bot.py | martian17/Community-Bin | e7a1471571227fdda3929a9cdd9a3cce743156df | [
"MIT"
] | null | null | null | python/discord.py/example-bot.py | martian17/Community-Bin | e7a1471571227fdda3929a9cdd9a3cce743156df | [
"MIT"
] | null | null | null | python/discord.py/example-bot.py | martian17/Community-Bin | e7a1471571227fdda3929a9cdd9a3cce743156df | [
"MIT"
] | null | null | null | # This is an example of a very basic discord bot in python
import discord
from discord.ext import commands
bot = commands.Bot(command_prefix=".", description="A basic discord bot")
@bot.event
async def on_ready():
    # Fired once the bot has connected and its internal cache is ready.
    print("I'm online!")
@commands.command(name="ping")
async def _ping(ctx):
    """Reply with an embed reporting the bot's websocket latency in ms."""
    latency_ms = bot.latency * 1000  # bot.latency is in seconds
    reply = discord.Embed(
        title="Pong!",
        description=f"My latency is {latency_ms:.2f}ms",
    )
    await ctx.send(embed=reply)
# Register the command: it was defined standalone, not via @bot.command.
bot.add_command(_ping)
if __name__ == "__main__":  # make sure the file isn't being imported
    # Blocking call: connects and runs the event loop until interrupted.
    bot.run("YOUR_TOKEN_HERE")  # put your own bot token in here
| 23.482759 | 73 | 0.678414 |
import discord
from discord.ext import commands
bot = commands.Bot(command_prefix=".", description="A basic discord bot")
@bot.event
async def on_ready():
print("I'm online!")
@commands.command(name="ping")
async def _ping(ctx):
latency = bot.latency * 1000 # convert to ms
embed = discord.Embed(
title="Pong!", # make an embed to send
description=f"My latency is {latency:.2f}ms",
)
await ctx.send(embed=embed)
bot.add_command(_ping)
if __name__ == "__main__": # make sure the file isn't being imported
bot.run("YOUR_TOKEN_HERE")
| true | true |
f71ca30466bc275ef559c5fc42e0c93a4703385c | 1,407 | py | Python | csvkit/convert/__init__.py | tthibo/csvkit | fb12c7df32504b51b9def6e3cff41c36147616cf | [
"MIT"
] | 2 | 2015-03-06T15:22:02.000Z | 2016-03-11T13:35:48.000Z | csvkit/convert/__init__.py | tthibo/csvkit | fb12c7df32504b51b9def6e3cff41c36147616cf | [
"MIT"
] | null | null | null | csvkit/convert/__init__.py | tthibo/csvkit | fb12c7df32504b51b9def6e3cff41c36147616cf | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from csvitself import csv2csv
from fixed import fixed2csv
from js import json2csv
from xls import xls2csv
SUPPORTED_FORMATS = ['fixed', 'xls', 'csv']
def convert(f, format, schema=None, key=None, **kwargs):
    """
    Convert a file of a specified format to CSV.

    f is an open file-like object; format selects the parser
    ('fixed', 'xls', 'js', or 'csv'). A schema file is mandatory for
    fixed-width input; key is only meaningful for JSON ('js') input.
    Raises ValueError for missing arguments or unsupported formats.
    """
    if not f:
        raise ValueError('f must not be None')
    if not format:
        raise ValueError('format must not be None')
    # Guard-clause dispatch: each recognized format returns immediately.
    if format == 'fixed':
        if not schema:
            raise ValueError('schema must not be null when format is "fixed"')
        return fixed2csv(f, schema, **kwargs)
    if format == 'xls':
        return xls2csv(f, **kwargs)
    if format == 'js':
        return json2csv(f, key, **kwargs)
    if format == 'csv':
        return csv2csv(f, **kwargs)
    raise ValueError('format "%s" is not supported' % format)
def guess_format(filename):
    """
    Try to guess a file's format based on its extension (or lack thereof).

    Returns 'fixed' for extensionless names, a canonical format name for
    recognized extensions, and None for anything unrecognized.
    """
    head, sep, extension = filename.rpartition('.')
    if not sep:
        # No extension: assume fixed-width
        return 'fixed'
    known = {
        'xls': 'xls',
        'json': 'js',
        'js': 'js',
        'csv': 'csv',
        'fixed': 'fixed',
    }
    # dict.get returns None for unrecognized extensions, matching the
    # original fall-through behavior.
    return known.get(extension)
| 25.125 | 78 | 0.606254 |
from csvitself import csv2csv
from fixed import fixed2csv
from js import json2csv
from xls import xls2csv
SUPPORTED_FORMATS = ['fixed', 'xls', 'csv']
def convert(f, format, schema=None, key=None, **kwargs):
if not f:
raise ValueError('f must not be None')
if not format:
raise ValueError('format must not be None')
if format == 'fixed':
if not schema:
raise ValueError('schema must not be null when format is "fixed"')
return fixed2csv(f, schema, **kwargs)
elif format == 'xls':
return xls2csv(f, **kwargs)
elif format == 'js':
return json2csv(f, key, **kwargs)
elif format == 'csv':
return csv2csv(f, **kwargs)
else:
raise ValueError('format "%s" is not supported' % format)
def guess_format(filename):
last_period = filename.rfind('.')
if last_period == -1:
return 'fixed'
extension = filename[last_period + 1:]
if extension == 'xls':
return extension
elif extension in ['json', 'js']:
return 'js'
elif extension == 'csv':
return extension
elif extension == 'fixed':
return extension
return None
| true | true |
f71ca381286ae5e3aa87acbe71537fe119e50954 | 4,491 | py | Python | demoNN.py | zelhar/mg21 | f8392aba7deb63aa85f3d137ef81dea1bb742b41 | [
"MIT"
] | null | null | null | demoNN.py | zelhar/mg21 | f8392aba7deb63aa85f3d137ef81dea1bb742b41 | [
"MIT"
] | null | null | null | demoNN.py | zelhar/mg21 | f8392aba7deb63aa85f3d137ef81dea1bb742b41 | [
"MIT"
] | null | null | null | import torch
from torch import nn
from torch.utils.data import DataLoader, Dataset, TensorDataset
from torchvision import datasets
from torchvision.transforms import ToTensor, Lambda, Compose
import matplotlib.pyplot as plt
import torch.distributions as D
import torch.nn.functional as F
# Download training data from open datasets.
training_data = datasets.FashionMNIST(
root="data",
train=True,
download=True,
transform=ToTensor(),
)
# Download test data from open datasets.
test_data = datasets.FashionMNIST(
root="data",
train=False,
download=True,
transform=ToTensor(),
)
batch_size = 64
# Create data loaders.
train_dataloader = DataLoader(training_data, batch_size=batch_size)
test_dataloader = DataLoader(test_data, batch_size=batch_size)
for X, y in test_dataloader:
print("Shape of X [N, C, H, W]: ", X.shape)
print("Shape of y: ", y.shape, y.dtype)
break
# testing synthetic dataset
x = torch.randn((100,3,28,28))
d = TensorDataset(x)
z = d.__getitem__(2) # retuns 1-tuple of tensor (no label)
z[0].shape
# with labels
y = torch.randint(low=0, high=1, size=(100,))
d = TensorDataset(x,y)
z = d.__getitem__(2) # retuns 1-tuple of tensor (no label)
z[0].shape
z[1].shape
# Get cpu or gpu device for training.
device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using {} device".format(device))
# Define model
class NeuralNetwork(nn.Module):
    """Simple MLP classifier for 28x28 single-channel images, 10 classes.

    Architecture: flatten -> Linear(784, 512) -> ReLU
                  -> Linear(512, 512) -> ReLU -> Linear(512, 10).
    """
    def __init__(self):
        super(NeuralNetwork, self).__init__()
        # Attribute names are kept so state_dict keys stay compatible.
        self.flatten = nn.Flatten()
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(28 * 28, 512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.ReLU(),
            nn.Linear(512, 10),
        )
    def forward(self, x):
        """Return unnormalized class scores (logits) of shape (N, 10)."""
        flat = self.flatten(x)
        return self.linear_relu_stack(flat)
model = NeuralNetwork().to(device)
print(model)
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
def train(dataloader, model, loss_fn, optimizer):
    """Run one optimization epoch over dataloader.

    Uses the module-level `device` for tensor placement and prints the
    running loss every 100 batches.
    """
    size = len(dataloader.dataset)
    model.train()
    for batch, (X, y) in enumerate(dataloader):
        X, y = X.to(device), y.to(device)
        # Compute prediction error
        pred = model(X)
        loss = loss_fn(pred, y)
        # Backpropagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if batch % 100 == 0:
            # .item() extracts the scalar loss value for printing.
            loss, current = loss.item(), batch * len(X)
            print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")
def test(dataloader, model, loss_fn):
    """Evaluate model on dataloader; print average loss and accuracy.

    Runs in eval mode under torch.no_grad() so no gradients are tracked.
    """
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    model.eval()
    test_loss, correct = 0, 0
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            # argmax over the class dimension gives the predicted label.
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()
    test_loss /= num_batches
    correct /= size
    print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
# Train for a few epochs, evaluating on the test split after each one.
epochs = 5
for t in range(epochs):
    print(f"Epoch {t+1}\n-------------------------------")
    train(train_dataloader, model, loss_fn, optimizer)
    test(test_dataloader, model, loss_fn)
print("Done!")
bce = nn.BCELoss(reduction="none")
x = torch.tensor(0.5)
y = torch.tensor(0.7)
bce(x,y)
f = lambda x, y: y * torch.log(x) + (1-y) * torch.log(1-x)
f(x,y)
torch.softmax(torch.tensor([1,2,3]), 0, torch.float64)
# generate mixed distributions
m = D.OneHotCategorical(torch.tensor([1,2,3,6]))
m.sample()
m.sample_n(10)
m.sample((3,4))
m = D.Normal(torch.tensor([0,10.0]), torch.tensor([1.0,2]))
m.sample((3,4))
# Example of target with class indices
loss = nn.CrossEntropyLoss()
input = torch.randn(3, 5, requires_grad=True)
target = torch.empty(3, dtype=torch.long).random_(5)
output = loss(input, target)
output.backward()
# Example of target with class probabilities
input = torch.randn(3, 5, requires_grad=True)
target = torch.randn(3, 5).softmax(dim=1)
output = loss(input, target)
output.backward()
input = torch.randn((3, 2), requires_grad=True)
target = torch.rand((3, 2), requires_grad=False)
loss = F.binary_cross_entropy(F.sigmoid(input), target)
loss.backward()
loss = nn.BCELoss(reduction="none")
x = torch.tensor([0,0.25,0.5,0.75,1])
F.binary_cross_entropy(x,x,reduction="none")
loss(x,x)
x = torch.tensor([0,25,0.5,0.75,1])
y = torch.tensor([0,0.25,0.5,0.75,1])
loss(x,y)
| 25.959538 | 91 | 0.649521 | import torch
from torch import nn
from torch.utils.data import DataLoader, Dataset, TensorDataset
from torchvision import datasets
from torchvision.transforms import ToTensor, Lambda, Compose
import matplotlib.pyplot as plt
import torch.distributions as D
import torch.nn.functional as F
training_data = datasets.FashionMNIST(
root="data",
train=True,
download=True,
transform=ToTensor(),
)
test_data = datasets.FashionMNIST(
root="data",
train=False,
download=True,
transform=ToTensor(),
)
batch_size = 64
train_dataloader = DataLoader(training_data, batch_size=batch_size)
test_dataloader = DataLoader(test_data, batch_size=batch_size)
for X, y in test_dataloader:
print("Shape of X [N, C, H, W]: ", X.shape)
print("Shape of y: ", y.shape, y.dtype)
break
x = torch.randn((100,3,28,28))
d = TensorDataset(x)
z = d.__getitem__(2)
z[0].shape
y = torch.randint(low=0, high=1, size=(100,))
d = TensorDataset(x,y)
z = d.__getitem__(2)
z[0].shape
z[1].shape
device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using {} device".format(device))
class NeuralNetwork(nn.Module):
def __init__(self):
super(NeuralNetwork, self).__init__()
self.flatten = nn.Flatten()
self.linear_relu_stack = nn.Sequential(
nn.Linear(28*28, 512),
nn.ReLU(),
nn.Linear(512, 512),
nn.ReLU(),
nn.Linear(512, 10)
)
def forward(self, x):
x = self.flatten(x)
logits = self.linear_relu_stack(x)
return logits
model = NeuralNetwork().to(device)
print(model)
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
def train(dataloader, model, loss_fn, optimizer):
size = len(dataloader.dataset)
model.train()
for batch, (X, y) in enumerate(dataloader):
X, y = X.to(device), y.to(device)
pred = model(X)
loss = loss_fn(pred, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if batch % 100 == 0:
loss, current = loss.item(), batch * len(X)
print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")
def test(dataloader, model, loss_fn):
size = len(dataloader.dataset)
num_batches = len(dataloader)
model.eval()
test_loss, correct = 0, 0
with torch.no_grad():
for X, y in dataloader:
X, y = X.to(device), y.to(device)
pred = model(X)
test_loss += loss_fn(pred, y).item()
correct += (pred.argmax(1) == y).type(torch.float).sum().item()
test_loss /= num_batches
correct /= size
print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
epochs = 5
for t in range(epochs):
print(f"Epoch {t+1}\n-------------------------------")
train(train_dataloader, model, loss_fn, optimizer)
test(test_dataloader, model, loss_fn)
print("Done!")
bce = nn.BCELoss(reduction="none")
x = torch.tensor(0.5)
y = torch.tensor(0.7)
bce(x,y)
f = lambda x, y: y * torch.log(x) + (1-y) * torch.log(1-x)
f(x,y)
torch.softmax(torch.tensor([1,2,3]), 0, torch.float64)
m = D.OneHotCategorical(torch.tensor([1,2,3,6]))
m.sample()
m.sample_n(10)
m.sample((3,4))
m = D.Normal(torch.tensor([0,10.0]), torch.tensor([1.0,2]))
m.sample((3,4))
loss = nn.CrossEntropyLoss()
input = torch.randn(3, 5, requires_grad=True)
target = torch.empty(3, dtype=torch.long).random_(5)
output = loss(input, target)
output.backward()
input = torch.randn(3, 5, requires_grad=True)
target = torch.randn(3, 5).softmax(dim=1)
output = loss(input, target)
output.backward()
input = torch.randn((3, 2), requires_grad=True)
target = torch.rand((3, 2), requires_grad=False)
loss = F.binary_cross_entropy(F.sigmoid(input), target)
loss.backward()
loss = nn.BCELoss(reduction="none")
x = torch.tensor([0,0.25,0.5,0.75,1])
F.binary_cross_entropy(x,x,reduction="none")
loss(x,x)
x = torch.tensor([0,25,0.5,0.75,1])
y = torch.tensor([0,0.25,0.5,0.75,1])
loss(x,y)
| true | true |
f71ca389de2acdd4122644dc61a4fb411c6d4bf0 | 4,451 | py | Python | geoist/snoopy/algorithms/correlator_algorithms/cross_correlator.py | CHEN-Zhaohui/geoist | 06a00db3e0ed3d92abf3e45b7b3bfbef6a858a5b | [
"MIT"
] | 53 | 2018-11-17T03:29:55.000Z | 2022-03-18T02:36:25.000Z | geoist/snoopy/algorithms/correlator_algorithms/cross_correlator.py | CHEN-Zhaohui/geoist | 06a00db3e0ed3d92abf3e45b7b3bfbef6a858a5b | [
"MIT"
] | 3 | 2018-11-28T11:37:51.000Z | 2019-01-30T01:52:45.000Z | geoist/snoopy/algorithms/correlator_algorithms/cross_correlator.py | CHEN-Zhaohui/geoist | 06a00db3e0ed3d92abf3e45b7b3bfbef6a858a5b | [
"MIT"
] | 35 | 2018-11-17T03:29:57.000Z | 2022-03-23T17:57:06.000Z | # coding=utf-8
from geoist.snoopy.algorithms.correlator_algorithms import CorrelatorAlgorithm
from geoist.snoopy.modules.correlation_result import CorrelationResult
from geoist.snoopy.constants import (DEFAULT_SHIFT_IMPACT,
DEFAULT_ALLOWED_SHIFT_SECONDS)
class CrossCorrelator(CorrelatorAlgorithm):
    """
    Method 1: CrossCorrelation algorithm.
    Ideas come from Paul Bourke(http://paulbourke.net/miscellaneous/correlate/).
    """
    def __init__(self, time_series_a, time_series_b, max_shift_seconds=None, shift_impact=None):
        """
        Initializer
        :param TimeSeries time_series_a: TimeSeries a.
        :param TimeSeries time_series_b: TimeSeries b.
        :param int max_shift_seconds: allowed maximal shift (stored in
            ``max_shift_milliseconds``; passed through unscaled when given).
        :param float shift_impact: weight of the shift penalty used to build the
            "shifted correlation coefficient".
        """
        super(CrossCorrelator, self).__init__(self.__class__.__name__, time_series_a, time_series_b)
        self.shift_impact = shift_impact or DEFAULT_SHIFT_IMPACT
        if max_shift_seconds is not None:
            self.max_shift_milliseconds = max_shift_seconds
        else:
            self.max_shift_milliseconds = DEFAULT_ALLOWED_SHIFT_SECONDS * 1000

    def _detect_correlation(self):
        """
        Detect correlation by computing correlation coefficients for all allowed shift steps,
        then take the maximum.
        """
        correlations = []
        shifted_correlations = []
        self.time_series_a.normalize()
        self.time_series_b.normalize()
        a, b = self.time_series_a.align(self.time_series_b)
        a_values, b_values = a.values, b.values
        a_avg, b_avg = a.average(), b.average()
        a_stdev, b_stdev = a.stdev(), b.stdev()
        n = len(a)
        denom = a_stdev * b_stdev * n
        # Find the maximal shift steps according to the maximal shift seconds.
        allowed_shift_step = self._find_allowed_shift(a.timestamps)
        if allowed_shift_step:
            shift_upper_bound = allowed_shift_step
            shift_lower_bound = -allowed_shift_step
        else:
            shift_upper_bound = 1
            shift_lower_bound = 0
        for delay in range(shift_lower_bound, shift_upper_bound):
            delay_in_seconds = a.timestamps[abs(delay)] - a.timestamps[0]
            if delay < 0:
                delay_in_seconds = -delay_in_seconds
            s = 0
            for i in range(n):
                j = i + delay
                if j < 0 or j >= n:
                    continue
                else:
                    s += ((a_values[i] - a_avg) * (b_values[j] - b_avg))
            r = s / denom if denom != 0 else s
            correlations.append([delay_in_seconds, r])
            # Take shift into account to create a "shifted correlation coefficient".
            if self.max_shift_milliseconds:
                shifted_correlations.append(r * (1 + float(delay_in_seconds) / self.max_shift_milliseconds * self.shift_impact))
            else:
                shifted_correlations.append(r)
        max_correlation = list(max(correlations, key=lambda k: k[1]))
        max_shifted_correlation = max(shifted_correlations)
        max_correlation.append(max_shifted_correlation)
        self.correlation_result = CorrelationResult(*max_correlation)

    def _find_allowed_shift(self, timestamps):
        """
        Find the maximum allowed shift steps based on max_shift_milliseconds.
        param list timestamps: timestamps of a time series.
        """
        init_ts = timestamps[0]
        residual_timestamps = [ts - init_ts for ts in timestamps]
        n = len(residual_timestamps)
        return self._find_first_bigger(residual_timestamps, self.max_shift_milliseconds, 0, n)

    def _find_first_bigger(self, timestamps, target, lower_bound, upper_bound):
        """
        Find the first element in timestamps whose value is bigger than target.
        param list timestamps: list of timestamps(epoch number).
        param target: target value.
        param lower_bound: lower bound for binary search.
        param upper_bound: upper bound for binary search.
        """
        # Bug fix: `pos` used to be unbound (NameError) when the search range
        # was empty (lower_bound >= upper_bound, e.g. an empty series).
        pos = lower_bound
        while lower_bound < upper_bound:
            pos = lower_bound + (upper_bound - lower_bound) / 2
            pos = int(pos)
            if timestamps[pos] > target:
                upper_bound = pos
            else:
                lower_bound = pos + 1
        return pos
| 43.637255 | 128 | 0.642103 |
from geoist.snoopy.algorithms.correlator_algorithms import CorrelatorAlgorithm
from geoist.snoopy.modules.correlation_result import CorrelationResult
from geoist.snoopy.constants import (DEFAULT_SHIFT_IMPACT,
DEFAULT_ALLOWED_SHIFT_SECONDS)
class CrossCorrelator(CorrelatorAlgorithm):
    """Cross-correlation between two time series over a bounded set of time shifts.

    Ideas come from Paul Bourke (http://paulbourke.net/miscellaneous/correlate/).
    """
    def __init__(self, time_series_a, time_series_b, max_shift_seconds=None, shift_impact=None):
        """
        :param time_series_a: first TimeSeries.
        :param time_series_b: second TimeSeries.
        :param max_shift_seconds: maximal allowed shift; stored as-is in
            ``max_shift_milliseconds`` when given (no unit conversion here).
        :param shift_impact: weight of the shift penalty for the shifted coefficient.
        """
        super(CrossCorrelator, self).__init__(self.__class__.__name__, time_series_a, time_series_b)
        self.shift_impact = shift_impact or DEFAULT_SHIFT_IMPACT
        if max_shift_seconds is not None:
            self.max_shift_milliseconds = max_shift_seconds
        else:
            self.max_shift_milliseconds = DEFAULT_ALLOWED_SHIFT_SECONDS * 1000
    def _detect_correlation(self):
        """Compute a correlation coefficient per allowed shift and keep the maximum.

        Stores a CorrelationResult(delay, coefficient, shifted_coefficient).
        """
        correlations = []
        shifted_correlations = []
        # Normalize both series before aligning them on common timestamps.
        self.time_series_a.normalize()
        self.time_series_b.normalize()
        a, b = self.time_series_a.align(self.time_series_b)
        a_values, b_values = a.values, b.values
        a_avg, b_avg = a.average(), b.average()
        a_stdev, b_stdev = a.stdev(), b.stdev()
        n = len(a)
        denom = a_stdev * b_stdev * n
        # Convert the allowed shift duration into a number of index steps.
        allowed_shift_step = self._find_allowed_shift(a.timestamps)
        if allowed_shift_step:
            shift_upper_bound = allowed_shift_step
            shift_lower_bound = -allowed_shift_step
        else:
            # No shift allowed: evaluate delay 0 only.
            shift_upper_bound = 1
            shift_lower_bound = 0
        for delay in range(shift_lower_bound, shift_upper_bound):
            delay_in_seconds = a.timestamps[abs(delay)] - a.timestamps[0]
            if delay < 0:
                delay_in_seconds = -delay_in_seconds
            s = 0
            for i in range(n):
                j = i + delay
                if j < 0 or j >= n:
                    continue
                else:
                    s += ((a_values[i] - a_avg) * (b_values[j] - b_avg))
            # Pearson-style normalization; fall back to the raw sum if denom is 0.
            r = s / denom if denom != 0 else s
            correlations.append([delay_in_seconds, r])
            # Shift-penalized coefficient ("shifted correlation coefficient").
            if self.max_shift_milliseconds:
                shifted_correlations.append(r * (1 + float(delay_in_seconds) / self.max_shift_milliseconds * self.shift_impact))
            else:
                shifted_correlations.append(r)
        max_correlation = list(max(correlations, key=lambda k: k[1]))
        max_shifted_correlation = max(shifted_correlations)
        max_correlation.append(max_shifted_correlation)
        self.correlation_result = CorrelationResult(*max_correlation)
    def _find_allowed_shift(self, timestamps):
        """Maximum allowed shift in index steps, derived from max_shift_milliseconds."""
        init_ts = timestamps[0]
        residual_timestamps = [ts - init_ts for ts in timestamps]
        n = len(residual_timestamps)
        return self._find_first_bigger(residual_timestamps, self.max_shift_milliseconds, 0, n)
    def _find_first_bigger(self, timestamps, target, lower_bound, upper_bound):
        """Binary-search the index of the first element bigger than target.

        NOTE(review): if lower_bound >= upper_bound on entry, `pos` is never
        bound and this raises NameError — confirm callers always pass a
        non-empty range.
        """
        while lower_bound < upper_bound:
            pos = lower_bound + (upper_bound - lower_bound) / 2
            pos = int(pos)
            if timestamps[pos] > target:
                upper_bound = pos
            else:
                lower_bound = pos + 1
        return pos
| true | true |
f71ca44defb36643ad8a93f4726f956b8b913e57 | 346 | py | Python | Algorithms/746/min-cost-climbing-stairs.py | M-Quadra/LeetCode-problems | 0cc100aa1e50b02df289f04fe2e0b97239eb9895 | [
"MIT"
] | null | null | null | Algorithms/746/min-cost-climbing-stairs.py | M-Quadra/LeetCode-problems | 0cc100aa1e50b02df289f04fe2e0b97239eb9895 | [
"MIT"
] | null | null | null | Algorithms/746/min-cost-climbing-stairs.py | M-Quadra/LeetCode-problems | 0cc100aa1e50b02df289f04fe2e0b97239eb9895 | [
"MIT"
] | null | null | null | from typing import List
class Solution:
    def minCostClimbingStairs(self, cost: List[int]) -> int:
        """Return the minimum total cost to reach the top of the staircase.

        You may start on step 0 or step 1 and climb one or two steps at a time,
        paying ``cost[i]`` when leaving step i (LeetCode 746).

        Fixes the corrupted trailing return line and replaces the
        sentinel-filled O(n) dp list with an O(1)-space rolling DP.
        """
        # prev/curr = cheapest cost to stand on steps i-2 and i-1.
        prev = curr = 0
        for i in range(2, len(cost) + 1):
            prev, curr = curr, min(curr + cost[i - 1], prev + cost[i - 2])
        return curr
class Solution:
    def minCostClimbingStairs(self, cost: List[int]) -> int:
        """Minimum cost to reach the top, climbing one or two stairs per move.

        dp[k] is the cheapest cost found so far to stand on step k; steps 0 and
        1 are free starting points. Fixes the corrupted trailing return line
        (extraction artifact fused onto ``return dp[len(cost)]``).
        """
        n = len(cost)
        dp = [0, 0] + [0x7FFFFFFF] * n  # large sentinel = "not reachable yet"
        for step in range(n):
            reach = dp[step] + cost[step]
            # Leaving `step` lands on step+1 or step+2.
            dp[step + 1] = min(dp[step + 1], reach)
            dp[step + 2] = min(dp[step + 2], reach)
        return dp[n]
f71ca45c2a4d1c7deaea184b4a83e5e006c32425 | 90 | py | Python | regtests/str/mul.py | bpmbank/PythonJS | 591a80afd8233fb715493591db2b68f1748558d9 | [
"BSD-3-Clause"
] | 319 | 2015-01-02T11:34:16.000Z | 2022-03-25T00:43:33.000Z | regtests/str/mul.py | bpmbank/PythonJS | 591a80afd8233fb715493591db2b68f1748558d9 | [
"BSD-3-Clause"
] | 10 | 2015-02-03T02:33:09.000Z | 2021-11-09T21:41:00.000Z | regtests/str/mul.py | bpmbank/PythonJS | 591a80afd8233fb715493591db2b68f1748558d9 | [
"BSD-3-Clause"
] | 61 | 2015-01-02T12:01:56.000Z | 2021-12-08T07:16:16.000Z | """string multiplication"""
def main():
    """Exercise string repetition with the ``*`` operator."""
    word = 'hi'
    doubled = word * 2
    TestError( doubled == 'hihi' )
| 10 | 27 | 0.522222 |
def main():
    """Check that multiplying 'hi' by 2 yields 'hihi'."""
    base = 'hi'
    result = base * 2
    TestError( result == 'hihi' )
| true | true |
f71ca4a04ecbc21aada0d63286c6160730dff7df | 1,204 | py | Python | pyro/distributions/reflected.py | ajrcampbell/pyro | 37680e6d08f20cda95729427143f17875484b21d | [
"MIT"
] | null | null | null | pyro/distributions/reflected.py | ajrcampbell/pyro | 37680e6d08f20cda95729427143f17875484b21d | [
"MIT"
] | null | null | null | pyro/distributions/reflected.py | ajrcampbell/pyro | 37680e6d08f20cda95729427143f17875484b21d | [
"MIT"
] | null | null | null | from torch.distributions import constraints
from torch.distributions.transforms import AbsTransform
from pyro.distributions.torch import TransformedDistribution
class ReflectedDistribution(TransformedDistribution):
    """
    Equivalent to ``TransformedDistribution(base_dist, AbsTransform())``,
    but additionally supports :meth:`log_prob` .

    :param ~torch.distributions.Distribution base_dist: The distribution to
        reflect.
    """
    support = constraints.positive

    def __init__(self, base_dist, validate_args=None):
        # Reflection via |x| is only well-defined here for univariate base
        # distributions (empty event_shape).
        if base_dist.event_shape:
            raise ValueError("Only univariate distributions can be reflected.")
        super().__init__(base_dist, AbsTransform(), validate_args)

    def expand(self, batch_shape, _instance=None):
        """Return a new instance with batch dimensions expanded to ``batch_shape``."""
        new = self._get_checked_instance(type(self), _instance)
        return super().expand(batch_shape, _instance=new)

    def log_prob(self, value):
        """log p(value) = logsumexp over the base density at +value and -value."""
        if self._validate_args:
            self._validate_sample(value)
        dim = max(len(self.batch_shape), value.dim())
        # Leading size-2 axis carries the two signs; trailing singleton axes
        # broadcast against value's batch shape.
        plus_minus = value.new_tensor([1., -1.]).reshape((2,) + (1,) * dim)
        return self.base_dist.log_prob(plus_minus * value).logsumexp(0)
| 37.625 | 79 | 0.709302 | from torch.distributions import constraints
from torch.distributions.transforms import AbsTransform
from pyro.distributions.torch import TransformedDistribution
class ReflectedDistribution(TransformedDistribution):
    """Distribution of |X| for a univariate base distribution X.

    Same as ``TransformedDistribution(base_dist, AbsTransform())`` but with a
    working :meth:`log_prob`.
    """
    support = constraints.positive
    def __init__(self, base_dist, validate_args=None):
        # Only univariate (empty event_shape) base distributions are supported.
        if base_dist.event_shape:
            raise ValueError("Only univariate distributions can be reflected.")
        super().__init__(base_dist, AbsTransform(), validate_args)
    def expand(self, batch_shape, _instance=None):
        """Expand batch dimensions to ``batch_shape``."""
        new = self._get_checked_instance(type(self), _instance)
        return super().expand(batch_shape, _instance=new)
    def log_prob(self, value):
        """Sum (in log space) the base density evaluated at +value and -value."""
        if self._validate_args:
            self._validate_sample(value)
        dim = max(len(self.batch_shape), value.dim())
        plus_minus = value.new_tensor([1., -1.]).reshape((2,) + (1,) * dim)
        return self.base_dist.log_prob(plus_minus * value).logsumexp(0)
| true | true |
f71ca57230e7a9c4e629ca823816dd4a71bdd7a4 | 572 | py | Python | localflavor/in_/models.py | stephendwolff/django-localflavor | 082d8539d2797c431bec38fe85e7894ea74b07ac | [
"BSD-3-Clause"
] | null | null | null | localflavor/in_/models.py | stephendwolff/django-localflavor | 082d8539d2797c431bec38fe85e7894ea74b07ac | [
"BSD-3-Clause"
] | null | null | null | localflavor/in_/models.py | stephendwolff/django-localflavor | 082d8539d2797c431bec38fe85e7894ea74b07ac | [
"BSD-3-Clause"
] | null | null | null | from django.utils.translation import ugettext_lazy as _
from django.db.models.fields import CharField
from .in_states import STATE_CHOICES
class INStateField(CharField):
    """
    A model field that forms represent as a ``forms.INStateField`` field and
    stores the two-letter Indian state abbreviation in the database.
    """
    description = _("Indian state (two uppercase letters)")

    def __init__(self, *args, **kwargs):
        # Force the Indian-state choice list and the two-letter column width,
        # overriding anything the caller passed.
        kwargs['choices'] = STATE_CHOICES
        kwargs['max_length'] = 2
        super(INStateField, self).__init__(*args, **kwargs)
| 31.777778 | 76 | 0.708042 | from django.utils.translation import ugettext_lazy as _
from django.db.models.fields import CharField
from .in_states import STATE_CHOICES
class INStateField(CharField):
    """CharField storing a two-letter Indian state abbreviation."""
    description = _("Indian state (two uppercase letters)")
    def __init__(self, *args, **kwargs):
        # Choices and width are fixed; caller-supplied values are overridden.
        kwargs['choices'] = STATE_CHOICES
        kwargs['max_length'] = 2
        super(INStateField, self).__init__(*args, **kwargs)
| true | true |
f71ca6f85ce1ce3a97c314e0b3fd3109c786d615 | 31,747 | py | Python | bot/orders/models.py | psemdel/py-trading-bot | 69da4164b3f6a3ed3e6dc81d5aefc0273b4cb019 | [
"MIT"
] | null | null | null | bot/orders/models.py | psemdel/py-trading-bot | 69da4164b3f6a3ed3e6dc81d5aefc0273b4cb019 | [
"MIT"
] | 1 | 2022-02-07T21:13:55.000Z | 2022-02-07T21:13:55.000Z | bot/orders/models.py | psemdel/py-trading-bot | 69da4164b3f6a3ed3e6dc81d5aefc0273b4cb019 | [
"MIT"
] | null | null | null | from django.db import models
from django.utils import timezone
from django.db.models import Q
import asyncio
from ib_insync import IB, Stock, MarketOrder, util
from core.common import empty_append
from core.indicators import rel_dif
import vectorbtpro as vbt
import sys
import math
import pandas as pd
import numpy as np
from trading_bot.settings import (PERFORM_ORDER, USE_IB_FOR_DATA,DIC_PERFORM_ORDER,
IB_LOCALHOST, IB_PORT)
### Interactive brockers and data retrieval ###
'''
Contains:
- Communication with Interactive brokers
- Retrieval of live data (Interactive brokers or YFinance)
- Performing order
- Models for financial products, stock exchanges...
Note: for some reasons, it does not work if myIB class is not in models
'''
## All symbols must be from same stock exchange
def retrieve_data(symbols,period,**kwargs):
    """Retrieve OHLCV series for a list of symbols plus their benchmark index.

    All symbols must belong to the same stock exchange. Data comes from
    Interactive Brokers when USE_IB_FOR_DATA is set and the exchange is
    IB-supported (or when useIB is forced), otherwise from Yahoo Finance via
    vectorbtpro.

    :param list symbols: symbols to retrieve
    :param str period: lookback period such as "10d", "3mo", "1y"
    :param kwargs: index (bool) look symbols up as Index instead of Action;
                   useIB (bool) force the IB data source;
                   interval (str) bar size such as "15m", "1h", "1d"
    :return: (high, low, close, open, volume) for the actions, then the same
             five series for the index; None on error (exception is printed)
    """
    try:
        IBok=True
        for symbol in symbols:
            if kwargs.get("index",False):
                action=Index.objects.get(symbol=symbol)
            else:
                action=Action.objects.get(symbol=symbol)

            # BVME.ETF data is not available through IB here.
            if action.stock_ex.ib_ticker in ["BVME.ETF"]:
                IBok=False
                break

        index_symbol=exchange_to_symbol(action)

        if (USE_IB_FOR_DATA and IBok) or kwargs.get("useIB",False):
            # Translate e.g. "10d" -> "10 D", "3mo" -> "3 M", "1y" -> "1 Y".
            fig= ''.join(x for x in period if x.isdigit())
            if period.find("d")!=-1:
                period_ib=fig +" D"
            elif period.find("mo")!=-1:
                period_ib=fig +" M"
            elif period.find("y")!=-1:
                period_ib=fig +" Y"

            #Time period of one bar. Must be one of: ‘1 secs’, ‘5 secs’, ‘10 secs’ 15 secs’, ‘30 secs’, ‘1 min’, ‘2 mins’, ‘3 mins’, ‘5 mins’, ‘10 mins’, ‘15 mins’, ‘20 mins’, ‘30 mins’, ‘1 hour’, ‘2 hours’, ‘3 hours’, ‘4 hours’, ‘8 hours’, ‘1 day’, ‘1 week’, ‘1 month’.
            if kwargs.get("interval",False):
                interval_kw=kwargs.get("interval")
                fig= ''.join(x for x in interval_kw if x.isdigit())
                # Bug fix: the bar size must be derived from the interval
                # argument, not from `period` (where "m" also matched "mo").
                if interval_kw.find("mo")==-1 and interval_kw.find("m")!=-1:
                    interval=fig +" mins"
                elif interval_kw.find("h")!=-1:
                    interval=fig +" hours"
                elif interval_kw.find("d")!=-1:
                    interval=fig +" day"
                else:
                    interval='1 day'
            else:
                interval='1 day'

            open_=[]
            close=[]
            low=[]
            high=[]
            # Bug fix: volume had no accumulator and was appended to `low`.
            volume=[]

            myIB=MyIB()
            for symbol in symbols:
                action=Action.objects.get(symbol=symbol)
                contract = Stock(action.ib_ticker(),action.stock_ex.ib_ticker, action.currency.symbol)
                bars = myIB.ib.reqHistoricalData(
                        contract,
                        endDateTime='',
                        durationStr=period_ib, #"10 D","1 M"
                        barSizeSetting=interval, #"1 day", "1 min"
                        whatToShow='TRADES',
                        useRTH=True,
                        formatDate=1)
                df=util.df(bars)
                open_=empty_append(open_,df["open"].values,axis=1)
                close=empty_append(close,df["close"].values,axis=1)
                high=empty_append(high,df["high"].values,axis=1)
                low=empty_append(low,df["low"].values,axis=1)
                volume=empty_append(volume,df["volume"].values,axis=1)

            cours_open=pd.DataFrame(data=open_,index=df["date"],columns=symbols)
            cours_close=pd.DataFrame(data=close,index=df["date"],columns=symbols)
            cours_low=pd.DataFrame(data=low,index=df["date"],columns=symbols)
            cours_high=pd.DataFrame(data=high,index=df["date"],columns=symbols)
            cours_volume=pd.DataFrame(data=volume,index=df["date"],columns=symbols)

            # Same request for the benchmark index of the exchange.
            action=Action.objects.get(symbol=index_symbol)
            contract = Stock(action.ib_ticker(),action.stock_ex.ib_ticker, action.currency.symbol)
            bars = myIB.ib.reqHistoricalData(
                    contract,
                    endDateTime='',
                    durationStr=period_ib, #"10 D","1 M"
                    barSizeSetting=interval, #"1 day", "1 min"
                    whatToShow='TRADES',
                    useRTH=True,
                    formatDate=1)
            df=util.df(bars)
            cours_open_ind=df["open"]
            cours_close_ind=df["close"]
            cours_high_ind=df["high"]
            cours_low_ind=df["low"]
            cours_volume_ind=df["volume"]

            if len(cours_close_ind)!=len(cours_close):
                print("cours index is different from cours length")
            myIB.disconnect()
        else:
            # Yahoo Finance: fetch actions and index together, dropping rows
            # where any symbol has missing data.
            all_symbols=symbols+[index_symbol]
            cours=vbt.YFData.fetch(all_symbols, period=period,missing_index='drop',**kwargs)
            cours_action=cours.select(symbols)
            cours_open =cours_action.get('Open')
            cours_high=cours_action.get('High')
            cours_low=cours_action.get('Low')
            cours_close=cours_action.get('Close')
            cours_volume=cours_action.get('Volume')
            print("number of days retrieved: " + str(np.shape(cours_close)[0]))

            cours_index=cours.select(index_symbol)
            cours_open_ind =cours_index.get('Open')
            cours_high_ind=cours_index.get('High')
            cours_low_ind=cours_index.get('Low')
            cours_close_ind=cours_index.get('Close')
            cours_volume_ind=cours_index.get('Volume')

            debug=False
            if debug:
                # Re-fetch symbol by symbol to identify which one caused rows
                # to be dropped by missing_index='drop'.
                for symbol in all_symbols:
                    data=vbt.YFData.fetch(symbol, period=period,**kwargs)
                    close_debug=data.get("Close")
                    for ii in range(len(close_debug)):
                        if math.isnan(close_debug.values[ii]):
                            print(symbol)
                            print("dropping at least " + str(close_debug.index[ii]))

        return cours_high, cours_low, cours_close, cours_open, cours_volume, \
               cours_high_ind, cours_low_ind, cours_close_ind, cours_open_ind,\
               cours_volume_ind

    except Exception as msg:
        print(msg)
        print("exception in " + __name__)
        _, e_, exc_tb = sys.exc_info()
        print("line " + str(exc_tb.tb_lineno))
        print(msg)
def exchange_to_symbol(action):
    """Map an action's stock exchange (IB ticker) to its benchmark index symbol.

    Returns None for exchanges without a known benchmark.
    """
    benchmark_by_exchange = {
        "SBF": "^FCHI",
        "IBIS": "^GDAXI",
        "NASDAQ": "^IXIC",
        "BVME.ETF": "^IXIC",  # it is only ETF anyhow
    }
    return benchmark_by_exchange.get(action.stock_ex.ib_ticker)
def get_exchange_actions(exchange):
    """Return the symbols of all category-"ACT" actions listed on `exchange`."""
    category = ActionCategory.objects.get(short="ACT")
    stock_exchange = StockEx.objects.get(name=exchange)
    matching = Action.objects.filter(Q(category=category) & Q(stock_ex=stock_exchange))
    return [action.symbol for action in matching]
def retrieve_ib_pf():
    """Read current IB positions and split them into (long, short) symbol lists."""
    broker = MyIB()
    longs = []
    shorts = []
    for position in broker.ib.positions():
        # NOTE(review): looks up the model by the ib_ticker field — confirm the
        # field is queryable (a method of the same name exists on Action).
        action = Action.objects.get(ib_ticker=position.contract.localSymbol)
        bucket = longs if position.position > 0 else shorts
        bucket.append(action.symbol)
    broker.disconnect()
    return longs, shorts
#for SL check
def get_last_price(symbol,**kwargs):
    """Return the most recent price for `symbol` (used e.g. for stop-loss checks).

    Looks the symbol up as an Index when kwargs["index"] is truthy, otherwise
    as an Action. Returns None (after printing) on any error.
    """
    try:
        model = Index if kwargs.get("index", False) else Action
        action = model.objects.get(symbol=symbol)

        if USE_IB_FOR_DATA and action.stock_ex.ib_ticker not in ["BVME.ETF"]:
            broker = MyIB()
            contract = Stock(action.ib_ticker(), action.stock_ex.ib_ticker, action.currency.symbol)
            cours_pres = broker.get_last_price(contract)
            broker.disconnect()
        else:  # Yahoo Finance fallback
            data = vbt.YFData.fetch([symbol], period="2d")
            cours_pres = data.get("Close")[symbol].iloc[-1]

        return cours_pres
    except Exception as msg:
        print(symbol)
        print("exception in " + __name__)
        print(msg)
def get_ratio(symbol,**kwargs):
    """Return the percent change of `symbol` versus a past closing price.

    :param symbol: symbol of the Action (or Index when kwargs["index"] is truthy)
    :param kwargs: index (bool) look the symbol up as Index;
                   opening (bool) compare today's opening price instead of the
                   latest price
    :return: relative difference in percent, or None on error (printed)
    """
    try:
        if kwargs.get("index",False):
            action=Index.objects.get(symbol=symbol)
        else:
            action=Action.objects.get(symbol=symbol)

        if USE_IB_FOR_DATA and action.stock_ex.ib_ticker not in ["BVME.ETF"]:
            myIB=MyIB()
            contract = Stock(action.ib_ticker(),action.stock_ex.ib_ticker, action.currency.symbol)
            cours_pres=myIB.get_last_price(contract)
            # Reference = close of the first bar; open of the last bar.
            cours_ref, cours_open=myIB.get_past_closing_price(contract)

            if kwargs.get("opening",False):
                cours_pres=cours_open
            myIB.disconnect()
        else: #YF
            cours=vbt.YFData.fetch([symbol], period="2d")
            cours_close=cours.get("Close")
            # Reference = close of the first of the two fetched days.
            cours_ref=cours_close[symbol].iloc[0]

            if kwargs.get("opening",False):
                cours_open=cours.get("Open")
                cours_pres=cours_open[symbol].iloc[-1]
            else:
                cours_pres=cours_close[symbol].iloc[-1]

        return rel_dif(cours_pres,
                       cours_ref
                       )*100
    except Exception as msg:
        print(symbol)
        print("exception in " + __name__)
        print(msg)
class MyIB():
    """Thin wrapper around an ib_insync IB connection.

    Handles data retrieval, order placement, and the bookkeeping of PF
    (portfolio) and OrderCapital rows around entries/exits.
    """
    def __init__(self):
        # ib_insync needs an asyncio event loop in the current thread.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        self.ib = IB()
        self.ib.connect(host=IB_LOCALHOST, port=IB_PORT, clientId=1)

    def cash_balance(self):
        """Return the account CashBalance as float, or 0 when unavailable.

        NOTE(review): bare except also returns None (not 0) when the tag is
        simply absent and no exception occurs — confirm callers tolerate this.
        """
        try:
            for v in self.ib.accountSummary():
                if v.tag == 'CashBalance':
                    return float(v.value)
        except:
            return 0

    def test(self,symbol):
        """Debug helper: print the qualified IB contract for one symbol."""
        action=Action.objects.get(symbol=symbol)
        contract = Stock(action.ib_ticker(),action.stock_ex.ib_ticker, action.currency.symbol)
        print(self.ib.qualifyContracts(contract))

    def retrieve(self,contract,period):
        """Fetch hourly historical bars for `contract` over `period` as a DataFrame."""
        bars = self.ib.reqHistoricalData(
                contract,
                endDateTime='',
                durationStr=period, #"10 D","1 M"
                barSizeSetting='1 hour', #"1 day", "1 min"
                whatToShow='TRADES',
                useRTH=True,
                formatDate=1)

        return util.df(bars)

    def get_last_price(self,contract):
        """Block until a last price tick arrives for `contract`, then return it."""
        m_data = self.ib.reqMktData(contract)
        # NaN != NaN: loop until `last` is a real number.
        while m_data.last != m_data.last:  #Wait until data is in.
            self.ib.sleep(0.01)
        self.ib.cancelMktData(contract)
        return m_data.last

    def get_past_closing_price(self,contract):
        """Return (close of the previous day, open of the current day)."""
        period="2 D"
        bars = self.ib.reqHistoricalData(
                contract,
                endDateTime='',
                durationStr=period, #"10 D","1 M"
                barSizeSetting='1 day', #"1 day", "1 min"
                whatToShow='TRADES',
                useRTH=True,
                formatDate=1)
        df=util.df(bars)
        return df.iloc[0]["close"], df.iloc[-1]["open"]

    def place(self,buy,ticker,currency,exchange,**kwargs): #quantity in euros
        """Place a market order and return (description, fill price, quantity).

        For buys, kwargs["order_size"] (account currency) is converted to a
        share quantity at the last price; for sells kwargs["quantity"] is used.

        NOTE(review): returns a 2-tuple ("", 0) for the placeholder ticker and
        implicitly None when the order is not filled within ~1s, while callers
        unpack 3 values — confirm/fix upstream.
        """
        if ticker=="AAA":
            print("ticker not found")
            return "", 0
        else:
            contract = Stock(ticker, exchange, currency)
            self.ib.qualifyContracts(contract)

            if buy:
                order_size=kwargs.get("order_size",0)
                last_price=self.get_last_price(contract)
                quantity=math.floor(order_size/last_price)
                order = MarketOrder('BUY', quantity)
            else:
                quantity=kwargs.get("quantity",0)
                order = MarketOrder('SELL', quantity)
            trade = self.ib.placeOrder(contract, order)

            self.ib.sleep(1.0)
            if trade.orderStatus.status == 'Filled':
                fill = trade.fills[-1]
                txt=f'{fill.time} - {fill.execution.side} {fill.contract.symbol} {fill.execution.shares} @ {fill.execution.avgPrice}'
                price=fill.execution.avgPrice
                return txt, price, quantity

    def exit_order(self,symbol,strategy, exchange,short,**kwargs):
        """Close the active order on `symbol`: sell at IB, record exit price,
        profit and dates, free the PF slot and the order capital.

        Returns True on success, False when no matching order/portfolio entry,
        None when an exception occurred (printed).
        """
        #type check necessary for indexes
        try:
            pf= get_pf(strategy, exchange,short)
            ocap=get_order_capital(strategy, exchange,short)

            if kwargs.get("index",False):
                index=Index.objects.get(symbol=symbol) #actually should be more complex
                if short:
                    action=index.etf_short
                else:
                    action=index.etf_long
            else:
                action=Action.objects.get(symbol=symbol)

            if symbol in pf.retrieve():
                c1 = Q(action=action)
                c2 = Q(active=True)
                order=Order.objects.filter(c1 & c2)

                #profit
                if len(order)>0:
                    txt, order[0].exiting_price, quantity= self.place(False,
                                           action.ib_ticker(),
                                           action.currency.symbol,
                                           action.stock_ex.ib_ticker,
                                           quantity=order[0].quantity)
                    order[0].exiting_date=timezone.now()

                    if order[0].entering_price is not None:
                        order[0].profit=order[0].exiting_price-order[0].entering_price
                        order[0].profit_percent=(order[0].exiting_price/order[0].entering_price-1)*100

                    order[0].active=False
                    order[0].save()

                    ocap.capital+=1
                    ocap.save()
                    pf.remove(symbol)
                    pf.save()

                    return True
                else:
                    print("order not found " + symbol)
                    return False
            return False

        except Exception as msg:
            print("exception in exit")
            print(msg)
            _, e_, exc_tb = sys.exc_info()
            print("line " + str(exc_tb.tb_lineno))
            pass

    def entry_order(self,symbol,strategy, exchange,short,**kwargs):
        """Open a position on `symbol`: buy at IB, create the Order row
        (optionally with a stop-loss threshold via kwargs["sl"]), book the PF
        slot and one unit of order capital.

        Only enters when the symbol is not already held, not excluded, capital
        is available and cash covers the (hard-coded) order size.
        Returns True on success, False when skipped, None on exception.
        """
        try:
            #type check necessary for indexes
            pf= get_pf(strategy, exchange,short)
            order_size=5000
            ocap=get_order_capital(strategy, exchange,short)
            #accountSummary

            if kwargs.get("index",False):
                index=Index.objects.get(symbol=symbol)
                if short:
                    action=index.etf_short
                else:
                    action=index.etf_long
            else:
                action=Action.objects.get(symbol=symbol)

            excluded=Excluded.objects.get(name="all") #list of actions completely excluded from entries

            if (symbol not in pf.retrieve() and
                symbol not in excluded.retrieve() and
                ocap.capital>0 and
                order_size<=self.cash_balance()):

                order=Order(action=action, pf=pf)
                txt, order.entering_price, order.quantity= self.place(True,
                                        action.ib_ticker(),
                                        action.currency.symbol,
                                        action.stock_ex.ib_ticker,
                                        order_size=order_size)
                if kwargs.get("sl",False):
                    sl=kwargs.get("sl")
                    order.sl_threshold=order.entering_price*(1-sl)

                order.save()
                pf.append(symbol)
                pf.save()
                ocap.capital-=1
                ocap.save()

                return True
            return False
        except Exception as msg:
            print("exception in " + __name__)
            print(msg)
            _, e_, exc_tb = sys.exc_info()
            print("line " + str(exc_tb.tb_lineno))
            pass

    def disconnect(self):
        """Close the IB connection."""
        self.ib.disconnect()
def check_hold_duration(symbol,strategy, exchange,short,**kwargs):
    """Return how many days the active order on `symbol` has been held (0 if none)."""
    #type check necessary for indexes
    try:
        pf = get_pf(strategy, exchange, short)

        if kwargs.get("index", False):
            idx = Index.objects.get(symbol=symbol)
            action = idx.etf_short if short else idx.etf_long
        else:
            action = Action.objects.get(symbol=symbol)

        if symbol in pf.retrieve():
            open_orders = Order.objects.filter(Q(action=action) & Q(active=True))
            if len(open_orders) > 0:
                held_for = timezone.now() - open_orders[0].entering_date
                return held_for.days
        return 0
    except Exception as msg:
        print("exception in " + __name__)
        print(msg)
        return 0
def entry_order(symbol,strategy, exchange,short,**kwargs):
    """Dispatch an entry to IB when live trading is enabled for the strategy,
    otherwise simulate it. Returns (result, live_flag)."""
    live = PERFORM_ORDER and DIC_PERFORM_ORDER[strategy]
    if live:
        return MyIB().entry_order(symbol, strategy, exchange, short, **kwargs), True
    return entry_order_test(symbol, strategy, exchange, short, **kwargs), False
def exit_order(symbol,strategy, exchange,short,**kwargs):
    """Dispatch an exit to IB when live trading is enabled for the strategy,
    otherwise simulate it. Returns (result, live_flag)."""
    live = PERFORM_ORDER and DIC_PERFORM_ORDER[strategy]
    if live:
        return MyIB().exit_order(symbol, strategy, exchange, short, **kwargs), True
    return exit_order_test(symbol, strategy, exchange, short, **kwargs), False
def entry_order_test(symbol,strategy, exchange,short,**kwargs):
    """Simulate an entry: create a placeholder Order row and book-keep the
    portfolio and order capital. Returns True/False, or None on exception."""
    try:
        #type check necessary for indexes
        pf = get_pf(strategy, exchange, short)
        ocap = get_order_capital(strategy, exchange, short)

        if kwargs.get("index", False):
            idx = Index.objects.get(symbol=symbol)
            action = idx.etf_short if short else idx.etf_long
        else:
            action = Action.objects.get(symbol=symbol)
        symbol2 = action.symbol

        excluded = Excluded.objects.get(name="all")  # actions never to be entered

        can_enter = (symbol2 not in pf.retrieve()
                     and symbol2 not in excluded.retrieve()
                     and ocap.capital > 0)
        if not can_enter:
            return False

        fake_order = Order(action=action, pf=pf)
        fake_order.entering_price = 1.0  # placeholder price for the simulation
        fake_order.save()
        #post telegram
        pf.append(symbol2)
        pf.save()
        ocap.capital -= 1  # also for short
        ocap.save()
        return True
    except Exception as msg:
        print("exception in " + __name__)
        print(msg)
        _, e_, exc_tb = sys.exc_info()
        print("line " + str(exc_tb.tb_lineno))
        pass
def exit_order_test(symbol,strategy, exchange,short,**kwargs):
    """Simulate an exit: deactivate the open Order row and free the portfolio
    slot and order capital. Returns True/False, or None on exception."""
    try:
        pf = get_pf(strategy, exchange, short)
        ocap = get_order_capital(strategy, exchange, short)

        if kwargs.get("index", False):
            idx = Index.objects.get(symbol=symbol)  # actually should be more complex
            action = idx.etf_short if short else idx.etf_long
        else:
            action = Action.objects.get(symbol=symbol)
        symbol2 = action.symbol

        if symbol2 in pf.retrieve():
            open_orders = Order.objects.filter(Q(action=action) & Q(active=True))

            #post telegram
            #price
            #profit
            if len(open_orders) > 0:
                closing = open_orders[0]
                closing.exiting_date = timezone.now()
                closing.active = False
                closing.save()

                ocap.capital += 1  # also for short
                ocap.save()
                pf.remove(symbol2)
                pf.save()
                return True
        return False
    except Exception as msg:
        print("exception in " + __name__)
        print(msg)
        _, e_, exc_tb = sys.exc_info()
        print("line " + str(exc_tb.tb_lineno))
        pass
class Currency(models.Model):
    """A trading currency (e.g. EUR, USD)."""
    name=models.CharField(max_length=100, blank=False)
    # Symbol used when building IB contracts.
    symbol=models.CharField(max_length=100, blank=False,default="A")

    def __str__(self):
        return self.name
class Fees(models.Model):
    """Trading fee schedule: a fixed part plus a percentage part."""
    name=models.CharField(max_length=100, blank=False, default="fee")
    fixed=models.DecimalField(max_digits=100, decimal_places=5)
    percent=models.DecimalField(max_digits=100, decimal_places=5)

    def __str__(self):
        return self.name
class StockEx(models.Model):
    """A stock exchange with its fee schedule, IB code and trading hours."""
    name=models.CharField(max_length=100, blank=False)
    fees=models.ForeignKey('Fees',on_delete=models.CASCADE)
    # Exchange code as known by Interactive Brokers (e.g. "SBF", "IBIS").
    ib_ticker=models.CharField(max_length=15, blank=True,default="AAA")
    opening_time=models.TimeField(default="09:00:00")
    closing_time=models.TimeField(default="17:00:00")

    def __str__(self):
        return self.name
class Strategy(models.Model):
    """A named trading strategy; referenced by portfolios, capitals and actions."""
    name=models.CharField(max_length=100, blank=False)

    def __str__(self):
        return self.name
### Index is like action, but it had to be separated, as an index cannot be bought directly
class Index(models.Model):
    """A stock index (like an Action, but separated because an index cannot be
    bought directly — long/short ETFs are attached instead)."""
    symbol=models.CharField(max_length=15, blank=False, primary_key=True)
    # NOTE(review): this field is shadowed by the ib_ticker() method defined
    # below (same name, later in the class body) — confirm which was intended.
    ib_ticker=models.CharField(max_length=15, blank=True,default="AAA")
    name=models.CharField(max_length=100, blank=False)
    stock_ex=models.ForeignKey('StockEx',on_delete=models.CASCADE)
    currency=models.ForeignKey('Currency',on_delete=models.CASCADE)
    # ETFs used to replicate a long/short position on the index.
    etf_long=models.ForeignKey('Action',on_delete=models.PROTECT,default=0,related_name='etf_long')
    etf_short=models.ForeignKey('Action',on_delete=models.PROTECT, default=0,related_name='etf_short')

    class Meta:
        ordering = ["name"]

    def ib_ticker(self):
        # NOTE(review): `self.ib_ticker` resolves to this very method (the
        # field above is shadowed), so this returns the bound method instead of
        # a ticker string — almost certainly a bug; confirm intended source.
        return self.ib_ticker

    def __str__(self):
        return self.name
class Action(models.Model):
    """A tradable financial product (stock or ETF) on one exchange."""
    symbol=models.CharField(max_length=15, blank=False, primary_key=True)
    # NOTE(review): this field is shadowed by the ib_ticker() method defined
    # below (same name, later in the class body) — confirm which was intended.
    ib_ticker=models.CharField(max_length=15, blank=True,default="AAA")
    name=models.CharField(max_length=100, blank=False)
    stock_ex=models.ForeignKey('StockEx',on_delete=models.CASCADE)
    currency=models.ForeignKey('Currency',on_delete=models.CASCADE)
    category=models.ForeignKey('ActionCategory',on_delete=models.CASCADE,blank=True)
    strategy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True,default=0)

    class Meta:
        ordering = ["name"]

    def ib_ticker(self):
        # IB ticker = symbol without the exchange suffix (e.g. "AIR.PA" -> "AIR").
        t=self.symbol.split(".")
        return t[0]

    def __str__(self):
        return self.name
class Order(models.Model):
    """One trade: the entry (and later the exit) of a position on an action."""
    action=models.ForeignKey('Action',on_delete=models.CASCADE)
    pf=models.ForeignKey('PF',on_delete=models.SET_NULL,blank=True,null=True)
    # True while the position is open.
    active=models.BooleanField(blank=False,default=True)
    entering_date=models.DateTimeField(null=False, blank=False, auto_now_add=True)#default=timezone.now())
    exiting_date=models.DateTimeField(null=True, blank=True)
    entering_price=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)
    exiting_price=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)
    # Stop-loss trigger price, set at entry when a stop loss is requested.
    sl_threshold=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)
    profit=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)
    profit_percent=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)
    quantity=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)

    def __str__(self):
        return self.action.name + " "+ str(self.entering_date)
def pf_retrieve_all(**kwargs):
    """Collect the distinct action symbols held across all portfolios.

    kwargs: short (bool) select short portfolios; opening ("9h"/"15h") restrict
    to exchanges opening at that hour (Paris/XETRA vs Nasdaq).
    """
    symbols = []
    opening = kwargs.get("opening")
    for pf in PF.objects.filter(short=kwargs.get("short", False)):
        cat = ActionCategory.objects.get(short="ACT")
        base_filter = Q(category=cat)

        if opening == "9h":
            paris = StockEx.objects.filter(name="Paris")
            xetra = StockEx.objects.filter(name="XETRA")
            actions = pf.actions.filter(base_filter & (Q(stock_ex=paris[0]) | Q(stock_ex=xetra[0])))
        elif opening == "15h":
            nasdaq = StockEx.objects.filter(name="Nasdaq")
            actions = pf.actions.filter(base_filter & Q(stock_ex=nasdaq[0]))
        else:
            actions = pf.actions.filter(base_filter)

        for action in actions:
            if action.symbol not in symbols:
                symbols.append(action.symbol)
    return symbols
### Portfolio for a given strategy (used as name presently)
class PF(models.Model):
    """Portfolio of actions held by one strategy on one exchange."""
    # can be replaced with ib.positions() or ib.portfolio()
    name=models.CharField(max_length=100, blank=False)
    actions=models.ManyToManyField(Action,blank=True)
    short=models.BooleanField(blank=False,default=False)
    strategy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True)
    stock_ex=models.ForeignKey('StockEx',on_delete=models.CASCADE,blank=True,default=2)

    def __str__(self):
        return self.name

    def retrieve(self):
        """Return the portfolio content as a list of symbols."""
        arr=[]
        for action in self.actions.all():
            arr.append(action.symbol)
        return arr

    def remove(self,symbol):
        """Remove the action with this symbol (best effort: errors are printed,
        not raised)."""
        a = Action.objects.get(symbol=symbol)

        try:
            self.actions.remove(a)
            self.save()
        except Exception as msg:
            print("exception in remove_symbol")
            print(symbol)
            print(msg)
            _, e_, exc_tb = sys.exc_info()
            print("line " + str(exc_tb.tb_lineno))
            pass

    def append(self,symbol):
        """Add the action with this symbol (best effort: errors are printed,
        not raised)."""
        try:
            a = Action.objects.get(symbol=symbol)
            self.actions.add(a)
            self.save()
        except Exception as msg:
            print("exception in " + __name__)
            print(symbol)
            print(msg)
            _, e_, exc_tb = sys.exc_info()
            print("line " + str(exc_tb.tb_lineno))
            pass
def get_pf(strategy, exchange,short):
    """Look up the PF (portfolio) for a strategy, an exchange and a direction."""
    strat = Strategy.objects.get(name=strategy)
    stock_exchange = StockEx.objects.get(name=exchange)
    return PF.objects.get(Q(stock_ex=stock_exchange) & Q(strategy=strat) & Q(short=short))
### To distinguish between ETF, actions, indexes...
class ActionCategory(models.Model):
    """Category of a product (to distinguish between ETF, actions, indexes...)."""
    # Short code, e.g. "ACT" for plain actions; used as primary key.
    short=models.CharField(max_length=15, blank=False, default="AAA", primary_key=True)
    name=models.CharField(max_length=100, blank=False)

    def __str__(self):
        return self.name
###To define the capital assigned to one strategy.
###Not used presently
class Capital(models.Model):
    """Capital assigned to one strategy on one exchange.

    Currently unused (see the module comment above the class).
    """
    #self.ib.accountSummary()
    capital=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)
    name=models.CharField(max_length=100, blank=False,default="")
    strategy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True)
    # NOTE(review): default=2 hard-codes a StockEx primary key -- confirm it exists.
    stock_ex=models.ForeignKey('StockEx',on_delete=models.CASCADE,blank=True,default=2)

    def __str__(self):
        return self.name
def get_capital(strategy, exchange, short):
    """Fetch the Capital row for the given strategy name, exchange name and direction."""
    strat = Strategy.objects.get(name=strategy)
    stock_ex = StockEx.objects.get(name=exchange)
    # Keyword equality lookups are equivalent to ANDed Q(...) filters.
    return Capital.objects.get(stock_ex=stock_ex, strategy=strat, short=short)
###To define the number of orders assigned to one strategy
###1 means that only one action can be owned at a time using this strategy
class OrderCapital(models.Model):
    """Number of order slots available to one strategy on one exchange.

    A value of 1 means only one action can be owned at a time with this
    strategy; the entry/exit paths decrement/increment ``capital``.
    """
    capital=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)
    name=models.CharField(max_length=100, blank=False,default="")
    strategy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True)
    stock_ex=models.ForeignKey('StockEx',on_delete=models.CASCADE,blank=True,default=2)

    def __str__(self):
        return self.name
def get_order_capital(strategy, exchange, short):
    """Fetch the OrderCapital row for the given strategy and exchange names.

    ``short`` is accepted for signature parity with get_pf()/get_capital()
    but takes no part in the lookup (OrderCapital has no ``short`` field).
    """
    strat = Strategy.objects.get(name=strategy)
    stock_ex = StockEx.objects.get(name=exchange)
    return OrderCapital.objects.get(stock_ex=stock_ex, strategy=strat)
### For strategies that use two time frames: candidates are selected on the slow
### time frame (10 days), and on a daily basis the fast strategy decides which
### of those candidates is actually bought or sold
class Candidates(models.Model):
    """Pre-selected actions for two-time-frame strategies.

    The slow time frame fills this list; the daily strategy then picks from
    it (see the module comment above the class).
    """
    name=models.CharField(max_length=100, blank=False)
    actions=models.ManyToManyField(Action,blank=True)
    strategy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True,default=1)
    stock_ex=models.ForeignKey('StockEx',on_delete=models.CASCADE,blank=True,default=2)

    def reset(self):
        """Empty the candidate list."""
        for a in self.actions.all():
            self.actions.remove(a)
        self.save()

    def append(self,symbol): #so we can name as for list
        """Add the action with *symbol*; raises Action.DoesNotExist for unknown symbols."""
        a = Action.objects.get(symbol=symbol)
        self.actions.add(a)
        self.save()

    def retrieve(self):
        """Return candidate symbols as a list of str."""
        arr=[]
        for action in self.actions.all():
            arr.append(action.symbol)
        return arr

    def __str__(self):
        return self.name
def get_candidates(strategy, exchange):
    """Fetch the Candidates row for the given strategy and exchange names."""
    strat = Strategy.objects.get(name=strategy)
    stock_ex = StockEx.objects.get(name=exchange)
    # Keyword equality lookups are equivalent to ANDed Q(...) filters.
    return Candidates.objects.get(stock_ex=stock_ex, strategy=strat)
### List of actions provisionally excluded for a strategy because they are expected to perform badly
class Excluded(models.Model):
    """Actions temporarily excluded from trading for a given strategy."""
    name=models.CharField(max_length=100, blank=False)
    actions=models.ManyToManyField(Action,blank=True)
    strategy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True)

    def reset(self):
        """Clear the exclusion list."""
        for a in self.actions.all():
            self.actions.remove(a)
        self.save()

    def append(self,symbol):
        """Add *symbol* to the exclusion list.

        NOTE(review): unlike remove(), this is not wrapped in try/except, so
        an unknown symbol raises Action.DoesNotExist -- confirm whether the
        asymmetry with PF.append is intentional.
        """
        a = Action.objects.get(symbol=symbol)
        self.actions.add(a)
        self.save()

    def remove(self,symbol):
        """Best-effort removal of *symbol*.

        Errors during removal/save are printed and swallowed; the lookup is
        outside the try block, so unknown symbols still raise.
        """
        a = Action.objects.get(symbol=symbol)
        try:
            self.actions.remove(a)
            self.save()
        except Exception as msg:
            print("exception in " + __name__)
            print(symbol)
            print(msg)
            _, e_, exc_tb = sys.exc_info()
            print("line " + str(exc_tb.tb_lineno))
            pass

    def retrieve(self):
        """Return excluded symbols as a list of str."""
        arr=[]
        for action in self.actions.all():
            arr.append(action.symbol)
        return arr

    def __str__(self):
        return self.name
### Define a list of actions and indexes that can be traded using the defined strategy
class StratCandidates(models.Model):
    """Actions and indexes that a given strategy is allowed to trade."""
    name=models.CharField(max_length=100, blank=False)
    actions=models.ManyToManyField(Action,blank=True)
    indexes=models.ManyToManyField(Index,blank=True)
    strategy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True,default=0)

    def retrieve(self):
        """Return the tradable action symbols (indexes are not included)."""
        arr=[]
        for action in self.actions.all():
            arr.append(action.symbol)
        return arr

    def __str__(self):
        return self.name
from django.utils import timezone
from django.db.models import Q
import asyncio
from ib_insync import IB, Stock, MarketOrder, util
from core.common import empty_append
from core.indicators import rel_dif
import vectorbtpro as vbt
import sys
import math
import pandas as pd
import numpy as np
from trading_bot.settings import (PERFORM_ORDER, USE_IB_FOR_DATA,DIC_PERFORM_ORDER,
IB_LOCALHOST, IB_PORT)
e):
action=Index.objects.get(symbol=symbol)
else:
action=Action.objects.get(symbol=symbol)
if action.stock_ex.ib_ticker in ["BVME.ETF"]:
IBok=False
break
index_symbol=exchange_to_symbol(action)
if (USE_IB_FOR_DATA and IBok) or kwargs.get("useIB",False):
fig= ''.join(x for x in period if x.isdigit())
if period.find("d")!=-1:
period_ib=fig +" D"
elif period.find("mo")!=-1:
period_ib=fig +" M"
elif period.find("y")!=-1:
period_ib=fig +" Y"
if kwargs.get("interval",False):
fig= ''.join(x for x in kwargs.get("interval") if x.isdigit())
if period.find("m")!=-1:
interval=fig +" mins"
elif period.find("h")!=-1:
interval=fig +" hours"
elif period.find("d")!=-1:
interval=fig +" day"
else:
interval='1 day'
open_=[]
close=[]
low=[]
high=[]
myIB=MyIB()
for symbol in symbols:
action=Action.objects.get(symbol=symbol)
contract = Stock(action.ib_ticker(),action.stock_ex.ib_ticker, action.currency.symbol)
bars = myIB.ib.reqHistoricalData(
contract,
endDateTime='',
durationStr=period_ib,
barSizeSetting=interval,
whatToShow='TRADES',
useRTH=True,
formatDate=1)
df=util.df(bars)
open_=empty_append(open_,df["open"].values,axis=1)
close=empty_append(close,df["close"].values,axis=1)
high=empty_append(high,df["high"].values,axis=1)
low=empty_append(low,df["low"].values,axis=1)
volume=empty_append(low,df["volume"].values,axis=1)
cours_open=pd.DataFrame(data=open_,index=df["date"],columns=symbols)
cours_close=pd.DataFrame(data=close,index=df["date"],columns=symbols)
cours_low=pd.DataFrame(data=low,index=df["date"],columns=symbols)
cours_high=pd.DataFrame(data=high,index=df["date"],columns=symbols)
cours_volume=pd.DataFrame(data=volume,index=df["date"],columns=symbols)
action=Action.objects.get(symbol=index_symbol)
contract = Stock(action.ib_ticker(),action.stock_ex.ib_ticker, action.currency.symbol)
bars = myIB.ib.reqHistoricalData(
contract,
endDateTime='',
durationStr=period_ib,
barSizeSetting=interval,
whatToShow='TRADES',
useRTH=True,
formatDate=1)
df=util.df(bars)
cours_open_ind=df["open"]
cours_close_ind=df["close"]
cours_high_ind=df["high"]
cours_low_ind=df["low"]
cours_volume_ind=df["volume"]
if len(cours_close_ind)!=len(cours_close):
print("cours index is different from cours length")
myIB.disconnect()
else:
all_symbols=symbols+[index_symbol]
cours=vbt.YFData.fetch(all_symbols, period=period,missing_index='drop',**kwargs)
cours_action=cours.select(symbols)
cours_open =cours_action.get('Open')
cours_high=cours_action.get('High')
cours_low=cours_action.get('Low')
cours_close=cours_action.get('Close')
cours_volume=cours_action.get('Volume')
print("number of days retrieved: " + str(np.shape(cours_close)[0]))
cours_index=cours.select(index_symbol)
cours_open_ind =cours_index.get('Open')
cours_high_ind=cours_index.get('High')
cours_low_ind=cours_index.get('Low')
cours_close_ind=cours_index.get('Close')
cours_volume_ind=cours_index.get('Volume')
debug=False
if debug:
for symbol in all_symbols:
data=vbt.YFData.fetch(symbol, period=period,**kwargs)
close_debug=data.get("Close")
for ii in range(len(close_debug)):
if math.isnan(close_debug.values[ii]):
print(symbol)
print("dropping at least " + str(close_debug.index[ii]))
return cours_high, cours_low, cours_close, cours_open, cours_volume, \
cours_high_ind, cours_low_ind, cours_close_ind, cours_open_ind,\
cours_volume_ind
except Exception as msg:
print(msg)
print("exception in " + __name__)
_, e_, exc_tb = sys.exc_info()
print("line " + str(exc_tb.tb_lineno))
print(msg)
def exchange_to_symbol(action):
    """Map an action's IB exchange code to the Yahoo symbol of its reference index.

    Returns None for exchanges without a mapping, matching the original
    fall-through behaviour.
    """
    index_by_exchange = {
        "SBF": "^FCHI",       # Euronext Paris -> CAC 40
        "IBIS": "^GDAXI",     # XETRA -> DAX
        "NASDAQ": "^IXIC",    # Nasdaq Composite
        "BVME.ETF": "^IXIC",  # also benchmarked against ^IXIC here
    }
    return index_by_exchange.get(action.stock_ex.ib_ticker)
def get_exchange_actions(exchange):
    """Return the symbols of all plain stocks (category "ACT") listed on *exchange*."""
    act_category = ActionCategory.objects.get(short="ACT")
    stock_ex = StockEx.objects.get(name=exchange)
    matching = Action.objects.filter(category=act_category, stock_ex=stock_ex)
    return [action.symbol for action in matching]
def retrieve_ib_pf():
    """Read current IB positions and split them into long and short symbol lists.

    Returns ``(longs, shorts)``: positions with a strictly positive size are
    treated as long, everything else as short.  Opens a fresh IB connection
    and disconnects before returning.
    """
    my_ib = MyIB()
    longs, shorts = [], []
    for position in my_ib.ib.positions():
        action = Action.objects.get(ib_ticker=position.contract.localSymbol)
        bucket = longs if position.position > 0 else shorts
        bucket.append(action.symbol)
    my_ib.disconnect()
    return longs, shorts
def get_last_price(symbol,**kwargs):
    """Return the latest traded price for *symbol*.

    kwargs:
        index (bool): if True, resolve the symbol against Index, else Action.

    Uses Interactive Brokers when USE_IB_FOR_DATA is set (except for the
    "BVME.ETF" exchange), otherwise Yahoo Finance via vectorbtpro.  On any
    error the exception is printed and the function implicitly returns None,
    so callers must tolerate a None result.
    """
    try:
        if kwargs.get("index",False):
            action=Index.objects.get(symbol=symbol)
        else:
            action=Action.objects.get(symbol=symbol)

        if USE_IB_FOR_DATA and action.stock_ex.ib_ticker not in ["BVME.ETF"]:
            myIB=MyIB()
            contract = Stock(action.ib_ticker(),action.stock_ex.ib_ticker, action.currency.symbol)
            cours_pres=myIB.get_last_price(contract)
            myIB.disconnect()
        else:
            # Last close of a 2-day Yahoo download = most recent price.
            cours=vbt.YFData.fetch([symbol], period="2d")
            cours_close=cours.get("Close")
            cours_pres=cours_close[symbol].iloc[-1]
        return cours_pres
    except Exception as msg:
        print(symbol)
        print("exception in " + __name__)
        print(msg)
def get_ratio(symbol,**kwargs):
    """Return the percentage change between a reference price and the present price.

    The reference is the previous close; the present price is the latest
    trade, or today's open when ``opening=True``.  ``index=True`` resolves
    the symbol against Index instead of Action.  Data comes from IB when
    USE_IB_FOR_DATA is set (except "BVME.ETF"), else Yahoo via vectorbtpro.
    On any error the exception is printed and None is implicitly returned.
    """
    try:
        if kwargs.get("index",False):
            action=Index.objects.get(symbol=symbol)
        else:
            action=Action.objects.get(symbol=symbol)

        if USE_IB_FOR_DATA and action.stock_ex.ib_ticker not in ["BVME.ETF"]:
            myIB=MyIB()
            contract = Stock(action.ib_ticker(),action.stock_ex.ib_ticker, action.currency.symbol)
            cours_pres=myIB.get_last_price(contract)
            cours_ref, cours_open=myIB.get_past_closing_price(contract)
            if kwargs.get("opening",False):
                cours_pres=cours_open
            myIB.disconnect()
        else:
            # Two-day window: row 0 is the previous session, row -1 is today.
            cours=vbt.YFData.fetch([symbol], period="2d")
            cours_close=cours.get("Close")
            cours_ref=cours_close[symbol].iloc[0]
            if kwargs.get("opening",False):
                cours_open=cours.get("Open")
                cours_pres=cours_open[symbol].iloc[-1]
            else:
                cours_pres=cours_close[symbol].iloc[-1]
        # rel_dif returns a fraction; scale to percent.
        return rel_dif(cours_pres,
                       cours_ref
                       )*100
    except Exception as msg:
        print(symbol)
        print("exception in " + __name__)
        print(msg)
class MyIB():
    """Thin wrapper around an ib_insync IB connection.

    Each instance opens its own connection to the local TWS/IB-gateway
    (IB_LOCALHOST:IB_PORT) and must be released with ``disconnect()``.
    """
    def __init__(self):
        # ib_insync requires an asyncio event loop in the current thread.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        self.ib = IB()
        self.ib.connect(host=IB_LOCALHOST, port=IB_PORT, clientId=1)

    def cash_balance(self):
        """Return the account CashBalance as float, or 0 on error.

        NOTE(review): if the tag is absent and no exception occurs, the loop
        falls through and None is implicitly returned -- confirm callers
        (entry_order compares with <=) never hit that case.
        """
        try:
            for v in self.ib.accountSummary():
                if v.tag == 'CashBalance':
                    return float(v.value)
        except:
            return 0

    def test(self,symbol):
        """Debug helper: print the qualified contract for *symbol*."""
        action=Action.objects.get(symbol=symbol)
        contract = Stock(action.ib_ticker(),action.stock_ex.ib_ticker, action.currency.symbol)
        print(self.ib.qualifyContracts(contract))

    def retrieve(self,contract,period):
        """Fetch hourly historical TRADES bars for *contract* as a DataFrame."""
        bars = self.ib.reqHistoricalData(
                contract,
                endDateTime='',
                durationStr=period,
                barSizeSetting='1 hour',
                whatToShow='TRADES',
                useRTH=True,
                formatDate=1)
        return util.df(bars)

    def get_last_price(self,contract):
        """Block until a last-trade tick arrives and return it."""
        m_data = self.ib.reqMktData(contract)
        # m_data.last is NaN until the first tick; NaN != NaN is the NaN test.
        while m_data.last != m_data.last:
            self.ib.sleep(0.01)
        self.ib.cancelMktData(contract)
        return m_data.last

    def get_past_closing_price(self,contract):
        """Return (previous session close, current session open) from daily bars."""
        period="2 D"
        bars = self.ib.reqHistoricalData(
                contract,
                endDateTime='',
                durationStr=period,
                barSizeSetting='1 day',
                whatToShow='TRADES',
                useRTH=True,
                formatDate=1)
        df=util.df(bars)
        return df.iloc[0]["close"], df.iloc[-1]["open"]

    def place(self,buy,ticker,currency,exchange,**kwargs):
        """Place a market order; BUY sizes from kwargs["order_size"], SELL from kwargs["quantity"].

        On a fill, returns (description, average price, quantity).
        NOTE(review): the "AAA" placeholder path returns a 2-tuple ("", 0) and
        the not-Filled path implicitly returns None, while callers unpack a
        3-tuple -- confirm these paths cannot be reached in practice.
        """
        if ticker=="AAA":
            print("ticker not found")
            return "", 0
        else:
            contract = Stock(ticker, exchange, currency)
            self.ib.qualifyContracts(contract)

            if buy:
                order_size=kwargs.get("order_size",0)
                last_price=self.get_last_price(contract)
                # Whole number of shares purchasable with order_size.
                quantity=math.floor(order_size/last_price)
                order = MarketOrder('BUY', quantity)
            else:
                quantity=kwargs.get("quantity",0)
                order = MarketOrder('SELL', quantity)
            trade = self.ib.placeOrder(contract, order)
            # Give IB a moment to fill before checking status.
            self.ib.sleep(1.0)
            if trade.orderStatus.status == 'Filled':
                fill = trade.fills[-1]
                txt=f'{fill.time} - {fill.execution.side} {fill.contract.symbol} {fill.execution.shares} @ {fill.execution.avgPrice}'
                price=fill.execution.avgPrice
                return txt, price, quantity

    def exit_order(self,symbol,strategy, exchange,short,**kwargs):
        """Close the active position on *symbol* via a real IB SELL order.

        Updates the Order row (exit price/date, profit), frees one order
        slot, and removes the symbol from the portfolio.  Returns True on
        success, False when the symbol/order is not found; exceptions are
        printed and None is implicitly returned.
        """
        try:
            pf= get_pf(strategy, exchange,short)
            ocap=get_order_capital(strategy, exchange,short)

            # For index trades, the position is actually held through an ETF.
            if kwargs.get("index",False):
                index=Index.objects.get(symbol=symbol)
                if short:
                    action=index.etf_short
                else:
                    action=index.etf_long
            else:
                action=Action.objects.get(symbol=symbol)

            if symbol in pf.retrieve():
                c1 = Q(action=action)
                c2 = Q(active=True)
                order=Order.objects.filter(c1 & c2)
                if len(order)>0:
                    txt, order[0].exiting_price, quantity= self.place(False,
                                           action.ib_ticker(),
                                           action.currency.symbol,
                                           action.stock_ex.ib_ticker,
                                           quantity=order[0].quantity)
                    order[0].exiting_date=timezone.now()

                    if order[0].entering_price is not None:
                        order[0].profit=order[0].exiting_price-order[0].entering_price
                        order[0].profit_percent=(order[0].exiting_price/order[0].entering_price-1)*100
                    order[0].active=False
                    order[0].save()

                    # Free the order slot and drop the position from the portfolio.
                    ocap.capital+=1
                    ocap.save()
                    pf.remove(symbol)
                    pf.save()
                    return True
                else:
                    print("order not found " + symbol)
                    return False
            return False
        except Exception as msg:
            print("exception in exit")
            print(msg)
            _, e_, exc_tb = sys.exc_info()
            print("line " + str(exc_tb.tb_lineno))
            pass

    def entry_order(self,symbol,strategy, exchange,short,**kwargs):
        """Open a position on *symbol* via a real IB BUY order.

        Guards: symbol not already held, not excluded, a free order slot,
        and enough cash for the hard-coded 5000 order size.  Optionally
        stores a stop-loss threshold when kwargs["sl"] is given (fraction
        below the entry price).  Returns True on success, False when the
        guards fail; exceptions are printed and None is implicitly returned.
        """
        try:
            pf= get_pf(strategy, exchange,short)
            # NOTE(review): order size is hard-coded here rather than taken
            # from Capital/OrderCapital -- confirm intent.
            order_size=5000
            ocap=get_order_capital(strategy, exchange,short)

            # For index trades, the position is actually held through an ETF.
            if kwargs.get("index",False):
                index=Index.objects.get(symbol=symbol)
                if short:
                    action=index.etf_short
                else:
                    action=index.etf_long
            else:
                action=Action.objects.get(symbol=symbol)

            excluded=Excluded.objects.get(name="all")
            if (symbol not in pf.retrieve() and
                symbol not in excluded.retrieve() and
                ocap.capital>0 and
                order_size<=self.cash_balance()):
                order=Order(action=action, pf=pf)
                txt, order.entering_price, order.quantity= self.place(True,
                                        action.ib_ticker(),
                                        action.currency.symbol,
                                        action.stock_ex.ib_ticker,
                                        order_size=order_size)
                if kwargs.get("sl",False):
                    sl=kwargs.get("sl")
                    order.sl_threshold=order.entering_price*(1-sl)
                order.save()
                pf.append(symbol)
                pf.save()
                # Consume one order slot.
                ocap.capital-=1
                ocap.save()
                return True
            return False
        except Exception as msg:
            print("exception in " + __name__)
            print(msg)
            _, e_, exc_tb = sys.exc_info()
            print("line " + str(exc_tb.tb_lineno))
            pass

    def disconnect(self):
        """Close the underlying IB connection."""
        self.ib.disconnect()
def check_hold_duration(symbol,strategy, exchange,short,**kwargs):
    """Number of whole days the position on *symbol* has been held.

    Returns 0 when the symbol is not in the portfolio, when no active Order
    row exists, or when any exception occurs (printed, not raised).
    """
    try:
        pf = get_pf(strategy, exchange, short)
        # For index trades, the position is actually held through an ETF.
        if kwargs.get("index", False):
            index = Index.objects.get(symbol=symbol)
            action = index.etf_short if short else index.etf_long
        else:
            action = Action.objects.get(symbol=symbol)

        if symbol in pf.retrieve():
            open_orders = Order.objects.filter(Q(action=action) & Q(active=True))
            if len(open_orders) > 0:
                held_for = timezone.now() - open_orders[0].entering_date
                return held_for.days
        return 0
    except Exception as msg:
        print("exception in " + __name__)
        print(msg)
        return 0
def entry_order(symbol,strategy, exchange,short,**kwargs):
    """Dispatch an entry either to the real IB implementation or to the dry-run stub.

    Returns ``(result, used_ib)`` where ``used_ib`` tells which path was taken.
    """
    use_real_orders = PERFORM_ORDER and DIC_PERFORM_ORDER[strategy]
    if use_real_orders:
        return MyIB().entry_order(symbol, strategy, exchange, short, **kwargs), True
    return entry_order_test(symbol, strategy, exchange, short, **kwargs), False
def exit_order(symbol,strategy, exchange,short,**kwargs):
    """Dispatch an exit either to the real IB implementation or to the dry-run stub.

    Returns ``(result, used_ib)`` where ``used_ib`` tells which path was taken.
    """
    use_real_orders = PERFORM_ORDER and DIC_PERFORM_ORDER[strategy]
    if use_real_orders:
        return MyIB().exit_order(symbol, strategy, exchange, short, **kwargs), True
    return exit_order_test(symbol, strategy, exchange, short, **kwargs), False
def entry_order_test(symbol,strategy, exchange,short,**kwargs):
    """Dry-run entry: record an Order row without touching IB.

    Mirrors MyIB.entry_order's bookkeeping (portfolio, order slot) but uses
    a placeholder entry price of 1.0.  Returns True when the entry was
    recorded, False when the guards fail; exceptions are printed and None is
    implicitly returned.
    """
    try:
        pf = get_pf(strategy, exchange, short)
        ocap = get_order_capital(strategy, exchange, short)
        # For index trades, the position is actually held through an ETF.
        if kwargs.get("index", False):
            index = Index.objects.get(symbol=symbol)
            action = index.etf_short if short else index.etf_long
        else:
            action = Action.objects.get(symbol=symbol)
        traded_symbol = action.symbol
        excluded = Excluded.objects.get(name="all")

        can_enter = (traded_symbol not in pf.retrieve() and
                     traded_symbol not in excluded.retrieve() and
                     ocap.capital > 0)
        if can_enter:
            order = Order(action=action, pf=pf)
            order.entering_price = 1.0  # placeholder price for dry runs
            order.save()
            pf.append(traded_symbol)
            pf.save()
            # Consume one order slot.
            ocap.capital -= 1
            ocap.save()
            return True
        return False
    except Exception as msg:
        print("exception in " + __name__)
        print(msg)
        _, e_, exc_tb = sys.exc_info()
        print("line " + str(exc_tb.tb_lineno))
        pass
def exit_order_test(symbol,strategy, exchange,short,**kwargs):
    """Dry-run exit: close the active Order row without touching IB.

    Returns True when an active order was closed, False otherwise;
    exceptions are printed and None is implicitly returned.
    """
    try:
        pf = get_pf(strategy, exchange, short)
        ocap = get_order_capital(strategy, exchange, short)
        # For index trades, the position is actually held through an ETF.
        if kwargs.get("index", False):
            index = Index.objects.get(symbol=symbol)
            action = index.etf_short if short else index.etf_long
        else:
            action = Action.objects.get(symbol=symbol)
        traded_symbol = action.symbol

        if traded_symbol in pf.retrieve():
            open_orders = Order.objects.filter(Q(action=action) & Q(active=True))
            if len(open_orders) > 0:
                closing = open_orders[0]
                closing.exiting_date = timezone.now()
                closing.active = False
                closing.save()
                # Free the order slot and drop the position from the portfolio.
                ocap.capital += 1
                ocap.save()
                pf.remove(traded_symbol)
                pf.save()
                return True
        return False
    except Exception as msg:
        print("exception in " + __name__)
        print(msg)
        _, e_, exc_tb = sys.exc_info()
        print("line " + str(exc_tb.tb_lineno))
        pass
class Currency(models.Model):
    """Trading currency; ``symbol`` is the code passed to IB Stock contracts."""
    name=models.CharField(max_length=100, blank=False)
    symbol=models.CharField(max_length=100, blank=False,default="A")

    def __str__(self):
        return self.name
class Fees(models.Model):
    """Fee schedule for an exchange: a fixed amount plus a percentage."""
    name=models.CharField(max_length=100, blank=False, default="fee")
    fixed=models.DecimalField(max_digits=100, decimal_places=5)
    percent=models.DecimalField(max_digits=100, decimal_places=5)

    def __str__(self):
        return self.name
class StockEx(models.Model):
    """Stock exchange, with its IB exchange code and local trading hours."""
    name=models.CharField(max_length=100, blank=False)
    fees=models.ForeignKey('Fees',on_delete=models.CASCADE)
    ib_ticker=models.CharField(max_length=15, blank=True,default="AAA")
    opening_time=models.TimeField(default="09:00:00")
    closing_time=models.TimeField(default="17:00:00")

    def __str__(self):
        return self.name
class Strategy(models.Model):
    """A trading strategy, identified by its name."""
    name=models.CharField(max_length=100, blank=False)

    def __str__(self):
        return self.name
ame=models.CharField(max_length=100, blank=False)
stock_ex=models.ForeignKey('StockEx',on_delete=models.CASCADE)
currency=models.ForeignKey('Currency',on_delete=models.CASCADE)
etf_long=models.ForeignKey('Action',on_delete=models.PROTECT,default=0,related_name='etf_long')
etf_short=models.ForeignKey('Action',on_delete=models.PROTECT, default=0,related_name='etf_short')
class Meta:
ordering = ["name"]
def ib_ticker(self):
return self.ib_ticker
def __str__(self):
return self.name
class Action(models.Model):
    """A tradable product (stock/ETF), keyed by its symbol."""
    symbol=models.CharField(max_length=15, blank=False, primary_key=True)
    # NOTE(review): this name is rebound by the ib_ticker() method below, so
    # the CharField never survives the class body and is not registered as a
    # model field -- queries like Action.objects.get(ib_ticker=...) would
    # then fail.  Confirm intent.
    ib_ticker=models.CharField(max_length=15, blank=True,default="AAA")
    name=models.CharField(max_length=100, blank=False)
    stock_ex=models.ForeignKey('StockEx',on_delete=models.CASCADE)
    currency=models.ForeignKey('Currency',on_delete=models.CASCADE)
    category=models.ForeignKey('ActionCategory',on_delete=models.CASCADE,blank=True)
    strategy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True,default=0)

    class Meta:
        ordering = ["name"]

    def ib_ticker(self):
        """IB ticker derived from the symbol: everything before the first dot."""
        t=self.symbol.split(".")
        return t[0]

    def __str__(self):
        return self.name
class Order(models.Model):
    """One executed (or simulated) trade: entry, optional exit, and P&L bookkeeping."""
    action=models.ForeignKey('Action',on_delete=models.CASCADE)
    pf=models.ForeignKey('PF',on_delete=models.SET_NULL,blank=True,null=True)
    # True while the position is open; cleared by the exit paths.
    active=models.BooleanField(blank=False,default=True)
    entering_date=models.DateTimeField(null=False, blank=False, auto_now_add=True)
    exiting_date=models.DateTimeField(null=True, blank=True)
    entering_price=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)
    exiting_price=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)
    # Stop-loss threshold price, computed at entry when an "sl" kwarg is given.
    sl_threshold=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)
    profit=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)
    profit_percent=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)
    quantity=models.DecimalField(max_digits=100, decimal_places=5,blank=True,null=True)

    def __str__(self):
        return self.action.name + " "+ str(self.entering_date)
def pf_retrieve_all(**kwargs):
    """Collect the unique stock symbols held across all portfolios of one direction.

    kwargs:
        short (bool): which direction's portfolios to scan (default False).
        opening (str): "9h" restricts to Paris/XETRA listings, "15h" to
            Nasdaq; anything else means no exchange restriction.
    """
    opening = kwargs.get("opening")
    symbols = []
    for pf in PF.objects.filter(short=kwargs.get("short", False)):
        cat = ActionCategory.objects.get(short="ACT")
        base_q = Q(category=cat)
        if opening == "9h":
            paris = StockEx.objects.filter(name="Paris")
            xetra = StockEx.objects.filter(name="XETRA")
            exchange_q = Q(stock_ex=paris[0]) | Q(stock_ex=xetra[0])
            actions = pf.actions.filter(base_q & exchange_q)
        elif opening == "15h":
            nasdaq = StockEx.objects.filter(name="Nasdaq")
            actions = pf.actions.filter(base_q & Q(stock_ex=nasdaq[0]))
        else:
            actions = pf.actions.filter(base_q)
        for action in actions:
            if action.symbol not in symbols:
                symbols.append(action.symbol)
    return symbols
ield(Action,blank=True)
short=models.BooleanField(blank=False,default=False)
strategy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True)
stock_ex=models.ForeignKey('StockEx',on_delete=models.CASCADE,blank=True,default=2)
def __str__(self):
return self.name
def retrieve(self):
arr=[]
for action in self.actions.all():
arr.append(action.symbol)
return arr
def remove(self,symbol):
a = Action.objects.get(symbol=symbol)
try:
self.actions.remove(a)
self.save()
except Exception as msg:
print("exception in remove_symbol")
print(symbol)
print(msg)
_, e_, exc_tb = sys.exc_info()
print("line " + str(exc_tb.tb_lineno))
pass
def append(self,symbol):
try:
a = Action.objects.get(symbol=symbol)
self.actions.add(a)
self.save()
except Exception as msg:
print("exception in " + __name__)
print(symbol)
print(msg)
_, e_, exc_tb = sys.exc_info()
print("line " + str(exc_tb.tb_lineno))
pass
def get_pf(strategy, exchange,short):
s=Strategy.objects.get(name=strategy)
e=StockEx.objects.get(name=exchange)
c1 = Q(stock_ex=e)
c2 = Q(strategy=s)
c3 = Q(short=short)
return PF.objects.get(c1 & c2 & c3)
="AAA", primary_key=True)
name=models.CharField(max_length=100, blank=False)
def __str__(self):
return self.name
harField(max_length=100, blank=False,default="")
strategy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True)
stock_ex=models.ForeignKey('StockEx',on_delete=models.CASCADE,blank=True,default=2)
def __str__(self):
return self.name
def get_capital(strategy, exchange,short):
s=Strategy.objects.get(name=strategy)
e=StockEx.objects.get(name=exchange)
c1 = Q(stock_ex=e)
c2 = Q(strategy=s)
c3 = Q(short=short)
return Capital.objects.get(c1 & c2 & c3)
lank=True)
stock_ex=models.ForeignKey('StockEx',on_delete=models.CASCADE,blank=True,default=2)
def __str__(self):
return self.name
def get_order_capital(strategy, exchange,short):
s=Strategy.objects.get(name=strategy)
e=StockEx.objects.get(name=exchange)
c1 = Q(stock_ex=e)
c2 = Q(strategy=s)
return OrderCapital.objects.get(c1 & c2)
for a in self.actions.all():
self.actions.remove(a)
self.save()
def append(self,symbol):
a = Action.objects.get(symbol=symbol)
self.actions.add(a)
self.save()
def retrieve(self):
arr=[]
for action in self.actions.all():
arr.append(action.symbol)
return arr
def __str__(self):
return self.name
def get_candidates(strategy, exchange):
s=Strategy.objects.get(name=strategy)
e=StockEx.objects.get(name=exchange)
c1 = Q(stock_ex=e)
c2 = Q(strategy=s)
return Candidates.objects.get(c1 & c2)
egy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True)
def reset(self):
for a in self.actions.all():
self.actions.remove(a)
self.save()
def append(self,symbol):
a = Action.objects.get(symbol=symbol)
self.actions.add(a)
self.save()
def remove(self,symbol):
a = Action.objects.get(symbol=symbol)
try:
self.actions.remove(a)
self.save()
except Exception as msg:
print("exception in " + __name__)
print(symbol)
print(msg)
_, e_, exc_tb = sys.exc_info()
print("line " + str(exc_tb.tb_lineno))
pass
def retrieve(self):
arr=[]
for action in self.actions.all():
arr.append(action.symbol)
return arr
def __str__(self):
return self.name
.ManyToManyField(Index,blank=True)
strategy=models.ForeignKey('Strategy',on_delete=models.CASCADE,blank=True,default=0)
def retrieve(self):
arr=[]
for action in self.actions.all():
arr.append(action.symbol)
return arr
def __str__(self):
return self.name | true | true |
f71ca7306894b8080e9f8813e913c2b35a942d36 | 851 | py | Python | src/lib/enums.py | BlackParure/AI-StarCraft-II | 7feee4addff9881b3c735791f4a43421f813fcfc | [
"Apache-2.0"
] | 7 | 2019-01-17T16:46:24.000Z | 2020-09-09T06:35:26.000Z | src/lib/enums.py | BlackParure/AI-StarCraft-II | 7feee4addff9881b3c735791f4a43421f813fcfc | [
"Apache-2.0"
] | null | null | null | src/lib/enums.py | BlackParure/AI-StarCraft-II | 7feee4addff9881b3c735791f4a43421f813fcfc | [
"Apache-2.0"
] | null | null | null | from easydict import EasyDict as edict
# the corresponding semantics to the index of
# obs.observation.feature_minimap and obs.observation.feature_screen
# Indices of the layers in obs.observation.feature_minimap.
feature_mini_id = edict(
    HEIGHT_MAP=0,
    VISIBILITY=1,
    CREEP=2,
    CAMERA=3,
    PLAYER_ID=4,
    PLAYER_RELATIVE=5,
    PLAYER_SELECTED=6,
)

# Indices of the layers in obs.observation.feature_screen.
feature_screen_id = edict(
    HEIGHT_MAP=0,
    VISIBILITY=1,
    CREEP=2,
    POWER=3,
    PLAYER_ID=4,
    PLAYER_RELATIVE=5,
    UNIT_TYPE=6,
    SELECTED=7,
    HIT_POINTS=8,
    ENERGY=9,
    SHIELDS=10,
    UNIT_DENSITY=11,
    UNIT_DENSITY_AA=12,
)
| 29.344828 | 68 | 0.836663 | from easydict import EasyDict as edict
feature_mini_id = edict()
feature_mini_id.HEIGHT_MAP = 0
feature_mini_id.VISIBILITY = 1
feature_mini_id.CREEP = 2
feature_mini_id.CAMERA = 3
feature_mini_id.PLAYER_ID = 4
feature_mini_id.PLAYER_RELATIVE = 5
feature_mini_id.PLAYER_SELECTED = 6
feature_screen_id = edict()
feature_screen_id.HEIGHT_MAP = 0
feature_screen_id.VISIBILITY = 1
feature_screen_id.CREEP = 2
feature_screen_id.POWER = 3
feature_screen_id.PLAYER_ID = 4
feature_screen_id.PLAYER_RELATIVE = 5
feature_screen_id.UNIT_TYPE = 6
feature_screen_id.SELECTED = 7
feature_screen_id.HIT_POINTS = 8
feature_screen_id.ENERGY = 9
feature_screen_id.SHIELDS = 10
feature_screen_id.UNIT_DENSITY = 11
feature_screen_id.UNIT_DENSITY_AA = 12
| true | true |
f71ca8df5ac6d2ef263acfbbb27f84f925bf74a8 | 455 | py | Python | projects_api/migrations/0032_user.py | sorianos/profile-rest-api | 453b326cf067a07455772c32050a17c31b5dc71a | [
"MIT"
] | null | null | null | projects_api/migrations/0032_user.py | sorianos/profile-rest-api | 453b326cf067a07455772c32050a17c31b5dc71a | [
"MIT"
] | 5 | 2021-03-19T11:56:51.000Z | 2022-02-10T14:08:09.000Z | projects_api/migrations/0032_user.py | sorianos/profile-rest-api | 453b326cf067a07455772c32050a17c31b5dc71a | [
"MIT"
] | 1 | 2020-10-29T17:41:34.000Z | 2020-10-29T17:41:34.000Z | # Generated by Django 2.2 on 2021-01-12 07:30
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: creates the bare ``User`` table (id column only)."""

    dependencies = [
        ('projects_api', '0031_auto_20201217_2330'),
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                # Auto-increment primary key; no other columns are defined here.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
    ]
| 22.75 | 114 | 0.589011 |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the initial ``User`` table (auto ``id`` column only)."""

    # Must be applied after projects_api migration 0031.
    dependencies = [
        ('projects_api', '0031_auto_20201217_2330'),
    ]

    operations = [
        # Auto-generated model with no explicit fields: the table's only
        # column is the implicit auto-incrementing primary key.
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
    ]
| true | true |
f71ca96e2c4377bd676e8a3d35dfed029ac7363e | 16,669 | py | Python | web2py/applications/ControleEstoque/languages/fr.py | GuizaoBR/Controle-Estoque | b4d7e3c665a14ea77224fa448aaf7e3d4d6fe4ed | [
"Apache-2.0"
] | null | null | null | web2py/applications/ControleEstoque/languages/fr.py | GuizaoBR/Controle-Estoque | b4d7e3c665a14ea77224fa448aaf7e3d4d6fe4ed | [
"Apache-2.0"
] | null | null | null | web2py/applications/ControleEstoque/languages/fr.py | GuizaoBR/Controle-Estoque | b4d7e3c665a14ea77224fa448aaf7e3d4d6fe4ed | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
{
'!langcode!': 'fr',
'!langname!': 'Français',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" est une expression optionnelle comme "champ1=\'nouvellevaleur\'". Vous ne pouvez mettre à jour ou supprimer les résultats d\'un JOIN',
'%d/%m/%Y': '%d/%m/%Y',
'%d/%m/%Y %H:%M:%S': '%d/%m/%Y %H:%M:%S',
'%s %%{row} deleted': '%s lignes supprimées',
'%s %%{row} updated': '%s lignes mises à jour',
'%s selected': '%s sélectionné',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'(**%.0d MB**)': '(**%.0d MB**)',
'**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}',
'**%(items)s** items, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** items, **%(bytes)s** %%{byte(bytes)}',
'**not available** (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)': '**not available** (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)',
'?': '?',
'@markmin\x01(**%.0d MB**)': '(**%.0d MB**)',
'@markmin\x01**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}',
'@markmin\x01**%(items)s** items, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** items, **%(bytes)s** %%{byte(bytes)}',
'@markmin\x01**not available** (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)': '**not available** (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)',
'@markmin\x01``**not available**``:red (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)': '``**not available**``:red (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)',
'@markmin\x01An error occured, please [[reload %s]] the page': 'An error occured, please [[reload %s]] the page',
'@markmin\x01Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'@markmin\x01DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'@markmin\x01Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})': 'Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})',
'@markmin\x01Number of entries: **%s**': 'Number of entries: **%s**',
'@markmin\x01RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'``**not available**``:red (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)': '``**not available**``:red (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)',
'A new password was emailed to you': 'A new password was emailed to you',
'about': 'à propos',
'About': 'À propos',
'Access Control': "Contrôle d'accès",
'admin': 'admin',
'Admin language': 'Admin language',
'Administrative Interface': "Interface d'administration",
'Administrative interface': "Interface d'administration",
'administrative interface': 'administrative interface',
'Ajax Recipes': 'Recettes Ajax',
'An error occured, please [[reload %s]] the page': 'An error occured, please [[reload %s]] the page',
'appadmin is disabled because insecure channel': "appadmin est désactivée parce que le canal n'est pas sécurisé",
'Apply changes': 'Apply changes',
'Are you sure you want to delete this object?': 'Êtes-vous sûr de vouloir supprimer cet objet?',
'Authentication': 'Authentification',
'Authentication code': 'Authentication code',
'Available Databases and Tables': 'Bases de données et tables disponibles',
'Buy this book': 'Acheter ce livre',
"Buy web2py's book": "Buy web2py's book",
'cache': 'cache',
'Cache': 'Cache',
'Cache Cleared': 'Cache Cleared',
'Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'Cache Keys': 'Cache Keys',
'Cannot be empty': 'Ne peut pas être vide',
'change password': 'changer le mot de passe',
'Change password': 'Change password',
'Check to delete': 'Cliquez pour supprimer',
'Check to delete:': 'Cliquez pour supprimer:',
'Clear CACHE?': 'Vider le CACHE?',
'Clear DISK': 'Vider le DISQUE',
'Clear RAM': 'Vider la RAM',
'Click on the link %(link)s to reset your password': 'Click on the link %(link)s to reset your password',
'Client IP': 'IP client',
'Community': 'Communauté',
'Components and Plugins': 'Composants et Plugiciels',
'Config.ini': 'Config.ini',
'Controller': 'Contrôleur',
'Copyright': "Droit d'auteur",
'Created By': 'Créé par',
'created by': 'created by',
'Created On': 'Créé le',
'Current request': 'Demande actuelle',
'Current response': 'Réponse actuelle',
'Current session': 'Session en cours',
'customize me!': 'personnalisez-moi!',
'data uploaded': 'données téléchargées',
'Database': 'base de données',
'Database %s select': 'base de données %s selectionnée',
'Database Administration (appadmin)': 'Database Administration (appadmin)',
'db': 'db',
'DB Model': 'Modèle BD',
'Delete:': 'Supprimer:',
'Demo': 'Démo',
'Deployment Recipes': 'Recettes de déploiement',
'Description': 'Description',
'design': 'design',
'Design': 'Design',
'direction: ltr': 'direction: ltr',
'DISK': 'DISQUE',
'Disk Cache Keys': 'Clés de cache du disque',
'Disk Cleared': 'Disque vidé',
'DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'Documentation': 'Documentation',
"Don't know what to do?": 'Vous ne savez pas quoi faire?',
'done!': 'fait!',
'Download': 'Téléchargement',
'E-mail': 'Courriel',
'Edit': 'Éditer',
'Edit current record': "Modifier l'enregistrement courant",
'edit profile': 'modifier le profil',
'Edit This App': 'Modifier cette application',
'Email and SMS': 'Courriel et texto',
'Email sent': 'Email sent',
'Email verification': 'Email verification',
'Email verified': 'Email verified',
'Enter an integer between %(min)g and %(max)g': 'Enter an integer between %(min)g and %(max)g',
'enter an integer between %(min)g and %(max)g': 'entrez un entier entre %(min)g et %(max)g',
'Errors': 'Erreurs',
'export as csv file': 'exporter sous forme de fichier csv',
'FAQ': 'FAQ',
'First name': 'Prénom',
'Forms and Validators': 'Formulaires et Validateurs',
'Free Applications': 'Applications gratuites',
'Function disabled': 'Fonction désactivée',
'Graph Model': 'Représentation graphique du modèle',
'Group %(group_id)s created': '%(group_id)s groupe créé',
'Group %(group_id)s deleted': 'Group %(group_id)s deleted',
'Group ID': 'ID du groupe',
'Group uniquely assigned to user %(id)s': "Groupe unique attribué à l'utilisateur %(id)s",
'Groups': 'Groupes',
'Hello World': 'Bonjour le monde',
'Helping web2py': 'Aider web2py',
'Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})': 'Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})',
'Home': 'Accueil',
'How did you get here?': 'How did you get here?',
'import': 'importer',
'Import/Export': 'Importer/Exporter',
'Incorrect code. {0} more attempt(s) remaining.': 'Incorrect code. {0} more attempt(s) remaining.',
'Index': 'Index',
'insert new': 'insérer un nouveau',
'insert new %s': 'insérer un nouveau %s',
'Insufficient privileges': 'Insufficient privileges',
'Internal State': 'État interne',
'Introduction': 'Présentation',
'Invalid email': 'Courriel invalide',
'Invalid key': 'Invalid key',
'Invalid login': 'Invalid login',
'Invalid password': 'Invalid password',
'Invalid Query': 'Requête Invalide',
'invalid request': 'requête invalide',
'Invalid reset password': 'Invalid reset password',
'Invalid user': 'Invalid user',
'Invalid username': 'Invalid username',
'Invitation to join %(site)s': 'Invitation to join %(site)s',
'Is Active': 'Est actif',
'Key': 'Clé',
'Key verified': 'Key verified',
'Last name': 'Nom',
'Layout': 'Mise en page',
'Layout Plugins': 'Plugins de mise en page',
'Layouts': 'Mises en page',
'Live chat': 'Clavardage en direct',
'Live Chat': 'Clavardage en direct',
'Loading...': 'Chargement...',
'loading...': 'chargement...',
'Log In': 'Connexion',
'Logged in': 'Connecté',
'Logged out': 'Logged out',
'login': 'connexion',
'Login': 'Connexion',
'Login disabled by administrator': 'Login disabled by administrator',
'logout': 'déconnexion',
'lost password': 'mot de passe perdu',
'Lost Password': 'Mot de passe perdu',
'Lost password?': 'Mot de passe perdu?',
'lost password?': 'mot de passe perdu?',
'Main Menu': 'Menu principal',
'Manage %(action)s': 'Manage %(action)s',
'Manage Access Control': 'Manage Access Control',
'Manage Cache': 'Gérer le Cache',
'Memberships': 'Memberships',
'Menu Model': 'Menu modèle',
'Modified By': 'Modifié par',
'Modified On': 'Modifié le',
'My Sites': 'Mes sites',
'Name': 'Nom',
'New password': 'New password',
'New Record': 'Nouvel enregistrement',
'new record inserted': 'nouvel enregistrement inséré',
'next %s rows': '%s prochaine lignes',
'next 100 rows': '100 prochaines lignes',
'No databases in this application': "Cette application n'a pas de bases de données",
'no package selected': 'no package selected',
'Number of entries: **%s**': 'Number of entries: **%s**',
'Object or table name': 'Objet ou nom de table',
'Old password': 'Old password',
'Online book': 'Online book',
'Online examples': 'Exemples en ligne',
'or import from csv file': "ou importer d'un fichier CSV",
'Origin': 'Origine',
'Other Plugins': 'Autres Plugiciels',
'Other Recipes': 'Autres recettes',
'Overview': 'Présentation',
'password': 'mot de passe',
'Password': 'Mot de passe',
'Password changed': 'Password changed',
"Password fields don't match": 'Les mots de passe ne correspondent pas',
'Password reset': 'Password reset',
'Password retrieve': 'Password retrieve',
'Permission': 'Permission',
'Permissions': 'Permissions',
'please input your password again': "S'il vous plaît entrer votre mot de passe à nouveau",
'Plugins': 'Plugiciels',
'Powered by': 'Alimenté par',
'Preface': 'Préface',
'previous %s rows': '%s lignes précédentes',
'previous 100 rows': '100 lignes précédentes',
'profile': 'profil',
'Profile updated': 'Profile updated',
'pygraphviz library not found': 'Bibliothèque pygraphviz introuvable',
'Python': 'Python',
'Query:': 'Requête:',
'Quick Examples': 'Exemples Rapides',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram vidée',
'RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'Readme': 'Lisez-moi',
'Recipes': 'Recettes',
'Record': 'enregistrement',
'Record %(id)s created': 'Enregistrement %(id)s créé',
'Record %(id)s deleted': 'Record %(id)s deleted',
'Record %(id)s read': 'Record %(id)s read',
'Record %(id)s updated': 'Enregistrement %(id)s modifié',
'Record Created': 'Enregistrement créé',
'Record Deleted': 'Record Deleted',
'record does not exist': "l'archive n'existe pas",
'Record ID': "ID de l'enregistrement",
'Record id': "id de l'enregistrement",
'Record Updated': 'Enregistrement modifié',
'Register': "S'inscrire",
'register': "s'inscrire",
'Registration identifier': "Identifiant d'inscription",
'Registration is pending approval': 'Registration is pending approval',
'Registration key': "Clé d'enregistrement",
'Registration needs verification': 'Registration needs verification',
'Registration successful': 'Inscription réussie',
'Remember me (for 30 days)': 'Se souvenir de moi (pendant 30 jours)',
'Request reset password': 'Demande de réinitialiser le mot clé',
'Reset Password key': 'Réinitialiser le mot clé',
'Resources': 'Ressources',
'Role': 'Rôle',
'Roles': 'Rôles',
'Rows in Table': 'Lignes du tableau',
'Rows selected': 'Lignes sélectionnées',
'Save model as...': 'Enregistrer le modèle sous...',
'Semantic': 'Sémantique',
'Services': 'Services',
'Sign Up': "S'inscrire",
'Size of cache:': 'Taille de la mémoire cache:',
'state': 'état',
'Statistics': 'Statistiques',
'Stylesheet': 'Feuille de style',
'submit': 'soumettre',
'Submit': 'Soumettre',
'Support': 'Soutien',
'Sure you want to delete this object?': 'Êtes-vous sûr de vouloir supprimer cet objet?',
'Table': 'tableau',
'Table name': 'Nom du tableau',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'La "requête" est une condition comme "db.table1.champ1==\'valeur\'". Quelque chose comme "db.table1.champ1==db.table2.champ2" résulte en un JOIN SQL.',
'The Core': 'Le noyau',
'The output of the file is a dictionary that was rendered by the view %s': 'La sortie de ce fichier est un dictionnaire qui été restitué par la vue %s',
'The Views': 'Les Vues',
'This App': 'Cette Appli',
'This code was emailed to you and is required for login.': 'This code was emailed to you and is required for login.',
'This email already has an account': 'This email already has an account',
'This is a copy of the scaffolding application': "Ceci est une copie de l'application échafaudage",
'Time in Cache (h:m:s)': 'Temps en Cache (h:m:s)',
'Timestamp': 'Horodatage',
'Traceback': 'Traceback',
'Twitter': 'Twitter',
'Two-step Login Authentication Code': 'Two-step Login Authentication Code',
'unable to parse csv file': "incapable d'analyser le fichier cvs",
'Unable to send email': 'Unable to send email',
'Update:': 'Mise à jour:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Employez (...)&(...) pour AND, (...)|(...) pour OR, and ~(...) pour NOT afin de construire des requêtes plus complexes.',
'User': 'User',
'User %(id)s is impersonating %(other_id)s': 'User %(id)s is impersonating %(other_id)s',
'User %(id)s Logged-in': 'Utilisateur %(id)s connecté',
'User %(id)s Logged-out': 'User %(id)s Logged-out',
'User %(id)s Password changed': 'User %(id)s Password changed',
'User %(id)s Password reset': 'User %(id)s Password reset',
'User %(id)s Password retrieved': 'User %(id)s Password retrieved',
'User %(id)s Profile updated': 'User %(id)s Profile updated',
'User %(id)s Registered': 'Utilisateur %(id)s enregistré',
'User %(id)s Username retrieved': 'User %(id)s Username retrieved',
'User %(id)s Verification email sent': 'User %(id)s Verification email sent',
'User %(id)s verified registration key': 'User %(id)s verified registration key',
'User ID': 'ID utilisateur',
'User Voice': "Voix de l'utilisateur",
'Username': 'Username',
'Username already taken': 'Username already taken',
'Username retrieve': 'Username retrieve',
'Users': 'Users',
'Verify Password': 'Vérifiez le mot de passe',
'Videos': 'Vidéos',
'View': 'Présentation',
'Web2py': 'Web2py',
'Welcome': 'Bienvenue',
'Welcome %(username)s! Click on the link %(link)s to verify your email': 'Welcome %(username)s! Click on the link %(link)s to verify your email',
'Welcome %s': 'Bienvenue %s',
'Welcome to web2py': 'Bienvenue à web2py',
'Welcome to web2py!': 'Bienvenue à web2py!',
'Which called the function %s located in the file %s': 'Qui a appelé la fonction %s se trouvant dans le fichier %s',
'Working...': 'Working...',
'You are successfully running web2py': 'Vous exécutez avec succès web2py',
'You can modify this application and adapt it to your needs': "Vous pouvez modifier cette application et l'adapter à vos besoins",
'You have been invited to join %(site)s, click %(link)s to complete the process': 'You have been invited to join %(site)s, click %(link)s to complete the process',
'You visited the url %s': "Vous avez visité l'URL %s",
'Your password is: %(password)s': 'Your password is: %(password)s',
'Your temporary login code is {0}': 'Your temporary login code is {0}',
'Your username is: %(username)s': 'Your username is: %(username)s',
'Your username was emailed to you': 'Your username was emailed to you',
}
| 52.91746 | 293 | 0.669866 |
{
'!langcode!': 'fr',
'!langname!': 'Français',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" est une expression optionnelle comme "champ1=\'nouvellevaleur\'". Vous ne pouvez mettre à jour ou supprimer les résultats d\'un JOIN',
'%d/%m/%Y': '%d/%m/%Y',
'%d/%m/%Y %H:%M:%S': '%d/%m/%Y %H:%M:%S',
'%s %%{row} deleted': '%s lignes supprimées',
'%s %%{row} updated': '%s lignes mises à jour',
'%s selected': '%s sélectionné',
'%Y-%m-%d': '%Y-%m-%d',
'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'(**%.0d MB**)': '(**%.0d MB**)',
'**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}',
'**%(items)s** items, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** items, **%(bytes)s** %%{byte(bytes)}',
'**not available** (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)': '**not available** (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)',
'?': '?',
'@markmin\x01(**%.0d MB**)': '(**%.0d MB**)',
'@markmin\x01**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** %%{item(items)}, **%(bytes)s** %%{byte(bytes)}',
'@markmin\x01**%(items)s** items, **%(bytes)s** %%{byte(bytes)}': '**%(items)s** items, **%(bytes)s** %%{byte(bytes)}',
'@markmin\x01**not available** (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)': '**not available** (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)',
'@markmin\x01``**not available**``:red (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)': '``**not available**``:red (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)',
'@markmin\x01An error occured, please [[reload %s]] the page': 'An error occured, please [[reload %s]] the page',
'@markmin\x01Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'@markmin\x01DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'@markmin\x01Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})': 'Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})',
'@markmin\x01Number of entries: **%s**': 'Number of entries: **%s**',
'@markmin\x01RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'``**not available**``:red (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)': '``**not available**``:red (requires the Python [[guppy http://pypi.python.org/pypi/guppy/ popup]] library)',
'A new password was emailed to you': 'A new password was emailed to you',
'about': 'à propos',
'About': 'À propos',
'Access Control': "Contrôle d'accès",
'admin': 'admin',
'Admin language': 'Admin language',
'Administrative Interface': "Interface d'administration",
'Administrative interface': "Interface d'administration",
'administrative interface': 'administrative interface',
'Ajax Recipes': 'Recettes Ajax',
'An error occured, please [[reload %s]] the page': 'An error occured, please [[reload %s]] the page',
'appadmin is disabled because insecure channel': "appadmin est désactivée parce que le canal n'est pas sécurisé",
'Apply changes': 'Apply changes',
'Are you sure you want to delete this object?': 'Êtes-vous sûr de vouloir supprimer cet objet?',
'Authentication': 'Authentification',
'Authentication code': 'Authentication code',
'Available Databases and Tables': 'Bases de données et tables disponibles',
'Buy this book': 'Acheter ce livre',
"Buy web2py's book": "Buy web2py's book",
'cache': 'cache',
'Cache': 'Cache',
'Cache Cleared': 'Cache Cleared',
'Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'Cache contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'Cache Keys': 'Cache Keys',
'Cannot be empty': 'Ne peut pas être vide',
'change password': 'changer le mot de passe',
'Change password': 'Change password',
'Check to delete': 'Cliquez pour supprimer',
'Check to delete:': 'Cliquez pour supprimer:',
'Clear CACHE?': 'Vider le CACHE?',
'Clear DISK': 'Vider le DISQUE',
'Clear RAM': 'Vider la RAM',
'Click on the link %(link)s to reset your password': 'Click on the link %(link)s to reset your password',
'Client IP': 'IP client',
'Community': 'Communauté',
'Components and Plugins': 'Composants et Plugiciels',
'Config.ini': 'Config.ini',
'Controller': 'Contrôleur',
'Copyright': "Droit d'auteur",
'Created By': 'Créé par',
'created by': 'created by',
'Created On': 'Créé le',
'Current request': 'Demande actuelle',
'Current response': 'Réponse actuelle',
'Current session': 'Session en cours',
'customize me!': 'personnalisez-moi!',
'data uploaded': 'données téléchargées',
'Database': 'base de données',
'Database %s select': 'base de données %s selectionnée',
'Database Administration (appadmin)': 'Database Administration (appadmin)',
'db': 'db',
'DB Model': 'Modèle BD',
'Delete:': 'Supprimer:',
'Demo': 'Démo',
'Deployment Recipes': 'Recettes de déploiement',
'Description': 'Description',
'design': 'design',
'Design': 'Design',
'direction: ltr': 'direction: ltr',
'DISK': 'DISQUE',
'Disk Cache Keys': 'Clés de cache du disque',
'Disk Cleared': 'Disque vidé',
'DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'DISK contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'Documentation': 'Documentation',
"Don't know what to do?": 'Vous ne savez pas quoi faire?',
'done!': 'fait!',
'Download': 'Téléchargement',
'E-mail': 'Courriel',
'Edit': 'Éditer',
'Edit current record': "Modifier l'enregistrement courant",
'edit profile': 'modifier le profil',
'Edit This App': 'Modifier cette application',
'Email and SMS': 'Courriel et texto',
'Email sent': 'Email sent',
'Email verification': 'Email verification',
'Email verified': 'Email verified',
'Enter an integer between %(min)g and %(max)g': 'Enter an integer between %(min)g and %(max)g',
'enter an integer between %(min)g and %(max)g': 'entrez un entier entre %(min)g et %(max)g',
'Errors': 'Erreurs',
'export as csv file': 'exporter sous forme de fichier csv',
'FAQ': 'FAQ',
'First name': 'Prénom',
'Forms and Validators': 'Formulaires et Validateurs',
'Free Applications': 'Applications gratuites',
'Function disabled': 'Fonction désactivée',
'Graph Model': 'Représentation graphique du modèle',
'Group %(group_id)s created': '%(group_id)s groupe créé',
'Group %(group_id)s deleted': 'Group %(group_id)s deleted',
'Group ID': 'ID du groupe',
'Group uniquely assigned to user %(id)s': "Groupe unique attribué à l'utilisateur %(id)s",
'Groups': 'Groupes',
'Hello World': 'Bonjour le monde',
'Helping web2py': 'Aider web2py',
'Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})': 'Hit Ratio: **%(ratio)s%%** (**%(hits)s** %%{hit(hits)} and **%(misses)s** %%{miss(misses)})',
'Home': 'Accueil',
'How did you get here?': 'How did you get here?',
'import': 'importer',
'Import/Export': 'Importer/Exporter',
'Incorrect code. {0} more attempt(s) remaining.': 'Incorrect code. {0} more attempt(s) remaining.',
'Index': 'Index',
'insert new': 'insérer un nouveau',
'insert new %s': 'insérer un nouveau %s',
'Insufficient privileges': 'Insufficient privileges',
'Internal State': 'État interne',
'Introduction': 'Présentation',
'Invalid email': 'Courriel invalide',
'Invalid key': 'Invalid key',
'Invalid login': 'Invalid login',
'Invalid password': 'Invalid password',
'Invalid Query': 'Requête Invalide',
'invalid request': 'requête invalide',
'Invalid reset password': 'Invalid reset password',
'Invalid user': 'Invalid user',
'Invalid username': 'Invalid username',
'Invitation to join %(site)s': 'Invitation to join %(site)s',
'Is Active': 'Est actif',
'Key': 'Clé',
'Key verified': 'Key verified',
'Last name': 'Nom',
'Layout': 'Mise en page',
'Layout Plugins': 'Plugins de mise en page',
'Layouts': 'Mises en page',
'Live chat': 'Clavardage en direct',
'Live Chat': 'Clavardage en direct',
'Loading...': 'Chargement...',
'loading...': 'chargement...',
'Log In': 'Connexion',
'Logged in': 'Connecté',
'Logged out': 'Logged out',
'login': 'connexion',
'Login': 'Connexion',
'Login disabled by administrator': 'Login disabled by administrator',
'logout': 'déconnexion',
'lost password': 'mot de passe perdu',
'Lost Password': 'Mot de passe perdu',
'Lost password?': 'Mot de passe perdu?',
'lost password?': 'mot de passe perdu?',
'Main Menu': 'Menu principal',
'Manage %(action)s': 'Manage %(action)s',
'Manage Access Control': 'Manage Access Control',
'Manage Cache': 'Gérer le Cache',
'Memberships': 'Memberships',
'Menu Model': 'Menu modèle',
'Modified By': 'Modifié par',
'Modified On': 'Modifié le',
'My Sites': 'Mes sites',
'Name': 'Nom',
'New password': 'New password',
'New Record': 'Nouvel enregistrement',
'new record inserted': 'nouvel enregistrement inséré',
'next %s rows': '%s prochaine lignes',
'next 100 rows': '100 prochaines lignes',
'No databases in this application': "Cette application n'a pas de bases de données",
'no package selected': 'no package selected',
'Number of entries: **%s**': 'Number of entries: **%s**',
'Object or table name': 'Objet ou nom de table',
'Old password': 'Old password',
'Online book': 'Online book',
'Online examples': 'Exemples en ligne',
'or import from csv file': "ou importer d'un fichier CSV",
'Origin': 'Origine',
'Other Plugins': 'Autres Plugiciels',
'Other Recipes': 'Autres recettes',
'Overview': 'Présentation',
'password': 'mot de passe',
'Password': 'Mot de passe',
'Password changed': 'Password changed',
"Password fields don't match": 'Les mots de passe ne correspondent pas',
'Password reset': 'Password reset',
'Password retrieve': 'Password retrieve',
'Permission': 'Permission',
'Permissions': 'Permissions',
'please input your password again': "S'il vous plaît entrer votre mot de passe à nouveau",
'Plugins': 'Plugiciels',
'Powered by': 'Alimenté par',
'Preface': 'Préface',
'previous %s rows': '%s lignes précédentes',
'previous 100 rows': '100 lignes précédentes',
'profile': 'profil',
'Profile updated': 'Profile updated',
'pygraphviz library not found': 'Bibliothèque pygraphviz introuvable',
'Python': 'Python',
'Query:': 'Requête:',
'Quick Examples': 'Exemples Rapides',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram vidée',
'RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.': 'RAM contains items up to **%(hours)02d** %%{hour(hours)} **%(min)02d** %%{minute(min)} **%(sec)02d** %%{second(sec)} old.',
'Readme': 'Lisez-moi',
'Recipes': 'Recettes',
'Record': 'enregistrement',
'Record %(id)s created': 'Enregistrement %(id)s créé',
'Record %(id)s deleted': 'Record %(id)s deleted',
'Record %(id)s read': 'Record %(id)s read',
'Record %(id)s updated': 'Enregistrement %(id)s modifié',
'Record Created': 'Enregistrement créé',
'Record Deleted': 'Record Deleted',
'record does not exist': "l'archive n'existe pas",
'Record ID': "ID de l'enregistrement",
'Record id': "id de l'enregistrement",
'Record Updated': 'Enregistrement modifié',
'Register': "S'inscrire",
'register': "s'inscrire",
'Registration identifier': "Identifiant d'inscription",
'Registration is pending approval': 'Registration is pending approval',
'Registration key': "Clé d'enregistrement",
'Registration needs verification': 'Registration needs verification',
'Registration successful': 'Inscription réussie',
'Remember me (for 30 days)': 'Se souvenir de moi (pendant 30 jours)',
'Request reset password': 'Demande de réinitialiser le mot clé',
'Reset Password key': 'Réinitialiser le mot clé',
'Resources': 'Ressources',
'Role': 'Rôle',
'Roles': 'Rôles',
'Rows in Table': 'Lignes du tableau',
'Rows selected': 'Lignes sélectionnées',
'Save model as...': 'Enregistrer le modèle sous...',
'Semantic': 'Sémantique',
'Services': 'Services',
'Sign Up': "S'inscrire",
'Size of cache:': 'Taille de la mémoire cache:',
'state': 'état',
'Statistics': 'Statistiques',
'Stylesheet': 'Feuille de style',
'submit': 'soumettre',
'Submit': 'Soumettre',
'Support': 'Soutien',
'Sure you want to delete this object?': 'Êtes-vous sûr de vouloir supprimer cet objet?',
'Table': 'tableau',
'Table name': 'Nom du tableau',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'La "requête" est une condition comme "db.table1.champ1==\'valeur\'". Quelque chose comme "db.table1.champ1==db.table2.champ2" résulte en un JOIN SQL.',
'The Core': 'Le noyau',
'The output of the file is a dictionary that was rendered by the view %s': 'La sortie de ce fichier est un dictionnaire qui été restitué par la vue %s',
'The Views': 'Les Vues',
'This App': 'Cette Appli',
'This code was emailed to you and is required for login.': 'This code was emailed to you and is required for login.',
'This email already has an account': 'This email already has an account',
'This is a copy of the scaffolding application': "Ceci est une copie de l'application échafaudage",
'Time in Cache (h:m:s)': 'Temps en Cache (h:m:s)',
'Timestamp': 'Horodatage',
'Traceback': 'Traceback',
'Twitter': 'Twitter',
'Two-step Login Authentication Code': 'Two-step Login Authentication Code',
'unable to parse csv file': "incapable d'analyser le fichier cvs",
'Unable to send email': 'Unable to send email',
'Update:': 'Mise à jour:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Employez (...)&(...) pour AND, (...)|(...) pour OR, and ~(...) pour NOT afin de construire des requêtes plus complexes.',
'User': 'User',
'User %(id)s is impersonating %(other_id)s': 'User %(id)s is impersonating %(other_id)s',
'User %(id)s Logged-in': 'Utilisateur %(id)s connecté',
'User %(id)s Logged-out': 'User %(id)s Logged-out',
'User %(id)s Password changed': 'User %(id)s Password changed',
'User %(id)s Password reset': 'User %(id)s Password reset',
'User %(id)s Password retrieved': 'User %(id)s Password retrieved',
'User %(id)s Profile updated': 'User %(id)s Profile updated',
'User %(id)s Registered': 'Utilisateur %(id)s enregistré',
'User %(id)s Username retrieved': 'User %(id)s Username retrieved',
'User %(id)s Verification email sent': 'User %(id)s Verification email sent',
'User %(id)s verified registration key': 'User %(id)s verified registration key',
'User ID': 'ID utilisateur',
'User Voice': "Voix de l'utilisateur",
'Username': 'Username',
'Username already taken': 'Username already taken',
'Username retrieve': 'Username retrieve',
'Users': 'Users',
'Verify Password': 'Vérifiez le mot de passe',
'Videos': 'Vidéos',
'View': 'Présentation',
'Web2py': 'Web2py',
'Welcome': 'Bienvenue',
'Welcome %(username)s! Click on the link %(link)s to verify your email': 'Welcome %(username)s! Click on the link %(link)s to verify your email',
'Welcome %s': 'Bienvenue %s',
'Welcome to web2py': 'Bienvenue à web2py',
'Welcome to web2py!': 'Bienvenue à web2py!',
'Which called the function %s located in the file %s': 'Qui a appelé la fonction %s se trouvant dans le fichier %s',
'Working...': 'Working...',
'You are successfully running web2py': 'Vous exécutez avec succès web2py',
'You can modify this application and adapt it to your needs': "Vous pouvez modifier cette application et l'adapter à vos besoins",
'You have been invited to join %(site)s, click %(link)s to complete the process': 'You have been invited to join %(site)s, click %(link)s to complete the process',
'You visited the url %s': "Vous avez visité l'URL %s",
'Your password is: %(password)s': 'Your password is: %(password)s',
'Your temporary login code is {0}': 'Your temporary login code is {0}',
'Your username is: %(username)s': 'Your username is: %(username)s',
'Your username was emailed to you': 'Your username was emailed to you',
}
| true | true |
f71ca9b490a0a319f83ff81055834fce51a392e2 | 701 | py | Python | tests/ext/test_envconfig.py | Zipmatch/zipmatch-content | ead1caca63aaa4acdb092747ed03203670b50e63 | [
"BSD-3-Clause"
] | null | null | null | tests/ext/test_envconfig.py | Zipmatch/zipmatch-content | ead1caca63aaa4acdb092747ed03203670b50e63 | [
"BSD-3-Clause"
] | null | null | null | tests/ext/test_envconfig.py | Zipmatch/zipmatch-content | ead1caca63aaa4acdb092747ed03203670b50e63 | [
"BSD-3-Clause"
] | null | null | null | import pytest
from content.ext.envconfig import EnvConfig
@pytest.mark.parametrize('use_init_app', [True, False])
def test_ext_init(app, mocker, use_init_app):
    """EnvConfig must call init_app(app) whether the app is passed to the
    constructor or wired up explicitly via init_app()."""
    mock_init_app = mocker.patch.object(EnvConfig, 'init_app')
    if use_init_app:
        ext = EnvConfig()
        ext.init_app(app)
    else:
        EnvConfig(app)
    # Bug fix: Mock.called_with() is not an assertion -- it silently creates
    # an always-truthy child mock, so the old `assert mock.called_with(app)`
    # could never fail.  Use the real assertion helper instead.
    mock_init_app.assert_called_with(app)
@pytest.mark.parametrize('value, expected', [
    (1, 1),
    ('x', 'x'),
    ('[1, "x"]', [1, 'x']),
    ('123abc', '123abc')
])
def test_envconfig(app, monkeypatch, value, expected):
    """APP_-prefixed environment values are parsed into Python objects and
    exposed on app.config without the prefix."""
    # Robustness fix: monkeypatch.setenv() rejects non-str values on modern
    # pytest, so coerce the parametrized value (e.g. the int 1) explicitly.
    # Environment variables are always strings anyway.
    monkeypatch.setenv('APP_TEST_VALUE', str(value))
    env = EnvConfig()
    env.init_app(app)
    assert app.config['TEST_VALUE'] == expected
| 25.035714 | 62 | 0.653352 | import pytest
from content.ext.envconfig import EnvConfig
@pytest.mark.parametrize('use_init_app', [True, False])
def test_ext_init(app, mocker, use_init_app):
mock_init_app = mocker.patch.object(EnvConfig, 'init_app')
if use_init_app:
ext = EnvConfig()
ext.init_app(app)
else:
EnvConfig(app)
assert mock_init_app.called_with(app)
@pytest.mark.parametrize('value, expected', [
(1, 1),
('x', 'x'),
('[1, "x"]', [1, 'x']),
('123abc', '123abc')
])
def test_envconfig(app, monkeypatch, value, expected):
monkeypatch.setenv('APP_TEST_VALUE', value)
env = EnvConfig()
env.init_app(app)
assert app.config['TEST_VALUE'] == expected
| true | true |
f71ca9df83a8f9e1e8cf5e848d1ced2172679a2a | 8,631 | py | Python | 2019/07_AmplificationCircuit/amp.py | deanearlwright/AdventOfCode | ca4cf6315c0efa38bd7748fb6f4bc99e7934871d | [
"MIT"
] | 1 | 2021-01-03T23:09:28.000Z | 2021-01-03T23:09:28.000Z | 2019/07_AmplificationCircuit/amp.py | deanearlwright/AdventOfCode | ca4cf6315c0efa38bd7748fb6f4bc99e7934871d | [
"MIT"
] | 6 | 2020-12-26T21:02:42.000Z | 2020-12-26T21:02:52.000Z | 2019/07_AmplificationCircuit/amp.py | deanearlwright/AdventOfCode | ca4cf6315c0efa38bd7748fb6f4bc99e7934871d | [
"MIT"
] | null | null | null | # ======================================================================
# Amplification Circuit
# Advent of Code 2019 Day 07 -- Eric Wastl -- https://adventofcode.com
#
# Computer simulation by Dr. Dean Earl Wright III
# ======================================================================
# ======================================================================
# u o m a p . p y
# ======================================================================
"Amps for Amplification Circuit problem for Advent of Code 2019 Day 07"
# ----------------------------------------------------------------------
# import
# ----------------------------------------------------------------------
from __future__ import print_function
from itertools import permutations
import intcode
# ----------------------------------------------------------------------
# constants
# ----------------------------------------------------------------------
PHASES = '01234'
FEEDBACK = '56789'
LETTERS = 'ABCDE'
# ======================================================================
# Amps
# ======================================================================
class Amps(object):
    """A chain of up to five Amp objects running the same Intcode program.

    The output of each amplifier feeds the input of the next.  When
    ``feedback`` is True the last amplifier's output is looped back into
    the first until amplifier A halts (AoC 2019 day 7, part 2).
    """

    def __init__(self, num=5, inp=0, text=None, feedback=False):
        # 1. Start with no amplifiers
        self.amps = []            # Amp objects, lettered A..E
        self.num = num            # number of amplifiers in the chain (<= 5)
        self.inp = inp            # initial input signal for the first amp
        self.text = text          # Intcode program text shared by all amps
        self.output = 0           # output of the most recent run
        self.phases = None        # phase ordering that produced best output
        self.feedback = feedback  # True -> feedback-loop wiring
        #print("Creating amplifiers feedback=%s" % (feedback))
        # 2. Create as many amplifiers as needed
        assert num <= 5
        for indx in range(num):
            # 3. Create an amplifier and add it to the list
            self.amps.append(Amp(letter=LETTERS[indx], text=text))

    def find_best(self, watch=False):
        "Find the ordering of phases to maximize output"
        #print("find_best feedback=%s watch=%s" % (self.feedback, watch))
        # 1. Start with a very poor output
        best_output = 0
        # 2. loop for all of the permutations of the phases
        #    (phases 0-4 for series mode, 5-9 for feedback mode)
        if self.feedback:
            phase_numbers = FEEDBACK
        else:
            phase_numbers = PHASES
        for phases in list(permutations(phase_numbers)):
            # 3, Run this set of phases
            if self.feedback:
                output = self.run_feedback(phases=phases, inp=self.inp, watch=watch)
            else:
                output = self.run_series(phases=phases, inp=self.inp)
            # 4. If this is better that what we had before, save it
            if output > best_output:
                best_output = output
                self.output = output
                self.phases = phases
                if watch:
                    print("Setting best to %d for phase %s" % (output, phases))
        # 5. Return the best output
        return best_output

    def run_series(self, phases=PHASES, inp=None):
        "Run all the amplifiers in series; returns the last amp's output (or None on error)"
        # 1. Start with no final output and the initial input
        self.output = None
        if inp is None:
            inp = self.inp
        # 2. Run all the amplifiers in turn
        for indx in range(self.num):
            # 3. Run one amplifier
            output = self.amps[indx].run(inp=inp, phase=int(phases[indx]))
            # 4. If there was a problem exit
            if output is None:
                break
            # 5. Set up to run the next amplifier
            inp = output
        # 6. Return the result from the last amplifier run
        return output

    def run_feedback(self, phases=FEEDBACK, inp=None, watch=False):
        "Run all the amplifiers in series with a feedback loop"
        # 1. Start with no final output and the initial input
        #    The lists hold one slot per amplifier plus a spare slot so the
        #    inputs[indx+1] write below is always in range.
        self.output = None
        inputs = [0, 0, 0, 0, 0, 0]
        status = [intcode.STOP_RUN,
                  intcode.STOP_RUN,
                  intcode.STOP_RUN,
                  intcode.STOP_RUN,
                  intcode.STOP_RUN,
                  intcode.STOP_RUN,]
        outputs = [0, 0, 0, 0, 0, 0]
        if inp is None:
            inputs[0] = self.inp
        else:
            inputs[0] = inp
        # 2. Reset all of the amplifiers (forces fb_run to rebuild computers)
        for indx in range(self.num):
            self.amps[indx].computer = None
        # 3. Run amplifiers until done (amp A reports a halt):
        while status[0] != intcode.STOP_HLT:
            if watch:
                print('Starting feedback loop with input=%s' % (inputs[0]))
            # 4. Run all the amplifiers in turn
            for indx in range(self.num):
                # 5. Run one amplifier; fb_run returns (status, output) or None
                output = self.amps[indx].fb_run(inp=inputs[indx], phase=int(phases[indx]))
                # 6. If there was a problem exit
                if output is None:
                    return None
                # 7. Set up to run the next amplifier; the last amp's output
                #    is also fed back into amp A via inputs[0]
                if watch:
                    print("phases=%s, amp %s output=%s" % (phases, indx, output))
                status[indx] = output[0]
                output = output[1]
                outputs[indx] = output
                inputs[0] = output
                inputs[indx+1] = output
        # 8. Return the result from the last amplifier run
        return output
# ======================================================================
# Amp
# ======================================================================
class Amp(object): #pylint: disable=R0903
    """A single amplifier wrapping one Intcode computer."""

    def __init__(self, letter='Z', text=None):
        # 1. Store the values
        self.letter = letter    # identifying letter (A..E) used in messages
        self.text = text        # Intcode program text
        self.computer = None    # lazily-created intcode.IntCode instance

    def run(self, phase=0, inp=0):
        "Return the result of running the computer with inputs phase and inp (None on error)"
        # 1. Create a computer with the program from text
        self.computer = intcode.IntCode(text=self.text)
        # 3. Run the computer with inputs
        result = self.computer.run(inp=[phase, inp])
        # 4. Make sure it ended with a halt instruction
        if result != intcode.STOP_HLT:
            print("amplifier %s input=[%d,%d] ended with %d" %
                  (self.letter, phase, inp, result))
            return None
        # 5. Return the output (exactly one value is expected)
        output = self.computer.outputs()
        if len(output) != 1:
            print("amplifier %s input=[%d,%d] ended produced %d outputs" %
                  (self.letter, phase, inp, len(output)))
            return None
        return output[0]

    def fb_run(self, phase=0, inp=0):
        "Return the status and output of running the amplifier with inputs phase and inp"
        # 1. Create a computer with the program from text (if not already created)
        #    The phase setting is only supplied on the very first run.
        if self.computer is None:
            self.computer = intcode.IntCode(text=self.text)
            inp = [phase, inp]
        else:
            inp = [inp]
        # 3. Run the computer with inputs (resumes from where it paused)
        #print("Running computer with input = %s, counter=%s" % (inp, self.computer.counter))
        result = self.computer.run(inp=inp)
        # 4. Make sure it ended with a halt instruction or input instruction
        if result not in (intcode.STOP_HLT, intcode.STOP_INP):
            print("amplifier %s input=%s ended with %d" % (self.letter, inp, result))
            return None
        # 5. Return the result and output as a (status, value) pair
        output = self.computer.outputs()
        if len(output) != 1:
            print("amplifier %s input=%s ended produced %d outputs" %
                  (self.letter, inp, len(output)))
            return None
        return (result, output[0])
# ----------------------------------------------------------------------
# module initialization
# ----------------------------------------------------------------------
if __name__ == '__main__':
pass
# ======================================================================
# end u o m a p . p y end
# ======================================================================
| 36.884615 | 94 | 0.446993 |
from __future__ import print_function
from itertools import permutations
import intcode
PHASES = '01234'
FEEDBACK = '56789'
LETTERS = 'ABCDE'
class Amps(object):
def __init__(self, num=5, inp=0, text=None, feedback=False):
self.amps = []
self.num = num
self.inp = inp
self.text = text
self.output = 0
self.phases = None
self.feedback = feedback
assert num <= 5
for indx in range(num):
self.amps.append(Amp(letter=LETTERS[indx], text=text))
def find_best(self, watch=False):
best_output = 0
if self.feedback:
phase_numbers = FEEDBACK
else:
phase_numbers = PHASES
for phases in list(permutations(phase_numbers)):
if self.feedback:
output = self.run_feedback(phases=phases, inp=self.inp, watch=watch)
else:
output = self.run_series(phases=phases, inp=self.inp)
if output > best_output:
best_output = output
self.output = output
self.phases = phases
if watch:
print("Setting best to %d for phase %s" % (output, phases))
return best_output
def run_series(self, phases=PHASES, inp=None):
self.output = None
if inp is None:
inp = self.inp
for indx in range(self.num):
output = self.amps[indx].run(inp=inp, phase=int(phases[indx]))
if output is None:
break
inp = output
return output
def run_feedback(self, phases=FEEDBACK, inp=None, watch=False):
self.output = None
inputs = [0, 0, 0, 0, 0, 0]
status = [intcode.STOP_RUN,
intcode.STOP_RUN,
intcode.STOP_RUN,
intcode.STOP_RUN,
intcode.STOP_RUN,
intcode.STOP_RUN,]
outputs = [0, 0, 0, 0, 0, 0]
if inp is None:
inputs[0] = self.inp
else:
inputs[0] = inp
for indx in range(self.num):
self.amps[indx].computer = None
while status[0] != intcode.STOP_HLT:
if watch:
print('Starting feedback loop with input=%s' % (inputs[0]))
for indx in range(self.num):
output = self.amps[indx].fb_run(inp=inputs[indx], phase=int(phases[indx]))
if output is None:
return None
if watch:
print("phases=%s, amp %s output=%s" % (phases, indx, output))
status[indx] = output[0]
output = output[1]
outputs[indx] = output
inputs[0] = output
inputs[indx+1] = output
return output
class Amp(object):
def __init__(self, letter='Z', text=None):
self.letter = letter
self.text = text
self.computer = None
def run(self, phase=0, inp=0):
self.computer = intcode.IntCode(text=self.text)
result = self.computer.run(inp=[phase, inp])
if result != intcode.STOP_HLT:
print("amplifier %s input=[%d,%d] ended with %d" %
(self.letter, phase, inp, result))
return None
output = self.computer.outputs()
if len(output) != 1:
print("amplifier %s input=[%d,%d] ended produced %d outputs" %
(self.letter, phase, inp, len(output)))
return None
return output[0]
def fb_run(self, phase=0, inp=0):
if self.computer is None:
self.computer = intcode.IntCode(text=self.text)
inp = [phase, inp]
else:
inp = [inp]
result = self.computer.run(inp=inp)
if result not in (intcode.STOP_HLT, intcode.STOP_INP):
print("amplifier %s input=%s ended with %d" % (self.letter, inp, result))
return None
output = self.computer.outputs()
if len(output) != 1:
print("amplifier %s input=%s ended produced %d outputs" %
(self.letter, inp, len(output)))
return None
return (result, output[0])
if __name__ == '__main__':
pass
| true | true |
f71caa1994d573bc106273e8c7f0d7dd6210d086 | 61,405 | py | Python | configure.py | luyangny/Cat-detection | 6bdf989520ca6aba4cde30e48a6ea869db6eeee6 | [
"Apache-2.0"
] | null | null | null | configure.py | luyangny/Cat-detection | 6bdf989520ca6aba4cde30e48a6ea869db6eeee6 | [
"Apache-2.0"
] | null | null | null | configure.py | luyangny/Cat-detection | 6bdf989520ca6aba4cde30e48a6ea869db6eeee6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""configure script to get build parameters from user."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import errno
import os
import platform
import re
import subprocess
import sys
# pylint: disable=g-import-not-at-top
try:
from shutil import which
except ImportError:
from distutils.spawn import find_executable as which
# pylint: enable=g-import-not-at-top
_DEFAULT_CUDA_VERSION = '9.0'
_DEFAULT_CUDNN_VERSION = '7'
_DEFAULT_NCCL_VERSION = '2.2'
_DEFAULT_CUDA_COMPUTE_CAPABILITIES = '3.5,7.0'
_DEFAULT_CUDA_PATH = '/usr/local/cuda'
_DEFAULT_CUDA_PATH_LINUX = '/opt/cuda'
_DEFAULT_CUDA_PATH_WIN = ('C:/Program Files/NVIDIA GPU Computing '
'Toolkit/CUDA/v%s' % _DEFAULT_CUDA_VERSION)
_TF_OPENCL_VERSION = '1.2'
_DEFAULT_COMPUTECPP_TOOLKIT_PATH = '/usr/local/computecpp'
_DEFAULT_TRISYCL_INCLUDE_DIR = '/usr/local/triSYCL/include'
_SUPPORTED_ANDROID_NDK_VERSIONS = [10, 11, 12, 13, 14, 15, 16]
_DEFAULT_PROMPT_ASK_ATTEMPTS = 10
_TF_WORKSPACE_ROOT = os.path.abspath(os.path.dirname(__file__))
_TF_BAZELRC_FILENAME = '.tf_configure.bazelrc'
_TF_BAZELRC = os.path.join(_TF_WORKSPACE_ROOT, _TF_BAZELRC_FILENAME)
_TF_WORKSPACE = os.path.join(_TF_WORKSPACE_ROOT, 'WORKSPACE')
if platform.machine() == 'ppc64le':
_DEFAULT_TENSORRT_PATH_LINUX = '/usr/lib/powerpc64le-linux-gnu/'
else:
_DEFAULT_TENSORRT_PATH_LINUX = '/usr/lib/%s-linux-gnu' % platform.machine()
class UserInputError(Exception):
  """Raised when scripted or environment-provided input stays invalid."""
  pass
def is_windows():
  """Return True when running on a native Windows platform."""
  return 'Windows' == platform.system()
def is_linux():
  """Return True when running on Linux."""
  return 'Linux' == platform.system()
def is_macos():
  """Return True when running on macOS (Darwin)."""
  return 'Darwin' == platform.system()
def is_ppc64le():
  """Return True when running on a ppc64le (POWER little-endian) machine."""
  return 'ppc64le' == platform.machine()
def is_cygwin():
  """Return True when running under a Cygwin environment on Windows."""
  system_name = platform.system()
  return system_name.startswith('CYGWIN_NT')
def get_input(question):
  """Prompt the user with `question` and return the answer ('' on EOF).

  Works under both Python 2 (raw_input) and Python 3 (input).
  """
  try:
    reader = raw_input  # Python 2
  except NameError:
    reader = input  # Python 3; pylint: disable=bad-builtin
  try:
    return reader(question)
  except EOFError:
    return ''
def symlink_force(target, link_name):
  """Create a symlink, replacing any existing one (like 'ln -sf').

  Args:
    target: path the link should point at.
    link_name: path of the symlink to create.
  """
  try:
    os.symlink(target, link_name)
  except OSError as e:
    # Only an already-existing link is recoverable: remove it and retry.
    if e.errno != errno.EEXIST:
      raise e
    os.remove(link_name)
    os.symlink(target, link_name)
def sed_in_place(filename, old, new):
  """Replace every occurrence of `old` with `new` inside `filename`.

  Args:
    filename: string for filename.
    old: string to replace.
    new: new string to replace to.
  """
  with open(filename, 'r') as f:
    contents = f.read()
  with open(filename, 'w') as f:
    f.write(contents.replace(old, new))
def write_to_bazelrc(line):
  """Append one line to the generated .tf_configure.bazelrc file."""
  with open(_TF_BAZELRC, 'a') as f:
    f.write('%s\n' % line)
def write_action_env_to_bazelrc(var_name, var):
  """Record a 'build --action_env VAR="value"' line in the generated bazelrc."""
  line = 'build --action_env %s="%s"' % (var_name, str(var))
  write_to_bazelrc(line)
def run_shell(cmd, allow_non_zero=False):
  """Run a command and return its stripped, UTF-8 decoded stdout.

  Args:
    cmd: argument list for subprocess.
    allow_non_zero: when True, a non-zero exit status is tolerated and the
      output captured before the failure is returned instead of raising.

  Returns:
    The command's standard output as a stripped string.
  """
  if not allow_non_zero:
    output = subprocess.check_output(cmd)
  else:
    try:
      output = subprocess.check_output(cmd)
    except subprocess.CalledProcessError as e:
      output = e.output
  return output.decode('UTF-8').strip()
def cygpath(path):
  """Return the absolute form of `path` using forward slashes only."""
  absolute = os.path.abspath(path)
  return absolute.replace('\\', '/')
def get_python_path(environ_cp, python_bin_path):
  """Get the python site package paths.

  Args:
    environ_cp: copy of the os.environ.
    python_bin_path: path of the python interpreter to query.

  Returns:
    List of existing directories gathered from $PYTHONPATH plus the
    interpreter's own site-packages directories.
  """
  python_paths = []
  if environ_cp.get('PYTHONPATH'):
    python_paths = environ_cp.get('PYTHONPATH').split(':')
  try:
    # Ask the target interpreter itself for its site-packages directories.
    library_paths = run_shell([
        python_bin_path, '-c',
        'import site; print("\\n".join(site.getsitepackages()))'
    ]).split('\n')
  except subprocess.CalledProcessError:
    # site.getsitepackages() can fail (e.g. inside a virtualenv); fall back
    # to distutils' single library directory.
    library_paths = [
        run_shell([
            python_bin_path, '-c',
            'from distutils.sysconfig import get_python_lib;'
            'print(get_python_lib())'
        ])
    ]
  # Deduplicate and keep only directories that actually exist.
  all_paths = set(python_paths + library_paths)
  paths = []
  for path in all_paths:
    if os.path.isdir(path):
      paths.append(path)
  return paths
def get_python_major_version(python_bin_path):
  """Get the python major version.

  Args:
    python_bin_path: path of the python interpreter to query.

  Returns:
    The interpreter's major version ('2' or '3') as a string.
  """
  return run_shell([python_bin_path, '-c', 'import sys; print(sys.version[0])'])
def setup_python(environ_cp):
  """Setup python related env variables.

  Prompts for (or reads preset) PYTHON_BIN_PATH and PYTHON_LIB_PATH,
  validates them, and records them in the generated bazelrc and in
  tools/python_bin_path.sh.

  Args:
    environ_cp: copy of the os.environ.
  """
  # Get PYTHON_BIN_PATH, default is the current running python.
  default_python_bin_path = sys.executable
  ask_python_bin_path = ('Please specify the location of python. [Default is '
                         '%s]: ') % default_python_bin_path
  while True:
    python_bin_path = get_from_env_or_user_or_default(
        environ_cp, 'PYTHON_BIN_PATH', ask_python_bin_path,
        default_python_bin_path)
    # Check if the path is valid
    if os.path.isfile(python_bin_path) and os.access(python_bin_path, os.X_OK):
      break
    elif not os.path.exists(python_bin_path):
      print('Invalid python path: %s cannot be found.' % python_bin_path)
    else:
      print('%s is not executable. Is it the python binary?' % python_bin_path)
    # Clear the preset so the next loop iteration re-prompts.
    environ_cp['PYTHON_BIN_PATH'] = ''
  # Convert python path to Windows style before checking lib and version
  if is_windows() or is_cygwin():
    python_bin_path = cygpath(python_bin_path)
  # Get PYTHON_LIB_PATH
  python_lib_path = environ_cp.get('PYTHON_LIB_PATH')
  if not python_lib_path:
    python_lib_paths = get_python_path(environ_cp, python_bin_path)
    if environ_cp.get('USE_DEFAULT_PYTHON_LIB_PATH') == '1':
      python_lib_path = python_lib_paths[0]
    else:
      print('Found possible Python library paths:\n %s' %
            '\n '.join(python_lib_paths))
      default_python_lib_path = python_lib_paths[0]
      python_lib_path = get_input(
          'Please input the desired Python library path to use. '
          'Default is [%s]\n' % python_lib_paths[0])
      if not python_lib_path:
        python_lib_path = default_python_lib_path
    environ_cp['PYTHON_LIB_PATH'] = python_lib_path
  # NOTE(review): python_major_version is computed but never used below.
  python_major_version = get_python_major_version(python_bin_path)
  # Convert python path to Windows style before writing into bazel.rc
  if is_windows() or is_cygwin():
    python_lib_path = cygpath(python_lib_path)
  # Set-up env variables used by python_configure.bzl
  write_action_env_to_bazelrc('PYTHON_BIN_PATH', python_bin_path)
  write_action_env_to_bazelrc('PYTHON_LIB_PATH', python_lib_path)
  write_to_bazelrc('build --python_path=\"%s"' % python_bin_path)
  environ_cp['PYTHON_BIN_PATH'] = python_bin_path
  # Write tools/python_bin_path.sh
  with open(
      os.path.join(_TF_WORKSPACE_ROOT, 'tools', 'python_bin_path.sh'),
      'w') as f:
    f.write('export PYTHON_BIN_PATH="%s"' % python_bin_path)
def reset_tf_configure_bazelrc(workspace_path):
  """Reset file that contains customized config settings.

  Truncates the generated .tf_configure.bazelrc, then rewrites the
  workspace-level .bazelrc so it imports the generated file exactly once.

  Args:
    workspace_path: directory holding the workspace .bazelrc.
  """
  open(_TF_BAZELRC, 'w').close()
  bazelrc_path = os.path.join(workspace_path, '.bazelrc')
  data = []
  if os.path.exists(bazelrc_path):
    with open(bazelrc_path, 'r') as f:
      data = f.read().splitlines()
  with open(bazelrc_path, 'w') as f:
    for l in data:
      # Drop any stale import line referencing the generated file.
      if _TF_BAZELRC_FILENAME in l:
        continue
      f.write('%s\n' % l)
    if is_windows():
      tf_bazelrc_path = _TF_BAZELRC.replace('\\', '/')
    else:
      tf_bazelrc_path = _TF_BAZELRC
    f.write('import %s\n' % tf_bazelrc_path)
def cleanup_makefile():
  """Delete any leftover BUILD files from the Makefile build.

  These files could interfere with Bazel parsing.
  """
  download_dir = os.path.join(_TF_WORKSPACE_ROOT, 'tensorflow', 'contrib',
                              'makefile', 'downloads')
  if not os.path.isdir(download_dir):
    return
  for root, _, filenames in os.walk(download_dir):
    for name in filenames:
      if name.endswith('BUILD'):
        os.remove(os.path.join(root, name))
def get_var(environ_cp,
            var_name,
            query_item,
            enabled_by_default,
            question=None,
            yes_reply=None,
            no_reply=None):
  """Get boolean input from user.

  If var_name is not set in env, ask user to enable query_item or not. If the
  response is empty, use the default.

  Args:
    environ_cp: copy of the os.environ.
    var_name: string for name of environment variable, e.g. "TF_NEED_HDFS".
    query_item: string for feature related to the variable, e.g. "Hadoop File
      System".
    enabled_by_default: boolean for default behavior.
    question: optional string for how to ask for user input.
    yes_reply: optional string for reply when feature is enabled.
    no_reply: optional string for reply when feature is disabled.

  Returns:
    boolean value of the variable.

  Raises:
    UserInputError: if an environment variable is set, but it cannot be
      interpreted as a boolean indicator, assume that the user has made a
      scripting error, and will continue to provide invalid input.
      Raise the error to avoid infinitely looping.
  """
  # Fill in the default prompt strings from query_item.
  if not question:
    question = 'Do you wish to build TensorFlow with %s support?' % query_item
  if not yes_reply:
    yes_reply = '%s support will be enabled for TensorFlow.' % query_item
  if not no_reply:
    no_reply = 'No %s' % yes_reply
  yes_reply += '\n'
  no_reply += '\n'
  question += ' [Y/n]: ' if enabled_by_default else ' [y/N]: '

  # A preset environment value is interpreted as a boolean, never prompted.
  preset = environ_cp.get(var_name)
  if preset is not None:
    true_strings = ('1', 't', 'true', 'y', 'yes')
    false_strings = ('0', 'f', 'false', 'n', 'no')
    normalized = preset.strip().lower()
    if normalized in true_strings:
      return True
    if normalized in false_strings:
      return False
    raise UserInputError(
        'Environment variable %s must be set as a boolean indicator.\n'
        'The following are accepted as TRUE : %s.\n'
        'The following are accepted as FALSE: %s.\n'
        'Current value is %s.' % (var_name, ', '.join(true_strings),
                                  ', '.join(false_strings), preset))

  # Otherwise, prompt until the user gives y, n, or an empty (default) answer.
  while True:
    raw_answer = get_input(question)
    answer = raw_answer.strip().lower()
    if answer == 'y' or (not answer and enabled_by_default):
      print(yes_reply)
      return True
    if answer == 'n' or (not answer and not enabled_by_default):
      print(no_reply)
      return False
    print('Invalid selection: %s' % raw_answer)
def set_build_var(environ_cp,
                  var_name,
                  query_item,
                  option_name,
                  enabled_by_default,
                  bazel_config_name=None):
  """Set if query_item will be enabled for the build.

  Ask user if query_item will be enabled. Default is used if no input is given.
  Set subprocess environment variable and write to .bazelrc if enabled.

  Args:
    environ_cp: copy of the os.environ.
    var_name: string for name of environment variable, e.g. "TF_NEED_HDFS".
    query_item: string for feature related to the variable, e.g. "Hadoop File
      System".
    option_name: string for option to define in .bazelrc.
    enabled_by_default: boolean for default behavior.
    bazel_config_name: Name for Bazel --config argument to enable build feature.
  """
  enabled = get_var(environ_cp, var_name, query_item, enabled_by_default)
  var = str(int(enabled))
  environ_cp[var_name] = var
  if var == '1':
    write_to_bazelrc('build --define %s=true' % option_name)
  elif bazel_config_name is not None:
    # TODO(mikecase): Migrate all users of configure.py to use --config Bazel
    # options and not to set build configs through environment variables.
    write_to_bazelrc(
        'build:%s --define %s=true' % (bazel_config_name, option_name))
def set_action_env_var(environ_cp,
                       var_name,
                       query_item,
                       enabled_by_default,
                       question=None,
                       yes_reply=None,
                       no_reply=None):
  """Set boolean action_env variable.

  Ask user if query_item will be enabled. Default is used if no input is given.
  Set environment variable and write to .bazelrc.

  Args:
    environ_cp: copy of the os.environ.
    var_name: string for name of environment variable, e.g. "TF_NEED_HDFS".
    query_item: string for feature related to the variable, e.g. "Hadoop File
      System".
    enabled_by_default: boolean for default behavior.
    question: optional string for how to ask for user input.
    yes_reply: optional string for reply when feature is enabled.
    no_reply: optional string for reply when feature is disabled.
  """
  enabled = get_var(environ_cp, var_name, query_item, enabled_by_default,
                    question, yes_reply, no_reply)
  var = int(enabled)
  write_action_env_to_bazelrc(var_name, var)
  environ_cp[var_name] = str(var)
def convert_version_to_int(version):
  """Convert a version number to a integer that can be used to compare.

  Version strings of the form X.YZ and X.Y.Z-xxxxx are supported. The
  'xxxxx' part, for instance 'homebrew' on OS/X, is ignored.

  Args:
    version: a version to be converted

  Returns:
    An integer if converted successfully, otherwise return None.
  """
  # Discard any '-suffix' part, then split on dots.
  segments = version.split('-')[0].split('.')
  if not all(seg.isdigit() for seg in segments):
    return None
  # Zero-pad each segment to three digits so the concatenation compares
  # correctly as an integer (e.g. '1.2.3' -> 1002003).
  return int(''.join('%03d' % int(seg) for seg in segments))
def check_bazel_version(min_version):
  """Check installed bazel version is at least min_version.

  Exits the process (status 0) when bazel is missing or older than
  min_version.

  Args:
    min_version: string for minimum bazel version.

  Returns:
    The bazel version detected.
  """
  if which('bazel') is None:
    print('Cannot find bazel. Please install bazel.')
    sys.exit(0)
  curr_version = run_shell(
      ['bazel', '--batch', '--bazelrc=/dev/null', 'version'])
  # 'bazel version' prints several lines; the release is on 'Build label:'.
  for line in curr_version.split('\n'):
    if 'Build label: ' in line:
      curr_version = line.split('Build label: ')[1]
      break
  min_version_int = convert_version_to_int(min_version)
  curr_version_int = convert_version_to_int(curr_version)
  # Check if current bazel version can be detected properly.
  if not curr_version_int:
    print('WARNING: current bazel installation is not a release version.')
    print('Make sure you are running at least bazel %s' % min_version)
    return curr_version
  print('You have bazel %s installed.' % curr_version)
  if curr_version_int < min_version_int:
    print('Please upgrade your bazel installation to version %s or higher to '
          'build TensorFlow!' % min_version)
    sys.exit(0)
  return curr_version
def set_cc_opt_flags(environ_cp):
  """Set up architecture-dependent optimization flags.

  Also append CC optimization flags to bazel.rc..

  Args:
    environ_cp: copy of the os.environ.
  """
  if is_ppc64le():
    # gcc on ppc64le does not support -march, use mcpu instead
    default_cc_opt_flags = '-mcpu=native'
  elif is_windows():
    default_cc_opt_flags = '/arch:AVX'
  else:
    default_cc_opt_flags = '-march=native'
  question = ('Please specify optimization flags to use during compilation when'
              ' bazel option "--config=opt" is specified [Default is %s]: '
             ) % default_cc_opt_flags
  cc_opt_flags = get_from_env_or_user_or_default(environ_cp, 'CC_OPT_FLAGS',
                                                 question, default_cc_opt_flags)
  # Each whitespace-separated flag becomes its own --copt under --config=opt.
  for opt in cc_opt_flags.split():
    write_to_bazelrc('build:opt --copt=%s' % opt)
  # It should be safe on the same build host.
  if not is_ppc64le() and not is_windows():
    write_to_bazelrc('build:opt --host_copt=-march=native')
  write_to_bazelrc('build:opt --define with_default_optimizations=true')
def set_tf_cuda_clang(environ_cp):
  """set TF_CUDA_CLANG action_env.

  Args:
    environ_cp: copy of the os.environ.
  """
  # Delegate the prompt/record work to set_action_env_var.
  set_action_env_var(
      environ_cp,
      'TF_CUDA_CLANG',
      None,
      False,
      question='Do you want to use clang as CUDA compiler?',
      yes_reply='Clang will be used as CUDA compiler.',
      no_reply='nvcc will be used as CUDA compiler.')
def set_tf_download_clang(environ_cp):
  """Set TF_DOWNLOAD_CLANG action_env."""
  # Delegate the prompt/record work to set_action_env_var.
  set_action_env_var(
      environ_cp,
      'TF_DOWNLOAD_CLANG',
      None,
      False,
      question='Do you wish to download a fresh release of clang? (Experimental)',
      yes_reply='Clang will be downloaded and used to compile tensorflow.',
      no_reply='Clang will not be downloaded.')
def get_from_env_or_user_or_default(environ_cp, var_name, ask_for_var,
                                    var_default):
  """Get var_name either from env, or user or default.

  If var_name has been set as environment variable, use the preset value, else
  ask for user input. If no input is provided, the default is used.

  Args:
    environ_cp: copy of the os.environ.
    var_name: string for name of environment variable, e.g. "TF_NEED_HDFS".
    ask_for_var: string for how to ask for user input.
    var_default: default value string.

  Returns:
    string value for var_name
  """
  preset = environ_cp.get(var_name)
  if preset:
    return preset
  answer = get_input(ask_for_var)
  print('\n')
  return answer if answer else var_default
def set_clang_cuda_compiler_path(environ_cp):
  """Set CLANG_CUDA_COMPILER_PATH.

  Prompts (or reads the preset environment value) until an existing clang
  path is supplied, then records it for the bazel build.

  Args:
    environ_cp: copy of the os.environ.
  """
  default_clang_path = which('clang') or ''
  ask_clang_path = ('Please specify which clang should be used as device and '
                    'host compiler. [Default is %s]: ') % default_clang_path
  while True:
    clang_cuda_compiler_path = get_from_env_or_user_or_default(
        environ_cp, 'CLANG_CUDA_COMPILER_PATH', ask_clang_path,
        default_clang_path)
    if os.path.exists(clang_cuda_compiler_path):
      break
    # Reset and retry
    print('Invalid clang path: %s cannot be found.' % clang_cuda_compiler_path)
    environ_cp['CLANG_CUDA_COMPILER_PATH'] = ''
  # Set CLANG_CUDA_COMPILER_PATH
  environ_cp['CLANG_CUDA_COMPILER_PATH'] = clang_cuda_compiler_path
  write_action_env_to_bazelrc('CLANG_CUDA_COMPILER_PATH',
                              clang_cuda_compiler_path)
def prompt_loop_or_load_from_env(environ_cp,
                                 var_name,
                                 var_default,
                                 ask_for_var,
                                 check_success,
                                 error_msg,
                                 suppress_default_error=False,
                                 n_ask_attempts=_DEFAULT_PROMPT_ASK_ATTEMPTS):
  """Loop over user prompts for an ENV param until receiving a valid response.

  For the env param var_name, read from the environment or verify user input
  until receiving valid input. When done, set var_name in the environ_cp to its
  new value.

  Args:
    environ_cp: (Dict) copy of the os.environ.
    var_name: (String) string for name of environment variable, e.g. "TF_MYVAR".
    var_default: (String) default value string.
    ask_for_var: (String) string for how to ask for user input.
    check_success: (Function) function that takes one argument and returns a
      boolean. Should return True if the value provided is considered valid. May
      contain a complex error message if error_msg does not provide enough
      information. In that case, set suppress_default_error to True.
    error_msg: (String) String with one and only one '%s'. Formatted with each
      invalid response upon check_success(input) failure.
    suppress_default_error: (Bool) Suppress the above error message in favor of
      one from the check_success function.
    n_ask_attempts: (Integer) Number of times to query for valid input before
      raising an error and quitting.

  Returns:
    [String] The value of var_name after querying for input.

  Raises:
    UserInputError: if a query has been attempted n_ask_attempts times without
      success, assume that the user has made a scripting error, and will
      continue to provide invalid input. Raise the error to avoid infinitely
      looping.
  """
  # A preset environment value takes precedence over var_default.
  default = environ_cp.get(var_name) or var_default
  full_query = '%s [Default is %s]: ' % (
      ask_for_var,
      default,
  )
  for _ in range(n_ask_attempts):
    val = get_from_env_or_user_or_default(environ_cp, var_name, full_query,
                                          default)
    if check_success(val):
      break
    if not suppress_default_error:
      print(error_msg % val)
    # Clear the (invalid) preset so the next iteration re-prompts.
    environ_cp[var_name] = ''
  else:
    # for/else: runs only when every attempt was exhausted without a break.
    raise UserInputError(
        'Invalid %s setting was provided %d times in a row. '
        'Assuming to be a scripting mistake.' % (var_name, n_ask_attempts))
  environ_cp[var_name] = val
  return val
def create_android_ndk_rule(environ_cp):
  """Set ANDROID_NDK_HOME and write Android NDK WORKSPACE rule.

  Args:
    environ_cp: copy of the os.environ.
  """
  # Pick a per-platform default location for the NDK bundle.
  if is_windows() or is_cygwin():
    default_ndk_path = cygpath(
        '%s/Android/Sdk/ndk-bundle' % environ_cp['APPDATA'])
  elif is_macos():
    default_ndk_path = '%s/library/Android/Sdk/ndk-bundle' % environ_cp['HOME']
  else:
    default_ndk_path = '%s/Android/Sdk/ndk-bundle' % environ_cp['HOME']

  def valid_ndk_path(path):
    # A real NDK install carries a source.properties file at its root.
    return (os.path.exists(path) and
            os.path.exists(os.path.join(path, 'source.properties')))

  android_ndk_home_path = prompt_loop_or_load_from_env(
      environ_cp,
      var_name='ANDROID_NDK_HOME',
      var_default=default_ndk_path,
      ask_for_var='Please specify the home path of the Android NDK to use.',
      check_success=valid_ndk_path,
      error_msg=('The path %s or its child file "source.properties" '
                 'does not exist.'))
  write_action_env_to_bazelrc('ANDROID_NDK_HOME', android_ndk_home_path)
  write_action_env_to_bazelrc('ANDROID_NDK_API_LEVEL',
                              check_ndk_level(android_ndk_home_path))
def create_android_sdk_rule(environ_cp):
  """Set Android variables and write Android SDK WORKSPACE rule."""
  # Platform-specific default for the SDK root.
  if is_windows() or is_cygwin():
    default_sdk_path = cygpath('%s/Android/Sdk' % environ_cp['APPDATA'])
  elif is_macos():
    default_sdk_path = '%s/library/Android/Sdk' % environ_cp['HOME']
  else:
    default_sdk_path = '%s/Android/Sdk' % environ_cp['HOME']

  def valid_sdk_path(path):
    # An SDK install must contain both 'platforms' and 'build-tools'.
    return (os.path.exists(path) and
            os.path.exists(os.path.join(path, 'platforms')) and
            os.path.exists(os.path.join(path, 'build-tools')))

  android_sdk_home_path = prompt_loop_or_load_from_env(
      environ_cp,
      var_name='ANDROID_SDK_HOME',
      var_default=default_sdk_path,
      ask_for_var='Please specify the home path of the Android SDK to use.',
      check_success=valid_sdk_path,
      error_msg=('Either %s does not exist, or it does not contain the '
                 'subdirectories "platforms" and "build-tools".'))

  # Offer the installed API levels (directory names look like 'android-28').
  platforms = os.path.join(android_sdk_home_path, 'platforms')
  api_levels = sorted(os.listdir(platforms))
  api_levels = [x.replace('android-', '') for x in api_levels]

  def valid_api_level(api_level):
    return os.path.exists(
        os.path.join(android_sdk_home_path, 'platforms',
                     'android-' + api_level))

  android_api_level = prompt_loop_or_load_from_env(
      environ_cp,
      var_name='ANDROID_API_LEVEL',
      var_default=api_levels[-1],  # newest installed level
      ask_for_var=('Please specify the Android SDK API level to use. '
                   '[Available levels: %s]') % api_levels,
      check_success=valid_api_level,
      error_msg='Android-%s is not present in the SDK path.')

  # Offer the installed build-tools versions.
  build_tools = os.path.join(android_sdk_home_path, 'build-tools')
  versions = sorted(os.listdir(build_tools))

  def valid_build_tools(version):
    return os.path.exists(
        os.path.join(android_sdk_home_path, 'build-tools', version))

  android_build_tools_version = prompt_loop_or_load_from_env(
      environ_cp,
      var_name='ANDROID_BUILD_TOOLS_VERSION',
      var_default=versions[-1],  # newest installed version
      ask_for_var=('Please specify an Android build tools version to use. '
                   '[Available versions: %s]') % versions,
      check_success=valid_build_tools,
      error_msg=('The selected SDK does not have build-tools version %s '
                 'available.'))

  write_action_env_to_bazelrc('ANDROID_BUILD_TOOLS_VERSION',
                              android_build_tools_version)
  write_action_env_to_bazelrc('ANDROID_SDK_API_LEVEL', android_api_level)
  write_action_env_to_bazelrc('ANDROID_SDK_HOME', android_sdk_home_path)
def check_ndk_level(android_ndk_home_path):
  """Check the revision number of an Android NDK path."""
  properties_path = '%s/source.properties' % android_ndk_home_path
  if is_windows() or is_cygwin():
    properties_path = cygpath(properties_path)
  with open(properties_path, 'r') as f:
    filedata = f.read()

  # source.properties carries a line like 'Pkg.Revision = 16.1.4479499'.
  revision = re.search(r'Pkg.Revision = (\d+)', filedata)
  if not revision:
    raise Exception('Unable to parse NDK revision.')
  ndk_api_level = revision.group(1)

  if int(ndk_api_level) not in _SUPPORTED_ANDROID_NDK_VERSIONS:
    print('WARNING: The API level of the NDK in %s is %s, which is not '
          'supported by Bazel (officially supported versions: %s). Please use '
          'another version. Compiling Android targets may result in confusing '
          'errors.\n' % (android_ndk_home_path, ndk_api_level,
                         _SUPPORTED_ANDROID_NDK_VERSIONS))
  return ndk_api_level
def set_gcc_host_compiler_path(environ_cp):
  """Set GCC_HOST_COMPILER_PATH."""
  gcc_default = which('gcc') or ''
  # Prefer the gcc the CUDA toolkit symlinks to, when such a link exists.
  cuda_bin_symlink = '%s/bin/gcc' % environ_cp.get('CUDA_TOOLKIT_PATH')
  if os.path.islink(cuda_bin_symlink):
    # os.readlink is only available in linux
    gcc_default = os.path.realpath(cuda_bin_symlink)

  gcc_host_compiler_path = prompt_loop_or_load_from_env(
      environ_cp,
      var_name='GCC_HOST_COMPILER_PATH',
      var_default=gcc_default,
      ask_for_var=
      'Please specify which gcc should be used by nvcc as the host compiler.',
      check_success=os.path.exists,
      error_msg='Invalid gcc path. %s cannot be found.',
  )
  write_action_env_to_bazelrc('GCC_HOST_COMPILER_PATH', gcc_host_compiler_path)
def reformat_version_sequence(version_str, sequence_count):
  """Reformat the version string to have the given number of sequences.

  Truncates or zero-pads a dotted version string to exactly
  `sequence_count` components. For example:
      Given (7, 2) -> 7.0
            (7.0.1, 2) -> 7.0
            (5, 1) -> 5
            (5.0.3.2, 1) -> 5

  Args:
    version_str: String, the version string.
    sequence_count: int, an integer.

  Returns:
    string, reformatted version string.
  """
  parts = version_str.split('.')
  # Pad with zeros when the input has fewer components than requested.
  while len(parts) < sequence_count:
    parts.append('0')
  return '.'.join(parts[:sequence_count])
def set_tf_cuda_version(environ_cp):
  """Set CUDA_TOOLKIT_PATH and TF_CUDA_VERSION.

  Prompts up to _DEFAULT_PROMPT_ASK_ATTEMPTS times for a CUDA version and
  toolkit location, validating that the matching CUDA runtime library exists
  at the chosen path, then records both values in environ_cp and the bazelrc.

  Args:
    environ_cp: copy of the os.environ.

  Raises:
    UserInputError: if no valid toolkit was provided after the maximum
      number of attempts.
  """
  ask_cuda_version = (
      'Please specify the CUDA SDK version you want to use. '
      '[Leave empty to default to CUDA %s]: ') % _DEFAULT_CUDA_VERSION

  for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
    # Configure the Cuda SDK version to use.
    tf_cuda_version = get_from_env_or_user_or_default(
        environ_cp, 'TF_CUDA_VERSION', ask_cuda_version, _DEFAULT_CUDA_VERSION)
    # Normalize to major.minor, e.g. '9' -> '9.0'.
    tf_cuda_version = reformat_version_sequence(str(tf_cuda_version), 2)

    # Find out where the CUDA toolkit is installed
    default_cuda_path = _DEFAULT_CUDA_PATH
    if is_windows() or is_cygwin():
      default_cuda_path = cygpath(
          environ_cp.get('CUDA_PATH', _DEFAULT_CUDA_PATH_WIN))
    elif is_linux():
      # If the default doesn't exist, try an alternative default.
      if (not os.path.exists(default_cuda_path)
         ) and os.path.exists(_DEFAULT_CUDA_PATH_LINUX):
        default_cuda_path = _DEFAULT_CUDA_PATH_LINUX
    ask_cuda_path = ('Please specify the location where CUDA %s toolkit is'
                     ' installed. Refer to README.md for more details. '
                     '[Default is %s]: ') % (tf_cuda_version, default_cuda_path)
    cuda_toolkit_path = get_from_env_or_user_or_default(
        environ_cp, 'CUDA_TOOLKIT_PATH', ask_cuda_path, default_cuda_path)
    if is_windows() or is_cygwin():
      cuda_toolkit_path = cygpath(cuda_toolkit_path)

    # Candidate locations of the CUDA runtime library within the toolkit,
    # which differ by platform.
    if is_windows():
      cuda_rt_lib_paths = ['lib/x64/cudart.lib']
    elif is_linux():
      cuda_rt_lib_paths = [
          '%s/libcudart.so.%s' % (x, tf_cuda_version) for x in [
              'lib64',
              'lib/powerpc64le-linux-gnu',
              'lib/x86_64-linux-gnu',
          ]
      ]
    elif is_macos():
      cuda_rt_lib_paths = ['lib/libcudart.%s.dylib' % tf_cuda_version]

    cuda_toolkit_paths_full = [
        os.path.join(cuda_toolkit_path, x) for x in cuda_rt_lib_paths
    ]
    # Accept the toolkit as soon as any candidate runtime library exists.
    if any([os.path.exists(x) for x in cuda_toolkit_paths_full]):
      break

    # Reset and retry
    print('Invalid path to CUDA %s toolkit. %s cannot be found' %
          (tf_cuda_version, cuda_toolkit_paths_full))
    environ_cp['TF_CUDA_VERSION'] = ''
    environ_cp['CUDA_TOOLKIT_PATH'] = ''

  else:
    # for/else: only reached when the loop exhausted every attempt.
    raise UserInputError('Invalid TF_CUDA_SETTING setting was provided %d '
                         'times in a row. Assuming to be a scripting mistake.' %
                         _DEFAULT_PROMPT_ASK_ATTEMPTS)

  # Set CUDA_TOOLKIT_PATH and TF_CUDA_VERSION
  environ_cp['CUDA_TOOLKIT_PATH'] = cuda_toolkit_path
  write_action_env_to_bazelrc('CUDA_TOOLKIT_PATH', cuda_toolkit_path)
  environ_cp['TF_CUDA_VERSION'] = tf_cuda_version
  write_action_env_to_bazelrc('TF_CUDA_VERSION', tf_cuda_version)
def set_tf_cudnn_version(environ_cp):
  """Set CUDNN_INSTALL_PATH and TF_CUDNN_VERSION.

  Prompts up to _DEFAULT_PROMPT_ASK_ATTEMPTS times for a cuDNN version and
  install path, checking the expected library file (and, on Linux, falling
  back to an ldconfig lookup), then records both values.

  Args:
    environ_cp: copy of the os.environ.

  Raises:
    UserInputError: if no valid cuDNN install was provided after the maximum
      number of attempts.
  """
  ask_cudnn_version = (
      'Please specify the cuDNN version you want to use. '
      '[Leave empty to default to cuDNN %s.0]: ') % _DEFAULT_CUDNN_VERSION

  for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
    tf_cudnn_version = get_from_env_or_user_or_default(
        environ_cp, 'TF_CUDNN_VERSION', ask_cudnn_version,
        _DEFAULT_CUDNN_VERSION)
    # Normalize to a single (major) component, e.g. '7.0' -> '7'.
    tf_cudnn_version = reformat_version_sequence(str(tf_cudnn_version), 1)

    # cuDNN commonly lives inside the CUDA toolkit tree, so use it as default.
    default_cudnn_path = environ_cp.get('CUDA_TOOLKIT_PATH')
    ask_cudnn_path = (r'Please specify the location where cuDNN %s library is '
                      'installed. Refer to README.md for more details. [Default'
                      ' is %s]: ') % (tf_cudnn_version, default_cudnn_path)
    cudnn_install_path = get_from_env_or_user_or_default(
        environ_cp, 'CUDNN_INSTALL_PATH', ask_cudnn_path, default_cudnn_path)

    # Result returned from "read" will be used unexpanded. That make "~"
    # unusable. Going through one more level of expansion to handle that.
    cudnn_install_path = os.path.realpath(
        os.path.expanduser(cudnn_install_path))
    if is_windows() or is_cygwin():
      cudnn_install_path = cygpath(cudnn_install_path)

    # Per-platform primary and alternative library file names to probe.
    if is_windows():
      cuda_dnn_lib_path = 'lib/x64/cudnn.lib'
      cuda_dnn_lib_alt_path = 'lib/x64/cudnn.lib'
    elif is_linux():
      cuda_dnn_lib_path = 'lib64/libcudnn.so.%s' % tf_cudnn_version
      cuda_dnn_lib_alt_path = 'libcudnn.so.%s' % tf_cudnn_version
    elif is_macos():
      cuda_dnn_lib_path = 'lib/libcudnn.%s.dylib' % tf_cudnn_version
      cuda_dnn_lib_alt_path = 'libcudnn.%s.dylib' % tf_cudnn_version

    cuda_dnn_lib_path_full = os.path.join(cudnn_install_path, cuda_dnn_lib_path)
    cuda_dnn_lib_alt_path_full = os.path.join(cudnn_install_path,
                                              cuda_dnn_lib_alt_path)
    if os.path.exists(cuda_dnn_lib_path_full) or os.path.exists(
        cuda_dnn_lib_alt_path_full):
      break

    # Try another alternative for Linux
    if is_linux():
      # Ask the dynamic linker cache where libcudnn.so lives.
      ldconfig_bin = which('ldconfig') or '/sbin/ldconfig'
      cudnn_path_from_ldconfig = run_shell([ldconfig_bin, '-p'])
      cudnn_path_from_ldconfig = re.search('.*libcudnn.so .* => (.*)',
                                           cudnn_path_from_ldconfig)
      if cudnn_path_from_ldconfig:
        cudnn_path_from_ldconfig = cudnn_path_from_ldconfig.group(1)
        if os.path.exists(
            '%s.%s' % (cudnn_path_from_ldconfig, tf_cudnn_version)):
          cudnn_install_path = os.path.dirname(cudnn_path_from_ldconfig)
          break

    # Reset and Retry
    print(
        'Invalid path to cuDNN %s toolkit. None of the following files can be '
        'found:' % tf_cudnn_version)
    print(cuda_dnn_lib_path_full)
    print(cuda_dnn_lib_alt_path_full)
    if is_linux():
      print('%s.%s' % (cudnn_path_from_ldconfig, tf_cudnn_version))

    environ_cp['TF_CUDNN_VERSION'] = ''
  else:
    # for/else: only reached when the loop exhausted every attempt.
    raise UserInputError('Invalid TF_CUDNN setting was provided %d '
                         'times in a row. Assuming to be a scripting mistake.' %
                         _DEFAULT_PROMPT_ASK_ATTEMPTS)

  # Set CUDNN_INSTALL_PATH and TF_CUDNN_VERSION
  environ_cp['CUDNN_INSTALL_PATH'] = cudnn_install_path
  write_action_env_to_bazelrc('CUDNN_INSTALL_PATH', cudnn_install_path)
  environ_cp['TF_CUDNN_VERSION'] = tf_cudnn_version
  write_action_env_to_bazelrc('TF_CUDNN_VERSION', tf_cudnn_version)
def is_cuda_compatible(lib, cuda_ver, cudnn_ver):
  """Check compatibility between given library and cudnn/cudart libraries.

  Runs `ldd` on `lib` and inspects any libcudnn/libcudart dependencies. A
  library with no cudnn (or cudart) dependency is treated as compatible with
  any cudnn (or cuda) version.

  Args:
    lib: path to the shared library to inspect.
    cuda_ver: int-encoded CUDA version (see convert_version_to_int).
    cudnn_ver: int-encoded cuDNN version.

  Returns:
    True if the library's cudnn/cudart dependencies (if any) match the
    requested versions.
  """
  ldd_bin = which('ldd') or '/usr/bin/ldd'
  ldd_out = run_shell([ldd_bin, lib], True)
  ldd_out = ldd_out.split(os.linesep)
  # Capture an optional version suffix, e.g. 'libcudnn.so.7 => ...'.
  cudnn_pattern = re.compile('.*libcudnn.so\\.?(.*) =>.*$')
  cuda_pattern = re.compile('.*libcudart.so\\.?(.*) =>.*$')
  cudnn = None
  cudart = None
  cudnn_ok = True  # assume no cudnn dependency by default
  cuda_ok = True  # assume no cuda dependency by default
  for line in ldd_out:
    if 'libcudnn.so' in line:
      cudnn = cudnn_pattern.search(line)
      cudnn_ok = False
    elif 'libcudart.so' in line:
      cudart = cuda_pattern.search(line)
      cuda_ok = False
  # When the soname carries a version, convert it for comparison. If the
  # suffix group is empty, cudnn/cudart stay Match objects and the equality
  # checks below fail (the dependency is unversioned and cannot be verified).
  if cudnn and len(cudnn.group(1)):
    cudnn = convert_version_to_int(cudnn.group(1))
  if cudart and len(cudart.group(1)):
    cudart = convert_version_to_int(cudart.group(1))
  if cudnn is not None:
    cudnn_ok = (cudnn == cudnn_ver)
  if cudart is not None:
    cuda_ok = (cudart == cuda_ver)
  return cudnn_ok and cuda_ok
def set_tf_tensorrt_install_path(environ_cp):
  """Set TENSORRT_INSTALL_PATH and TF_TENSORRT_VERSION.

  Adapted from code contributed by Sami Kama (https://github.com/samikama).

  Args:
    environ_cp: copy of the os.environ.

  Raises:
    ValueError: if this method was called under non-Linux platform.
    UserInputError: if user has provided invalid input multiple times.
  """
  if not is_linux():
    raise ValueError('Currently TensorRT is only supported on Linux platform.')

  # Ask whether to add TensorRT support.
  if str(int(get_var(environ_cp, 'TF_NEED_TENSORRT', 'TensorRT',
                     False))) != '1':
    return

  for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
    ask_tensorrt_path = (r'Please specify the location where TensorRT is '
                         'installed. [Default is %s]:') % (
                             _DEFAULT_TENSORRT_PATH_LINUX)
    trt_install_path = get_from_env_or_user_or_default(
        environ_cp, 'TENSORRT_INSTALL_PATH', ask_tensorrt_path,
        _DEFAULT_TENSORRT_PATH_LINUX)

    # Result returned from "read" will be used unexpanded. That make "~"
    # unusable. Going through one more level of expansion to handle that.
    trt_install_path = os.path.realpath(os.path.expanduser(trt_install_path))

    def find_libs(search_path):
      """Search for libnvinfer.so in "search_path"."""
      fl = set()
      if os.path.exists(search_path) and os.path.isdir(search_path):
        fl.update([
            os.path.realpath(os.path.join(search_path, x))
            for x in os.listdir(search_path)
            if 'libnvinfer.so' in x
        ])
      return fl

    # Collect candidate libraries from the path itself and lib/lib64 subdirs.
    possible_files = find_libs(trt_install_path)
    possible_files.update(find_libs(os.path.join(trt_install_path, 'lib')))
    possible_files.update(find_libs(os.path.join(trt_install_path, 'lib64')))
    cuda_ver = convert_version_to_int(environ_cp['TF_CUDA_VERSION'])
    cudnn_ver = convert_version_to_int(environ_cp['TF_CUDNN_VERSION'])
    nvinfer_pattern = re.compile('.*libnvinfer.so.?(.*)$')
    # Track the newest cuda/cudnn-compatible library: [int_ver, ver_str, path].
    highest_ver = [0, None, None]

    for lib_file in possible_files:
      if is_cuda_compatible(lib_file, cuda_ver, cudnn_ver):
        matches = nvinfer_pattern.search(lib_file)
        if len(matches.groups()) == 0:
          continue
        ver_str = matches.group(1)
        ver = convert_version_to_int(ver_str) if len(ver_str) else 0
        if ver > highest_ver[0]:
          highest_ver = [ver, ver_str, lib_file]
    if highest_ver[1] is not None:
      trt_install_path = os.path.dirname(highest_ver[2])
      tf_tensorrt_version = highest_ver[1]
      break

    # Try another alternative from ldconfig.
    ldconfig_bin = which('ldconfig') or '/sbin/ldconfig'
    ldconfig_output = run_shell([ldconfig_bin, '-p'])
    search_result = re.search('.*libnvinfer.so\\.?([0-9.]*).* => (.*)',
                              ldconfig_output)
    if search_result:
      libnvinfer_path_from_ldconfig = search_result.group(2)
      if os.path.exists(libnvinfer_path_from_ldconfig):
        if is_cuda_compatible(libnvinfer_path_from_ldconfig, cuda_ver,
                              cudnn_ver):
          trt_install_path = os.path.dirname(libnvinfer_path_from_ldconfig)
          tf_tensorrt_version = search_result.group(1)
          break

    # Reset and Retry
    if possible_files:
      print('TensorRT libraries found in one the following directories',
            'are not compatible with selected cuda and cudnn installations')
      print(trt_install_path)
      print(os.path.join(trt_install_path, 'lib'))
      print(os.path.join(trt_install_path, 'lib64'))
      if search_result:
        print(libnvinfer_path_from_ldconfig)
    else:
      print(
          'Invalid path to TensorRT. None of the following files can be found:')
      print(trt_install_path)
      print(os.path.join(trt_install_path, 'lib'))
      print(os.path.join(trt_install_path, 'lib64'))
      if search_result:
        print(libnvinfer_path_from_ldconfig)

  else:
    # for/else: only reached when the loop exhausted every attempt.
    raise UserInputError('Invalid TF_TENSORRT setting was provided %d '
                         'times in a row. Assuming to be a scripting mistake.' %
                         _DEFAULT_PROMPT_ASK_ATTEMPTS)

  # Set TENSORRT_INSTALL_PATH and TF_TENSORRT_VERSION
  environ_cp['TENSORRT_INSTALL_PATH'] = trt_install_path
  write_action_env_to_bazelrc('TENSORRT_INSTALL_PATH', trt_install_path)
  environ_cp['TF_TENSORRT_VERSION'] = tf_tensorrt_version
  write_action_env_to_bazelrc('TF_TENSORRT_VERSION', tf_tensorrt_version)
def set_tf_nccl_install_path(environ_cp):
  """Set NCCL_INSTALL_PATH and TF_NCCL_VERSION.

  Args:
    environ_cp: copy of the os.environ.

  Raises:
    ValueError: if this method was called under non-Linux platform.
    UserInputError: if user has provided invalid input multiple times.
  """
  if not is_linux():
    raise ValueError('Currently NCCL is only supported on Linux platforms.')

  ask_nccl_version = (
      'Please specify the NCCL version you want to use. If NCCL %s is not '
      'installed, then you can use version 1.3 that can be fetched '
      'automatically but it may have worse performance with multiple GPUs. '
      '[Default is %s]: ') % (_DEFAULT_NCCL_VERSION, _DEFAULT_NCCL_VERSION)

  for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
    tf_nccl_version = get_from_env_or_user_or_default(
        environ_cp, 'TF_NCCL_VERSION', ask_nccl_version, _DEFAULT_NCCL_VERSION)
    # Normalize to a single (major) component, e.g. '2.2' -> '2'.
    tf_nccl_version = reformat_version_sequence(str(tf_nccl_version), 1)

    if tf_nccl_version == '1':
      break  # No need to get install path, NCCL 1 is a GitHub repo.

    # TODO(csigg): Look with ldconfig first if we can find the library in paths
    # like /usr/lib/x86_64-linux-gnu and the header file in the corresponding
    # include directory. This is where the NCCL .deb packages install them.
    # Then ask the user if we should use that. Instead of a single
    # NCCL_INSTALL_PATH, pass separate NCCL_LIB_PATH and NCCL_HDR_PATH to
    # nccl_configure.bzl
    default_nccl_path = environ_cp.get('CUDA_TOOLKIT_PATH')
    ask_nccl_path = (r'Please specify the location where NCCL %s library is '
                     'installed. Refer to README.md for more details. [Default '
                     'is %s]:') % (tf_nccl_version, default_nccl_path)
    nccl_install_path = get_from_env_or_user_or_default(
        environ_cp, 'NCCL_INSTALL_PATH', ask_nccl_path, default_nccl_path)

    # Result returned from "read" will be used unexpanded. That make "~"
    # unusable. Going through one more level of expansion to handle that.
    nccl_install_path = os.path.realpath(os.path.expanduser(nccl_install_path))
    if is_windows() or is_cygwin():
      nccl_install_path = cygpath(nccl_install_path)

    # Per-platform library file name to probe.
    if is_windows():
      nccl_lib_path = 'lib/x64/nccl.lib'
    elif is_linux():
      nccl_lib_path = 'lib/libnccl.so.%s' % tf_nccl_version
    elif is_macos():
      nccl_lib_path = 'lib/libnccl.%s.dylib' % tf_nccl_version

    nccl_lib_path = os.path.join(nccl_install_path, nccl_lib_path)
    nccl_hdr_path = os.path.join(nccl_install_path, 'include/nccl.h')
    # Both the library and the header must be present to accept the install.
    if os.path.exists(nccl_lib_path) and os.path.exists(nccl_hdr_path):
      # Set NCCL_INSTALL_PATH
      environ_cp['NCCL_INSTALL_PATH'] = nccl_install_path
      write_action_env_to_bazelrc('NCCL_INSTALL_PATH', nccl_install_path)
      break

    # Reset and Retry
    print('Invalid path to NCCL %s toolkit, %s or %s not found. Please use the '
          'O/S agnostic package of NCCL 2' % (tf_nccl_version, nccl_lib_path,
                                              nccl_hdr_path))

    environ_cp['TF_NCCL_VERSION'] = ''
  else:
    # for/else: only reached when the loop exhausted every attempt.
    raise UserInputError('Invalid TF_NCCL setting was provided %d '
                         'times in a row. Assuming to be a scripting mistake.' %
                         _DEFAULT_PROMPT_ASK_ATTEMPTS)

  # Set TF_NCCL_VERSION
  environ_cp['TF_NCCL_VERSION'] = tf_nccl_version
  write_action_env_to_bazelrc('TF_NCCL_VERSION', tf_nccl_version)
def get_native_cuda_compute_capabilities(environ_cp):
  """Get native cuda compute capabilities.

  Runs the CUDA toolkit's deviceQuery demo binary (when present and
  executable) and extracts the compute capability of each local GPU.

  Args:
    environ_cp: copy of the os.environ.

  Returns:
    string of native cuda compute capabilities, separated by comma.
  """
  device_query_bin = os.path.join(
      environ_cp.get('CUDA_TOOLKIT_PATH'), 'extras/demo_suite/deviceQuery')
  if not (os.path.isfile(device_query_bin) and
          os.access(device_query_bin, os.X_OK)):
    return ''
  try:
    lines = run_shell(device_query_bin).split('\n')
  except subprocess.CalledProcessError:
    return ''
  # deviceQuery prints lines such as 'CUDA Capability Major/Minor ... 7.0'.
  pattern = re.compile('[0-9]*\\.[0-9]*')
  matches = [pattern.search(line) for line in lines if 'Capability' in line]
  return ','.join(m.group() for m in matches if m is not None)
def set_tf_cuda_compute_capabilities(environ_cp):
  """Set TF_CUDA_COMPUTE_CAPABILITIES.

  Repeatedly prompts for a comma-separated list of compute capabilities
  (defaulting to the locally detected ones when available) until every entry
  is a valid 'major.minor' pair with major >= 3, then records the value in
  environ_cp and the bazelrc.

  Args:
    environ_cp: copy of the os.environ.
  """
  while True:
    # Prefer the capabilities reported by the local GPUs as the default.
    native_cuda_compute_capabilities = get_native_cuda_compute_capabilities(
        environ_cp)
    if not native_cuda_compute_capabilities:
      default_cuda_compute_capabilities = _DEFAULT_CUDA_COMPUTE_CAPABILITIES
    else:
      default_cuda_compute_capabilities = native_cuda_compute_capabilities

    ask_cuda_compute_capabilities = (
        'Please specify a list of comma-separated '
        'Cuda compute capabilities you want to '
        'build with.\nYou can find the compute '
        'capability of your device at: '
        'https://developer.nvidia.com/cuda-gpus.\nPlease'
        ' note that each additional compute '
        'capability significantly increases your '
        'build time and binary size. [Default is: %s]: ' %
        default_cuda_compute_capabilities)
    tf_cuda_compute_capabilities = get_from_env_or_user_or_default(
        environ_cp, 'TF_CUDA_COMPUTE_CAPABILITIES',
        ask_cuda_compute_capabilities, default_cuda_compute_capabilities)
    # Check whether all capabilities from the input is valid
    all_valid = True
    # Remove all whitespace characters before splitting the string
    # that users may insert by accident, as this will result in error
    tf_cuda_compute_capabilities = ''.join(tf_cuda_compute_capabilities.split())
    for compute_capability in tf_cuda_compute_capabilities.split(','):
      # BUG FIX: the dot was previously unescaped, so inputs like '3x5'
      # passed validation.
      m = re.match(r'[0-9]+\.[0-9]+', compute_capability)
      if not m:
        # BUG FIX: the format string was missing its '%s' placeholder, which
        # made this print raise a TypeError instead of reporting the error.
        print('Invalid compute capability: %s' % compute_capability)
        all_valid = False
      else:
        ver = int(m.group(0).split('.')[0])
        if ver < 3:
          print('Only compute capabilities 3.0 or higher are supported.')
          all_valid = False

    if all_valid:
      break

    # Reset and Retry
    environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = ''

  # Set TF_CUDA_COMPUTE_CAPABILITIES
  environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = tf_cuda_compute_capabilities
  write_action_env_to_bazelrc('TF_CUDA_COMPUTE_CAPABILITIES',
                              tf_cuda_compute_capabilities)
def set_other_cuda_vars(environ_cp):
  """Set other CUDA related variables."""
  # If CUDA is enabled, always use GPU during build and test.
  config = 'cuda_clang' if environ_cp.get('TF_CUDA_CLANG') == '1' else 'cuda'
  write_to_bazelrc('build --config=%s' % config)
  write_to_bazelrc('test --config=%s' % config)
def set_host_cxx_compiler(environ_cp):
  """Set HOST_CXX_COMPILER."""
  # Fall back to an empty default when g++ is not on PATH.
  gxx_default = which('g++') or ''
  chosen_compiler = prompt_loop_or_load_from_env(
      environ_cp,
      var_name='HOST_CXX_COMPILER',
      var_default=gxx_default,
      ask_for_var=('Please specify which C++ compiler should be used as the '
                   'host C++ compiler.'),
      check_success=os.path.exists,
      error_msg='Invalid C++ compiler path. %s cannot be found.',
  )
  write_action_env_to_bazelrc('HOST_CXX_COMPILER', chosen_compiler)
def set_host_c_compiler(environ_cp):
  """Set HOST_C_COMPILER."""
  # Fall back to an empty default when gcc is not on PATH.
  gcc_default = which('gcc') or ''
  chosen_compiler = prompt_loop_or_load_from_env(
      environ_cp,
      var_name='HOST_C_COMPILER',
      var_default=gcc_default,
      ask_for_var=('Please specify which C compiler should be used as the host '
                   'C compiler.'),
      check_success=os.path.exists,
      error_msg='Invalid C compiler path. %s cannot be found.',
  )
  write_action_env_to_bazelrc('HOST_C_COMPILER', chosen_compiler)
def set_computecpp_toolkit_path(environ_cp):
  """Set COMPUTECPP_TOOLKIT_PATH."""

  def toolkit_exists(toolkit_path):
    """Check if a computecpp toolkit path is valid."""
    # Only Linux has a known runtime library location to verify.
    sycl_rt_lib_path = 'lib/libComputeCpp.so' if is_linux() else ''
    sycl_rt_lib_path_full = os.path.join(toolkit_path, sycl_rt_lib_path)
    if os.path.exists(sycl_rt_lib_path_full):
      return True
    print('Invalid SYCL %s library path. %s cannot be found' %
          (_TF_OPENCL_VERSION, sycl_rt_lib_path_full))
    return False

  computecpp_toolkit_path = prompt_loop_or_load_from_env(
      environ_cp,
      var_name='COMPUTECPP_TOOLKIT_PATH',
      var_default=_DEFAULT_COMPUTECPP_TOOLKIT_PATH,
      ask_for_var=(
          'Please specify the location where ComputeCpp for SYCL %s is '
          'installed.' % _TF_OPENCL_VERSION),
      check_success=toolkit_exists,
      error_msg='Invalid SYCL compiler path. %s cannot be found.',
      suppress_default_error=True)

  write_action_env_to_bazelrc('COMPUTECPP_TOOLKIT_PATH',
                              computecpp_toolkit_path)
def set_trisycl_include_dir(environ_cp):
  """Set TRISYCL_INCLUDE_DIR."""
  ask_trisycl_include_dir = ('Please specify the location of the triSYCL '
                             'include directory. (Use --config=sycl_trisycl '
                             'when building with Bazel) '
                             '[Default is %s]: ') % (
                                 _DEFAULT_TRISYCL_INCLUDE_DIR)

  # Keep asking until the user names a directory that actually exists.
  trisycl_include_dir = get_from_env_or_user_or_default(
      environ_cp, 'TRISYCL_INCLUDE_DIR', ask_trisycl_include_dir,
      _DEFAULT_TRISYCL_INCLUDE_DIR)
  while not os.path.exists(trisycl_include_dir):
    print('Invalid triSYCL include directory, %s cannot be found' %
          (trisycl_include_dir))
    trisycl_include_dir = get_from_env_or_user_or_default(
        environ_cp, 'TRISYCL_INCLUDE_DIR', ask_trisycl_include_dir,
        _DEFAULT_TRISYCL_INCLUDE_DIR)

  # Set TRISYCL_INCLUDE_DIR
  environ_cp['TRISYCL_INCLUDE_DIR'] = trisycl_include_dir
  write_action_env_to_bazelrc('TRISYCL_INCLUDE_DIR', trisycl_include_dir)
def set_mpi_home(environ_cp):
  """Set MPI_HOME.

  Derives a default MPI root from the location of mpirun/mpiexec and prompts
  until a directory containing both 'include' and 'lib' subdirectories is
  given.

  Args:
    environ_cp: copy of the os.environ.
  """
  default_mpi_home = which('mpirun') or which('mpiexec') or ''
  # mpirun lives in <MPI_HOME>/bin, so strip two path components.
  default_mpi_home = os.path.dirname(os.path.dirname(default_mpi_home))

  def valid_mpi_path(mpi_home):
    exists = (
        os.path.exists(os.path.join(mpi_home, 'include')) and
        os.path.exists(os.path.join(mpi_home, 'lib')))
    if not exists:
      # BUG FIX: the second placeholder previously interpolated the boolean
      # os.path.exists(...) result instead of the missing lib path itself.
      print('Invalid path to the MPI Toolkit. %s or %s cannot be found' %
            (os.path.join(mpi_home, 'include'),
             os.path.join(mpi_home, 'lib')))
    return exists

  _ = prompt_loop_or_load_from_env(
      environ_cp,
      var_name='MPI_HOME',
      var_default=default_mpi_home,
      ask_for_var='Please specify the MPI toolkit folder.',
      check_success=valid_mpi_path,
      error_msg='',
      suppress_default_error=True)
def set_other_mpi_vars(environ_cp):
  """Set other MPI related variables.

  Symlinks the MPI headers (and library) out of MPI_HOME into
  third_party/mpi/ and toggles the MPI_LIB_IS_OPENMPI flag in mpi.bzl to
  match the detected MPI implementation.

  Args:
    environ_cp: copy of the os.environ (must contain MPI_HOME).

  Raises:
    ValueError: if libmpi.so is missing from MPI_HOME/lib.
  """
  # Link the MPI header files
  mpi_home = environ_cp.get('MPI_HOME')
  symlink_force('%s/include/mpi.h' % mpi_home, 'third_party/mpi/mpi.h')

  # Determine if we use OpenMPI or MVAPICH, these require different header files
  # to be included here to make bazel dependency checker happy
  if os.path.exists(os.path.join(mpi_home, 'include/mpi_portable_platform.h')):
    # mpi_portable_platform.h only ships with OpenMPI.
    symlink_force(
        os.path.join(mpi_home, 'include/mpi_portable_platform.h'),
        'third_party/mpi/mpi_portable_platform.h')
    # TODO(gunan): avoid editing files in configure
    sed_in_place('third_party/mpi/mpi.bzl', 'MPI_LIB_IS_OPENMPI=False',
                 'MPI_LIB_IS_OPENMPI=True')
  else:
    # MVAPICH / MPICH
    symlink_force(
        os.path.join(mpi_home, 'include/mpio.h'), 'third_party/mpi/mpio.h')
    symlink_force(
        os.path.join(mpi_home, 'include/mpicxx.h'), 'third_party/mpi/mpicxx.h')
    # TODO(gunan): avoid editing files in configure
    sed_in_place('third_party/mpi/mpi.bzl', 'MPI_LIB_IS_OPENMPI=True',
                 'MPI_LIB_IS_OPENMPI=False')

  if os.path.exists(os.path.join(mpi_home, 'lib/libmpi.so')):
    symlink_force(
        os.path.join(mpi_home, 'lib/libmpi.so'), 'third_party/mpi/libmpi.so')
  else:
    raise ValueError('Cannot find the MPI library file in %s/lib' % mpi_home)
def set_system_libs_flag(environ_cp):
  """Write TF_SYSTEM_LIBS and install-prefix defines to the bazelrc.

  Normalizes TF_SYSTEM_LIBS (comma- or whitespace-separated) into a sorted,
  comma-separated list, then forwards PREFIX/LIBDIR/INCLUDEDIR overrides as
  Bazel --define flags when present.

  Args:
    environ_cp: copy of the os.environ.
  """
  syslibs = environ_cp.get('TF_SYSTEM_LIBS', '')
  # Simplified: the previous `syslibs and syslibs != ''` was redundant, since
  # an empty string is already falsy.
  if syslibs:
    if ',' in syslibs:
      syslibs = ','.join(sorted(syslibs.split(',')))
    else:
      syslibs = ','.join(sorted(syslibs.split()))
    write_action_env_to_bazelrc('TF_SYSTEM_LIBS', syslibs)

  if 'PREFIX' in environ_cp:
    write_to_bazelrc('build --define=PREFIX=%s' % environ_cp['PREFIX'])
  if 'LIBDIR' in environ_cp:
    write_to_bazelrc('build --define=LIBDIR=%s' % environ_cp['LIBDIR'])
  if 'INCLUDEDIR' in environ_cp:
    write_to_bazelrc('build --define=INCLUDEDIR=%s' % environ_cp['INCLUDEDIR'])
def set_windows_build_flags(environ_cp):
  """Set Windows specific build options.

  Writes the fixed set of Windows bazelrc flags and optionally the eigen
  strong-inline override (chosen interactively via get_var).

  Args:
    environ_cp: copy of the os.environ.
  """
  # The non-monolithic build is not supported yet
  write_to_bazelrc('build --config monolithic')
  # Suppress warning messages
  write_to_bazelrc('build --copt=-w --host_copt=-w')
  # Output more verbose information when something goes wrong
  write_to_bazelrc('build --verbose_failures')
  # The host and target platforms are the same in Windows build. So we don't
  # have to distinct them. This avoids building the same targets twice.
  write_to_bazelrc('build --distinct_host_configuration=false')
  # Enable short object file path to avoid long path issue on Windows.
  # TODO(pcloudy): Remove this flag when upgrading Bazel to 0.16.0
  # Short object file path will be enabled by default.
  write_to_bazelrc('build --experimental_shortened_obj_file_path=true')
  # When building zip file for some py_binary and py_test targets, don't
  # include its dependencies. This is for:
  # 1. Running python tests against the system installed TF pip package.
  # 2. Avoiding redundant files in
  #    //tensorflow/tools/pip_package:simple_console_windows,
  #    which is a py_binary used during creating TF pip package.
  # See https://github.com/tensorflow/tensorflow/issues/22390
  write_to_bazelrc('build --define=no_tensorflow_py_deps=true')

  if get_var(
      environ_cp, 'TF_OVERRIDE_EIGEN_STRONG_INLINE', 'Eigen strong inline',
      True, ('Would you like to override eigen strong inline for some C++ '
             'compilation to reduce the compilation time?'),
      'Eigen strong inline overridden.', 'Not overriding eigen strong inline, '
      'some compilations could take more than 20 mins.'):
    # Due to a known MSVC compiler issue
    # https://github.com/tensorflow/tensorflow/issues/10521
    # Overriding eigen strong inline speeds up the compiling of
    # conv_grad_ops_3d.cc and conv_ops_3d.cc by 20 minutes,
    # but this also hurts the performance. Let users decide what they want.
    write_to_bazelrc('build --define=override_eigen_strong_inline=true')
def config_info_line(name, help_text):
  """Helper function to print formatted help text for Bazel config options."""
  formatted = '\t--config=%-12s\t# %s' % (name, help_text)
  print(formatted)
def main():
  """Interactively configure the TensorFlow build and write the bazelrc."""
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--workspace',
      type=str,
      default=_TF_WORKSPACE_ROOT,
      help='The absolute path to your active Bazel workspace.')
  args = parser.parse_args()

  # Make a copy of os.environ to be clear when functions and getting and setting
  # environment variables.
  environ_cp = dict(os.environ)

  check_bazel_version('0.15.0')

  reset_tf_configure_bazelrc(args.workspace)
  cleanup_makefile()
  setup_python(environ_cp)

  if is_windows():
    # Pre-answer prompts for features that are unsupported on Windows.
    environ_cp['TF_NEED_AWS'] = '0'
    environ_cp['TF_NEED_GCP'] = '0'
    environ_cp['TF_NEED_HDFS'] = '0'
    environ_cp['TF_NEED_JEMALLOC'] = '0'
    environ_cp['TF_NEED_KAFKA'] = '0'
    environ_cp['TF_NEED_OPENCL_SYCL'] = '0'
    environ_cp['TF_NEED_COMPUTECPP'] = '0'
    environ_cp['TF_NEED_OPENCL'] = '0'
    environ_cp['TF_CUDA_CLANG'] = '0'
    environ_cp['TF_NEED_TENSORRT'] = '0'
    # TODO(ibiryukov): Investigate using clang as a cpu or cuda compiler on
    # Windows.
    environ_cp['TF_DOWNLOAD_CLANG'] = '0'
    environ_cp['TF_ENABLE_XLA'] = '0'
    environ_cp['TF_NEED_MPI'] = '0'
    environ_cp['TF_SET_ANDROID_WORKSPACE'] = '0'

  if is_macos():
    # Pre-answer prompts for features that are unsupported on macOS.
    environ_cp['TF_NEED_JEMALLOC'] = '0'
    environ_cp['TF_NEED_TENSORRT'] = '0'

  # The numpy package on ppc64le uses OpenBLAS which has multi-threading
  # issues that lead to incorrect answers.  Set OMP_NUM_THREADS=1 at
  # runtime to allow the Tensorflow testcases which compare numpy
  # results to Tensorflow results to succeed.
  if is_ppc64le():
    write_action_env_to_bazelrc('OMP_NUM_THREADS', 1)

  # Optional build features: each adds a --config shortcut or define.
  set_build_var(environ_cp, 'TF_NEED_JEMALLOC', 'jemalloc as malloc',
                'with_jemalloc', True)
  set_build_var(environ_cp, 'TF_NEED_GCP', 'Google Cloud Platform',
                'with_gcp_support', True, 'gcp')
  set_build_var(environ_cp, 'TF_NEED_HDFS', 'Hadoop File System',
                'with_hdfs_support', True, 'hdfs')
  set_build_var(environ_cp, 'TF_NEED_AWS', 'Amazon AWS Platform',
                'with_aws_support', True, 'aws')
  set_build_var(environ_cp, 'TF_NEED_KAFKA', 'Apache Kafka Platform',
                'with_kafka_support', True, 'kafka')
  set_build_var(environ_cp, 'TF_ENABLE_XLA', 'XLA JIT', 'with_xla_support',
                False, 'xla')

  # OpenCL SYCL: needs host compilers and either ComputeCpp or triSYCL.
  set_action_env_var(environ_cp, 'TF_NEED_OPENCL_SYCL', 'OpenCL SYCL', False)
  if environ_cp.get('TF_NEED_OPENCL_SYCL') == '1':
    set_host_cxx_compiler(environ_cp)
    set_host_c_compiler(environ_cp)
    set_action_env_var(environ_cp, 'TF_NEED_COMPUTECPP', 'ComputeCPP', True)
    if environ_cp.get('TF_NEED_COMPUTECPP') == '1':
      set_computecpp_toolkit_path(environ_cp)
    else:
      set_trisycl_include_dir(environ_cp)

  set_action_env_var(environ_cp, 'TF_NEED_ROCM', 'ROCm', False)
  if (environ_cp.get('TF_NEED_ROCM') == '1' and
      'LD_LIBRARY_PATH' in environ_cp and
      environ_cp.get('LD_LIBRARY_PATH') != '1'):
    write_action_env_to_bazelrc('LD_LIBRARY_PATH',
                                environ_cp.get('LD_LIBRARY_PATH'))

  # CUDA path: only asked when not preconfigured via TF_CUDA_CONFIG_REPO.
  set_action_env_var(environ_cp, 'TF_NEED_CUDA', 'CUDA', False)
  if (environ_cp.get('TF_NEED_CUDA') == '1' and
      'TF_CUDA_CONFIG_REPO' not in environ_cp):
    set_tf_cuda_version(environ_cp)
    set_tf_cudnn_version(environ_cp)
    if is_linux():
      set_tf_tensorrt_install_path(environ_cp)
      set_tf_nccl_install_path(environ_cp)

    set_tf_cuda_compute_capabilities(environ_cp)
    if 'LD_LIBRARY_PATH' in environ_cp and environ_cp.get(
        'LD_LIBRARY_PATH') != '1':
      write_action_env_to_bazelrc('LD_LIBRARY_PATH',
                                  environ_cp.get('LD_LIBRARY_PATH'))

    set_tf_cuda_clang(environ_cp)
    if environ_cp.get('TF_CUDA_CLANG') == '1':
      # Ask whether we should download the clang toolchain.
      set_tf_download_clang(environ_cp)
      if environ_cp.get('TF_DOWNLOAD_CLANG') != '1':
        # Set up which clang we should use as the cuda / host compiler.
        set_clang_cuda_compiler_path(environ_cp)
      else:
        # Use downloaded LLD for linking.
        write_to_bazelrc('build:cuda_clang --config=download_clang_use_lld')
        write_to_bazelrc('test:cuda_clang --config=download_clang_use_lld')
    else:
      # Set up which gcc nvcc should use as the host compiler
      # No need to set this on Windows
      if not is_windows():
        set_gcc_host_compiler_path(environ_cp)
    set_other_cuda_vars(environ_cp)
  else:
    # CUDA not required. Ask whether we should download the clang toolchain and
    # use it for the CPU build.
    set_tf_download_clang(environ_cp)
    if environ_cp.get('TF_DOWNLOAD_CLANG') == '1':
      write_to_bazelrc('build --config=download_clang')
      write_to_bazelrc('test --config=download_clang')

  # SYCL / ROCm / CUDA are mutually exclusive.
  # At most 1 GPU platform can be configured.
  gpu_platform_count = 0
  if environ_cp.get('TF_NEED_OPENCL_SYCL') == '1':
    gpu_platform_count += 1
  if environ_cp.get('TF_NEED_ROCM') == '1':
    gpu_platform_count += 1
  if environ_cp.get('TF_NEED_CUDA') == '1':
    gpu_platform_count += 1
  if gpu_platform_count >= 2:
    raise UserInputError('SYCL / CUDA / ROCm are mututally exclusive. '
                         'At most 1 GPU platform can be configured.')

  set_build_var(environ_cp, 'TF_NEED_MPI', 'MPI', 'with_mpi_support', False)
  if environ_cp.get('TF_NEED_MPI') == '1':
    set_mpi_home(environ_cp)
    set_other_mpi_vars(environ_cp)

  set_cc_opt_flags(environ_cp)
  set_system_libs_flag(environ_cp)
  if is_windows():
    set_windows_build_flags(environ_cp)

  # Add a config option to build TensorFlow 2.0 API.
  write_to_bazelrc('build:v2 --define=tf_api_version=2')

  if get_var(environ_cp, 'TF_SET_ANDROID_WORKSPACE', 'android workspace', False,
             ('Would you like to interactively configure ./WORKSPACE for '
              'Android builds?'), 'Searching for NDK and SDK installations.',
             'Not configuring the WORKSPACE for Android builds.'):
    create_android_ndk_rule(environ_cp)
    create_android_sdk_rule(environ_cp)

  # On Windows, we don't have MKL support and the build is always monolithic.
  # So no need to print the following message.
  # TODO(pcloudy): remove the following if check when they make sense on Windows
  if not is_windows():
    print('Preconfigured Bazel build configs. You can use any of the below by '
          'adding "--config=<>" to your build command. See tools/bazel.rc for '
          'more details.')
    config_info_line('mkl', 'Build with MKL support.')
    config_info_line('monolithic', 'Config for mostly static monolithic build.')
    config_info_line('gdr', 'Build with GDR support.')
    config_info_line('verbs', 'Build with libverbs support.')
    config_info_line('ngraph', 'Build with Intel nGraph support.')
# Run interactive configuration only when executed as a script.
if __name__ == '__main__':
  main()
| 37.442073 | 80 | 0.689651 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import errno
import os
import platform
import re
import subprocess
import sys
try:
from shutil import which
except ImportError:
from distutils.spawn import find_executable as which
# Default toolchain versions offered by the interactive prompts.
_DEFAULT_CUDA_VERSION = '9.0'
_DEFAULT_CUDNN_VERSION = '7'
_DEFAULT_NCCL_VERSION = '2.2'
_DEFAULT_CUDA_COMPUTE_CAPABILITIES = '3.5,7.0'
# Candidate CUDA toolkit install locations, per platform.
_DEFAULT_CUDA_PATH = '/usr/local/cuda'
_DEFAULT_CUDA_PATH_LINUX = '/opt/cuda'
_DEFAULT_CUDA_PATH_WIN = ('C:/Program Files/NVIDIA GPU Computing '
                          'Toolkit/CUDA/v%s' % _DEFAULT_CUDA_VERSION)
_TF_OPENCL_VERSION = '1.2'
_DEFAULT_COMPUTECPP_TOOLKIT_PATH = '/usr/local/computecpp'
_DEFAULT_TRISYCL_INCLUDE_DIR = '/usr/local/triSYCL/include'
# Android NDK API levels that Bazel officially supports.
_SUPPORTED_ANDROID_NDK_VERSIONS = [10, 11, 12, 13, 14, 15, 16]
# How many times an interactive prompt is retried before the script gives up.
_DEFAULT_PROMPT_ASK_ATTEMPTS = 10
# Files this script generates/updates, anchored at the script's directory.
_TF_WORKSPACE_ROOT = os.path.abspath(os.path.dirname(__file__))
_TF_BAZELRC_FILENAME = '.tf_configure.bazelrc'
_TF_BAZELRC = os.path.join(_TF_WORKSPACE_ROOT, _TF_BAZELRC_FILENAME)
_TF_WORKSPACE = os.path.join(_TF_WORKSPACE_ROOT, 'WORKSPACE')
# The TensorRT system-library directory name differs on ppc64le.
if platform.machine() == 'ppc64le':
  _DEFAULT_TENSORRT_PATH_LINUX = '/usr/lib/powerpc64le-linux-gnu/'
else:
  _DEFAULT_TENSORRT_PATH_LINUX = '/usr/lib/%s-linux-gnu' % platform.machine()
class UserInputError(Exception):
  """Raised when the user repeatedly supplies an invalid configuration value."""
def is_windows():
  """Return True iff the host operating system is Windows."""
  system_name = platform.system()
  return system_name == 'Windows'
def is_linux():
  """Return True iff the host operating system is Linux."""
  system_name = platform.system()
  return system_name == 'Linux'
def is_macos():
  """Return True iff the host operating system is macOS (Darwin)."""
  system_name = platform.system()
  return system_name == 'Darwin'
def is_ppc64le():
  """Return True iff the host machine architecture is ppc64le."""
  machine_name = platform.machine()
  return machine_name == 'ppc64le'
def is_cygwin():
  """Return True iff running under Cygwin on Windows."""
  system_name = platform.system()
  return system_name.startswith('CYGWIN_NT')
def get_input(question):
  """Prompt the user with *question* and return the typed answer.

  Works under both Python 2 (raw_input) and Python 3 (input); returns the
  empty string when stdin hits EOF.
  """
  try:
    try:
      return raw_input(question)
    except NameError:
      # Python 3 has no raw_input; fall back to input().
      return input(question)
  except EOFError:
    return ''
def symlink_force(target, link_name):
  """Create a symlink at link_name, replacing an existing one if necessary."""
  try:
    os.symlink(target, link_name)
  except OSError as err:
    # Only "already exists" is recoverable; anything else propagates.
    if err.errno != errno.EEXIST:
      raise err
    os.remove(link_name)
    os.symlink(target, link_name)
def sed_in_place(filename, old, new):
  """Replace every occurrence of *old* with *new* inside *filename*."""
  with open(filename, 'r') as fh:
    contents = fh.read()
  with open(filename, 'w') as fh:
    fh.write(contents.replace(old, new))
def write_to_bazelrc(line):
  """Append a single line to the generated .tf_configure.bazelrc file."""
  with open(_TF_BAZELRC, 'a') as bazelrc:
    bazelrc.write('%s\n' % line)
def write_action_env_to_bazelrc(var_name, var):
  """Record an --action_env VAR="value" line in the generated bazelrc."""
  rendered = 'build --action_env %s="%s"' % (var_name, str(var))
  write_to_bazelrc(rendered)
def run_shell(cmd, allow_non_zero=False):
  """Run *cmd* (a list) and return its stripped stdout as a str.

  When allow_non_zero is True a non-zero exit status is tolerated and the
  process's partial output is returned instead of raising
  subprocess.CalledProcessError.
  """
  if not allow_non_zero:
    output = subprocess.check_output(cmd)
  else:
    try:
      output = subprocess.check_output(cmd)
    except subprocess.CalledProcessError as e:
      output = e.output
  return output.decode('UTF-8').strip()
def cygpath(path):
  """Return the absolute form of *path* using forward slashes."""
  absolute = os.path.abspath(path)
  return absolute.replace('\\', '/')
def get_python_path(environ_cp, python_bin_path):
  """Return existing directories where the python libraries may live.

  Combines $PYTHONPATH entries with the interpreter's own site-packages
  directories, keeping only paths that exist on disk.
  """
  env_paths = []
  if environ_cp.get('PYTHONPATH'):
    env_paths = environ_cp.get('PYTHONPATH').split(':')
  try:
    library_paths = run_shell([
        python_bin_path, '-c',
        'import site; print("\\n".join(site.getsitepackages()))'
    ]).split('\n')
  except subprocess.CalledProcessError:
    # Some interpreters (e.g. virtualenv pythons) lack site.getsitepackages;
    # fall back to distutils.
    library_paths = [
        run_shell([
            python_bin_path, '-c',
            'from distutils.sysconfig import get_python_lib;'
            'print(get_python_lib())'
        ])
    ]
  return [p for p in set(env_paths + library_paths) if os.path.isdir(p)]
def get_python_major_version(python_bin_path):
  """Return the major version ('2' or '3') of the given python binary."""
  return run_shell([python_bin_path, '-c', 'import sys; print(sys.version[0])'])
def setup_python(environ_cp):
  """Interactively choose the python binary and library paths and record them.

  Writes PYTHON_BIN_PATH / PYTHON_LIB_PATH to the generated bazelrc and to
  tools/python_bin_path.sh, and updates environ_cp in place.
  """
  # Default to the interpreter running this script.
  default_python_bin_path = sys.executable
  ask_python_bin_path = ('Please specify the location of python. [Default is '
                         '%s]: ') % default_python_bin_path
  while True:
    python_bin_path = get_from_env_or_user_or_default(
        environ_cp, 'PYTHON_BIN_PATH', ask_python_bin_path,
        default_python_bin_path)
    # Accept only an existing, executable file.
    if os.path.isfile(python_bin_path) and os.access(python_bin_path, os.X_OK):
      break
    elif not os.path.exists(python_bin_path):
      print('Invalid python path: %s cannot be found.' % python_bin_path)
    else:
      print('%s is not executable. Is it the python binary?' % python_bin_path)
    # Clear the env value so the next iteration prompts again.
    environ_cp['PYTHON_BIN_PATH'] = ''
  # Bazel on Windows/Cygwin needs forward-slash absolute paths.
  if is_windows() or is_cygwin():
    python_bin_path = cygpath(python_bin_path)
  python_lib_path = environ_cp.get('PYTHON_LIB_PATH')
  if not python_lib_path:
    python_lib_paths = get_python_path(environ_cp, python_bin_path)
    if environ_cp.get('USE_DEFAULT_PYTHON_LIB_PATH') == '1':
      # Non-interactive mode: take the first candidate.
      python_lib_path = python_lib_paths[0]
    else:
      print('Found possible Python library paths:\n %s' %
            '\n '.join(python_lib_paths))
      default_python_lib_path = python_lib_paths[0]
      python_lib_path = get_input(
          'Please input the desired Python library path to use. '
          'Default is [%s]\n' % python_lib_paths[0])
      if not python_lib_path:
        python_lib_path = default_python_lib_path
    environ_cp['PYTHON_LIB_PATH'] = python_lib_path
  # NOTE(review): python_major_version is computed but not used below.
  python_major_version = get_python_major_version(python_bin_path)
  if is_windows() or is_cygwin():
    python_lib_path = cygpath(python_lib_path)
  # Record the selections for python_configure.bzl.
  write_action_env_to_bazelrc('PYTHON_BIN_PATH', python_bin_path)
  write_action_env_to_bazelrc('PYTHON_LIB_PATH', python_lib_path)
  write_to_bazelrc('build --python_path=\"%s"' % python_bin_path)
  environ_cp['PYTHON_BIN_PATH'] = python_bin_path
  # Persist the binary path for shell scripts.
  with open(
      os.path.join(_TF_WORKSPACE_ROOT, 'tools', 'python_bin_path.sh'),
      'w') as f:
    f.write('export PYTHON_BIN_PATH="%s"' % python_bin_path)
def reset_tf_configure_bazelrc(workspace_path):
  """Truncate the generated bazelrc and re-link it from the user .bazelrc.

  Rewrites <workspace_path>/.bazelrc, dropping any stale line that mentions
  the generated file, then appends a fresh 'import' of it.
  """
  # Truncate the generated file.
  open(_TF_BAZELRC, 'w').close()
  bazelrc_path = os.path.join(workspace_path, '.bazelrc')
  data = []
  if os.path.exists(bazelrc_path):
    with open(bazelrc_path, 'r') as f:
      data = f.read().splitlines()
  with open(bazelrc_path, 'w') as f:
    for l in data:
      # Drop any previous reference to the generated bazelrc.
      if _TF_BAZELRC_FILENAME in l:
        continue
      f.write('%s\n' % l)
    if is_windows():
      # Bazel's import directive wants forward slashes on Windows.
      tf_bazelrc_path = _TF_BAZELRC.replace('\\', '/')
    else:
      tf_bazelrc_path = _TF_BAZELRC
    f.write('import %s\n' % tf_bazelrc_path)
def cleanup_makefile():
  """Delete stray BUILD files under the contrib/makefile downloads tree.

  Leftover BUILD files in downloaded dependencies would otherwise confuse
  Bazel.
  """
  download_dir = os.path.join(_TF_WORKSPACE_ROOT, 'tensorflow', 'contrib',
                              'makefile', 'downloads')
  if not os.path.isdir(download_dir):
    return
  for root, _, filenames in os.walk(download_dir):
    for name in filenames:
      if name.endswith('BUILD'):
        os.remove(os.path.join(root, name))
def get_var(environ_cp,
            var_name,
            query_item,
            enabled_by_default,
            question=None,
            yes_reply=None,
            no_reply=None):
  """Resolve a yes/no configuration flag from the environment or the user.

  If var_name is set in environ_cp it must be a boolean-like string
  ('1'/'true'/'yes'/... or '0'/'false'/'no'/...), otherwise UserInputError
  is raised. If unset, the user is prompted until they answer y/n/<enter>.
  Returns True or False.
  """
  if not question:
    question = 'Do you wish to build TensorFlow with %s support?' % query_item
  if not yes_reply:
    yes_reply = '%s support will be enabled for TensorFlow.' % query_item
  if not no_reply:
    no_reply = 'No %s' % yes_reply
  yes_reply += '\n'
  no_reply += '\n'
  # Show which answer <enter> selects.
  if enabled_by_default:
    question += ' [Y/n]: '
  else:
    question += ' [y/N]: '
  var = environ_cp.get(var_name)
  if var is not None:
    # Environment value present: interpret it as a boolean indicator.
    var_content = var.strip().lower()
    true_strings = ('1', 't', 'true', 'y', 'yes')
    false_strings = ('0', 'f', 'false', 'n', 'no')
    if var_content in true_strings:
      var = True
    elif var_content in false_strings:
      var = False
    else:
      raise UserInputError(
          'Environment variable %s must be set as a boolean indicator.\n'
          'The following are accepted as TRUE : %s.\n'
          'The following are accepted as FALSE: %s.\n'
          'Current value is %s.' % (var_name, ', '.join(true_strings),
                                    ', '.join(false_strings), var))
  # No environment value: keep prompting until a valid answer is given.
  while var is None:
    user_input_origin = get_input(question)
    user_input = user_input_origin.strip().lower()
    if user_input == 'y':
      print(yes_reply)
      var = True
    elif user_input == 'n':
      print(no_reply)
      var = False
    elif not user_input:
      # Empty answer selects the default.
      if enabled_by_default:
        print(yes_reply)
        var = True
      else:
        print(no_reply)
        var = False
    else:
      print('Invalid selection: %s' % user_input_origin)
  return var
def set_build_var(environ_cp,
                  var_name,
                  query_item,
                  option_name,
                  enabled_by_default,
                  bazel_config_name=None):
  """Resolve a boolean build option and emit the matching --define line.

  Stores '1'/'0' into environ_cp[var_name]. When enabled, writes an
  unconditional 'build --define'; when disabled but bazel_config_name is
  given, writes the define under that named config instead.
  """
  enabled = get_var(environ_cp, var_name, query_item, enabled_by_default)
  environ_cp[var_name] = str(int(enabled))
  if enabled:
    write_to_bazelrc('build --define %s=true' % option_name)
  elif bazel_config_name is not None:
    write_to_bazelrc(
        'build:%s --define %s=true' % (bazel_config_name, option_name))
def set_action_env_var(environ_cp,
                       var_name,
                       query_item,
                       enabled_by_default,
                       question=None,
                       yes_reply=None,
                       no_reply=None):
  """Resolve a boolean flag and record it as an --action_env in the bazelrc.

  Also mirrors the '0'/'1' result back into environ_cp[var_name].
  """
  enabled = int(
      get_var(environ_cp, var_name, query_item, enabled_by_default, question,
              yes_reply, no_reply))
  write_action_env_to_bazelrc(var_name, enabled)
  environ_cp[var_name] = str(enabled)
def convert_version_to_int(version):
  """Convert a dotted version string into a comparable integer.

  Each dot-separated segment is zero-padded to three digits and the result
  concatenated, e.g. '0.15.0' -> 15000. Anything after the first '-' is
  ignored. Returns None when a segment is not a plain number.
  """
  segments = version.split('-')[0].split('.')
  if not all(seg.isdigit() for seg in segments):
    return None
  return int(''.join('%03d' % int(seg) for seg in segments))
def check_bazel_version(min_version):
  """Check that an installed bazel is at least min_version.

  Exits the process (status 0) when bazel is missing or too old; warns and
  continues when the installed bazel is not a release build. Returns the
  detected version string.
  """
  if which('bazel') is None:
    print('Cannot find bazel. Please install bazel.')
    sys.exit(0)
  curr_version = run_shell(
      ['bazel', '--batch', '--bazelrc=/dev/null', 'version'])
  # Extract the version number from the 'Build label:' line of the output.
  for line in curr_version.split('\n'):
    if 'Build label: ' in line:
      curr_version = line.split('Build label: ')[1]
      break
  min_version_int = convert_version_to_int(min_version)
  curr_version_int = convert_version_to_int(curr_version)
  # A non-parsable version means a non-release (e.g. dev) bazel; warn only.
  if not curr_version_int:
    print('WARNING: current bazel installation is not a release version.')
    print('Make sure you are running at least bazel %s' % min_version)
    return curr_version
  print('You have bazel %s installed.' % curr_version)
  if curr_version_int < min_version_int:
    print('Please upgrade your bazel installation to version %s or higher to '
          'build TensorFlow!' % min_version)
    sys.exit(0)
  return curr_version
def set_cc_opt_flags(environ_cp):
  """Ask for the compiler optimization flags used under --config=opt.

  Writes one 'build:opt --copt' line per flag plus the host copt and the
  default-optimizations define.
  """
  if is_ppc64le():
    # gcc on ppc64le does not support -march; use -mcpu instead.
    default_cc_opt_flags = '-mcpu=native'
  elif is_windows():
    default_cc_opt_flags = '/arch:AVX'
  else:
    default_cc_opt_flags = '-march=native'
  question = ('Please specify optimization flags to use during compilation when'
              ' bazel option "--config=opt" is specified [Default is %s]: '
             ) % default_cc_opt_flags
  cc_opt_flags = get_from_env_or_user_or_default(environ_cp, 'CC_OPT_FLAGS',
                                                 question, default_cc_opt_flags)
  for opt in cc_opt_flags.split():
    write_to_bazelrc('build:opt --copt=%s' % opt)
  # It should be safe on the same build host (not supported on ppc64le).
  if not (is_ppc64le() or is_windows()):
    write_to_bazelrc('build:opt --host_copt=-march=native')
  write_to_bazelrc('build:opt --define with_default_optimizations=true')
def set_tf_cuda_clang(environ_cp):
  """Ask whether clang should be the CUDA compiler; record TF_CUDA_CLANG."""
  set_action_env_var(
      environ_cp,
      'TF_CUDA_CLANG',
      None,
      False,
      question='Do you want to use clang as CUDA compiler?',
      yes_reply='Clang will be used as CUDA compiler.',
      no_reply='nvcc will be used as CUDA compiler.')
def set_tf_download_clang(environ_cp):
  """Ask whether to download a prebuilt clang; record TF_DOWNLOAD_CLANG."""
  set_action_env_var(
      environ_cp,
      'TF_DOWNLOAD_CLANG',
      None,
      False,
      question='Do you wish to download a fresh release of clang? (Experimental)',
      yes_reply='Clang will be downloaded and used to compile tensorflow.',
      no_reply='Clang will not be downloaded.')
def get_from_env_or_user_or_default(environ_cp, var_name, ask_for_var,
                                    var_default):
  """Return environ_cp[var_name] if set, else prompt, else the default."""
  value = environ_cp.get(var_name)
  if value:
    return value
  value = get_input(ask_for_var)
  print('\n')
  return value if value else var_default
def set_clang_cuda_compiler_path(environ_cp):
  """Ask for the clang binary used as the CUDA device/host compiler.

  Loops until an existing path is supplied, then records
  CLANG_CUDA_COMPILER_PATH in environ_cp and the generated bazelrc.
  """
  default_clang_path = which('clang') or ''
  ask_clang_path = ('Please specify which clang should be used as device and '
                    'host compiler. [Default is %s]: ') % default_clang_path
  while True:
    clang_cuda_compiler_path = get_from_env_or_user_or_default(
        environ_cp, 'CLANG_CUDA_COMPILER_PATH', ask_clang_path,
        default_clang_path)
    if os.path.exists(clang_cuda_compiler_path):
      break
    # Reset and prompt again.
    print('Invalid clang path: %s cannot be found.' % clang_cuda_compiler_path)
    environ_cp['CLANG_CUDA_COMPILER_PATH'] = ''
  environ_cp['CLANG_CUDA_COMPILER_PATH'] = clang_cuda_compiler_path
  write_action_env_to_bazelrc('CLANG_CUDA_COMPILER_PATH',
                              clang_cuda_compiler_path)
def prompt_loop_or_load_from_env(environ_cp,
                                 var_name,
                                 var_default,
                                 ask_for_var,
                                 check_success,
                                 error_msg,
                                 suppress_default_error=False,
                                 n_ask_attempts=_DEFAULT_PROMPT_ASK_ATTEMPTS):
  """Resolve a setting from the environment or by prompting, with validation.

  check_success(val) decides whether a candidate is acceptable; error_msg
  (a format string taking the bad value) is printed on failure unless
  suppress_default_error is True. After n_ask_attempts failures a
  UserInputError is raised. The accepted value is stored back into
  environ_cp[var_name] and returned.
  """
  default = environ_cp.get(var_name) or var_default
  full_query = '%s [Default is %s]: ' % (
      ask_for_var,
      default,
  )
  for _ in range(n_ask_attempts):
    val = get_from_env_or_user_or_default(environ_cp, var_name, full_query,
                                          default)
    if check_success(val):
      break
    if not suppress_default_error:
      print(error_msg % val)
    # Clear the env value so the next attempt prompts interactively.
    environ_cp[var_name] = ''
  else:
    # for/else: the loop exhausted all attempts without a break.
    raise UserInputError(
        'Invalid %s setting was provided %d times in a row. '
        'Assuming to be a scripting mistake.' % (var_name, n_ask_attempts))
  environ_cp[var_name] = val
  return val
def create_android_ndk_rule(environ_cp):
  """Ask for the Android NDK path and record NDK home/API level in bazelrc."""
  # Platform-specific default install location of the NDK bundle.
  if is_windows() or is_cygwin():
    default_ndk_path = cygpath(
        '%s/Android/Sdk/ndk-bundle' % environ_cp['APPDATA'])
  elif is_macos():
    default_ndk_path = '%s/library/Android/Sdk/ndk-bundle' % environ_cp['HOME']
  else:
    default_ndk_path = '%s/Android/Sdk/ndk-bundle' % environ_cp['HOME']
  def valid_ndk_path(path):
    # A valid NDK root contains a source.properties file.
    return (os.path.exists(path) and
            os.path.exists(os.path.join(path, 'source.properties')))
  android_ndk_home_path = prompt_loop_or_load_from_env(
      environ_cp,
      var_name='ANDROID_NDK_HOME',
      var_default=default_ndk_path,
      ask_for_var='Please specify the home path of the Android NDK to use.',
      check_success=valid_ndk_path,
      error_msg=('The path %s or its child file "source.properties" '
                 'does not exist.'))
  write_action_env_to_bazelrc('ANDROID_NDK_HOME', android_ndk_home_path)
  write_action_env_to_bazelrc('ANDROID_NDK_API_LEVEL',
                              check_ndk_level(android_ndk_home_path))
def create_android_sdk_rule(environ_cp):
  """Ask for SDK path, API level and build-tools version; record in bazelrc."""
  # Platform-specific default install location of the SDK.
  if is_windows() or is_cygwin():
    default_sdk_path = cygpath('%s/Android/Sdk' % environ_cp['APPDATA'])
  elif is_macos():
    default_sdk_path = '%s/library/Android/Sdk' % environ_cp['HOME']
  else:
    default_sdk_path = '%s/Android/Sdk' % environ_cp['HOME']
  def valid_sdk_path(path):
    # A valid SDK root has both 'platforms' and 'build-tools' subdirs.
    return (os.path.exists(path) and
            os.path.exists(os.path.join(path, 'platforms')) and
            os.path.exists(os.path.join(path, 'build-tools')))
  android_sdk_home_path = prompt_loop_or_load_from_env(
      environ_cp,
      var_name='ANDROID_SDK_HOME',
      var_default=default_sdk_path,
      ask_for_var='Please specify the home path of the Android SDK to use.',
      check_success=valid_sdk_path,
      error_msg=('Either %s does not exist, or it does not contain the '
                 'subdirectories "platforms" and "build-tools".'))
  platforms = os.path.join(android_sdk_home_path, 'platforms')
  api_levels = sorted(os.listdir(platforms))
  # Platform dirs are named 'android-<level>'; strip the prefix for display.
  api_levels = [x.replace('android-', '') for x in api_levels]
  def valid_api_level(api_level):
    return os.path.exists(
        os.path.join(android_sdk_home_path, 'platforms',
                     'android-' + api_level))
  android_api_level = prompt_loop_or_load_from_env(
      environ_cp,
      var_name='ANDROID_API_LEVEL',
      var_default=api_levels[-1],
      ask_for_var=('Please specify the Android SDK API level to use. '
                   '[Available levels: %s]') % api_levels,
      check_success=valid_api_level,
      error_msg='Android-%s is not present in the SDK path.')
  build_tools = os.path.join(android_sdk_home_path, 'build-tools')
  versions = sorted(os.listdir(build_tools))
  def valid_build_tools(version):
    return os.path.exists(
        os.path.join(android_sdk_home_path, 'build-tools', version))
  android_build_tools_version = prompt_loop_or_load_from_env(
      environ_cp,
      var_name='ANDROID_BUILD_TOOLS_VERSION',
      var_default=versions[-1],
      ask_for_var=('Please specify an Android build tools version to use. '
                   '[Available versions: %s]') % versions,
      check_success=valid_build_tools,
      error_msg=('The selected SDK does not have build-tools version %s '
                 'available.'))
  write_action_env_to_bazelrc('ANDROID_BUILD_TOOLS_VERSION',
                              android_build_tools_version)
  write_action_env_to_bazelrc('ANDROID_SDK_API_LEVEL', android_api_level)
  write_action_env_to_bazelrc('ANDROID_SDK_HOME', android_sdk_home_path)
def check_ndk_level(android_ndk_home_path):
  """Parse the NDK revision from source.properties; warn if unsupported.

  Returns the revision number as a string; raises if it cannot be parsed.
  """
  properties_path = '%s/source.properties' % android_ndk_home_path
  if is_windows() or is_cygwin():
    properties_path = cygpath(properties_path)
  with open(properties_path, 'r') as f:
    filedata = f.read()
  revision = re.search(r'Pkg.Revision = (\d+)', filedata)
  if revision:
    ndk_api_level = revision.group(1)
  else:
    raise Exception('Unable to parse NDK revision.')
  # Warn (but do not fail) when Bazel does not officially support this NDK.
  if int(ndk_api_level) not in _SUPPORTED_ANDROID_NDK_VERSIONS:
    print('WARNING: The API level of the NDK in %s is %s, which is not '
          'supported by Bazel (officially supported versions: %s). Please use '
          'another version. Compiling Android targets may result in confusing '
          'errors.\n' % (android_ndk_home_path, ndk_api_level,
                         _SUPPORTED_ANDROID_NDK_VERSIONS))
  return ndk_api_level
def set_gcc_host_compiler_path(environ_cp):
  """Ask for the gcc used by nvcc as host compiler; record it in bazelrc."""
  default_gcc = which('gcc') or ''
  cuda_bin_symlink = '%s/bin/gcc' % environ_cp.get('CUDA_TOOLKIT_PATH')
  if os.path.islink(cuda_bin_symlink):
    # Prefer the gcc that the CUDA toolkit itself links to.
    default_gcc = os.path.realpath(cuda_bin_symlink)
  prompt_kwargs = dict(
      var_name='GCC_HOST_COMPILER_PATH',
      var_default=default_gcc,
      ask_for_var=
      'Please specify which gcc should be used by nvcc as the host compiler.',
      check_success=os.path.exists,
      error_msg='Invalid gcc path. %s cannot be found.')
  gcc_host_compiler_path = prompt_loop_or_load_from_env(
      environ_cp, **prompt_kwargs)
  write_action_env_to_bazelrc('GCC_HOST_COMPILER_PATH', gcc_host_compiler_path)
def reformat_version_sequence(version_str, sequence_count):
  """Normalize a version string to exactly sequence_count dotted segments.

  Missing segments are padded with '0'; extra segments are dropped.
  E.g. ('9', 2) -> '9.0' and ('9.0.1', 2) -> '9.0'.
  """
  segments = version_str.split('.')
  while len(segments) < sequence_count:
    segments.append('0')
  return '.'.join(segments[:sequence_count])
def set_tf_cuda_version(environ_cp):
  """Ask for CUDA version/toolkit path; set CUDA_TOOLKIT_PATH, TF_CUDA_VERSION.

  The toolkit path is accepted only if the matching libcudart runtime
  library exists under it; after _DEFAULT_PROMPT_ASK_ATTEMPTS failures a
  UserInputError is raised.
  """
  ask_cuda_version = (
      'Please specify the CUDA SDK version you want to use. '
      '[Leave empty to default to CUDA %s]: ') % _DEFAULT_CUDA_VERSION
  for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
    tf_cuda_version = get_from_env_or_user_or_default(
        environ_cp, 'TF_CUDA_VERSION', ask_cuda_version, _DEFAULT_CUDA_VERSION)
    # Normalize to 'major.minor'.
    tf_cuda_version = reformat_version_sequence(str(tf_cuda_version), 2)
    # Pick a platform-appropriate default install location.
    default_cuda_path = _DEFAULT_CUDA_PATH
    if is_windows() or is_cygwin():
      default_cuda_path = cygpath(
          environ_cp.get('CUDA_PATH', _DEFAULT_CUDA_PATH_WIN))
    elif is_linux():
      # Fall back to the distro-style /opt/cuda when /usr/local/cuda is absent.
      if (not os.path.exists(default_cuda_path)
         ) and os.path.exists(_DEFAULT_CUDA_PATH_LINUX):
        default_cuda_path = _DEFAULT_CUDA_PATH_LINUX
    ask_cuda_path = ('Please specify the location where CUDA %s toolkit is'
                     ' installed. Refer to README.md for more details. '
                     '[Default is %s]: ') % (tf_cuda_version, default_cuda_path)
    cuda_toolkit_path = get_from_env_or_user_or_default(
        environ_cp, 'CUDA_TOOLKIT_PATH', ask_cuda_path, default_cuda_path)
    if is_windows() or is_cygwin():
      cuda_toolkit_path = cygpath(cuda_toolkit_path)
    # Candidate locations of the CUDA runtime library inside the toolkit.
    if is_windows():
      cuda_rt_lib_paths = ['lib/x64/cudart.lib']
    elif is_linux():
      cuda_rt_lib_paths = [
          '%s/libcudart.so.%s' % (x, tf_cuda_version) for x in [
              'lib64',
              'lib/powerpc64le-linux-gnu',
              'lib/x86_64-linux-gnu',
          ]
      ]
    elif is_macos():
      cuda_rt_lib_paths = ['lib/libcudart.%s.dylib' % tf_cuda_version]
    cuda_toolkit_paths_full = [
        os.path.join(cuda_toolkit_path, x) for x in cuda_rt_lib_paths
    ]
    # Accept the toolkit when any runtime-library candidate exists.
    if any([os.path.exists(x) for x in cuda_toolkit_paths_full]):
      break
    # Reset and retry
    print('Invalid path to CUDA %s toolkit. %s cannot be found' %
          (tf_cuda_version, cuda_toolkit_paths_full))
    environ_cp['TF_CUDA_VERSION'] = ''
    environ_cp['CUDA_TOOLKIT_PATH'] = ''
  else:
    raise UserInputError('Invalid TF_CUDA_SETTING setting was provided %d '
                         'times in a row. Assuming to be a scripting mistake.' %
                         _DEFAULT_PROMPT_ASK_ATTEMPTS)
  # Set CUDA_TOOLKIT_PATH and TF_CUDA_VERSION
  environ_cp['CUDA_TOOLKIT_PATH'] = cuda_toolkit_path
  write_action_env_to_bazelrc('CUDA_TOOLKIT_PATH', cuda_toolkit_path)
  environ_cp['TF_CUDA_VERSION'] = tf_cuda_version
  write_action_env_to_bazelrc('TF_CUDA_VERSION', tf_cuda_version)
def set_tf_cudnn_version(environ_cp):
  """Ask for cuDNN version/path; set CUDNN_INSTALL_PATH and TF_CUDNN_VERSION.

  Validates that a matching libcudnn exists under the given path (falling
  back to an ldconfig lookup on Linux); raises UserInputError after
  _DEFAULT_PROMPT_ASK_ATTEMPTS failed attempts.
  """
  ask_cudnn_version = (
      'Please specify the cuDNN version you want to use. '
      '[Leave empty to default to cuDNN %s.0]: ') % _DEFAULT_CUDNN_VERSION
  for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
    tf_cudnn_version = get_from_env_or_user_or_default(
        environ_cp, 'TF_CUDNN_VERSION', ask_cudnn_version,
        _DEFAULT_CUDNN_VERSION)
    # Normalize to a single (major) segment.
    tf_cudnn_version = reformat_version_sequence(str(tf_cudnn_version), 1)
    default_cudnn_path = environ_cp.get('CUDA_TOOLKIT_PATH')
    ask_cudnn_path = (r'Please specify the location where cuDNN %s library is '
                      'installed. Refer to README.md for more details. [Default'
                      ' is %s]: ') % (tf_cudnn_version, default_cudnn_path)
    cudnn_install_path = get_from_env_or_user_or_default(
        environ_cp, 'CUDNN_INSTALL_PATH', ask_cudnn_path, default_cudnn_path)
    # Result returned from "read" will be used unexpanded. That make "~"
    # unusable. Going through one more level of expansion to handle that.
    cudnn_install_path = os.path.realpath(
        os.path.expanduser(cudnn_install_path))
    if is_windows() or is_cygwin():
      cudnn_install_path = cygpath(cudnn_install_path)
    # Per-platform primary and alternative library locations.
    if is_windows():
      cuda_dnn_lib_path = 'lib/x64/cudnn.lib'
      cuda_dnn_lib_alt_path = 'lib/x64/cudnn.lib'
    elif is_linux():
      cuda_dnn_lib_path = 'lib64/libcudnn.so.%s' % tf_cudnn_version
      cuda_dnn_lib_alt_path = 'libcudnn.so.%s' % tf_cudnn_version
    elif is_macos():
      cuda_dnn_lib_path = 'lib/libcudnn.%s.dylib' % tf_cudnn_version
      cuda_dnn_lib_alt_path = 'libcudnn.%s.dylib' % tf_cudnn_version
    cuda_dnn_lib_path_full = os.path.join(cudnn_install_path, cuda_dnn_lib_path)
    cuda_dnn_lib_alt_path_full = os.path.join(cudnn_install_path,
                                              cuda_dnn_lib_alt_path)
    if os.path.exists(cuda_dnn_lib_path_full) or os.path.exists(
        cuda_dnn_lib_alt_path_full):
      break
    # Try another alternative for Linux
    if is_linux():
      ldconfig_bin = which('ldconfig') or '/sbin/ldconfig'
      cudnn_path_from_ldconfig = run_shell([ldconfig_bin, '-p'])
      cudnn_path_from_ldconfig = re.search('.*libcudnn.so .* => (.*)',
                                           cudnn_path_from_ldconfig)
      if cudnn_path_from_ldconfig:
        cudnn_path_from_ldconfig = cudnn_path_from_ldconfig.group(1)
        if os.path.exists(
            '%s.%s' % (cudnn_path_from_ldconfig, tf_cudnn_version)):
          cudnn_install_path = os.path.dirname(cudnn_path_from_ldconfig)
          break
    # Reset and Retry
    print(
        'Invalid path to cuDNN %s toolkit. None of the following files can be '
        'found:' % tf_cudnn_version)
    print(cuda_dnn_lib_path_full)
    print(cuda_dnn_lib_alt_path_full)
    if is_linux():
      print('%s.%s' % (cudnn_path_from_ldconfig, tf_cudnn_version))
    environ_cp['TF_CUDNN_VERSION'] = ''
  else:
    raise UserInputError('Invalid TF_CUDNN setting was provided %d '
                         'times in a row. Assuming to be a scripting mistake.' %
                         _DEFAULT_PROMPT_ASK_ATTEMPTS)
  # Set CUDNN_INSTALL_PATH and TF_CUDNN_VERSION
  environ_cp['CUDNN_INSTALL_PATH'] = cudnn_install_path
  write_action_env_to_bazelrc('CUDNN_INSTALL_PATH', cudnn_install_path)
  environ_cp['TF_CUDNN_VERSION'] = tf_cudnn_version
  write_action_env_to_bazelrc('TF_CUDNN_VERSION', tf_cudnn_version)
def is_cuda_compatible(lib, cuda_ver, cudnn_ver):
  """Check whether a shared library is linked against the given CUDA/cuDNN.

  Runs ldd on the library and compares any libcudart/libcudnn dependency
  versions (as integers from convert_version_to_int) with cuda_ver and
  cudnn_ver. A library with no CUDA/cuDNN dependency counts as compatible.
  """
  ldd_bin = which('ldd') or '/usr/bin/ldd'
  ldd_out = run_shell([ldd_bin, lib], True)
  ldd_out = ldd_out.split(os.linesep)
  # Capture the version suffix after 'libcud*.so.' in each ldd line.
  cudnn_pattern = re.compile('.*libcudnn.so\\.?(.*) =>.*$')
  cuda_pattern = re.compile('.*libcudart.so\\.?(.*) =>.*$')
  cudnn = None
  cudart = None
  cudnn_ok = True  # assume no cudnn dependency by default
  cuda_ok = True  # assume no cuda dependency by default
  for line in ldd_out:
    if 'libcudnn.so' in line:
      cudnn = cudnn_pattern.search(line)
      cudnn_ok = False
    elif 'libcudart.so' in line:
      cudart = cuda_pattern.search(line)
      cuda_ok = False
  if cudnn and len(cudnn.group(1)):
    cudnn = convert_version_to_int(cudnn.group(1))
  if cudart and len(cudart.group(1)):
    cudart = convert_version_to_int(cudart.group(1))
  # A dependency is OK only when its version matches exactly.
  if cudnn is not None:
    cudnn_ok = (cudnn == cudnn_ver)
  if cudart is not None:
    cuda_ok = (cudart == cuda_ver)
  return cudnn_ok and cuda_ok
def set_tf_tensorrt_install_path(environ_cp):
  """Ask for TensorRT path; set TENSORRT_INSTALL_PATH, TF_TENSORRT_VERSION.

  Linux-only. Scans the given directory (and its lib/lib64 subdirs) for the
  newest libnvinfer compatible with the already-selected CUDA/cuDNN, falling
  back to an ldconfig lookup. Raises UserInputError after
  _DEFAULT_PROMPT_ASK_ATTEMPTS failed attempts.
  """
  if not is_linux():
    raise ValueError('Currently TensorRT is only supported on Linux platform.')
  # Ask user whether to add TensorRT support.
  if str(int(get_var(environ_cp, 'TF_NEED_TENSORRT', 'TensorRT',
                     False))) != '1':
    return
  for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
    ask_tensorrt_path = (r'Please specify the location where TensorRT is '
                         'installed. [Default is %s]:') % (
                             _DEFAULT_TENSORRT_PATH_LINUX)
    trt_install_path = get_from_env_or_user_or_default(
        environ_cp, 'TENSORRT_INSTALL_PATH', ask_tensorrt_path,
        _DEFAULT_TENSORRT_PATH_LINUX)
    # Result returned from "read" will be used unexpanded. That make "~"
    # unusable. Going through one more level of expansion to handle that.
    trt_install_path = os.path.realpath(os.path.expanduser(trt_install_path))
    def find_libs(search_path):
      # Return the set of real paths of libnvinfer libraries in search_path.
      fl = set()
      if os.path.exists(search_path) and os.path.isdir(search_path):
        fl.update([
            os.path.realpath(os.path.join(search_path, x))
            for x in os.listdir(search_path)
            if 'libnvinfer.so' in x
        ])
      return fl
    possible_files = find_libs(trt_install_path)
    possible_files.update(find_libs(os.path.join(trt_install_path, 'lib')))
    possible_files.update(find_libs(os.path.join(trt_install_path, 'lib64')))
    cuda_ver = convert_version_to_int(environ_cp['TF_CUDA_VERSION'])
    cudnn_ver = convert_version_to_int(environ_cp['TF_CUDNN_VERSION'])
    nvinfer_pattern = re.compile('.*libnvinfer.so.?(.*)$')
    # Track the highest compatible version found: [int_ver, ver_str, path].
    highest_ver = [0, None, None]
    for lib_file in possible_files:
      if is_cuda_compatible(lib_file, cuda_ver, cudnn_ver):
        matches = nvinfer_pattern.search(lib_file)
        if len(matches.groups()) == 0:
          continue
        ver_str = matches.group(1)
        ver = convert_version_to_int(ver_str) if len(ver_str) else 0
        if ver > highest_ver[0]:
          highest_ver = [ver, ver_str, lib_file]
    if highest_ver[1] is not None:
      trt_install_path = os.path.dirname(highest_ver[2])
      tf_tensorrt_version = highest_ver[1]
      break
    # Try another alternative from ldconfig.
    ldconfig_bin = which('ldconfig') or '/sbin/ldconfig'
    ldconfig_output = run_shell([ldconfig_bin, '-p'])
    search_result = re.search('.*libnvinfer.so\\.?([0-9.]*).* => (.*)',
                              ldconfig_output)
    if search_result:
      libnvinfer_path_from_ldconfig = search_result.group(2)
      if os.path.exists(libnvinfer_path_from_ldconfig):
        if is_cuda_compatible(libnvinfer_path_from_ldconfig, cuda_ver,
                              cudnn_ver):
          trt_install_path = os.path.dirname(libnvinfer_path_from_ldconfig)
          tf_tensorrt_version = search_result.group(1)
          break
    # Reset and Retry
    if possible_files:
      print('TensorRT libraries found in one the following directories',
            'are not compatible with selected cuda and cudnn installations')
      print(trt_install_path)
      print(os.path.join(trt_install_path, 'lib'))
      print(os.path.join(trt_install_path, 'lib64'))
      if search_result:
        print(libnvinfer_path_from_ldconfig)
    else:
      print(
          'Invalid path to TensorRT. None of the following files can be found:')
      print(trt_install_path)
      print(os.path.join(trt_install_path, 'lib'))
      print(os.path.join(trt_install_path, 'lib64'))
      if search_result:
        print(libnvinfer_path_from_ldconfig)
  else:
    raise UserInputError('Invalid TF_TENSORRT setting was provided %d '
                         'times in a row. Assuming to be a scripting mistake.' %
                         _DEFAULT_PROMPT_ASK_ATTEMPTS)
  # Set TENSORRT_INSTALL_PATH and TF_TENSORRT_VERSION
  environ_cp['TENSORRT_INSTALL_PATH'] = trt_install_path
  write_action_env_to_bazelrc('TENSORRT_INSTALL_PATH', trt_install_path)
  environ_cp['TF_TENSORRT_VERSION'] = tf_tensorrt_version
  write_action_env_to_bazelrc('TF_TENSORRT_VERSION', tf_tensorrt_version)
def set_tf_nccl_install_path(environ_cp):
  """Ask for NCCL version/path; set TF_NCCL_VERSION (and NCCL_INSTALL_PATH).

  Linux-only. NCCL 1 needs no install path (it is fetched from GitHub);
  otherwise both lib/libnccl.so.<ver> and include/nccl.h must exist under
  the chosen path. Raises UserInputError after repeated failures.
  """
  if not is_linux():
    raise ValueError('Currently NCCL is only supported on Linux platforms.')
  ask_nccl_version = (
      'Please specify the NCCL version you want to use. If NCCL %s is not '
      'installed, then you can use version 1.3 that can be fetched '
      'automatically but it may have worse performance with multiple GPUs. '
      '[Default is %s]: ') % (_DEFAULT_NCCL_VERSION, _DEFAULT_NCCL_VERSION)
  for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
    tf_nccl_version = get_from_env_or_user_or_default(
        environ_cp, 'TF_NCCL_VERSION', ask_nccl_version, _DEFAULT_NCCL_VERSION)
    # Normalize to a single (major) segment.
    tf_nccl_version = reformat_version_sequence(str(tf_nccl_version), 1)
    if tf_nccl_version == '1':
      break  # No need to get install path, NCCL 1 is a GitHub repo.
    # TODO(csigg): Look with ldconfig first if we can find the library in paths
    # like /usr/lib/x86_64-linux-gnu and the header file in the corresponding
    # include directory. This is where the NCCL .deb packages install them.
    # Then ask the user if we should use that. Instead of a single
    # NCCL_INSTALL_PATH, pass separate NCCL_LIB_PATH and NCCL_HDR_PATH to
    # nccl_configure.bzl
    default_nccl_path = environ_cp.get('CUDA_TOOLKIT_PATH')
    ask_nccl_path = (r'Please specify the location where NCCL %s library is '
                     'installed. Refer to README.md for more details. [Default '
                     'is %s]:') % (tf_nccl_version, default_nccl_path)
    nccl_install_path = get_from_env_or_user_or_default(
        environ_cp, 'NCCL_INSTALL_PATH', ask_nccl_path, default_nccl_path)
    # Result returned from "read" will be used unexpanded. That make "~"
    # unusable. Going through one more level of expansion to handle that.
    nccl_install_path = os.path.realpath(os.path.expanduser(nccl_install_path))
    if is_windows() or is_cygwin():
      nccl_install_path = cygpath(nccl_install_path)
    # Per-platform library location under the install path.
    if is_windows():
      nccl_lib_path = 'lib/x64/nccl.lib'
    elif is_linux():
      nccl_lib_path = 'lib/libnccl.so.%s' % tf_nccl_version
    elif is_macos():
      nccl_lib_path = 'lib/libnccl.%s.dylib' % tf_nccl_version
    nccl_lib_path = os.path.join(nccl_install_path, nccl_lib_path)
    nccl_hdr_path = os.path.join(nccl_install_path, 'include/nccl.h')
    if os.path.exists(nccl_lib_path) and os.path.exists(nccl_hdr_path):
      # Set NCCL_INSTALL_PATH
      environ_cp['NCCL_INSTALL_PATH'] = nccl_install_path
      write_action_env_to_bazelrc('NCCL_INSTALL_PATH', nccl_install_path)
      break
    # Reset and Retry
    print('Invalid path to NCCL %s toolkit, %s or %s not found. Please use the '
          'O/S agnostic package of NCCL 2' % (tf_nccl_version, nccl_lib_path,
                                              nccl_hdr_path))
    environ_cp['TF_NCCL_VERSION'] = ''
  else:
    raise UserInputError('Invalid TF_NCCL setting was provided %d '
                         'times in a row. Assuming to be a scripting mistake.' %
                         _DEFAULT_PROMPT_ASK_ATTEMPTS)
  # Set TF_NCCL_VERSION
  environ_cp['TF_NCCL_VERSION'] = tf_nccl_version
  write_action_env_to_bazelrc('TF_NCCL_VERSION', tf_nccl_version)
def get_native_cuda_compute_capabilities(environ_cp):
  """Detect the local GPUs' compute capabilities via the deviceQuery tool.

  Returns a comma-separated string like '3.5,7.0', or '' when deviceQuery
  is missing, not executable, or fails.
  """
  device_query_bin = os.path.join(
      environ_cp.get('CUDA_TOOLKIT_PATH'), 'extras/demo_suite/deviceQuery')
  if os.path.isfile(device_query_bin) and os.access(device_query_bin, os.X_OK):
    try:
      output = run_shell(device_query_bin).split('\n')
      # Pull the 'x.y' token out of each 'Capability' line.
      pattern = re.compile('[0-9]*\\.[0-9]*')
      output = [pattern.search(x) for x in output if 'Capability' in x]
      output = ','.join(x.group() for x in output if x is not None)
    except subprocess.CalledProcessError:
      output = ''
  else:
    output = ''
  return output
def set_tf_cuda_compute_capabilities(environ_cp):
  """Ask for and validate TF_CUDA_COMPUTE_CAPABILITIES; write it to bazelrc.

  Loops until the user supplies a comma-separated list of 'major.minor'
  compute capabilities, each >= 3.0. Capabilities detected on the local
  machine via deviceQuery are offered as the default when available.
  """
  while True:
    native_cuda_compute_capabilities = get_native_cuda_compute_capabilities(
        environ_cp)
    if not native_cuda_compute_capabilities:
      default_cuda_compute_capabilities = _DEFAULT_CUDA_COMPUTE_CAPABILITIES
    else:
      default_cuda_compute_capabilities = native_cuda_compute_capabilities
    ask_cuda_compute_capabilities = (
        'Please specify a list of comma-separated '
        'Cuda compute capabilities you want to '
        'build with.\nYou can find the compute '
        'capability of your device at: '
        'https://developer.nvidia.com/cuda-gpus.\nPlease'
        ' note that each additional compute '
        'capability significantly increases your '
        'build time and binary size. [Default is: %s]: ' %
        default_cuda_compute_capabilities)
    tf_cuda_compute_capabilities = get_from_env_or_user_or_default(
        environ_cp, 'TF_CUDA_COMPUTE_CAPABILITIES',
        ask_cuda_compute_capabilities, default_cuda_compute_capabilities)
    # Check whether all capabilities from the input is valid
    all_valid = True
    # Remove all whitespace characters before splitting the string
    # that users may insert by accident, as this will result in error
    tf_cuda_compute_capabilities = ''.join(tf_cuda_compute_capabilities.split())
    for compute_capability in tf_cuda_compute_capabilities.split(','):
      # The dot must be literal: escape it so '3x5' is rejected.
      m = re.match('[0-9]+\\.[0-9]+', compute_capability)
      if not m:
        # BUGFIX: the original format string had no %s placeholder, so this
        # print() raised TypeError instead of reporting the bad value.
        print('Invalid compute capability: %s' % compute_capability)
        all_valid = False
      else:
        ver = int(m.group(0).split('.')[0])
        if ver < 3:
          print('Only compute capabilities 3.0 or higher are supported.')
          all_valid = False
    if all_valid:
      break
    # Reset and Retry
    environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = ''
  # Set TF_CUDA_COMPUTE_CAPABILITIES
  environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = tf_cuda_compute_capabilities
  write_action_env_to_bazelrc('TF_CUDA_COMPUTE_CAPABILITIES',
                              tf_cuda_compute_capabilities)
def set_other_cuda_vars(environ_cp):
  """Emit the bazel --config lines that enable CUDA builds and tests."""
  # If CUDA is enabled, always use GPU during build and test.
  if environ_cp.get('TF_CUDA_CLANG') == '1':
    config_name = 'cuda_clang'
  else:
    config_name = 'cuda'
  write_to_bazelrc('build --config=%s' % config_name)
  write_to_bazelrc('test --config=%s' % config_name)
def set_host_cxx_compiler(environ_cp):
  """Prompt for (or load from env) the host C++ compiler and export it."""
  fallback = which('g++') or ''
  chosen = prompt_loop_or_load_from_env(
      environ_cp,
      var_name='HOST_CXX_COMPILER',
      var_default=fallback,
      ask_for_var=('Please specify which C++ compiler should be used as the '
                   'host C++ compiler.'),
      check_success=os.path.exists,
      error_msg='Invalid C++ compiler path. %s cannot be found.',
  )
  write_action_env_to_bazelrc('HOST_CXX_COMPILER', chosen)
def set_host_c_compiler(environ_cp):
  """Prompt for (or load from env) the host C compiler and export it."""
  fallback = which('gcc') or ''
  chosen = prompt_loop_or_load_from_env(
      environ_cp,
      var_name='HOST_C_COMPILER',
      var_default=fallback,
      ask_for_var=('Please specify which C compiler should be used as the host '
                   'C compiler.'),
      check_success=os.path.exists,
      error_msg='Invalid C compiler path. %s cannot be found.',
  )
  write_action_env_to_bazelrc('HOST_C_COMPILER', chosen)
def set_computecpp_toolkit_path(environ_cp):
  """Ask for the ComputeCpp SYCL toolkit location and export it to bazelrc."""

  def toolkit_exists(toolkit_path):
    # Accept the path only when the ComputeCpp runtime library is present.
    sycl_rt_lib_path = 'lib/libComputeCpp.so' if is_linux() else ''
    full_path = os.path.join(toolkit_path, sycl_rt_lib_path)
    if os.path.exists(full_path):
      return True
    print('Invalid SYCL %s library path. %s cannot be found' %
          (_TF_OPENCL_VERSION, full_path))
    return False

  computecpp_toolkit_path = prompt_loop_or_load_from_env(
      environ_cp,
      var_name='COMPUTECPP_TOOLKIT_PATH',
      var_default=_DEFAULT_COMPUTECPP_TOOLKIT_PATH,
      ask_for_var=(
          'Please specify the location where ComputeCpp for SYCL %s is '
          'installed.' % _TF_OPENCL_VERSION),
      check_success=toolkit_exists,
      error_msg='Invalid SYCL compiler path. %s cannot be found.',
      suppress_default_error=True)
  write_action_env_to_bazelrc('COMPUTECPP_TOOLKIT_PATH',
                              computecpp_toolkit_path)
def set_trisycl_include_dir(environ_cp):
  """Keep asking for the triSYCL include directory until it exists on disk."""
  prompt = ('Please specify the location of the triSYCL '
            'include directory. (Use --config=sycl_trisycl '
            'when building with Bazel) '
            '[Default is %s]: ') % (
                _DEFAULT_TRISYCL_INCLUDE_DIR)
  trisycl_include_dir = get_from_env_or_user_or_default(
      environ_cp, 'TRISYCL_INCLUDE_DIR', prompt,
      _DEFAULT_TRISYCL_INCLUDE_DIR)
  while not os.path.exists(trisycl_include_dir):
    print('Invalid triSYCL include directory, %s cannot be found' %
          (trisycl_include_dir,))
    trisycl_include_dir = get_from_env_or_user_or_default(
        environ_cp, 'TRISYCL_INCLUDE_DIR', prompt,
        _DEFAULT_TRISYCL_INCLUDE_DIR)
  # Set TRISYCL_INCLUDE_DIR for later steps and record it in the bazelrc.
  environ_cp['TRISYCL_INCLUDE_DIR'] = trisycl_include_dir
  write_action_env_to_bazelrc('TRISYCL_INCLUDE_DIR', trisycl_include_dir)
def set_mpi_home(environ_cp):
  """Prompt for the MPI toolkit folder and validate it has include/ and lib/.

  The default is derived from the location of mpirun/mpiexec (two directory
  levels up from the binary).
  """
  default_mpi_home = which('mpirun') or which('mpiexec') or ''
  default_mpi_home = os.path.dirname(os.path.dirname(default_mpi_home))

  def valid_mpi_path(mpi_home):
    # A usable MPI home must contain both an include/ and a lib/ directory.
    exists = (
        os.path.exists(os.path.join(mpi_home, 'include')) and
        os.path.exists(os.path.join(mpi_home, 'lib')))
    if not exists:
      # Bug fix: the original printed the *boolean* result of
      # os.path.exists(<lib path>) instead of the lib path itself.
      print('Invalid path to the MPI Toolkit. %s or %s cannot be found' %
            (os.path.join(mpi_home, 'include'),
             os.path.join(mpi_home, 'lib')))
    return exists

  _ = prompt_loop_or_load_from_env(
      environ_cp,
      var_name='MPI_HOME',
      var_default=default_mpi_home,
      ask_for_var='Please specify the MPI toolkit folder.',
      check_success=valid_mpi_path,
      error_msg='',
      suppress_default_error=True)
def set_other_mpi_vars(environ_cp):
  """Symlink the MPI headers and library from MPI_HOME into third_party/mpi.

  Raises:
    ValueError: if lib/libmpi.so is missing under MPI_HOME.
  """
  # Link the MPI header files
  mpi_home = environ_cp.get('MPI_HOME')
  symlink_force('%s/include/mpi.h' % mpi_home, 'third_party/mpi/mpi.h')
  # Determine if we use OpenMPI or MVAPICH, these require different header files
  # to be included here to make bazel dependency checker happy
  if os.path.exists(os.path.join(mpi_home, 'include/mpi_portable_platform.h')):
    # OpenMPI ships mpi_portable_platform.h.
    symlink_force(
        os.path.join(mpi_home, 'include/mpi_portable_platform.h'),
        'third_party/mpi/mpi_portable_platform.h')
    # TODO(gunan): avoid editing files in configure
    sed_in_place('third_party/mpi/mpi.bzl', 'MPI_LIB_IS_OPENMPI=False',
                 'MPI_LIB_IS_OPENMPI=True')
  else:
    # MVAPICH / MPICH
    symlink_force(
        os.path.join(mpi_home, 'include/mpio.h'), 'third_party/mpi/mpio.h')
    symlink_force(
        os.path.join(mpi_home, 'include/mpicxx.h'), 'third_party/mpi/mpicxx.h')
    # TODO(gunan): avoid editing files in configure
    sed_in_place('third_party/mpi/mpi.bzl', 'MPI_LIB_IS_OPENMPI=True',
                 'MPI_LIB_IS_OPENMPI=False')
  # The shared library itself is required in both cases.
  if os.path.exists(os.path.join(mpi_home, 'lib/libmpi.so')):
    symlink_force(
        os.path.join(mpi_home, 'lib/libmpi.so'), 'third_party/mpi/libmpi.so')
  else:
    raise ValueError('Cannot find the MPI library file in %s/lib' % mpi_home)
def set_system_libs_flag(environ_cp):
  """Export TF_SYSTEM_LIBS (sorted) plus PREFIX/LIBDIR/INCLUDEDIR overrides."""
  syslibs = environ_cp.get('TF_SYSTEM_LIBS', '')
  if syslibs:
    # Entries may be separated by commas or whitespace; normalize to a
    # sorted, comma-separated list.
    separator = ',' if ',' in syslibs else None
    syslibs = ','.join(sorted(syslibs.split(separator)))
    write_action_env_to_bazelrc('TF_SYSTEM_LIBS', syslibs)

  for var in ('PREFIX', 'LIBDIR', 'INCLUDEDIR'):
    if var in environ_cp:
      write_to_bazelrc('build --define=%s=%s' % (var, environ_cp[var]))
def set_windows_build_flags(environ_cp):
  """Write the Windows-specific Bazel build options to the bazelrc."""
  # The non-monolithic build is not supported yet
  write_to_bazelrc('build --config monolithic')
  # Suppress warning messages
  write_to_bazelrc('build --copt=-w --host_copt=-w')
  # Output more verbose information when something goes wrong
  write_to_bazelrc('build --verbose_failures')
  # The host and target platforms are the same in Windows build. So we don't
  # have to distinguish them; this avoids building the same targets twice.
  write_to_bazelrc('build --distinct_host_configuration=false')
  write_to_bazelrc('build --experimental_shortened_obj_file_path=true')
  # Don't bundle the TensorFlow Python dependencies into py_binary zips and
  # instead use the system-installed TF pip package. This is for:
  # 1. Running python tests against the system installed TF pip package.
  # 2. Avoiding redundant files in
  #    //tensorflow/tools/pip_package:simple_console_windows,
  #    which is a py_binary used during creating TF pip package.
  # See https://github.com/tensorflow/tensorflow/issues/22390
  write_to_bazelrc('build --define=no_tensorflow_py_deps=true')
  if get_var(
      environ_cp, 'TF_OVERRIDE_EIGEN_STRONG_INLINE', 'Eigen strong inline',
      True, ('Would you like to override eigen strong inline for some C++ '
             'compilation to reduce the compilation time?'),
      'Eigen strong inline overridden.', 'Not overriding eigen strong inline, '
      'some compilations could take more than 20 mins.'):
    # Due to a known MSVC compiler issue
    # https://github.com/tensorflow/tensorflow/issues/10521
    # Overriding eigen strong inline speeds up the compiling of
    # conv_grad_ops_3d.cc and conv_ops_3d.cc by 20 minutes,
    # but this also hurts the performance. Let users decide what they want.
    write_to_bazelrc('build --define=override_eigen_strong_inline=true')
def config_info_line(name, help_text):
  """Print one available --config option with its description.

  Bug fix: the print statement was truncated mid string literal
  ("print('\\t--config=%-12s\\t"); restore the format string and its
  arguments so the function is syntactically valid again.
  """
  print('\t--config=%-12s\t# %s' % (name, help_text))
def main():
  """Interactive entry point of the configure script.

  Collects build options (Python, jemalloc, cloud backends, XLA, SYCL, ROCm,
  CUDA, MPI, Android, Windows flags, ...) from environment variables or
  interactive prompts and records them in the Bazel configuration files.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--workspace',
      type=str,
      default=_TF_WORKSPACE_ROOT,
      help='The absolute path to your active Bazel workspace.')
  args = parser.parse_args()
  # Make a copy of os.environ to be clear when functions and getting and setting
  # environment variables.
  environ_cp = dict(os.environ)
  check_bazel_version('0.15.0')
  reset_tf_configure_bazelrc(args.workspace)
  cleanup_makefile()
  setup_python(environ_cp)
  # Pre-disable every option that is unsupported on Windows so the user is
  # never prompted for them.
  if is_windows():
    environ_cp['TF_NEED_AWS'] = '0'
    environ_cp['TF_NEED_GCP'] = '0'
    environ_cp['TF_NEED_HDFS'] = '0'
    environ_cp['TF_NEED_JEMALLOC'] = '0'
    environ_cp['TF_NEED_KAFKA'] = '0'
    environ_cp['TF_NEED_OPENCL_SYCL'] = '0'
    environ_cp['TF_NEED_COMPUTECPP'] = '0'
    environ_cp['TF_NEED_OPENCL'] = '0'
    environ_cp['TF_CUDA_CLANG'] = '0'
    environ_cp['TF_NEED_TENSORRT'] = '0'
    # TODO(ibiryukov): Investigate using clang as a cpu or cuda compiler on
    # Windows.
    environ_cp['TF_DOWNLOAD_CLANG'] = '0'
    environ_cp['TF_ENABLE_XLA'] = '0'
    environ_cp['TF_NEED_MPI'] = '0'
    environ_cp['TF_SET_ANDROID_WORKSPACE'] = '0'
  if is_macos():
    environ_cp['TF_NEED_JEMALLOC'] = '0'
    environ_cp['TF_NEED_TENSORRT'] = '0'
  # The numpy package on ppc64le uses OpenBLAS which has multi-threading
  # issues that lead to incorrect answers. Set OMP_NUM_THREADS=1 at
  # runtime to allow the Tensorflow testcases which compare numpy
  # results to Tensorflow results to succeed.
  if is_ppc64le():
    write_action_env_to_bazelrc('OMP_NUM_THREADS', 1)
  set_build_var(environ_cp, 'TF_NEED_JEMALLOC', 'jemalloc as malloc',
                'with_jemalloc', True)
  set_build_var(environ_cp, 'TF_NEED_GCP', 'Google Cloud Platform',
                'with_gcp_support', True, 'gcp')
  set_build_var(environ_cp, 'TF_NEED_HDFS', 'Hadoop File System',
                'with_hdfs_support', True, 'hdfs')
  set_build_var(environ_cp, 'TF_NEED_AWS', 'Amazon AWS Platform',
                'with_aws_support', True, 'aws')
  set_build_var(environ_cp, 'TF_NEED_KAFKA', 'Apache Kafka Platform',
                'with_kafka_support', True, 'kafka')
  set_build_var(environ_cp, 'TF_ENABLE_XLA', 'XLA JIT', 'with_xla_support',
                False, 'xla')
  set_action_env_var(environ_cp, 'TF_NEED_OPENCL_SYCL', 'OpenCL SYCL', False)
  if environ_cp.get('TF_NEED_OPENCL_SYCL') == '1':
    set_host_cxx_compiler(environ_cp)
    set_host_c_compiler(environ_cp)
    set_action_env_var(environ_cp, 'TF_NEED_COMPUTECPP', 'ComputeCPP', True)
    if environ_cp.get('TF_NEED_COMPUTECPP') == '1':
      set_computecpp_toolkit_path(environ_cp)
    else:
      set_trisycl_include_dir(environ_cp)
  set_action_env_var(environ_cp, 'TF_NEED_ROCM', 'ROCm', False)
  if (environ_cp.get('TF_NEED_ROCM') == '1' and
      'LD_LIBRARY_PATH' in environ_cp and
      environ_cp.get('LD_LIBRARY_PATH') != '1'):
    write_action_env_to_bazelrc('LD_LIBRARY_PATH',
                                environ_cp.get('LD_LIBRARY_PATH'))
  set_action_env_var(environ_cp, 'TF_NEED_CUDA', 'CUDA', False)
  if (environ_cp.get('TF_NEED_CUDA') == '1' and
      'TF_CUDA_CONFIG_REPO' not in environ_cp):
    set_tf_cuda_version(environ_cp)
    set_tf_cudnn_version(environ_cp)
    if is_linux():
      set_tf_tensorrt_install_path(environ_cp)
      set_tf_nccl_install_path(environ_cp)
    set_tf_cuda_compute_capabilities(environ_cp)
    if 'LD_LIBRARY_PATH' in environ_cp and environ_cp.get(
        'LD_LIBRARY_PATH') != '1':
      write_action_env_to_bazelrc('LD_LIBRARY_PATH',
                                  environ_cp.get('LD_LIBRARY_PATH'))
    set_tf_cuda_clang(environ_cp)
    if environ_cp.get('TF_CUDA_CLANG') == '1':
      # Ask whether we should download the clang toolchain.
      set_tf_download_clang(environ_cp)
      if environ_cp.get('TF_DOWNLOAD_CLANG') != '1':
        # Set up which clang we should use as the cuda / host compiler.
        set_clang_cuda_compiler_path(environ_cp)
      else:
        # Use downloaded LLD for linking.
        write_to_bazelrc('build:cuda_clang --config=download_clang_use_lld')
        write_to_bazelrc('test:cuda_clang --config=download_clang_use_lld')
    else:
      # Set up which gcc nvcc should use as the host compiler
      # No need to set this on Windows
      if not is_windows():
        set_gcc_host_compiler_path(environ_cp)
    set_other_cuda_vars(environ_cp)
  else:
    # CUDA not required. Ask whether we should download the clang toolchain and
    # use it for the CPU build.
    set_tf_download_clang(environ_cp)
    if environ_cp.get('TF_DOWNLOAD_CLANG') == '1':
      write_to_bazelrc('build --config=download_clang')
      write_to_bazelrc('test --config=download_clang')
  # SYCL / ROCm / CUDA are mutually exclusive.
  # At most 1 GPU platform can be configured.
  gpu_platform_count = 0
  if environ_cp.get('TF_NEED_OPENCL_SYCL') == '1':
    gpu_platform_count += 1
  if environ_cp.get('TF_NEED_ROCM') == '1':
    gpu_platform_count += 1
  if environ_cp.get('TF_NEED_CUDA') == '1':
    gpu_platform_count += 1
  if gpu_platform_count >= 2:
    raise UserInputError('SYCL / CUDA / ROCm are mututally exclusive. '
                         'At most 1 GPU platform can be configured.')
  set_build_var(environ_cp, 'TF_NEED_MPI', 'MPI', 'with_mpi_support', False)
  if environ_cp.get('TF_NEED_MPI') == '1':
    set_mpi_home(environ_cp)
    set_other_mpi_vars(environ_cp)
  set_cc_opt_flags(environ_cp)
  set_system_libs_flag(environ_cp)
  if is_windows():
    set_windows_build_flags(environ_cp)
  # Add a config option to build TensorFlow 2.0 API.
  write_to_bazelrc('build:v2 --define=tf_api_version=2')
  if get_var(environ_cp, 'TF_SET_ANDROID_WORKSPACE', 'android workspace', False,
             ('Would you like to interactively configure ./WORKSPACE for '
              'Android builds?'), 'Searching for NDK and SDK installations.',
             'Not configuring the WORKSPACE for Android builds.'):
    create_android_ndk_rule(environ_cp)
    create_android_sdk_rule(environ_cp)
  # On Windows, we don't have MKL support and the build is always monolithic.
  if not is_windows():
    print('Preconfigured Bazel build configs. You can use any of the below by '
          'adding "--config=<>" to your build command. See tools/bazel.rc for '
          'more details.')
    config_info_line('mkl', 'Build with MKL support.')
    config_info_line('monolithic', 'Config for mostly static monolithic build.')
    config_info_line('gdr', 'Build with GDR support.')
    config_info_line('verbs', 'Build with libverbs support.')
    config_info_line('ngraph', 'Build with Intel nGraph support.')
if __name__ == '__main__':
  # Run the interactive configuration when executed as a script.
  main()
| true | true |
f71caae6f9c23667ccfce560a4892f8c3a10bf60 | 7,955 | py | Python | utils/dataset_preprocess.py | eliasyin/LCF-ATEPC | 83ae8a729b617ae34f562e5f52b62cb366dcc103 | [
"MIT"
] | 137 | 2019-12-18T15:38:18.000Z | 2022-03-26T15:26:19.000Z | utils/dataset_preprocess.py | eliasyin/LCF-ATEPC | 83ae8a729b617ae34f562e5f52b62cb366dcc103 | [
"MIT"
] | 45 | 2019-12-20T08:24:12.000Z | 2022-03-31T12:43:19.000Z | utils/dataset_preprocess.py | eliasyin/LCF-ATEPC | 83ae8a729b617ae34f562e5f52b62cb366dcc103 | [
"MIT"
] | 34 | 2020-01-03T02:59:18.000Z | 2022-03-30T01:44:09.000Z | import os
import copy
def is_similar(s1, s2):
    """Return True when the two sentences overlap heavily (>= 70% both ways).

    A token of s1 counts as matched when it occurs as a substring of s2.
    """
    matched = float(sum(1 for word in s1.split(' ') if word in s2))
    len_1 = len(s1.split(' '))
    len_2 = len(s2.split(' '))
    return matched / len_1 >= 0.7 and matched / len_2 >= 0.7
def assemble_aspects(fname):
    """Parse a 3-lines-per-sample aspect file into [text, tags, polarities].

    Each sample occupies three lines: the sentence with the aspect replaced
    by ``$T$``, the aspect term, and its polarity.  Consecutive samples whose
    sentences are similar are grouped so that every returned sample carries
    the BIO tags of all aspects in that sentence.

    Bug fix: the original never flushed the final sentence group after the
    loop, silently dropping its samples.
    """
    fin = open(fname, 'r', encoding='utf-8', newline='\n', errors='ignore')
    lines = fin.readlines()
    fin.close()
    for i in range(len(lines)):
        lines[i] = lines[i].replace('$ t $', '$T$').strip()

    def unify_same_samples(same_samples):
        # All samples in the group share one sentence; `tags` accumulates the
        # aspect spans of every sample while polarities stay per-sample.
        text = same_samples[0][0].replace('$T$', same_samples[0][1])
        polarities = [-1] * len(text.split())
        tags = ['O'] * len(text.split())
        samples = []
        for sample in same_samples:
            polarities_tmp = copy.deepcopy(polarities)
            try:
                asp_begin = (sample[0].split().index('$T$'))
                asp_end = sample[0].split().index('$T$') + len(sample[1].split())
                for i in range(asp_begin, asp_end):
                    polarities_tmp[i] = int(sample[2]) + 1
                    if i - sample[0].split().index('$T$') < 1:
                        tags[i] = 'B-ASP'
                    else:
                        tags[i] = 'I-ASP'
                samples.append([text, tags, polarities_tmp])
            except (ValueError, IndexError):
                # Skip malformed samples ($T$ missing, non-integer polarity,
                # or an aspect span that runs past the sentence).
                pass
        return samples

    samples = []
    aspects_in_one_sentence = []
    for i in range(0, len(lines), 3):
        if len(aspects_in_one_sentence) == 0:
            aspects_in_one_sentence.append([lines[i], lines[i + 1], lines[i + 2]])
            continue
        if is_similar(aspects_in_one_sentence[-1][0], lines[i]):
            aspects_in_one_sentence.append([lines[i], lines[i + 1], lines[i + 2]])
        else:
            samples.extend(unify_same_samples(aspects_in_one_sentence))
            aspects_in_one_sentence = [[lines[i], lines[i + 1], lines[i + 2]]]
    # Flush the last sentence group (missing in the original implementation).
    if aspects_in_one_sentence:
        samples.extend(unify_same_samples(aspects_in_one_sentence))
    return samples
def split_aspects(sentence):
    """Split a multi-aspect sample into (context, aspect, polarity) triples.

    `sentence` is a 3-tuple: text with ``$A$`` placeholders, '|'-separated
    aspects, and '|'-separated polarities.  Placeholders are substituted one
    by one and each triple captures the context slice preceding the next
    (still unreplaced) placeholder.
    """
    triples = []
    aspects = sentence[1].split("|")
    polarities = sentence[2].split("|")
    context = sentence[0]
    start = 0
    for i, aspect in enumerate(aspects):
        context = context.replace("$A$", aspect, 1)
        # find() returns -1 once no placeholder is left, which trims the last
        # character of the final context slice (original behavior, preserved).
        triples.append((context[start:context.find("$A$")], aspect,
                        polarities[i]))
        start = context.find(aspect) + len(aspect) + 1
    return triples
# Split the aspect annotations out of the dataset (token/tag/polarity format)
def refactor_dataset(fname, dist_fname):
    """Convert an aspect-annotated file into a CoNLL-style token file.

    Each output line is "<token> <BIO-tag> <polarity>", with a blank line
    separating samples.
    """
    lines = []
    samples = assemble_aspects(fname)
    for sample in samples:
        for token_index in range(len(sample[1])):
            token, label, polarty = sample[0].split()[token_index], sample[1][token_index], sample[2][token_index]
            lines.append(token + " " + label + " " + str(polarty))
        lines.append('\n')
    # Remove any stale output file before writing.
    if os.path.exists(dist_fname):
        os.remove(dist_fname)
    fout = open(dist_fname, 'w', encoding='utf8')
    for line in lines:
        fout.writelines((line+'\n').replace('\n\n', '\n'))
    fout.close()
# Split the aspect annotations out of a Chinese dataset into train/test files
def refactor_chinese_dataset(fname, train_fname,test_fname):
    """Convert a Chinese 3-lines-per-sample file into train/test token files.

    The first fifth of the samples becomes the test set and the rest the
    training set.  Each output line is "<token> <BIO-tag> <polarity>" with a
    blank line between samples.

    Bug fixes vs. the original:
      * the shared `lines` buffer was never reset between the two sections,
        so the training file also contained every test-set line (leakage);
      * the test-split statistics were printed with the training file name.
    """
    samples = assemble_aspects(fname)
    split_point = int(len(samples) / 5)
    _dump_samples(samples[:split_point], test_fname)
    _dump_samples(samples[split_point:], train_fname)


def _dump_samples(samples, fname):
    """Write `samples` to `fname` and print positive/negative statistics."""
    lines = []
    positive = 0
    negative = 0
    for sample in samples:
        for token_index in range(len(sample[1])):
            token, label, polarity = (sample[0].split()[token_index],
                                      sample[1][token_index],
                                      sample[2][token_index])
            lines.append(token + " " + label + " " + str(polarity))
        lines.append('\n')
        if 1 in sample[2]:
            positive += 1
        else:
            negative += 1
    print(fname + f"sum={len(samples)} positive={positive} negative={negative}")
    if os.path.exists(fname):
        os.remove(fname)
    with open(fname, 'w', encoding='utf8') as fout:
        for line in lines:
            fout.writelines((line + '\n').replace('\n\n', '\n'))
def detect_error_in_dataset(dataset):
    """Print pairs of adjacent similar samples whose token counts disagree.

    Two consecutive samples of the same sentence must tokenize to the same
    length once the $T$ placeholder is substituted; mismatches indicate a
    corrupted annotation.

    Fix: the file handle was never closed; use a context manager.
    """
    with open(dataset, 'r', encoding='utf8') as f:
        lines = f.readlines()
    for i in range(0, len(lines), 3):
        if i + 3 < len(lines):
            if is_similar(lines[i], lines[i + 3]) and len(
                    (lines[i] + " " + lines[i + 1]).split()) != len(
                        (lines[i + 3] + " " + lines[i + 4]).split()):
                print(lines[i].replace('$T$', lines[i + 1].replace('\n', '')))
                print(lines[i + 3].replace('$T$', lines[i + 4].replace('\n', '')))
if __name__ == "__main__":
    # # Chinese datasets
    # refactor_chinese_dataset(
    #     r"chinese_atepc_dataset/camera_output.txt",
    #     r"chinese_atepc_datasets/camera.atepc.train.dat",
    #     r"chinese_atepc_datasets/camera.atepc.test.dat",
    # )
    # refactor_chinese_dataset(
    #     r"chinese_atepc_datasets/car_output.txt",
    #     r"chinese_atepc_datasets/car.atepc.train.dat",
    #     r"chinese_atepc_datasets/car.atepc.test.dat",
    # )
    # refactor_chinese_dataset(
    #     r"chinese_atepc_datasets/notebook_output.txt",
    #     r"chinese_atepc_datasets/notebook.atepc.train.dat",
    #     r"chinese_atepc_datasets/notebook.atepc.test.dat",
    # )
    # refactor_chinese_dataset(
    #     r"chinese_atepc_datasets/phone_output.txt",
    #     r"chinese_atepc_datasets/phone.atepc.train.dat",
    #     r"chinese_atepc_datasets/phone.atepc.test.dat",
    # )
    # detect_error_in_dataset( r"../datasets/semeval14/Laptops_Train.xml.seg")
    # detect_error_in_dataset( r"../datasets/semeval14/Laptops_Test_Gold.xml.seg")
    # detect_error_in_dataset( r"../datasets/semeval14/Restaurants_Train.xml.seg")
    # detect_error_in_dataset( r"../datasets/semeval14/Restaurants_Test_Gold.xml.seg")
    # detect_error_in_dataset( r"../datasets/acl-14-short-data/train.raw")
    # # Laptop datasets
    # refactor_dataset(
    #     r"../datasets/semeval14/Laptops_Train.xml.seg",
    #     r"../atepc_datasets/laptop/Laptops.atepc.train.dat",
    # )
    # refactor_dataset(
    #     r"../datasets/semeval14/Laptops_Test_Gold.xml.seg",
    #     r"../atepc_datasets/laptop/Laptops.atepc.test.dat",
    # )
    # Restaurant datasets
    refactor_dataset(
        r"../datasets/semeval14/Restaurants_Train.xml.seg",
        r"../atepc_datasets/restaurant/Restaurants.atepc.train.dat",
    )
    refactor_dataset(
        r"../datasets/semeval14/Restaurants_Test_Gold.xml.seg",
        r"../atepc_datasets/restaurant/Restaurants.atepc.test.dat",
    )
    # # Twitter datasets
    # refactor_dataset(
    #     r"../datasets/acl-14-short-data/train.raw",
    #     r"../atepc_datasets/twitter/twitter.atepc.train.dat",
    # )
    # refactor_dataset(
    #     r"../datasets/acl-14-short-data/test.raw",
    #     r"../atepc_datasets/twitter/twitter.atepc.test.dat",
# ) | 36.828704 | 136 | 0.595726 | import os
import copy
def is_similar(s1, s2):
count = 0.0
for token in s1.split(' '):
if token in s2:
count += 1
if count / len(s1.split(' ')) >= 0.7 and count / len(s2.split(' ')) >= 0.7:
return True
else:
return False
def assemble_aspects(fname):
fin = open(fname, 'r', encoding='utf-8', newline='\n', errors='ignore')
lines = fin.readlines()
fin.close()
for i in range(len(lines)):
lines[i] = lines[i].replace('$ t $','$T$').strip()
def unify_same_samples(same_samples):
text = same_samples[0][0].replace('$T$', same_samples[0][1])
polarities = [-1]*len(text.split())
tags=['O']*len(text.split())
samples = []
for sample in same_samples:
polarities_tmp = copy.deepcopy(polarities)
try:
asp_begin = (sample[0].split().index('$T$'))
asp_end = sample[0].split().index('$T$')+len(sample[1].split())
for i in range(asp_begin, asp_end):
polarities_tmp[i] = int(sample[2])+1
if i - sample[0].split().index('$T$')<1:
tags[i] = 'B-ASP'
else:
tags[i] = 'I-ASP'
samples.append([text, tags, polarities_tmp])
except:
pass
return samples
samples = []
aspects_in_one_sentence = []
for i in range(0, len(lines), 3):
if len(aspects_in_one_sentence) == 0:
aspects_in_one_sentence.append([lines[i], lines[i + 1], lines[i + 2]])
continue
if is_similar(aspects_in_one_sentence[-1][0], lines[i]):
aspects_in_one_sentence.append([lines[i], lines[i + 1], lines[i + 2]])
else:
samples.extend(unify_same_samples(aspects_in_one_sentence))
aspects_in_one_sentence = []
aspects_in_one_sentence.append([lines[i], lines[i + 1], lines[i + 2]])
return samples
def split_aspects(sentence):
single_aspect_with_contex = []
aspect_num = len(sentence[1].split("|"))
aspects = sentence[1].split("|")
polarity = sentence[2].split("|")
pre_position = 0
aspect_contex = sentence[0]
for i in range(aspect_num):
aspect_contex = aspect_contex.replace("$A$", aspects[i], 1)
single_aspect_with_contex.append(
(aspect_contex[pre_position:aspect_contex.find("$A$")], aspects[i], polarity[i]))
pre_position = aspect_contex.find(aspects[i]) + len(aspects[i]) + 1
return single_aspect_with_contex
def refactor_dataset(fname, dist_fname):
lines = []
samples = assemble_aspects(fname)
for sample in samples:
for token_index in range(len(sample[1])):
token, label, polarty = sample[0].split()[token_index], sample[1][token_index], sample[2][token_index]
lines.append(token + " " + label + " " + str(polarty))
lines.append('\n')
if os.path.exists(dist_fname):
os.remove(dist_fname)
fout = open(dist_fname, 'w', encoding='utf8')
for line in lines:
fout.writelines((line+'\n').replace('\n\n', '\n'))
fout.close()
def refactor_chinese_dataset(fname, train_fname,test_fname):
lines = []
samples = assemble_aspects(fname)
positive = 0
negative = 0
sum = 0
for sample in samples[:int(len(samples)/5)]:
for token_index in range(len(sample[1])):
token, label, polarty = sample[0].split()[token_index], sample[1][token_index], sample[2][token_index]
lines.append(token + " " + label + " " + str(polarty))
lines.append('\n')
if 1 in sample[2]:
positive+=1
else:negative+=1
sum+=1
print(train_fname+f"sum={sum} positive={positive} negative={negative}")
if os.path.exists(test_fname):
os.remove(test_fname)
fout = open(test_fname, 'w', encoding='utf8')
for line in lines:
fout.writelines((line+'\n').replace('\n\n', '\n'))
fout.close()
positive = 0
negative = 0
sum = 0
for sample in samples[int(len(samples)/5):]:
for token_index in range(len(sample[1])):
tokens = sample[0].split()
token, label, polarty = sample[0].split()[token_index], sample[1][token_index], sample[2][token_index]
lines.append(token + " " + label + " " + str(polarty))
lines.append('\n')
if 1 in sample[2]:
positive+=1
else:negative+=1
sum+=1
print(train_fname+f"sum={sum} positive={positive} negative={negative}")
if os.path.exists(train_fname):
os.remove(train_fname)
fout = open(train_fname, 'w', encoding='utf8')
for line in lines:
fout.writelines((line + '\n').replace('\n\n', '\n'))
fout.close()
def detect_error_in_dataset(dataset):
f = open(dataset, 'r', encoding='utf8')
lines = f.readlines()
for i in range(0, len(lines), 3):
if i + 3 < len(lines):
if is_similar(lines[i],lines[i+3]) and len((lines[i]+" "+ lines[i+1]).split()) != len((lines[i+3]+" "+ lines[i+4]).split()):
print(lines[i].replace('$T$', lines[i+1].replace('\n','')))
print(lines[i+3].replace('$T$', lines[i+4].replace('\n','')))
if __name__ == "__main__":
refactor_dataset(
r"../datasets/semeval14/Restaurants_Train.xml.seg",
r"../atepc_datasets/restaurant/Restaurants.atepc.train.dat",
)
refactor_dataset(
r"../datasets/semeval14/Restaurants_Test_Gold.xml.seg",
r"../atepc_datasets/restaurant/Restaurants.atepc.test.dat",
)
| true | true |
f71cab1867cc22a6cea57f7a9832a1702c206111 | 2,746 | py | Python | makbe/expanders/tca9555.py | kazhida/makbe-py | b2840251118959a826fe8d3e2e84c2000dba3081 | [
"MIT"
] | null | null | null | makbe/expanders/tca9555.py | kazhida/makbe-py | b2840251118959a826fe8d3e2e84c2000dba3081 | [
"MIT"
] | 1 | 2021-11-29T08:23:50.000Z | 2021-11-29T08:23:50.000Z | makbe/expanders/tca9555.py | kazhida/makbe-py | b2840251118959a826fe8d3e2e84c2000dba3081 | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (c) 2021 Kazuyuki HIDA
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from .. key_switch import nop_switch
from .. import IoExpander, KeySwitch
class TCA9555(IoExpander):
    """TCA9555 16-bit I2C I/O expander (the PCA9555 behaves the same).
    """

    def __init__(self, dev_address: int):
        """Create the expander for the given device address.

        The upper four bits of the 7-bit address are fixed, so only the
        lower three bits are passed in (base address 0x20).

        :param dev_address: lower three bits of the device address
        """
        self.dev_address = dev_address + 0x20
        self.switches = []
        for i in range(16):
            self.switches.append(nop_switch())

    def init_device(self, i2c) -> bool:
        """Initialize the device over I2C.

        Writes 0xFF to configuration registers 0x06/0x07, which configures
        all pins of both 8-bit ports as inputs.

        :param i2c: I2C master
        :return: always True
        """
        i2c.writeto(self.dev_address, bytes([0x06, 0xFF]))
        i2c.writeto(self.dev_address, bytes([0x07, 0xFF]))
        return True

    def read_device(self, i2c) -> [bool]:
        """Read the I/O expander and return the state of each pin.

        :param i2c: I2C master
        :return: list of pin states (True when the pin is ON, i.e. the
            active-low input bit reads 0)
        """
        buffer = bytearray(2)
        # Register 0x00 is the first input register; the two-byte read
        # covers both ports.
        i2c.writeto_then_readfrom(self.dev_address, bytes([0x00]), buffer)
        result = []
        for i, b in enumerate(buffer):
            for p in range(8):
                mask = 1 << p
                # Inputs are treated as active-low: a cleared bit means ON.
                if buffer[i] & mask != 0:
                    result.append(False)
                else:
                    result.append(True)
        return result

    def assign(self, pin: int, switch: KeySwitch):
        """Assign a key switch to a pin.

        :param pin: pin number (0-origin)
        :param switch: key switch
        """
        self.switches[pin] = switch

    def switch(self, pin: int) -> KeySwitch:
        """Return the key switch assigned to a pin.

        :param pin: pin number (0-origin)
        :return: the assigned key switch
        """
        return self.switches[pin]
| 34.759494 | 80 | 0.643117 |
from .. key_switch import nop_switch
from .. import IoExpander, KeySwitch
class TCA9555(IoExpander):
def __init__(self, dev_address: int):
self.dev_address = dev_address + 0x20
self.switches = []
for i in range(16):
self.switches.append(nop_switch())
def init_device(self, i2c) -> bool:
i2c.writeto(self.dev_address, bytes([0x06, 0xFF]))
i2c.writeto(self.dev_address, bytes([0x07, 0xFF]))
return True
def read_device(self, i2c) -> [bool]:
buffer = bytearray(2)
i2c.writeto_then_readfrom(self.dev_address, bytes([0x00]), buffer)
result = []
for i, b in enumerate(buffer):
for p in range(8):
mask = 1 << p
if buffer[i] & mask != 0:
result.append(False)
else:
result.append(True)
return result
def assign(self, pin: int, switch: KeySwitch):
self.switches[pin] = switch
def switch(self, pin: int) -> KeySwitch:
return self.switches[pin]
| true | true |
f71cab3e710d8cc552a1054d037bb361fdbacb7d | 2,386 | py | Python | old/test_reverse_linked_list.py | kurtrm/data_structures_rev | 58f425a877898a45595de9c57c7eb8e087a0c3a2 | [
"MIT"
] | null | null | null | old/test_reverse_linked_list.py | kurtrm/data_structures_rev | 58f425a877898a45595de9c57c7eb8e087a0c3a2 | [
"MIT"
] | null | null | null | old/test_reverse_linked_list.py | kurtrm/data_structures_rev | 58f425a877898a45595de9c57c7eb8e087a0c3a2 | [
"MIT"
] | null | null | null | """Test of the reversed linked list."""
import pytest
@pytest.fixture
def linked_list():
    """Make linked_list for testing."""
    # Fresh three-element list built from [1, 2, 3] for every test, so tests
    # never share mutable state.
    from linked_list import LinkedList
    linked_list = LinkedList([1, 2, 3])
    return linked_list
def test_empty_linked_list(linked_list):
    """Reversing a fully emptied list raises IndexError."""
    from reverse_linked_list import reverse_linked_list
    for _ in range(3):
        linked_list.pop()
    with pytest.raises(IndexError):
        reverse_linked_list(linked_list)
def test_one_in_linked_list(linked_list):
    """Reversing a single-item list keeps that item at the head."""
    from reverse_linked_list import reverse_linked_list
    for _ in range(2):
        linked_list.pop()
    reverse_linked_list(linked_list)
    assert linked_list.head.data == 1
def test_two_in_linked_list(linked_list):
    """Reversing a two-item list leaves 1 at the head."""
    from reverse_linked_list import reverse_linked_list
    linked_list.pop()
    reverse_linked_list(linked_list)
    head = linked_list.head
    assert head.data == 1
def test_reverse_linked_list(linked_list):
    """Reversing the fixture list yields 1 -> 2 -> 3."""
    from reverse_linked_list import reverse_linked_list
    reverse_linked_list(linked_list)
    node = linked_list.head
    for expected in (1, 2, 3):
        assert node.data == expected
        node = node.next_node
def test_long_reverse_linked_list(linked_list):
    """Reversing twice restores the original order on a five-item list."""
    from reverse_linked_list import reverse_linked_list
    linked_list.push(4)
    linked_list.push(5)

    def as_values(lst):
        # Walk the chain to a Python list; stopping at None also asserts that
        # the list is properly terminated.
        values = []
        node = lst.head
        while node is not None:
            values.append(node.data)
            node = node.next_node
        return values

    reverse_linked_list(linked_list)
    assert as_values(linked_list) == [1, 2, 3, 4, 5]
    reverse_linked_list(linked_list)
    assert as_values(linked_list) == [5, 4, 3, 2, 1]
import pytest
@pytest.fixture
def linked_list():
from linked_list import LinkedList
linked_list = LinkedList([1, 2, 3])
return linked_list
def test_empty_linked_list(linked_list):
from reverse_linked_list import reverse_linked_list
linked_list.pop()
linked_list.pop()
linked_list.pop()
with pytest.raises(IndexError):
reverse_linked_list(linked_list)
def test_one_in_linked_list(linked_list):
from reverse_linked_list import reverse_linked_list
linked_list.pop()
linked_list.pop()
reverse_linked_list(linked_list)
assert linked_list.head.data == 1
def test_two_in_linked_list(linked_list):
from reverse_linked_list import reverse_linked_list
linked_list.pop()
reverse_linked_list(linked_list)
assert linked_list.head.data == 1
def test_reverse_linked_list(linked_list):
from reverse_linked_list import reverse_linked_list
reverse_linked_list(linked_list)
assert linked_list.head.data == 1
assert linked_list.head.next_node.data == 2
assert linked_list.head.next_node.next_node.data == 3
def test_long_reverse_linked_list(linked_list):
from reverse_linked_list import reverse_linked_list
linked_list.push(4)
linked_list.push(5)
reverse_linked_list(linked_list)
assert linked_list.head.data == 1
assert linked_list.head.next_node.data == 2
assert linked_list.head.next_node.next_node.data == 3
assert linked_list.head.next_node.next_node.next_node.data == 4
assert linked_list.head.next_node.next_node.next_node.next_node.data == 5
assert linked_list.head.next_node.next_node.next_node.next_node.next_node is None
reverse_linked_list(linked_list)
assert linked_list.head.data == 5
assert linked_list.head.next_node.data == 4
assert linked_list.head.next_node.next_node.data == 3
assert linked_list.head.next_node.next_node.next_node.data == 2
assert linked_list.head.next_node.next_node.next_node.next_node.data == 1
assert linked_list.head.next_node.next_node.next_node.next_node.next_node is None
| true | true |
f71cabbacd1f7c032bc3b010d748f5f29a9c6426 | 442 | py | Python | form.py | GreciaFlores1996/CursoPython | b81edad009ea36786d28ca5781c63df0f5376ac5 | [
"MIT"
] | null | null | null | form.py | GreciaFlores1996/CursoPython | b81edad009ea36786d28ca5781c63df0f5376ac5 | [
"MIT"
] | 1 | 2019-08-20T22:20:45.000Z | 2019-08-20T22:21:38.000Z | form.py | GreciaFlores1996/CursoPython | b81edad009ea36786d28ca5781c63df0f5376ac5 | [
"MIT"
] | null | null | null | from wtforms import Form
from wtforms import StringField
from wtforms import IntegerField
from wtforms.validators import DataRequired
class EmailForm(Form):
name = StringField('name', validators=[DataRequired()])
email = StringField('email', validators=[DataRequired()])
class LoginForm(Form):
username = StringField('username', validators=[DataRequired()])
password = StringField('password', validators=[DataRequired()])
| 29.466667 | 67 | 0.757919 | from wtforms import Form
from wtforms import StringField
from wtforms import IntegerField
from wtforms.validators import DataRequired
class EmailForm(Form):
name = StringField('name', validators=[DataRequired()])
email = StringField('email', validators=[DataRequired()])
class LoginForm(Form):
username = StringField('username', validators=[DataRequired()])
password = StringField('password', validators=[DataRequired()])
| true | true |
f71cabd18bcfe59a84a59dd1c41d1bd8fc494488 | 865 | py | Python | plaidctf/2016/rage/netloop.py | petermuller/plaidctf | 68274ce096c4c8456a566911d23f4672cddb31a3 | [
"MIT"
] | null | null | null | plaidctf/2016/rage/netloop.py | petermuller/plaidctf | 68274ce096c4c8456a566911d23f4672cddb31a3 | [
"MIT"
] | null | null | null | plaidctf/2016/rage/netloop.py | petermuller/plaidctf | 68274ce096c4c8456a566911d23f4672cddb31a3 | [
"MIT"
] | null | null | null | from brute import brute
from pwn import *
from hashlib import md5
import numpy as np
bru = 25
best_imgs = dict()
for p in xrange(1):
for r in xrange(1):
for k in xrange(bru):
conn = remote('localhost', 29281)
salt = conn.recvn(12)
for i in brute(length=6, ramp=False):
if True:
print('Broke md5 with ' + i)
conn.send(i)
break
c1 = np.ones((240, 320), np.float32) * (k)
c2 = np.ones((240, 320), np.float32) * (r)
c3 = np.ones((240, 320), np.float32) * (p)
img = np.array([c1, c2, c3])
#img = img / img.max()
print("Sending whole image", k, r, p)
conn.send(img)
print("Receiving?")
resp = conn.recvline()
print(resp)
if "BAD" not in resp:
best_imgs[(k, r, p)] = img
print(best_imgs.keys())
f = open("keys.txt", 'w')
for key in best_imgs.keys():
f.write(key + '\n')
f.close()
| 24.027778 | 52 | 0.581503 | from brute import brute
from pwn import *
from hashlib import md5
import numpy as np
bru = 25
best_imgs = dict()
for p in xrange(1):
for r in xrange(1):
for k in xrange(bru):
conn = remote('localhost', 29281)
salt = conn.recvn(12)
for i in brute(length=6, ramp=False):
if True:
print('Broke md5 with ' + i)
conn.send(i)
break
c1 = np.ones((240, 320), np.float32) * (k)
c2 = np.ones((240, 320), np.float32) * (r)
c3 = np.ones((240, 320), np.float32) * (p)
img = np.array([c1, c2, c3])
print("Sending whole image", k, r, p)
conn.send(img)
print("Receiving?")
resp = conn.recvline()
print(resp)
if "BAD" not in resp:
best_imgs[(k, r, p)] = img
print(best_imgs.keys())
f = open("keys.txt", 'w')
for key in best_imgs.keys():
f.write(key + '\n')
f.close()
| false | true |
f71cabef85002e6d78fa7bf1e3356fe2e5b10593 | 2,601 | py | Python | src/DSGRN/Query/Database.py | yingxinac/DSGRN | b5bc64e5a99e6d266f6ac5ba7ac9d04954f12d32 | [
"MIT"
] | 9 | 2017-10-15T20:49:36.000Z | 2022-02-24T19:26:39.000Z | src/DSGRN/Query/Database.py | yingxinac/DSGRN | b5bc64e5a99e6d266f6ac5ba7ac9d04954f12d32 | [
"MIT"
] | 19 | 2015-07-02T15:59:06.000Z | 2020-06-09T18:13:05.000Z | src/DSGRN/Query/Database.py | yingxinac/DSGRN | b5bc64e5a99e6d266f6ac5ba7ac9d04954f12d32 | [
"MIT"
] | 21 | 2015-11-06T16:28:34.000Z | 2019-09-20T09:26:54.000Z | import sqlite3
import graphviz
from DSGRN._dsgrn import *
from functools import reduce
from DSGRN.Query.Logging import LogToSTDOUT
class Database:
def __init__(self, database_name):
"""
Initialize a DSGRN database object
"""
self.dbname = database_name
self.conn = sqlite3.connect(database_name)
self.cursor = self.conn.cursor()
# Load network spec from database
sqlexpression = "select Specification from Network"
self.cursor.execute(sqlexpression)
network_spec = self.cursor.fetchone()[0]
# construct network
self.network = Network(network_spec)
self.parametergraph = ParameterGraph(self.network)
# D is the number of network nodes
self.D = self.parametergraph.dimension()
self.names = [ self.network.name(i) for i in range(0, self.D)]
# DSGRN uses an indexing scheme to refer to parameters. It is based on a mixed-radix number scheme
# where the place value of each digit varies according to the number of logic parameters for each node
# and the number of order parameter for each node. Specifically, the ordering of the digits is (from least
# significant) the sizes of each factor graph, followed by the number of permutations of the out-edges for
# each node. We call these "bases" (as in number base) and we compute the place value for each digit.
self.indexing_place_bases = [self.parametergraph.logicsize(i) for i in range(0,self.D)] + [self.parametergraph.ordersize(i) for i in range(0,self.D)]
self.indexing_place_values = reduce ( lambda x, y : x + [x[-1]*y], self.indexing_place_bases[:-1], [1])
def execute(self, expression, parameters = None):
"""
Perform an SQL query.
Returns a "cursor" object (see python sqlite3 API for details)
"""
return self.cursor.execute(expression, parameters)
def __call__(self, pi):
c = self.conn.cursor()
sqlexpression = "select MorseGraphIndex from Signatures where ParameterIndex = ?"
c.execute(sqlexpression,(pi,))
mgi = c.fetchone()[0]
return mgi
def __del__(self):
"""
Commit and close upon destruction
"""
self.conn.commit()
self.conn.close()
def _repr_svg_(self):
return graphviz.Source(self.network.graphviz())._repr_svg_()
def DrawMorseGraph(self, morsegraphindex):
"""
Return an object which renders to a graphviz representation in Jupyter
"""
c = self.conn.cursor()
sqlexpression = "select Graphviz from MorseGraphViz where MorseGraphIndex = ?"
c.execute(sqlexpression,(morsegraphindex,))
gv = c.fetchone()[0]
return graphviz.Source(gv)
| 38.820896 | 153 | 0.708958 | import sqlite3
import graphviz
from DSGRN._dsgrn import *
from functools import reduce
from DSGRN.Query.Logging import LogToSTDOUT
class Database:
def __init__(self, database_name):
self.dbname = database_name
self.conn = sqlite3.connect(database_name)
self.cursor = self.conn.cursor()
sqlexpression = "select Specification from Network"
self.cursor.execute(sqlexpression)
network_spec = self.cursor.fetchone()[0]
self.network = Network(network_spec)
self.parametergraph = ParameterGraph(self.network)
self.D = self.parametergraph.dimension()
self.names = [ self.network.name(i) for i in range(0, self.D)]
self.indexing_place_bases = [self.parametergraph.logicsize(i) for i in range(0,self.D)] + [self.parametergraph.ordersize(i) for i in range(0,self.D)]
self.indexing_place_values = reduce ( lambda x, y : x + [x[-1]*y], self.indexing_place_bases[:-1], [1])
def execute(self, expression, parameters = None):
return self.cursor.execute(expression, parameters)
def __call__(self, pi):
c = self.conn.cursor()
sqlexpression = "select MorseGraphIndex from Signatures where ParameterIndex = ?"
c.execute(sqlexpression,(pi,))
mgi = c.fetchone()[0]
return mgi
def __del__(self):
self.conn.commit()
self.conn.close()
def _repr_svg_(self):
return graphviz.Source(self.network.graphviz())._repr_svg_()
def DrawMorseGraph(self, morsegraphindex):
c = self.conn.cursor()
sqlexpression = "select Graphviz from MorseGraphViz where MorseGraphIndex = ?"
c.execute(sqlexpression,(morsegraphindex,))
gv = c.fetchone()[0]
return graphviz.Source(gv)
| true | true |
f71cacb71c497b993580e8b6ab79d5b35f0c8185 | 7,853 | py | Python | lit_nlp/examples/sst_pytorch_demo.py | johnson7788/lit | 3eb824b01e0f72a5486124b16056bf912465debc | [
"Apache-2.0"
] | 1 | 2021-04-12T22:57:04.000Z | 2021-04-12T22:57:04.000Z | lit_nlp/examples/sst_pytorch_demo.py | johnson7788/lit | 3eb824b01e0f72a5486124b16056bf912465debc | [
"Apache-2.0"
] | 4 | 2022-02-14T19:37:07.000Z | 2022-02-27T20:24:08.000Z | lit_nlp/examples/sst_pytorch_demo.py | haaami01/lit | 3eb824b01e0f72a5486124b16056bf912465debc | [
"Apache-2.0"
] | null | null | null | # Lint as: python3
r"""Code example for a custom model, using PyTorch.
This demo shows how to use a custom model with LIT, in just a few lines of code.
We'll use a transformers model, with a minimal amount of code to implement the
LIT API. Compared to models/glue_models.py, this has fewer features, but the
code is more readable.
This demo is similar in functionality to simple_tf2_demo.py, but uses PyTorch
instead of TensorFlow 2.
The transformers library can load weights from either,
so you can use any saved model compatible with the underlying model class
(AutoModelForSequenceClassification). To train something for this demo, you can:
- Use quickstart_sst_demo.py, and set --model_path to somewhere durable
- Or: Use tools/glue_trainer.py
- Or: Use any fine-tuning code that works with transformers, such as
https://github.com/huggingface/transformers#quick-tour-of-the-fine-tuningusage-scripts
To run locally:
python -m lit_nlp.examples.simple_pytorch_demo \
--port=5432 --model_path=/path/to/saved/model
Then navigate to localhost:5432 to access the demo UI.
NOTE: this demo still uses TensorFlow Datasets (which depends on TensorFlow) to
load the data. However, the output of glue.SST2Data is just NumPy arrays and
plain Python data, and you can easily replace this with a different library or
directly loading from CSV.
"""
import re
from absl import app
from absl import flags
from absl import logging
from lit_nlp import dev_server
from lit_nlp import server_flags
from lit_nlp.api import model as lit_model
from lit_nlp.api import types as lit_types
from lit_nlp.examples.datasets import glue
from lit_nlp.lib import utils
import torch
import transformers
# NOTE: additional flags defined in server_flags.py
FLAGS = flags.FLAGS
flags.DEFINE_string(
"model_path", None,
"Path to trained model, in standard transformers format, e.g. as "
"saved by model.save_pretrained() and tokenizer.save_pretrained()")
def _from_pretrained(cls, *args, **kw):
"""Load a transformers model in PyTorch, with fallback to TF2/Keras weights."""
try:
return cls.from_pretrained(*args, **kw)
except OSError as e:
logging.warning("Caught OSError loading model: %s", e)
logging.warning(
"Re-trying to convert from TensorFlow checkpoint (from_tf=True)")
return cls.from_pretrained(*args, from_tf=True, **kw)
class SimpleSentimentModel(lit_model.Model):
"""Simple sentiment analysis model."""
LABELS = ["0", "1"] # negative, positive
compute_grads: bool = True # if True, compute and return gradients.
def __init__(self, model_name_or_path):
self.tokenizer = transformers.AutoTokenizer.from_pretrained(
model_name_or_path)
model_config = transformers.AutoConfig.from_pretrained(
model_name_or_path,
num_labels=2,
output_hidden_states=True,
output_attentions=True,
)
# This is a just a regular PyTorch model.
self.model = _from_pretrained(
transformers.AutoModelForSequenceClassification,
model_name_or_path,
config=model_config)
self.model.eval()
##
# LIT API implementation
def max_minibatch_size(self):
# This tells lit_model.Model.predict() how to batch inputs to
# predict_minibatch().
# Alternately, you can just override predict() and handle batching yourself.
return 32
def predict_minibatch(self, inputs):
# Preprocess to ids and masks, and make the input batch.
encoded_input = self.tokenizer.batch_encode_plus(
[ex["sentence"] for ex in inputs],
return_tensors="pt",
add_special_tokens=True,
max_length=128,
padding="longest",
truncation="longest_first")
# Check and send to cuda (GPU) if available
if torch.cuda.is_available():
self.model.cuda()
for tensor in encoded_input:
encoded_input[tensor] = encoded_input[tensor].cuda()
# Run a forward pass.
with torch.set_grad_enabled(self.compute_grads):
out: transformers.modeling_outputs.SequenceClassifierOutput = \
self.model(**encoded_input)
# Post-process outputs.
batched_outputs = {
"probas": torch.nn.functional.softmax(out.logits, dim=-1),
"input_ids": encoded_input["input_ids"],
"ntok": torch.sum(encoded_input["attention_mask"], dim=1),
"cls_emb": out.hidden_states[-1][:, 0], # last layer, first token
}
# Add attention layers to batched_outputs
assert len(out.attentions) == self.model.config.num_hidden_layers
for i, layer_attention in enumerate(out.attentions):
batched_outputs[f"layer_{i}/attention"] = layer_attention
# Request gradients after the forward pass.
# Note: hidden_states[0] includes position and segment encodings, as well as
# subword embeddings.
if self.compute_grads:
# <torch.float32>[batch_size, num_tokens, emb_dim]
scalar_pred_for_gradients = torch.max(
batched_outputs["probas"], dim=1, keepdim=False, out=None)[0]
batched_outputs["input_emb_grad"] = torch.autograd.grad(
scalar_pred_for_gradients,
out.hidden_states[0],
grad_outputs=torch.ones_like(scalar_pred_for_gradients))[0]
# Post-process outputs.
# Return as NumPy for further processing.
detached_outputs = {
k: v.cpu().detach().numpy() for k, v in batched_outputs.items()}
# Unbatch outputs so we get one record per input example.
for output in utils.unbatch_preds(detached_outputs):
ntok = output.pop("ntok")
output["tokens"] = self.tokenizer.convert_ids_to_tokens(
output.pop("input_ids")[:ntok])
# set token gradients
if self.compute_grads:
output["token_grad_sentence"] = output["input_emb_grad"][:ntok]
# Process attention.
for key in output:
if not re.match(r"layer_(\d+)/attention", key):
continue
# Select only real tokens, since most of this matrix is padding.
# <float32>[num_heads, max_seq_length, max_seq_length]
# -> <float32>[num_heads, num_tokens, num_tokens]
output[key] = output[key][:, :ntok, :ntok].transpose((0, 2, 1))
# Make a copy of this array to avoid memory leaks, since NumPy otherwise
# keeps a pointer around that prevents the source array from being GCed.
output[key] = output[key].copy()
yield output
def input_spec(self) -> lit_types.Spec:
return {
"sentence": lit_types.TextSegment(),
"label": lit_types.CategoryLabel(vocab=self.LABELS, required=False)
}
def output_spec(self) -> lit_types.Spec:
ret = {
"tokens": lit_types.Tokens(),
"probas": lit_types.MulticlassPreds(parent="label", vocab=self.LABELS),
"cls_emb": lit_types.Embeddings()
}
# Gradients, if requested.
if self.compute_grads:
ret["token_grad_sentence"] = lit_types.TokenGradients(
align="tokens")
# Attention heads, one field for each layer.
for i in range(self.model.config.num_hidden_layers):
ret[f"layer_{i}/attention"] = lit_types.AttentionHeads(
align_in="tokens", align_out="tokens")
return ret
def main(_):
# Normally path is a directory; if it's an archive file, download and
# extract to the transformers cache.
model_path = FLAGS.model_path
if model_path.endswith(".tar.gz"):
model_path = transformers.file_utils.cached_path(
model_path, extract_compressed_file=True)
# Load the model we defined above.
models = {"sst": SimpleSentimentModel(model_path)}
# Load SST-2 validation set from TFDS.
datasets = {"sst_dev": glue.SST2Data("validation")}
# Start the LIT server. See server_flags.py for server options.
lit_demo = dev_server.Server(models, datasets, **server_flags.get_flags())
lit_demo.serve()
if __name__ == "__main__":
app.run(main)
| 37.395238 | 86 | 0.706736 |
import re
from absl import app
from absl import flags
from absl import logging
from lit_nlp import dev_server
from lit_nlp import server_flags
from lit_nlp.api import model as lit_model
from lit_nlp.api import types as lit_types
from lit_nlp.examples.datasets import glue
from lit_nlp.lib import utils
import torch
import transformers
FLAGS = flags.FLAGS
flags.DEFINE_string(
"model_path", None,
"Path to trained model, in standard transformers format, e.g. as "
"saved by model.save_pretrained() and tokenizer.save_pretrained()")
def _from_pretrained(cls, *args, **kw):
try:
return cls.from_pretrained(*args, **kw)
except OSError as e:
logging.warning("Caught OSError loading model: %s", e)
logging.warning(
"Re-trying to convert from TensorFlow checkpoint (from_tf=True)")
return cls.from_pretrained(*args, from_tf=True, **kw)
class SimpleSentimentModel(lit_model.Model):
LABELS = ["0", "1"]
compute_grads: bool = True
def __init__(self, model_name_or_path):
self.tokenizer = transformers.AutoTokenizer.from_pretrained(
model_name_or_path)
model_config = transformers.AutoConfig.from_pretrained(
model_name_or_path,
num_labels=2,
output_hidden_states=True,
output_attentions=True,
)
self.model = _from_pretrained(
transformers.AutoModelForSequenceClassification,
model_name_or_path,
config=model_config)
self.model.eval()
def max_minibatch_size(self):
return 32
def predict_minibatch(self, inputs):
encoded_input = self.tokenizer.batch_encode_plus(
[ex["sentence"] for ex in inputs],
return_tensors="pt",
add_special_tokens=True,
max_length=128,
padding="longest",
truncation="longest_first")
if torch.cuda.is_available():
self.model.cuda()
for tensor in encoded_input:
encoded_input[tensor] = encoded_input[tensor].cuda()
with torch.set_grad_enabled(self.compute_grads):
out: transformers.modeling_outputs.SequenceClassifierOutput = \
self.model(**encoded_input)
batched_outputs = {
"probas": torch.nn.functional.softmax(out.logits, dim=-1),
"input_ids": encoded_input["input_ids"],
"ntok": torch.sum(encoded_input["attention_mask"], dim=1),
"cls_emb": out.hidden_states[-1][:, 0],
}
assert len(out.attentions) == self.model.config.num_hidden_layers
for i, layer_attention in enumerate(out.attentions):
batched_outputs[f"layer_{i}/attention"] = layer_attention
if self.compute_grads:
scalar_pred_for_gradients = torch.max(
batched_outputs["probas"], dim=1, keepdim=False, out=None)[0]
batched_outputs["input_emb_grad"] = torch.autograd.grad(
scalar_pred_for_gradients,
out.hidden_states[0],
grad_outputs=torch.ones_like(scalar_pred_for_gradients))[0]
detached_outputs = {
k: v.cpu().detach().numpy() for k, v in batched_outputs.items()}
for output in utils.unbatch_preds(detached_outputs):
ntok = output.pop("ntok")
output["tokens"] = self.tokenizer.convert_ids_to_tokens(
output.pop("input_ids")[:ntok])
if self.compute_grads:
output["token_grad_sentence"] = output["input_emb_grad"][:ntok]
for key in output:
if not re.match(r"layer_(\d+)/attention", key):
continue
output[key] = output[key][:, :ntok, :ntok].transpose((0, 2, 1))
output[key] = output[key].copy()
yield output
def input_spec(self) -> lit_types.Spec:
return {
"sentence": lit_types.TextSegment(),
"label": lit_types.CategoryLabel(vocab=self.LABELS, required=False)
}
def output_spec(self) -> lit_types.Spec:
ret = {
"tokens": lit_types.Tokens(),
"probas": lit_types.MulticlassPreds(parent="label", vocab=self.LABELS),
"cls_emb": lit_types.Embeddings()
}
if self.compute_grads:
ret["token_grad_sentence"] = lit_types.TokenGradients(
align="tokens")
for i in range(self.model.config.num_hidden_layers):
ret[f"layer_{i}/attention"] = lit_types.AttentionHeads(
align_in="tokens", align_out="tokens")
return ret
def main(_):
# extract to the transformers cache.
model_path = FLAGS.model_path
if model_path.endswith(".tar.gz"):
model_path = transformers.file_utils.cached_path(
model_path, extract_compressed_file=True)
# Load the model we defined above.
models = {"sst": SimpleSentimentModel(model_path)}
# Load SST-2 validation set from TFDS.
datasets = {"sst_dev": glue.SST2Data("validation")}
# Start the LIT server. See server_flags.py for server options.
lit_demo = dev_server.Server(models, datasets, **server_flags.get_flags())
lit_demo.serve()
if __name__ == "__main__":
app.run(main)
| true | true |
f71cad4b7e4b8cf28664ced9914cb52c172687a1 | 16,093 | py | Python | bioinformatics-programs/prepareNames.py | dengzq1234/TREND | 0374da1fbdd3b5236445d62f07ea84485074b437 | [
"MIT"
] | null | null | null | bioinformatics-programs/prepareNames.py | dengzq1234/TREND | 0374da1fbdd3b5236445d62f07ea84485074b437 | [
"MIT"
] | null | null | null | bioinformatics-programs/prepareNames.py | dengzq1234/TREND | 0374da1fbdd3b5236445d62f07ea84485074b437 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# Author: Vadim Gumerov
# 03.04/2019
import sys, getopt
import re
from ete2 import Tree
from Bio import SeqIO
from Bio import Entrez
import time
import collections
import urllib
import json
import math
import multiprocessing
import time
#pip install timeout-decorator
import timeout_decorator
manager = multiprocessing.Manager()
Entrez.email = "A.N.Other@example.com"
# 2 sec
ENTREZ_EFETCH_TIMEOUT = 15
SLEEP_TIME_FOR_NCBI_REQUEST = 0.25
INPUT_FILE = None
OUTPUT_FILE = None
OUTPUT_FILE_WITH_DASHES = None
INPUT_FILE_TREE = None
OUTPUT_FILE_TREE = None
FETCH_FROM_IDS = False
FETCH_FROM_TREE = False
REMOVE_DASHES = True
REGEX_VOID = re.compile(r"(\(|\)|:|,|}|{|'|/|]|\[|\\)")
REGEX_VOID = re.compile(r"(\(|\)|:|,|}|{|'|]|\[|\\)")
REGEX_UNDERSCORE = re.compile(r"( |/|\|)")
REGEX_UNDERSCORE_MULTIPLE = re.compile(r"_{2,}")
REGEX_VOID_SUBST = ""
REGEX_UNDERSCORE_SUBST = "_"
REGEX_LEAF_NAME_1 = re.compile(r"(\('[^\(].+?':|,'[^\(].+?':)")
REGEX_LEAF_NAME_2 = re.compile(r"(\([^\(].+?:|,[^\(].+?:)")
REGEX_NUMBER_UNDERSCORE = re.compile(r"^\d+_")
#~ MIST_BASE_URL = "https://api.mistdb.caltech.edu/v1/genes/"
#~ ASEQ_SEQUENCE_FIELD = "?fields=id,stable_id&fields.Aseq=sequence&fields.Component=definition"
MIST_BASE_URL = "https://api.mistdb.caltech.edu/v1/genes?search="
ASEQ_SEQUENCE_FIELD = "&fields=id,stable_id&fields.Aseq=sequence&fields.Component=definition"
NUMBER_OF_PROCESSES = 30
FETCH_FROM_NCBI = True
FETCH_FROM_MIST = False
USAGE = "\nThe script makes sequence names and/or tree leaves names newick friendly.\n" + \
"Moreover the script fetch sequences by id from the list of ids or from the tree leves.\n\n" + \
"python " + sys.argv[0] + '''
-h || --help - help
-i || --ifile - input file with sequences or id list
-s || --sfile - input file with phylogenetic tree in newick format
-o || --ofile - output file with sequences with changed names (and retrieved seqs if id list was given)
-n || --nfile - output file with tree with changed names of leaves
-b || --bfile - output file with sequences with changed names but not removed dashes; this is a ready alignment
[-f || --fetchFromIds] - fetch sequences from ids or not: "true" or "false". Default is "false".
Only "-f" or "-t" (below) can be used. Not both at the same time.
[-t || --fetchFromTree] - fetch sequences from tree leaves names or not: "true" or "false". Default is "false"
[-c || --proc_num] - number of processes to run simultaneously, default is 30. The number is big bacause the process is not CPU-intensive
[-m || --fetchFromMist] - fetch or not from Mist: "true" or "false". Default is "false".
[-r || --fetchFromNCBI] - fetch or not from NCBI: "true" or "false". Default is "true".
[-d || --removeDashes] - remove or not dashes in sequences: "true" or "false". Default is "true".
'''
def initialize(argv):
global INPUT_FILE, INPUT_FILE_TREE, OUTPUT_FILE, OUTPUT_FILE_WITH_DASHES, OUTPUT_FILE_TREE, FETCH_FROM_IDS, FETCH_FROM_TREE
global NUMBER_OF_PROCESSES, FETCH_FROM_MIST, FETCH_FROM_NCBI, REMOVE_DASHES
try:
opts, args = getopt.getopt(argv[1:],"hi:s:o:b:n:f:t:c:m:r:d:",["help", "ifile=", "sfile=", "ofile=", "nfile=", "bfile=", "fetchFromIds=", \
"fetchFromTree=", "proc_num=", "fetchFromMist=", "fetchFromNCBI=", "removeDashes="])
if len(opts) == 0:
raise getopt.GetoptError("Options are required\n")
except getopt.GetoptError as e:
print "===========ERROR==========\n " + str(e) + USAGE
sys.exit(2)
try:
for opt, arg in opts:
if opt in ("-h", "--help"):
print USAGE
sys.exit()
elif opt in ("-i", "--ifile"):
INPUT_FILE = str(arg).strip()
elif opt in ("-o", "--ofile"):
OUTPUT_FILE = str(arg).strip()
elif opt in ("-b", "--bfile"):
OUTPUT_FILE_WITH_DASHES = str(arg).strip()
elif opt in ("-s", "--sfile"):
INPUT_FILE_TREE = str(arg).strip()
elif opt in ("-n", "--nfile"):
OUTPUT_FILE_TREE = str(arg).strip()
elif opt in ("-f", "--fetchSeqs"):
if str(arg).strip().lower() == "true":
FETCH_FROM_IDS = True
elif opt in ("-t", "--fetchFromTree"):
if str(arg).strip().lower() == "true":
FETCH_FROM_TREE = True
elif opt in ("-m", "--fetchFromMist"):
if str(arg).strip().lower() == "true":
FETCH_FROM_MIST = True
elif opt in ("-r", "--fetchFromNCBI"):
if str(arg).strip().lower() == "false":
FETCH_FROM_NCBI = False
elif opt in ("-c", "--proc_num"):
NUMBER_OF_PROCESSES = int(str(arg).strip())
elif opt in ("-d", "--removeDashes"):
if str(arg).strip().lower() == "false":
REMOVE_DASHES = False
except Exception as e:
print "===========ERROR==========\n " + str(e) + USAGE
sys.exit(2)
# basic name changer
def getChangedName(line):
line = REGEX_VOID.sub(REGEX_VOID_SUBST, line)
line = REGEX_UNDERSCORE.sub(REGEX_UNDERSCORE_SUBST, line)
return REGEX_UNDERSCORE_MULTIPLE.sub(REGEX_UNDERSCORE_SUBST, line)
# Default case for Tree
def getChangedNamesForTree():
if not isInOutOk(INPUT_FILE_TREE, OUTPUT_FILE_TREE, True):
return
with open(INPUT_FILE_TREE, "r") as inputFile:
tree = inputFile.read()
iterObject1 = re.finditer(REGEX_LEAF_NAME_1, tree)
iterObject2 = re.finditer(REGEX_LEAF_NAME_2, tree)
iterObject3 = re.finditer(REGEX_NUMBER_UNDERSCORE, tree)
for match in iterObject1:
tree = tree.replace(match.group()[1:-1], getChangedName(match.group()[2:-2]))
for match in iterObject2:
tree = tree.replace(match.group()[1:-1], getChangedName(match.group()[1:-1]))
#check if we can read the tree
treeObject = Tree(tree)
with open(OUTPUT_FILE_TREE, "w") as outputFile:
outputFile.write(tree)
return treeObject
# Save sequences by Id: this is a final step
def getChangedNamesForSeqsAndSave(handle=False, proteinIdToSeq=False, proteinIdToTrueId=False):
if not isInOutOk(INPUT_FILE, OUTPUT_FILE):
return
savedIds = set()
with open(OUTPUT_FILE, "w") as outputFile:
# default case
if not handle and not proteinIdToSeq:
# if an output file for the alignment with unchanged sequences is not provided:
if not OUTPUT_FILE_WITH_DASHES:
with open(INPUT_FILE, "r") as inputFile:
for sequence in SeqIO.parse(inputFile, "fasta"):
protSeq = str(sequence.seq)
if REMOVE_DASHES:
protSeq = protSeq.replace("-", "")
outputFile.write(">" + getChangedName(sequence.description) + "\n")
outputFile.write(protSeq + "\n")
# else save unchanged sequences to the file provided for the alignment and save sequences after removing dashes
# to a separate file:
else:
with open(INPUT_FILE, "r") as inputFile, open(OUTPUT_FILE_WITH_DASHES, "w") as outputWDashes:
for sequence in SeqIO.parse(inputFile, "fasta"):
protSeq = str(sequence.seq)
outputWDashes.write(">" + getChangedName(sequence.description) + "\n")
outputWDashes.write(protSeq + "\n")
if REMOVE_DASHES:
protSeq = protSeq.replace("-", "")
outputFile.write(">" + getChangedName(sequence.description) + "\n")
outputFile.write(protSeq + "\n")
if handle:
# after retrieving seqeunces by Id from NCBI
print("Handle is present")
for eachRecord in SeqIO.parse(handle, "fasta"):
if eachRecord.id not in savedIds:
protSeq = str(eachRecord.seq)
outputFile.write(">" + getChangedName(eachRecord.description) + "\n")
outputFile.write(protSeq + "\n")
savedIds.add(eachRecord.id)
if proteinIdToSeq:
# after retrieving seqeunces by Id from Mist
for proteinName, seq in proteinIdToSeq.items():
if proteinIdToTrueId[proteinName] not in savedIds:
protSeq = seq
outputFile.write(">" + getChangedName(proteinName) + "\n")
outputFile.write(protSeq + "\n")
savedIds.add(proteinIdToTrueId[proteinName])
####===================================================#####
### Fetching sequences from NCBI using input Id list ###
def prepareIdListFromInput():
proteinIds = set()
with open(INPUT_FILE, "r") as inputFile:
for line in inputFile:
proteinIds.add(line.strip())
proteinIdsMultiProc = manager.list(proteinIds.copy())
return (proteinIds, proteinIdsMultiProc)
def fetchNamesAndSave():
handle = None
proteinIdToSeq = None
proteinIdToTrueId = None
proteinIds, proteinIdsMultiProc = prepareIdListFromInput()
try:
if FETCH_FROM_MIST:
proteinIdToSeq, proteinIdToTrueId = fetchFromMistByIds(proteinIdsMultiProc)
print("Fetching from MiST finished OK.")
except Exception, e:
print("Error while fetching from MiST.")
print (e)
finally:
try:
if FETCH_FROM_NCBI:
print("Will try to fetch from NCBI.")
handle = getHandleOfFetchedSequencesFromNcbi(proteinIds)
print("Fetching from NCBI finished OK.")
if FETCH_FROM_MIST and proteinIdToSeq:
getChangedNamesForSeqsAndSave(handle, proteinIdToSeq, proteinIdToTrueId)
else:
getChangedNamesForSeqsAndSave(handle=handle)
except Exception, e:
print("Error while fetching from NCBI.")
print (e)
if FETCH_FROM_MIST and proteinIdToSeq:
getChangedNamesForSeqsAndSave(handle, proteinIdToSeq, proteinIdToTrueId)
finally:
if handle:
handle.close()
# ============== Fetch from MiST# ============== #
def fetchFromMistByIds(proteinIds, proteinIdsToOrigNames=None):
if not proteinIds:
return None
print("Fetching from MiST using ids")
proteinIdToSeq = manager.dict()
proteinIdToTrueId = manager.dict()
elementsForOneThread = int(math.ceil(len(proteinIds)/float(NUMBER_OF_PROCESSES)))
processes = list()
startIndex = 0
endIndex = elementsForOneThread
for ind in xrange(NUMBER_OF_PROCESSES):
if startIndex <= len(proteinIds):
process = multiprocessing.Process(target=fetchFromMist, \
args=(proteinIdToSeq, proteinIdToTrueId, proteinIds[startIndex:endIndex], proteinIdsToOrigNames,))
processes.append(process)
startIndex = endIndex
endIndex = endIndex + elementsForOneThread
for proc in processes:
proc.start()
for proc in processes:
proc.join()
print("Fetched from MiST OK using ids")
return (proteinIdToSeq.copy(), proteinIdToTrueId.copy())
def fetchFromMist(proteinIdToSeq, proteinIdToTrueId, multiProcList, proteinIdsToOrigNames=None):
for proteinId in multiProcList:
proteinId = proteinId.strip()
preparedUrl = MIST_BASE_URL + proteinId + ASEQ_SEQUENCE_FIELD
result = urllib.urlopen(preparedUrl).read()
proteinDict = json.loads(result)
if len(proteinDict):
proteinDict = proteinDict[0]
if "Aseq" in proteinDict and "sequence" in proteinDict["Aseq"]:
proteinSeq = proteinDict["Aseq"]["sequence"]
if not proteinIdsToOrigNames:
if "Component" in proteinDict and "definition" in proteinDict["Component"]:
proteinName = proteinId + "_" + proteinDict["Component"]["definition"].split(",")[0]
proteinIdToSeq[proteinName] = proteinSeq
proteinIdToTrueId[proteinName] = proteinId
else:
for proteinName in proteinIdsToOrigNames[proteinId]:
proteinIdToSeq[proteinName] = proteinSeq
# ============== Fetch from MiST# ============== #
####===================================================#####
### Fetching sequences from NCBI using Tree leaves names ###
def fetchSequencesForTreeAndSave(treeObject):
    """Fetch sequences for every leaf of treeObject and write them to OUTPUT_FILE.

    Each leaf name is split on '_' and progressively longer prefixes are
    used as candidate ids: the first token (set 2), the first two tokens
    (set 1) and - MiST ids only - the first three tokens (set 3).  The
    FETCH_FROM_NCBI / FETCH_FROM_MIST flags select which services are
    queried for which candidate sets.
    """
    proteinIds1 = set()
    proteinIds2 = set()
    proteinIds03 = set()
    proteinIdsToSeq1 = None
    proteinIdsToSeq2 = None
    proteinIdsToSeq3 = None
    proteinIdsToSeq1ForMist = None
    proteinIdsToSeq2ForMist = None
    proteinIdsToSeq3ForMist = None
    # Remember which original leaf names produced each candidate id so the
    # fetched sequence can be saved under every original name.
    proteinIdsToOrigNames1 = collections.defaultdict(set)
    proteinIdsToOrigNames2 = collections.defaultdict(set)
    proteinIdsToOrigNames3 = collections.defaultdict(set)
    terminals = treeObject.get_leaves()
    for protein in terminals:
        originalProteinName = protein.name.strip("'")
        # Drop a leading "<number>_" uniquifier before splitting.
        fullProteinName = REGEX_NUMBER_UNDERSCORE.sub("", originalProteinName)
        fullProteinNameSplitted = fullProteinName.split("_")
        partProteinName = fullProteinNameSplitted[0].strip()
        proteinIds2.add(partProteinName)
        proteinIdsToOrigNames2[partProteinName].add(originalProteinName)
        if len(fullProteinNameSplitted) >= 2:
            partProteinName = "_".join(fullProteinNameSplitted[0:2]).strip()
            proteinIds1.add(partProteinName)
            proteinIdsToOrigNames1[partProteinName].add(originalProteinName)
        # this case can happen only for MiST Ids
        if len(fullProteinNameSplitted) >= 3:
            partProteinName = "_".join(fullProteinNameSplitted[0:3]).strip()
            proteinIds03.add(partProteinName)
            proteinIdsToOrigNames3[partProteinName].add(originalProteinName)
    # Manager-backed lists so the id chunks can be shared with the
    # multiprocessing workers used for MiST fetching.
    proteinIdsForMist1 = manager.list(proteinIds1)
    proteinIdsForMist2 = manager.list(proteinIds2)
    proteinIdsForMist3 = manager.list(proteinIds03)
    if FETCH_FROM_NCBI:
        proteinIdsToSeq1 = fetchSequencesAndGetNameToSeqMap(proteinIds1, proteinIdsToOrigNames1)
        proteinIdsToSeq2 = fetchSequencesAndGetNameToSeqMap(proteinIds2, proteinIdsToOrigNames2)
    if FETCH_FROM_MIST:
        # fetchFromMistByIds returns (name->seq, name->id) or None; keep
        # only the name->seq mapping after the None check.
        proteinIdsToSeq3ForMist = fetchFromMistByIds(proteinIdsForMist3, proteinIdsToOrigNames3)
        if proteinIdsToSeq3ForMist:
            proteinIdsToSeq3ForMist = proteinIdsToSeq3ForMist[0]
        if not FETCH_FROM_NCBI:
            # BUGFIX: the original appended [0] to the call AND indexed the
            # result with [0] again afterwards, so it double-unpacked the
            # tuple and crashed with TypeError when the fetch returned None.
            # Unpack exactly like the 3-token case above instead.
            proteinIdsToSeq1ForMist = fetchFromMistByIds(proteinIdsForMist1, proteinIdsToOrigNames1)
            if proteinIdsToSeq1ForMist:
                proteinIdsToSeq1ForMist = proteinIdsToSeq1ForMist[0]
            proteinIdsToSeq2ForMist = fetchFromMistByIds(proteinIdsForMist2, proteinIdsToOrigNames2)
            if proteinIdsToSeq2ForMist:
                proteinIdsToSeq2ForMist = proteinIdsToSeq2ForMist[0]
    with open(OUTPUT_FILE, "w") as outputFile:
        if FETCH_FROM_NCBI:
            saveFetchedSeqsForTree(proteinIdsToSeq1, outputFile)
            saveFetchedSeqsForTree(proteinIdsToSeq2, outputFile)
        if FETCH_FROM_MIST:
            saveFetchedSeqsForTree(proteinIdsToSeq3ForMist, outputFile)
            if not FETCH_FROM_NCBI:
                saveFetchedSeqsForTree(proteinIdsToSeq1ForMist, outputFile)
                saveFetchedSeqsForTree(proteinIdsToSeq2ForMist, outputFile)
def fetchSequencesAndGetNameToSeqMap(proteinIds, proteinIdsToOrigNames):
    """Fetch proteinIds from NCBI and map original leaf names to sequences.

    :param proteinIds: candidate ids to fetch in a single efetch call
    :param proteinIdsToOrigNames: dict id -> set of original leaf names
    :return: dict {original leaf name: sequence} (possibly empty when the
        fetch failed), or None when proteinIds is empty
    """
    proteinIdsSeqs = dict()
    proteinIdsHandle = None
    if not proteinIds:
        return None
    try:
        proteinIdsHandle = getHandleOfFetchedSequencesFromNcbi(proteinIds)
        if proteinIdsHandle:
            print("NCBI hanlde is present")
            for eachRecord in SeqIO.parse(proteinIdsHandle, "fasta"):
                # One fetched record may map back to several leaf names.
                for name in proteinIdsToOrigNames[eachRecord.id]:
                    seq = str(eachRecord.seq)
                    proteinIdsSeqs[name] = seq
    except Exception as e:  # BUGFIX: "except Exception, e" is Python-2-only syntax
        print(e)
    finally:
        if proteinIdsHandle:
            proteinIdsHandle.close()
    return proteinIdsSeqs
def saveFetchedSeqsForTree(proteinIdsToSeq, outputFile):
    """Append the given name -> sequence mapping to outputFile as FASTA.

    Does nothing when the mapping is None or empty.
    """
    if not proteinIdsToSeq:
        return
    for header, sequence in proteinIdsToSeq.items():
        outputFile.write(">%s\n" % header)
        outputFile.write("%s\n" % sequence)
####===================================================#####
# A generic fetcher. It returns a handle of fetched records
@timeout_decorator.timeout(ENTREZ_EFETCH_TIMEOUT, use_signals=True)
def getHandleOfFetchedSequencesFromNcbi(proteinIds):
    """Fetch all proteinIds from NCBI in one efetch call.

    :return: an open handle with the FASTA records, or None when the fetch
        failed (the decorator enforces ENTREZ_EFETCH_TIMEOUT seconds).
    """
    handle = None
    try:
        #handle = Entrez.efetch(db="protein", id="OYV75139.1,ACR67403.1", rettype="fasta", retmode="text")
        handle = Entrez.efetch(db="protein", id=",".join(proteinIds), rettype="fasta", retmode="text")
        print("Fetched from NCBI OK")
    except Exception as e:  # BUGFIX: "except Exception, e" is Python-2-only syntax
        print ("Couldn't retrieve sequences by id from NCBI in time")
        if handle:
            handle.close()
        # BUGFIX: never return a closed handle -- callers treat any truthy
        # return value as a usable handle.
        handle = None
    return handle
def isInOutOk(inputFile, outputFile, isTree=False):
    """Check that both an input and an output file name were supplied.

    Returns True (announcing the operation) when both names are present.
    Returns False otherwise; when exactly one name is missing a hint is
    printed (suppressed for sequence mode while fetching from a tree).
    """
    if not inputFile and not outputFile:
        return False
    if not (inputFile and outputFile):
        if isTree:
            print("Both input file name with the tree in newick format and output file name should be provided!")
        elif not FETCH_FROM_TREE:
            print("Both input file name with the sequenes and output file name should be provided!")
        return False
    print("Changing tree leaves names" if isTree else "Changing sequences names")
    return True
def main(argv):
    """Entry point: parse options, then run the mode selected by the flags."""
    # initialize() fills the module-level flags/paths; everything below is
    # driven entirely by those globals.
    initialize(argv)
    if FETCH_FROM_IDS and FETCH_FROM_TREE:
        # The two fetch sources are mutually exclusive.
        print("You can't fetch both using id list and tree leaves names! Exiting")
        return
    if FETCH_FROM_IDS:
        # Input file is a list of ids: fetch the sequences and save them.
        fetchNamesAndSave()
    elif not FETCH_FROM_TREE:
        # Plain FASTA input: only sanitize the sequence names.
        getChangedNamesForSeqsAndSave()
    # Sanitize the tree leaf names (returns early when no tree was given).
    treeObject = getChangedNamesForTree()
    if FETCH_FROM_TREE:
        # Use the (sanitized) leaf names as ids to fetch sequences.
        fetchSequencesForTreeAndSave(treeObject)
if __name__ == "__main__":
    main(sys.argv)
| 38.685096 | 143 | 0.7182 |
import sys, getopt
import re
from ete2 import Tree
from Bio import SeqIO
from Bio import Entrez
import time
import collections
import urllib
import json
import math
import multiprocessing
import time
import timeout_decorator
manager = multiprocessing.Manager()
Entrez.email = "A.N.Other@example.com"
ENTREZ_EFETCH_TIMEOUT = 15
SLEEP_TIME_FOR_NCBI_REQUEST = 0.25
INPUT_FILE = None
OUTPUT_FILE = None
OUTPUT_FILE_WITH_DASHES = None
INPUT_FILE_TREE = None
OUTPUT_FILE_TREE = None
FETCH_FROM_IDS = False
FETCH_FROM_TREE = False
REMOVE_DASHES = True
REGEX_VOID = re.compile(r"(\(|\)|:|,|}|{|'|/|]|\[|\\)")
REGEX_VOID = re.compile(r"(\(|\)|:|,|}|{|'|]|\[|\\)")
REGEX_UNDERSCORE = re.compile(r"( |/|\|)")
REGEX_UNDERSCORE_MULTIPLE = re.compile(r"_{2,}")
REGEX_VOID_SUBST = ""
REGEX_UNDERSCORE_SUBST = "_"
REGEX_LEAF_NAME_1 = re.compile(r"(\('[^\(].+?':|,'[^\(].+?':)")
REGEX_LEAF_NAME_2 = re.compile(r"(\([^\(].+?:|,[^\(].+?:)")
REGEX_NUMBER_UNDERSCORE = re.compile(r"^\d+_")
MIST_BASE_URL = "https://api.mistdb.caltech.edu/v1/genes?search="
ASEQ_SEQUENCE_FIELD = "&fields=id,stable_id&fields.Aseq=sequence&fields.Component=definition"
NUMBER_OF_PROCESSES = 30
FETCH_FROM_NCBI = True
FETCH_FROM_MIST = False
USAGE = "\nThe script makes sequence names and/or tree leaves names newick friendly.\n" + \
"Moreover the script fetch sequences by id from the list of ids or from the tree leves.\n\n" + \
"python " + sys.argv[0] + '''
-h || --help - help
-i || --ifile - input file with sequences or id list
-s || --sfile - input file with phylogenetic tree in newick format
-o || --ofile - output file with sequences with changed names (and retrieved seqs if id list was given)
-n || --nfile - output file with tree with changed names of leaves
-b || --bfile - output file with sequences with changed names but not removed dashes; this is a ready alignment
[-f || --fetchFromIds] - fetch sequences from ids or not: "true" or "false". Default is "false".
Only "-f" or "-t" (below) can be used. Not both at the same time.
[-t || --fetchFromTree] - fetch sequences from tree leaves names or not: "true" or "false". Default is "false"
[-c || --proc_num] - number of processes to run simultaneously, default is 30. The number is big bacause the process is not CPU-intensive
[-m || --fetchFromMist] - fetch or not from Mist: "true" or "false". Default is "false".
[-r || --fetchFromNCBI] - fetch or not from NCBI: "true" or "false". Default is "true".
[-d || --removeDashes] - remove or not dashes in sequences: "true" or "false". Default is "true".
'''
def initialize(argv):
global INPUT_FILE, INPUT_FILE_TREE, OUTPUT_FILE, OUTPUT_FILE_WITH_DASHES, OUTPUT_FILE_TREE, FETCH_FROM_IDS, FETCH_FROM_TREE
global NUMBER_OF_PROCESSES, FETCH_FROM_MIST, FETCH_FROM_NCBI, REMOVE_DASHES
try:
opts, args = getopt.getopt(argv[1:],"hi:s:o:b:n:f:t:c:m:r:d:",["help", "ifile=", "sfile=", "ofile=", "nfile=", "bfile=", "fetchFromIds=", \
"fetchFromTree=", "proc_num=", "fetchFromMist=", "fetchFromNCBI=", "removeDashes="])
if len(opts) == 0:
raise getopt.GetoptError("Options are required\n")
except getopt.GetoptError as e:
print "===========ERROR==========\n " + str(e) + USAGE
sys.exit(2)
try:
for opt, arg in opts:
if opt in ("-h", "--help"):
print USAGE
sys.exit()
elif opt in ("-i", "--ifile"):
INPUT_FILE = str(arg).strip()
elif opt in ("-o", "--ofile"):
OUTPUT_FILE = str(arg).strip()
elif opt in ("-b", "--bfile"):
OUTPUT_FILE_WITH_DASHES = str(arg).strip()
elif opt in ("-s", "--sfile"):
INPUT_FILE_TREE = str(arg).strip()
elif opt in ("-n", "--nfile"):
OUTPUT_FILE_TREE = str(arg).strip()
elif opt in ("-f", "--fetchSeqs"):
if str(arg).strip().lower() == "true":
FETCH_FROM_IDS = True
elif opt in ("-t", "--fetchFromTree"):
if str(arg).strip().lower() == "true":
FETCH_FROM_TREE = True
elif opt in ("-m", "--fetchFromMist"):
if str(arg).strip().lower() == "true":
FETCH_FROM_MIST = True
elif opt in ("-r", "--fetchFromNCBI"):
if str(arg).strip().lower() == "false":
FETCH_FROM_NCBI = False
elif opt in ("-c", "--proc_num"):
NUMBER_OF_PROCESSES = int(str(arg).strip())
elif opt in ("-d", "--removeDashes"):
if str(arg).strip().lower() == "false":
REMOVE_DASHES = False
except Exception as e:
print "===========ERROR==========\n " + str(e) + USAGE
sys.exit(2)
def getChangedName(line):
    """Return line made newick-safe.

    Forbidden characters (brackets, quotes, colons, commas, ...) are
    removed, separators become underscores, and runs of underscores are
    collapsed to one.
    """
    noForbidden = REGEX_VOID.sub(REGEX_VOID_SUBST, line)
    underscored = REGEX_UNDERSCORE.sub(REGEX_UNDERSCORE_SUBST, noForbidden)
    return REGEX_UNDERSCORE_MULTIPLE.sub(REGEX_UNDERSCORE_SUBST, underscored)
def getChangedNamesForTree():
    """Sanitize the leaf names of the newick tree in INPUT_FILE_TREE.

    Writes the rewritten newick string to OUTPUT_FILE_TREE and returns the
    parsed Tree object, or None when the in/out file names are missing.
    """
    if not isInOutOk(INPUT_FILE_TREE, OUTPUT_FILE_TREE, True):
        return
    with open(INPUT_FILE_TREE, "r") as inputFile:
        tree = inputFile.read()
    iterObject1 = re.finditer(REGEX_LEAF_NAME_1, tree)
    iterObject2 = re.finditer(REGEX_LEAF_NAME_2, tree)
    # CLEANUP: the original also built re.finditer(REGEX_NUMBER_UNDERSCORE, tree)
    # into an iterObject3 that was never consumed; removed as dead code.
    for match in iterObject1:
        # Quoted leaf: strip the surrounding quotes before sanitizing.
        tree = tree.replace(match.group()[1:-1], getChangedName(match.group()[2:-2]))
    for match in iterObject2:
        tree = tree.replace(match.group()[1:-1], getChangedName(match.group()[1:-1]))
    treeObject = Tree(tree)
    with open(OUTPUT_FILE_TREE, "w") as outputFile:
        outputFile.write(tree)
    return treeObject
def getChangedNamesForSeqsAndSave(handle=False, proteinIdToSeq=False, proteinIdToTrueId=False):
    """Write sequences with sanitized names to OUTPUT_FILE.

    Three sources are supported, in this order:
      * with no arguments: re-write the FASTA records from INPUT_FILE
        (optionally also to OUTPUT_FILE_WITH_DASHES with dashes kept);
      * handle: an open NCBI fetch handle with FASTA records;
      * proteinIdToSeq/proteinIdToTrueId: sequences fetched from MiST.
    savedIds de-duplicates by true id across the handle and MiST sources.
    False is used as the "not provided" sentinel for all three parameters.
    """
    if not isInOutOk(INPUT_FILE, OUTPUT_FILE):
        return
    savedIds = set()
    with open(OUTPUT_FILE, "w") as outputFile:
        if not handle and not proteinIdToSeq:
            # Local-file mode: sanitize names of the sequences in INPUT_FILE.
            if not OUTPUT_FILE_WITH_DASHES:
                with open(INPUT_FILE, "r") as inputFile:
                    for sequence in SeqIO.parse(inputFile, "fasta"):
                        protSeq = str(sequence.seq)
                        if REMOVE_DASHES:
                            protSeq = protSeq.replace("-", "")
                        outputFile.write(">" + getChangedName(sequence.description) + "\n")
                        outputFile.write(protSeq + "\n")
            else:
                # Also keep an aligned copy (dashes preserved) next to the
                # dash-stripped main output.
                with open(INPUT_FILE, "r") as inputFile, open(OUTPUT_FILE_WITH_DASHES, "w") as outputWDashes:
                    for sequence in SeqIO.parse(inputFile, "fasta"):
                        protSeq = str(sequence.seq)
                        outputWDashes.write(">" + getChangedName(sequence.description) + "\n")
                        outputWDashes.write(protSeq + "\n")
                        if REMOVE_DASHES:
                            protSeq = protSeq.replace("-", "")
                        outputFile.write(">" + getChangedName(sequence.description) + "\n")
                        outputFile.write(protSeq + "\n")
        if handle:
            # NCBI records: saved first, wins over MiST on duplicate ids.
            print("Handle is present")
            for eachRecord in SeqIO.parse(handle, "fasta"):
                if eachRecord.id not in savedIds:
                    protSeq = str(eachRecord.seq)
                    outputFile.write(">" + getChangedName(eachRecord.description) + "\n")
                    outputFile.write(protSeq + "\n")
                    savedIds.add(eachRecord.id)
        if proteinIdToSeq:
            # MiST records: skip anything already saved from the handle.
            for proteinName, seq in proteinIdToSeq.items():
                if proteinIdToTrueId[proteinName] not in savedIds:
                    protSeq = seq
                    outputFile.write(">" + getChangedName(proteinName) + "\n")
                    outputFile.write(protSeq + "\n")
                    savedIds.add(proteinIdToTrueId[proteinName])
eq = None
proteinIdToTrueId = None
proteinIds, proteinIdsMultiProc = prepareIdListFromInput()
try:
if FETCH_FROM_MIST:
proteinIdToSeq, proteinIdToTrueId = fetchFromMistByIds(proteinIdsMultiProc)
print("Fetching from MiST finished OK.")
except Exception, e:
print("Error while fetching from MiST.")
print (e)
finally:
try:
if FETCH_FROM_NCBI:
print("Will try to fetch from NCBI.")
handle = getHandleOfFetchedSequencesFromNcbi(proteinIds)
print("Fetching from NCBI finished OK.")
if FETCH_FROM_MIST and proteinIdToSeq:
getChangedNamesForSeqsAndSave(handle, proteinIdToSeq, proteinIdToTrueId)
else:
getChangedNamesForSeqsAndSave(handle=handle)
except Exception, e:
print("Error while fetching from NCBI.")
print (e)
if FETCH_FROM_MIST and proteinIdToSeq:
getChangedNamesForSeqsAndSave(handle, proteinIdToSeq, proteinIdToTrueId)
finally:
if handle:
handle.close()
yIds(proteinIds, proteinIdsToOrigNames=None):
if not proteinIds:
return None
print("Fetching from MiST using ids")
proteinIdToSeq = manager.dict()
proteinIdToTrueId = manager.dict()
elementsForOneThread = int(math.ceil(len(proteinIds)/float(NUMBER_OF_PROCESSES)))
processes = list()
startIndex = 0
endIndex = elementsForOneThread
for ind in xrange(NUMBER_OF_PROCESSES):
if startIndex <= len(proteinIds):
process = multiprocessing.Process(target=fetchFromMist, \
args=(proteinIdToSeq, proteinIdToTrueId, proteinIds[startIndex:endIndex], proteinIdsToOrigNames,))
processes.append(process)
startIndex = endIndex
endIndex = endIndex + elementsForOneThread
for proc in processes:
proc.start()
for proc in processes:
proc.join()
print("Fetched from MiST OK using ids")
return (proteinIdToSeq.copy(), proteinIdToTrueId.copy())
def fetchFromMist(proteinIdToSeq, proteinIdToTrueId, multiProcList, proteinIdsToOrigNames=None):
for proteinId in multiProcList:
proteinId = proteinId.strip()
preparedUrl = MIST_BASE_URL + proteinId + ASEQ_SEQUENCE_FIELD
result = urllib.urlopen(preparedUrl).read()
proteinDict = json.loads(result)
if len(proteinDict):
proteinDict = proteinDict[0]
if "Aseq" in proteinDict and "sequence" in proteinDict["Aseq"]:
proteinSeq = proteinDict["Aseq"]["sequence"]
if not proteinIdsToOrigNames:
if "Component" in proteinDict and "definition" in proteinDict["Component"]:
proteinName = proteinId + "_" + proteinDict["Component"]["definition"].split(",")[0]
proteinIdToSeq[proteinName] = proteinSeq
proteinIdToTrueId[proteinName] = proteinId
else:
for proteinName in proteinIdsToOrigNames[proteinId]:
proteinIdToSeq[proteinName] = proteinSeq
ctions.defaultdict(set)
terminals = treeObject.get_leaves()
for protein in terminals:
originalProteinName = protein.name.strip("'")
fullProteinName = REGEX_NUMBER_UNDERSCORE.sub("", originalProteinName)
fullProteinNameSplitted = fullProteinName.split("_")
partProteinName = fullProteinNameSplitted[0].strip()
proteinIds2.add(partProteinName)
proteinIdsToOrigNames2[partProteinName].add(originalProteinName)
if len(fullProteinNameSplitted) >= 2:
partProteinName = "_".join(fullProteinNameSplitted[0:2]).strip()
proteinIds1.add(partProteinName)
proteinIdsToOrigNames1[partProteinName].add(originalProteinName)
# this case can happen only for MiST Ids
if len(fullProteinNameSplitted) >= 3:
partProteinName = "_".join(fullProteinNameSplitted[0:3]).strip()
proteinIds03.add(partProteinName)
proteinIdsToOrigNames3[partProteinName].add(originalProteinName)
# multiporcess dicts from usual sets to fetch from MiST
proteinIdsForMist1 = manager.list(proteinIds1)
proteinIdsForMist2 = manager.list(proteinIds2)
proteinIdsForMist3 = manager.list(proteinIds03)
if FETCH_FROM_NCBI:
proteinIdsToSeq1 = fetchSequencesAndGetNameToSeqMap(proteinIds1, proteinIdsToOrigNames1)
proteinIdsToSeq2 = fetchSequencesAndGetNameToSeqMap(proteinIds2, proteinIdsToOrigNames2)
if FETCH_FROM_MIST:
proteinIdsToSeq3ForMist = fetchFromMistByIds(proteinIdsForMist3, proteinIdsToOrigNames3)
if proteinIdsToSeq3ForMist:
proteinIdsToSeq3ForMist = proteinIdsToSeq3ForMist[0]
if not FETCH_FROM_NCBI:
proteinIdsToSeq1ForMist = fetchFromMistByIds(proteinIdsForMist1, proteinIdsToOrigNames1)[0]
if proteinIdsToSeq1ForMist:
proteinIdsToSeq1ForMist = proteinIdsToSeq1ForMist[0]
proteinIdsToSeq2ForMist = fetchFromMistByIds(proteinIdsForMist2, proteinIdsToOrigNames2)[0]
if proteinIdsToSeq2ForMist:
proteinIdsToSeq2ForMist = proteinIdsToSeq2ForMist[0]
with open(OUTPUT_FILE, "w") as outputFile:
if FETCH_FROM_NCBI:
saveFetchedSeqsForTree(proteinIdsToSeq1, outputFile)
saveFetchedSeqsForTree(proteinIdsToSeq2, outputFile)
if FETCH_FROM_MIST:
saveFetchedSeqsForTree(proteinIdsToSeq3ForMist, outputFile)
if not FETCH_FROM_NCBI:
saveFetchedSeqsForTree(proteinIdsToSeq1ForMist, outputFile)
saveFetchedSeqsForTree(proteinIdsToSeq2ForMist, outputFile)
def fetchSequencesAndGetNameToSeqMap(proteinIds, proteinIdsToOrigNames):
proteinIdsSeqs = dict()
proteinIdsHandle = None
if not proteinIds:
return None
try:
proteinIdsHandle = getHandleOfFetchedSequencesFromNcbi(proteinIds)
if proteinIdsHandle:
print("NCBI hanlde is present")
for eachRecord in SeqIO.parse(proteinIdsHandle, "fasta"):
for name in proteinIdsToOrigNames[eachRecord.id]:
seq = str(eachRecord.seq)
proteinIdsSeqs[name] = seq
except Exception, e:
print(e)
finally:
if proteinIdsHandle:
proteinIdsHandle.close()
return proteinIdsSeqs
def saveFetchedSeqsForTree(proteinIdsToSeq, outputFile):
if proteinIdsToSeq and len(proteinIdsToSeq):
for name, seqs in proteinIdsToSeq.items():
outputFile.write(">" + name + "\n")
outputFile.write(seqs + "\n")
####===================================================#####
# A generic fetcher. It returns a handle of fetched records
@timeout_decorator.timeout(ENTREZ_EFETCH_TIMEOUT, use_signals=True)
def getHandleOfFetchedSequencesFromNcbi(proteinIds):
handle = None
try:
#handle = Entrez.efetch(db="protein", id="OYV75139.1,ACR67403.1", rettype="fasta", retmode="text")
handle = Entrez.efetch(db="protein", id=",".join(proteinIds), rettype="fasta", retmode="text")
print("Fetched from NCBI OK")
except Exception, e:
print ("Couldn't retrieve sequences by id from NCBI in time")
if handle:
handle.close()
return handle
def isInOutOk(inputFile, outputFile, isTree=False):
if not inputFile and not outputFile:
return False
if not inputFile or not outputFile:
if isTree:
print("Both input file name with the tree in newick format and output file name should be provided!")
elif not FETCH_FROM_TREE:
print("Both input file name with the sequenes and output file name should be provided!")
return False
if isTree:
print("Changing tree leaves names")
else:
print("Changing sequences names")
return True
def main(argv):
initialize(argv)
if FETCH_FROM_IDS and FETCH_FROM_TREE:
print("You can't fetch both using id list and tree leaves names! Exiting")
return
if FETCH_FROM_IDS:
fetchNamesAndSave()
elif not FETCH_FROM_TREE:
getChangedNamesForSeqsAndSave()
treeObject = getChangedNamesForTree()
if FETCH_FROM_TREE:
fetchSequencesForTreeAndSave(treeObject)
if __name__ == "__main__":
main(sys.argv)
| false | true |
f71cae616991607462e2bfde3a5cc705076fafbc | 6,982 | py | Python | geektime_ebook_maker/spider/mini_spider.py | fakeYanss/geektime_ebook_maker | b536f3bdaf84f8180aac1d2601be8058e0e91115 | [
"MIT"
] | 33 | 2018-08-13T02:52:15.000Z | 2018-10-16T03:38:11.000Z | geektime_ebook_maker/spider/mini_spider.py | fakeYanss/geektime_ebook_maker | b536f3bdaf84f8180aac1d2601be8058e0e91115 | [
"MIT"
] | null | null | null | geektime_ebook_maker/spider/mini_spider.py | fakeYanss/geektime_ebook_maker | b536f3bdaf84f8180aac1d2601be8058e0e91115 | [
"MIT"
] | 4 | 2018-08-13T05:26:11.000Z | 2018-09-06T09:59:52.000Z | # coding=utf8
import os
from threading import Thread
try:
from queue import Queue, Empty as QueueEmpty
except ImportError:
from Queue import Queue, Empty as QueueEmpty
import requests
import logging
import traceback
error_logger = logging.getLogger('error')
error_logger.setLevel(logging.ERROR)
ERROR_STATUS = -1
def error_catch(func):
    """Decorator: call func; on failure log the traceback and return ERROR_STATUS.

    BUGFIX: the original used a bare "except:", which also swallowed
    KeyboardInterrupt and SystemExit; only Exception is caught now so
    those still propagate.
    """
    def wrap(*args, **kwargs):
        try:
            result = func(*args, **kwargs)
            return result
        except Exception:
            error_logger.error(traceback.format_exc())
            return ERROR_STATUS
    return wrap
def fetch(url, method='GET', **kwargs):
    """
    Download url and return the decoded response body.

    Mirrors the requests.request signature: every keyword accepted by
    requests.request (params, data, headers, cookies, files, auth,
    timeout, allow_redirects, proxies, verify, stream, cert, ...) is
    passed straight through.

    :param url: address to fetch
    :param method: HTTP method for the request (default 'GET')
    :return: response body as text (str, or unicode on Python 2)
    """
    response = requests.request(method, url, **kwargs)
    return response.text
class Spider(object):
    """A small multithreaded crawler built around three queues.

    Work flows q_fetch -> q_parse -> q_save, each stage served by its own
    pool of worker threads.  A worker exits after its queue has stayed
    empty for 5 seconds.  Stage callables are wrapped with error_catch, so
    failures are logged and signalled via ERROR_STATUS instead of raising.
    """

    def __init__(self, parse_func, save_func, worker_num=5):
        """
        :param parse_func: parse(url, request_params, html_content) ->
            (new_url_to_fetch_list, content_to_save) or ERROR_STATUS
        :param save_func: save(url, request_params, content); returns
            ERROR_STATUS on failure
        :param worker_num: threads per stage (generalizes the previously
            hard-coded pool size of 5; default keeps the old behavior)
        """
        self.q_fetch = Queue()  # element (url, request_params_dict)
        self.q_parse = Queue()  # element (url, request_params_dict, {'html_content': ...})
        self.q_save = Queue()   # element (url, request_params_dict, {'content_to_save': ...})
        self.worker_num = worker_num
        self._fetch = error_catch(fetch)
        self._parse = error_catch(parse_func)
        self._save = error_catch(save_func)

    def set_start_url(self, url, **kw):
        """Seed the crawl with url; kw are forwarded to requests.request."""
        self.q_fetch.put_nowait((url, kw))

    def add_url(self, url, **kw):
        """Queue an additional url for fetching."""
        self.q_fetch.put_nowait((url, kw))

    def start_fetch(self):
        """Fetch worker: download queued urls and hand bodies to the parsers."""
        while True:
            try:
                url, params = self.q_fetch.get(block=True, timeout=5)
                print('----- fetch start: url={} -----\n'.format(url))
                result = self._fetch(url, **params)
                if result == ERROR_STATUS:
                    continue  # failure already logged by error_catch
                html_content = result
                print('----- fetch end: url={} -----\n'.format(url))
                self.q_parse.put_nowait((url, params, {'html_content': html_content}))
            except QueueEmpty:
                break

    def start_parse(self):
        """Parse worker: extract new urls and saveable content from bodies."""
        while True:
            try:
                url, params, content = self.q_parse.get(block=True, timeout=5)
                print('----- parse start: url={} -----\n'.format(url))
                result = self._parse(url, params, html_content=content['html_content'])
                if result == ERROR_STATUS:
                    continue
                url_to_fetch_list, content_to_save = result
                print('----- parse end: url={} -----\n'.format(url))
                # Feed newly discovered urls back to the fetch stage.
                for item in url_to_fetch_list:
                    self.q_fetch.put_nowait(item)
                self.q_save.put_nowait((url, params, {'content_to_save': content_to_save}))
            except QueueEmpty:
                break

    def start_save(self):
        """Save worker: persist parsed content via the user's save callback."""
        while True:
            try:
                url, params, content = self.q_save.get(block=True, timeout=5)
                print('----- save start: url={} -----\n'.format(url))
                result = self._save(url, params, content=content['content_to_save'])
                if result == ERROR_STATUS:
                    continue
                print('----- save end: url={} -----\n'.format(url))
            except QueueEmpty:
                break

    @error_catch
    def start_crawl(self):
        """Start all worker pools and block until every thread finishes."""
        pools = [
            [Thread(target=self.start_fetch, args=()) for _ in range(self.worker_num)],
            [Thread(target=self.start_parse, args=()) for _ in range(self.worker_num)],
            [Thread(target=self.start_save, args=()) for _ in range(self.worker_num)],
        ]
        for pool in pools:
            for td in pool:
                td.start()
        for pool in pools:
            for td in pool:
                # join() is a no-op on an already-finished thread, so the
                # previous is_alive() pre-check was redundant.
                td.join()
def parse(url, request_params, html_content):
    """
    parse content in html_content based on url

    :param url:
    :param request_params: kwargs the url was fetched with
    :param html_content: http response body of url
    :return: tuple or ERROR_STATUS
        tuple (new_url_to_fetch_list, parsed_content_to_save)
        ERROR_STATUS means parse failed
    """
    # BUGFIX: "raise NotImplemented" raises TypeError because NotImplemented
    # is a constant, not an exception class; NotImplementedError is intended.
    raise NotImplementedError
def save(url, request_params, content):
    """
    save content based on url

    :param url:
    :param request_params: kwargs the url was fetched with
    :param content: parsed content to persist
    :return: anything or ERROR_STATUS
        ERROR_STATUS means save failed
    """
    # BUGFIX: "raise NotImplemented" raises TypeError because NotImplemented
    # is a constant, not an exception class; NotImplementedError is intended.
    raise NotImplementedError
if __name__ == '__main__':
    # Demo run: these local parse/save shadow the module-level stubs and
    # simulate a tiny three-page crawl starting at baidu.com.
    def parse(url, request_params, html_content):
        print(html_content)
        result = ([], '')
        if url == 'http://www.baidu.com':
            # The start page links to two follow-up pages.
            result = ([('http://www.sina.com', {}), ('http://www.qq.com', {})], 'welcome to baidu')
        if url == 'http://www.sina.com':
            result = ([], 'welcome to sina')
        if url == 'http://www.qq.com':
            result = ([], 'welcome to qq')
        return result
    def save(url, request_params, content):
        # Just echo what would be persisted.
        print(content)
    spider = Spider(parse, save)
    spider.set_start_url('http://www.baidu.com')
    spider.start_crawl()
| 32.474419 | 136 | 0.596534 |
import os
from threading import Thread
try:
from queue import Queue, Empty as QueueEmpty
except ImportError:
from Queue import Queue, Empty as QueueEmpty
import requests
import logging
import traceback
error_logger = logging.getLogger('error')
error_logger.setLevel(logging.ERROR)
ERROR_STATUS = -1
def error_catch(func):
def wrap(*args, **kwargs):
try:
result = func(*args, **kwargs)
return result
except:
error_logger.error(traceback.format_exc())
return ERROR_STATUS
return wrap
def fetch(url, method='GET', **kwargs):
resp = requests.request(method, url, **kwargs)
html_content = resp.text
return html_content
class Spider(object):
def __init__(self, parse_func, save_func):
self.q_fetch = Queue()
self.q_parse = Queue()
self.q_save = Queue()
self._fetch = error_catch(fetch)
self._parse = error_catch(parse_func)
self._save = error_catch(save_func)
def set_start_url(self, url, **kw):
self.q_fetch.put_nowait((url, kw))
def add_url(self, url, **kw):
self.q_fetch.put_nowait((url, kw))
def start_fetch(self):
while True:
try:
url, params = self.q_fetch.get(block=True, timeout=5)
print('----- fetch start: url={} -----\n'.format(url))
result = self._fetch(url, **params)
if result == ERROR_STATUS:
continue
html_content = result
print('----- fetch end: url={} -----\n'.format(url))
self.q_parse.put_nowait((url, params, {'html_content': html_content}))
except QueueEmpty:
break
def start_parse(self):
while True:
try:
url, params, content = self.q_parse.get(block=True, timeout=5)
print('----- parse start: url={} -----\n'.format(url))
result = self._parse(url, params, html_content=content['html_content'])
if result == ERROR_STATUS:
continue
url_to_fetch_list, content_to_save = result
print('----- parse end: url={} -----\n'.format(url))
for item in url_to_fetch_list:
self.q_fetch.put_nowait(item)
self.q_save.put_nowait((url, params, {'content_to_save': content_to_save}))
except QueueEmpty:
break
def start_save(self):
while True:
try:
url, params, content = self.q_save.get(block=True, timeout=5)
print('----- save start: url={} -----\n'.format(url))
result = self._save(url, params, content=content['content_to_save'])
if result == ERROR_STATUS:
continue
print('----- save end: url={} -----\n'.format(url))
except QueueEmpty:
break
@error_catch
def start_crawl(self):
thread_pool_fetch = [Thread(target=self.start_fetch, args=()) for i in range(5)]
thread_pool_parse = [Thread(target=self.start_parse, args=()) for i in range(5)]
thread_pool_save = [Thread(target=self.start_save, args=()) for i in range(5)]
for td in thread_pool_fetch:
td.start()
for td in thread_pool_parse:
td.start()
for td in thread_pool_save:
td.start()
for td in thread_pool_fetch:
if td.is_alive():
td.join()
for td in thread_pool_parse:
if td.is_alive():
td.join()
for td in thread_pool_save:
if td.is_alive():
td.join()
def parse(url, request_params, html_content):
raise NotImplemented
def save(url, request_params, content):
raise NotImplemented
if __name__ == '__main__':
def parse(url, request_params, html_content):
print(html_content)
result = ([], '')
if url == 'http://www.baidu.com':
result = ([('http://www.sina.com', {}), ('http://www.qq.com', {})], 'welcome to baidu')
if url == 'http://www.sina.com':
result = ([], 'welcome to sina')
if url == 'http://www.qq.com':
result = ([], 'welcome to qq')
return result
def save(url, request_params, content):
print(content)
spider = Spider(parse, save)
spider.set_start_url('http://www.baidu.com')
spider.start_crawl()
| true | true |
f71caea71cfc518c2ef4111293c2ff14384cf596 | 1,255 | py | Python | src/sentry/api/endpoints/project_environments.py | apragacz/sf-sentry | 2fdd6c1195c29a1d401d1cd538c22ea68556699a | [
"BSD-3-Clause"
] | 1 | 2018-03-05T15:40:12.000Z | 2018-03-05T15:40:12.000Z | src/sentry/api/endpoints/project_environments.py | pitchin/sentry | ff6f260e9edb726374d2e4f455ff8b3d0ecd551e | [
"BSD-3-Clause"
] | 1 | 2018-08-22T16:49:48.000Z | 2018-08-22T16:49:48.000Z | src/sentry/api/endpoints/project_environments.py | pitchin/sentry | ff6f260e9edb726374d2e4f455ff8b3d0ecd551e | [
"BSD-3-Clause"
] | 1 | 2018-07-02T09:46:44.000Z | 2018-07-02T09:46:44.000Z | from __future__ import absolute_import
from rest_framework.response import Response
from sentry.api.bases.project import ProjectEndpoint
from sentry.api.serializers import serialize
from sentry.models import EnvironmentProject
# Maps the ?visibility= query value to a callable that narrows an
# EnvironmentProject queryset accordingly (default handling lives in the
# endpoint below).
environment_visibility_filter_options = {
    'all': lambda queryset: queryset,
    'hidden': lambda queryset: queryset.filter(is_hidden=True),
    'visible': lambda queryset: queryset.exclude(is_hidden=True),
}
class ProjectEnvironmentsEndpoint(ProjectEndpoint):
    """Lists a project's environments, filtered by their hidden state."""

    def get(self, request, project):
        """Return the project's environments, serialized for the requester.

        The optional ?visibility= parameter selects 'all', 'hidden' or
        'visible' (the default); any other value yields a 400 response.
        """
        visibility = request.GET.get('visibility', 'visible')
        if visibility not in environment_visibility_filter_options:
            return Response({
                'detail': 'Invalid value for \'visibility\', valid values are: {!r}'.format(
                    environment_visibility_filter_options.keys(),
                ),
            }, status=400)
        narrow = environment_visibility_filter_options[visibility]
        base_queryset = EnvironmentProject.objects.filter(
            project=project,
        ).select_related('environment').order_by('environment__name')
        return Response(serialize(list(narrow(base_queryset)), request.user))
| 35.857143 | 92 | 0.713147 | from __future__ import absolute_import
from rest_framework.response import Response
from sentry.api.bases.project import ProjectEndpoint
from sentry.api.serializers import serialize
from sentry.models import EnvironmentProject
environment_visibility_filter_options = {
'all': lambda queryset: queryset,
'hidden': lambda queryset: queryset.filter(is_hidden=True),
'visible': lambda queryset: queryset.exclude(is_hidden=True),
}
class ProjectEnvironmentsEndpoint(ProjectEndpoint):
def get(self, request, project):
queryset = EnvironmentProject.objects.filter(
project=project,
).select_related('environment').order_by('environment__name')
visibility = request.GET.get('visibility', 'visible')
if visibility not in environment_visibility_filter_options:
return Response({
'detail': 'Invalid value for \'visibility\', valid values are: {!r}'.format(
environment_visibility_filter_options.keys(),
),
}, status=400)
add_visibility_filters = environment_visibility_filter_options[visibility]
queryset = add_visibility_filters(queryset)
return Response(serialize(list(queryset), request.user))
| true | true |
f71caf4a0f239065a54f05daee5fc3a53ea19433 | 696 | py | Python | tarefas-poo/lista-03/tribo/view/paineis/painel_cria_tribo.py | victoriaduarte/POO_UFSC | 0c65b4f26383d1e3038d8469bd91fd2c0cb98c1a | [
"MIT"
] | null | null | null | tarefas-poo/lista-03/tribo/view/paineis/painel_cria_tribo.py | victoriaduarte/POO_UFSC | 0c65b4f26383d1e3038d8469bd91fd2c0cb98c1a | [
"MIT"
] | null | null | null | tarefas-poo/lista-03/tribo/view/paineis/painel_cria_tribo.py | victoriaduarte/POO_UFSC | 0c65b4f26383d1e3038d8469bd91fd2c0cb98c1a | [
"MIT"
] | null | null | null | # --------------------------
# UFSC - CTC - INE - INE5663
# Exercício da Tribo
# --------------------------
# Classe responsável por criar uma tribo
#
from model.tribo import Tribo
from view.paineis.painel_abstrato import PainelAbstrato
class PainelCriaTribo(PainelAbstrato):
    """Interactive panel that creates a tribe and stores it in the UI.

    Prompts (in Portuguese) for the tribe name and its guerreiro/vida
    limits, builds a Tribo and hands it to the UI for storage.
    """

    def __init__(self, iu):
        super().__init__('Criar Tribo', iu)

    def _interaja(self):
        """Prompt the user for the tribe data and register the new tribe."""
        nome_tribo = input('Nome da tribo: ')
        max_guerreiros = int(input('Quantidade máxima de guerreiros: '))
        max_vidas = int(input('Quantidade máxima de vidas de cada guerreiro: '))
        nova_tribo = Tribo(nome_tribo, max_guerreiros, max_vidas)
        self._iu.armazene_tribo(nova_tribo)
        print('Tribo criada!')
| 30.26087 | 80 | 0.627874 |
from model.tribo import Tribo
from view.paineis.painel_abstrato import PainelAbstrato
class PainelCriaTribo(PainelAbstrato):
def __init__(self, iu):
super().__init__('Criar Tribo', iu)
def _interaja(self):
nome = input('Nome da tribo: ')
qtd_guerreiros = int(input('Quantidade máxima de guerreiros: '))
qtd_vidas = int(input('Quantidade máxima de vidas de cada guerreiro: '))
tribo = Tribo(nome, qtd_guerreiros, qtd_vidas)
self._iu.armazene_tribo(tribo)
print('Tribo criada!')
| true | true |
f71cb0c1773a3937199f2475478d123c6d026639 | 3,726 | py | Python | src/lupuxt2py/constants.py | ChrisKeck/lupuxt2py | 73dc0c636c81fc7007044d9e6c2d34a1794ebae3 | [
"MIT"
] | null | null | null | src/lupuxt2py/constants.py | ChrisKeck/lupuxt2py | 73dc0c636c81fc7007044d9e6c2d34a1794ebae3 | [
"MIT"
] | null | null | null | src/lupuxt2py/constants.py | ChrisKeck/lupuxt2py | 73dc0c636c81fc7007044d9e6c2d34a1794ebae3 | [
"MIT"
] | null | null | null | # Used in setup.py
# -*- coding: utf-8 -*-
VERSION = "0.1.1"
PROJECT_PACKAGE_NAME = "lupupy"
PROJECT_LICENSE = "MIT"
PROJECT_URL = "http://www.github.com/majuss/lupupy"
PROJECT_DESCRIPTION = "A python cli for Lupusec alarm panels."
PROJECT_LONG_DESCRIPTION = (
"lupupy is a python3 interface for"
" the Lupus Electronics alarm panel."
" Its intented to get used in various"
" smart home services to get a full"
" integration of all you devices."
)
PROJECT_AUTHOR = "Majuss"
MODE_AWAY = "Arm"
MODE_HOME = "Home"
MODE_DISARMED = "Disarm"
MODE_ALARM_TRIGGERED = "Einbruch"
ALL_MODES = [MODE_DISARMED, MODE_HOME, MODE_AWAY]
MODE_TRANSLATION_XT1 = {"Disarm": 2, "Home": 1, "Arm": 0}
MODE_TRANSLATION_XT2 = {"Disarm": 0, "Arm": 1, "Home": 2}
XT2_MODES_TO_TEXT = {
"{AREA_MODE_0}": "Disarm",
"{AREA_MODE_1}": "Arm",
"{AREA_MODE_2}": "Home",
"{AREA_MODE_3}": "Home",
"{AREA_MODE_4}": "Home",
}
STATE_ALARM_DISARMED = "disarmed"
STATE_ALARM_ARMED_HOME = "armed_home"
STATE_ALARM_ARMED_AWAY = "armed_away"
STATE_ALARM_TRIGGERED = "alarm_triggered"
MODE_TRANSLATION_GENERIC = {
"Disarm": "disarmed",
"Home": "armed_home",
"Arm": "armed_away",
}
DEFAULT_MODE = MODE_AWAY
HISTORY_REQUEST = "historyGet"
HISTORY_ALARM_COLUMN = "a"
HISTORY_HEADER = "hisrows"
HISTORY_CACHE_NAME = ".lupusec_history_cache"
STATUS_ON_INT = 0
STATUS_ON = "on"
STATUS_OFF_INT = 1
STATUS_OFF = "off"
STATUS_OFFLINE = "offline"
STATUS_CLOSED = "Geschlossen"
STATUS_CLOSED_INT = 0
STATUS_OPEN = "Offen"
STATUS_OPEN_INT = 1
ALARM_NAME = "Lupusec Alarm"
ALARM_DEVICE_ID = "0"
ALARM_TYPE = "Alarm"
# GENERIC Lupusec DEVICE TYPES
TYPE_WINDOW = "Fensterkontakt"
TYPE_DOOR = "Türkontakt"
TYPE_CONTACT_XT2 = 4
TYPE_WATER_XT2 = 5
TYPE_SMOKE_XT2 = 11
TYPE_POWER_SWITCH_1_XT2 = 24
TYPE_POWER_SWITCH_2_XT2 = 25
TYPE_POWER_SWITCH = "Steckdose"
TYPE_SWITCH = [TYPE_POWER_SWITCH, TYPE_POWER_SWITCH_1_XT2, TYPE_POWER_SWITCH_2_XT2]
TYPE_OPENING = [TYPE_DOOR, TYPE_WINDOW, TYPE_CONTACT_XT2]
BINARY_SENSOR_TYPES = TYPE_OPENING
TYPE_SENSOR = ["Rauchmelder", "Wassermelder", TYPE_WATER_XT2, TYPE_SMOKE_XT2]
TYPE_TRANSLATION = {
"Fensterkontakt": "window",
"Türkontakt": "door",
TYPE_CONTACT_XT2: "Fenster-/Türkontakt",
TYPE_WATER_XT2: "Wassermelder",
TYPE_SMOKE_XT2: "Rauchmelder",
}
DEVICES_API_XT1 = "sensorListGet"
DEVICES_API_XT2 = "deviceListGet"
urlTokenGet: str = '/action/tokenGet'
urlLogoutPost = '/action/logout'
urlDeviceListGet = '/action/deviceListGet'
urlDevicePSSListGet = '/action/deviceListPSSGet'
urlDeviceGet = '/action/deviceGet'
urlPanelCondGet = '/action/panelCondGet'
urlPanelCondPost = '/action/panelCondPost'
urlDeviceSwitchPSSPost = '/action/deviceSwitchPSSPost'
urlHaExecutePost = '/action/haExecutePost'
urlDeviceEditGet = '/action/deviceEditGet'
urlDeviceEditPost = '/action/deviceEditPost'
urlDeviceSwitchDimmerPost = '/action/deviceSwitchDimmerPost'
urlDeviceHueColorControl = '/action/deviceHueColorControl'
urlDeviceEditThermoPost = '/action/deviceEditThermoPost'
urlDeviceEditThermoGet = '/action/deviceEditThermoGet'
urlDeviceEditShutterPost = '/action/deviceEditShutterPost'
urlDeviceEditShutterGet = '/action/deviceEditShutterGet'
urlDeviceEditMeterGet = '/action/deviceEditMeterGet'
urlDeviceEditMeterPost = '/action/deviceEditMeterPost'
urlDeviceNukiCmd = '/action/nukiCmd'
urlIpcamGet = '/action/ipcamGet'
urlPasthru = '/action/passthru'
urlDeviceListUPICGet = '/action/deviceListUPICGet'
urlDeviceDoUPICPost = '/action/deviceDoUPICPost'
urlSendSMSPost = '/action/sendSMSPost'
urlSmsgwTestPost = '/action/smsgwTestPost'
urlSystemGet = '/action/systemGet'
urlLogsGet = '/action/logsGet'
urlrecordListGet = '/action/recordListGet'
urlwelcomeGet = '/action/welcomeGet'
| 32.4 | 83 | 0.766774 |
VERSION = "0.1.1"
PROJECT_PACKAGE_NAME = "lupupy"
PROJECT_LICENSE = "MIT"
PROJECT_URL = "http://www.github.com/majuss/lupupy"
PROJECT_DESCRIPTION = "A python cli for Lupusec alarm panels."
PROJECT_LONG_DESCRIPTION = (
"lupupy is a python3 interface for"
" the Lupus Electronics alarm panel."
" Its intented to get used in various"
" smart home services to get a full"
" integration of all you devices."
)
PROJECT_AUTHOR = "Majuss"
MODE_AWAY = "Arm"
MODE_HOME = "Home"
MODE_DISARMED = "Disarm"
MODE_ALARM_TRIGGERED = "Einbruch"
ALL_MODES = [MODE_DISARMED, MODE_HOME, MODE_AWAY]
MODE_TRANSLATION_XT1 = {"Disarm": 2, "Home": 1, "Arm": 0}
MODE_TRANSLATION_XT2 = {"Disarm": 0, "Arm": 1, "Home": 2}
XT2_MODES_TO_TEXT = {
"{AREA_MODE_0}": "Disarm",
"{AREA_MODE_1}": "Arm",
"{AREA_MODE_2}": "Home",
"{AREA_MODE_3}": "Home",
"{AREA_MODE_4}": "Home",
}
STATE_ALARM_DISARMED = "disarmed"
STATE_ALARM_ARMED_HOME = "armed_home"
STATE_ALARM_ARMED_AWAY = "armed_away"
STATE_ALARM_TRIGGERED = "alarm_triggered"
MODE_TRANSLATION_GENERIC = {
"Disarm": "disarmed",
"Home": "armed_home",
"Arm": "armed_away",
}
DEFAULT_MODE = MODE_AWAY
HISTORY_REQUEST = "historyGet"
HISTORY_ALARM_COLUMN = "a"
HISTORY_HEADER = "hisrows"
HISTORY_CACHE_NAME = ".lupusec_history_cache"
STATUS_ON_INT = 0
STATUS_ON = "on"
STATUS_OFF_INT = 1
STATUS_OFF = "off"
STATUS_OFFLINE = "offline"
STATUS_CLOSED = "Geschlossen"
STATUS_CLOSED_INT = 0
STATUS_OPEN = "Offen"
STATUS_OPEN_INT = 1
ALARM_NAME = "Lupusec Alarm"
ALARM_DEVICE_ID = "0"
ALARM_TYPE = "Alarm"
TYPE_WINDOW = "Fensterkontakt"
TYPE_DOOR = "Türkontakt"
TYPE_CONTACT_XT2 = 4
TYPE_WATER_XT2 = 5
TYPE_SMOKE_XT2 = 11
TYPE_POWER_SWITCH_1_XT2 = 24
TYPE_POWER_SWITCH_2_XT2 = 25
TYPE_POWER_SWITCH = "Steckdose"
TYPE_SWITCH = [TYPE_POWER_SWITCH, TYPE_POWER_SWITCH_1_XT2, TYPE_POWER_SWITCH_2_XT2]
TYPE_OPENING = [TYPE_DOOR, TYPE_WINDOW, TYPE_CONTACT_XT2]
BINARY_SENSOR_TYPES = TYPE_OPENING
TYPE_SENSOR = ["Rauchmelder", "Wassermelder", TYPE_WATER_XT2, TYPE_SMOKE_XT2]
TYPE_TRANSLATION = {
"Fensterkontakt": "window",
"Türkontakt": "door",
TYPE_CONTACT_XT2: "Fenster-/Türkontakt",
TYPE_WATER_XT2: "Wassermelder",
TYPE_SMOKE_XT2: "Rauchmelder",
}
DEVICES_API_XT1 = "sensorListGet"
DEVICES_API_XT2 = "deviceListGet"
urlTokenGet: str = '/action/tokenGet'
urlLogoutPost = '/action/logout'
urlDeviceListGet = '/action/deviceListGet'
urlDevicePSSListGet = '/action/deviceListPSSGet'
urlDeviceGet = '/action/deviceGet'
urlPanelCondGet = '/action/panelCondGet'
urlPanelCondPost = '/action/panelCondPost'
urlDeviceSwitchPSSPost = '/action/deviceSwitchPSSPost'
urlHaExecutePost = '/action/haExecutePost'
urlDeviceEditGet = '/action/deviceEditGet'
urlDeviceEditPost = '/action/deviceEditPost'
urlDeviceSwitchDimmerPost = '/action/deviceSwitchDimmerPost'
urlDeviceHueColorControl = '/action/deviceHueColorControl'
urlDeviceEditThermoPost = '/action/deviceEditThermoPost'
urlDeviceEditThermoGet = '/action/deviceEditThermoGet'
urlDeviceEditShutterPost = '/action/deviceEditShutterPost'
urlDeviceEditShutterGet = '/action/deviceEditShutterGet'
urlDeviceEditMeterGet = '/action/deviceEditMeterGet'
urlDeviceEditMeterPost = '/action/deviceEditMeterPost'
urlDeviceNukiCmd = '/action/nukiCmd'
urlIpcamGet = '/action/ipcamGet'
urlPasthru = '/action/passthru'
urlDeviceListUPICGet = '/action/deviceListUPICGet'
urlDeviceDoUPICPost = '/action/deviceDoUPICPost'
urlSendSMSPost = '/action/sendSMSPost'
urlSmsgwTestPost = '/action/smsgwTestPost'
urlSystemGet = '/action/systemGet'
urlLogsGet = '/action/logsGet'
urlrecordListGet = '/action/recordListGet'
urlwelcomeGet = '/action/welcomeGet'
| true | true |
f71cb55ac21dc79bb494db37b62d30dc5c9b3af6 | 597 | py | Python | multilstm_tensorpack/tensorpack/utils/globvars.py | neale/A4C | acbbb3cf14e31a19c12f27306971b4db4feafe09 | [
"MIT"
] | 1 | 2017-03-11T23:10:00.000Z | 2017-03-11T23:10:00.000Z | multilstm_tensorpack/tensorpack/utils/globvars.py | neale/A4C | acbbb3cf14e31a19c12f27306971b4db4feafe09 | [
"MIT"
] | null | null | null | multilstm_tensorpack/tensorpack/utils/globvars.py | neale/A4C | acbbb3cf14e31a19c12f27306971b4db4feafe09 | [
"MIT"
] | 1 | 2021-04-30T15:34:24.000Z | 2021-04-30T15:34:24.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: globvars.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import six
import argparse
__all__ = ['globalns', 'use_global_argument']
# ``NS`` is a plain attribute container. Python 2 has no
# types.SimpleNamespace, so fall back to an empty class there.
if six.PY2:
    class NS:
        pass
else:
    import types
    NS = types.SimpleNamespace
# Module-level singleton namespace shared across the process.
globalns = NS()
def use_global_argument(args):
    """Copy every attribute of an :class:`argparse.Namespace` onto globalns.

    Args:
        args (argparse.Namespace): parsed command-line arguments
    """
    assert isinstance(args, argparse.Namespace), type(args)
    attributes = vars(args)
    for name in attributes:
        setattr(globalns, name, attributes[name])
| 19.258065 | 63 | 0.649916 |
import six
import argparse
__all__ = ['globalns', 'use_global_argument']
if six.PY2:
class NS:
pass
else:
import types
NS = types.SimpleNamespace
globalns = NS()
def use_global_argument(args):
assert isinstance(args, argparse.Namespace), type(args)
for k, v in six.iteritems(vars(args)):
setattr(globalns, k, v)
| true | true |
f71cb812c630d4ea90200d9a5c076f1b4590a71e | 1,220 | py | Python | iotbx/command_line/sort_atoms.py | hbrunie/cctbx_project | 2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5 | [
"BSD-3-Clause-LBNL"
] | 2 | 2021-03-18T12:31:57.000Z | 2022-03-14T06:27:06.000Z | iotbx/command_line/sort_atoms.py | hbrunie/cctbx_project | 2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5 | [
"BSD-3-Clause-LBNL"
] | null | null | null | iotbx/command_line/sort_atoms.py | hbrunie/cctbx_project | 2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5 | [
"BSD-3-Clause-LBNL"
] | 1 | 2020-02-04T15:39:06.000Z | 2020-02-04T15:39:06.000Z | # LIBTBX_SET_DISPATCHER_NAME iotbx.pdb.sort_atoms
from __future__ import absolute_import, division, print_function
from libtbx.utils import Usage
import sys
import iotbx.pdb
import mmtbx.model
master_phil_str = """
file_name = None
.type = path
.multiple = False
.optional = False
.style = hidden
"""
def show_usage():
  """Raise libtbx.utils.Usage carrying the command-line help text."""
  raise Usage("""\
iotbx.pdb.sort_atoms model.pdb

Sort atoms in residues so they will be in the same order in all residues.
Also renumbers atoms (atom serial number field 7-11 columns).""")
def run(args):
  """Entry point: read the model file named in args[0] and write a
  <prefix>_sorted.pdb or <prefix>_sorted.cif copy, matching the input
  format as reported by the mmtbx model manager."""
  if not args:
    show_usage()
    return
  input_name = args[0]
  model = mmtbx.model.manager(
      model_input=iotbx.pdb.input(
          file_name=input_name,
          source_info=None,
          raise_sorry_if_format_error=True))
  # Strip a recognised 4-character extension before appending "_sorted".
  prefix = input_name
  if input_name.endswith((".pdb", ".cif")):
    prefix = input_name[:-4]
  if model.input_format_was_cif():
    output_name = prefix + "_sorted.cif"
    text = model.model_as_mmcif()
  else:
    output_name = prefix + "_sorted.pdb"
    text = model.model_as_pdb()
  with open(output_name, 'w') as f:
    f.write(text)
if (__name__ == "__main__"):
run(sys.argv[1:])
| 22.181818 | 73 | 0.685246 |
from __future__ import absolute_import, division, print_function
from libtbx.utils import Usage
import sys
import iotbx.pdb
import mmtbx.model
master_phil_str = """
file_name = None
.type = path
.multiple = False
.optional = False
.style = hidden
"""
def show_usage():
help_msg = """\
iotbx.pdb.sort_atoms model.pdb
Sort atoms in residues so they will be in the same order in all residues.
Also renumbers atoms (atom serial number field 7-11 columns)."""
raise Usage(help_msg)
def run(args):
if len(args) == 0:
show_usage()
return
inp_fn = args[0]
pdb_input = iotbx.pdb.input(
file_name=inp_fn,
source_info=None,
raise_sorry_if_format_error=True)
model = mmtbx.model.manager(
model_input = pdb_input)
out_fn_prefix = inp_fn
if inp_fn.endswith(".pdb") or inp_fn.endswith(".cif"):
out_fn_prefix = inp_fn[:-4]
out_fn = out_fn_prefix + "_sorted"
txt = ""
if model.input_format_was_cif():
out_fn += ".cif"
txt = model.model_as_mmcif()
else:
out_fn += ".pdb"
txt = model.model_as_pdb()
with open(out_fn, 'w') as f:
f.write(txt)
if (__name__ == "__main__"):
run(sys.argv[1:])
| true | true |
f71cb8ae677cc7579d50cdb36f7a5ee87a4ab448 | 4,358 | py | Python | Lib/dumbdbm.py | SaadBazaz/ChinesePython | 800955539dda912d4a1621bcf5a700aaaddc012f | [
"CNRI-Python-GPL-Compatible"
] | 3 | 2022-01-30T20:08:24.000Z | 2022-02-12T08:51:12.000Z | Lib/dumbdbm.py | SaadBazaz/ChinesePython | 800955539dda912d4a1621bcf5a700aaaddc012f | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | Lib/dumbdbm.py | SaadBazaz/ChinesePython | 800955539dda912d4a1621bcf5a700aaaddc012f | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | """A dumb and slow but simple dbm clone.
For database spam, spam.dir contains the index (a text file),
spam.bak *may* contain a backup of the index (also a text file),
while spam.dat contains the data (a binary file).
XXX TO DO:
- seems to contain a bug when updating...
- reclaim free space (currently, space once occupied by deleted or expanded
items is never reused)
- support concurrent access (currently, if two processes take turns making
updates, they can mess up the index)
- support efficient access to large databases (currently, the whole index
is read when the database is opened, and some updates rewrite the whole index)
- support opening for read-only (flag = 'm')
"""
_os = __import__('os')
import __builtin__
_open = __builtin__.open
_BLOCKSIZE = 512
error = IOError # For anydbm
class _Database:
    """In-memory index over an append-only data file (Python 2 code).

    ``self._index`` maps key -> (pos, siz) into the ``.dat`` file; the
    ``.dir`` file persists that index as one eval()-able line per entry,
    and ``.bak`` keeps the previous index version on commit.
    """

    def __init__(self, file):
        # On platforms where '.' is the path separator, use '/' to build
        # the companion-file suffixes instead.
        if _os.sep == '.':
            endsep = '/'
        else:
            endsep = '.'
        self._dirfile = file + endsep + 'dir'
        self._datfile = file + endsep + 'dat'
        self._bakfile = file + endsep + 'bak'
        # Mod by Jack: create data file if needed
        try:
            f = _open(self._datfile, 'r')
        except IOError:
            f = _open(self._datfile, 'w')
        f.close()
        self._update()

    def _update(self):
        # Rebuild the in-memory index from the .dir file, if it exists.
        self._index = {}
        try:
            f = _open(self._dirfile)
        except IOError:
            pass
        else:
            while 1:
                line = f.readline().rstrip()
                if not line: break
                # Each line is a repr of (key, (pos, siz)).
                key, (pos, siz) = eval(line)
                self._index[key] = (pos, siz)
            f.close()

    def _commit(self):
        # Rotate .dir to .bak, then rewrite .dir from the in-memory index.
        try: _os.unlink(self._bakfile)
        except _os.error: pass
        try: _os.rename(self._dirfile, self._bakfile)
        except _os.error: pass
        f = _open(self._dirfile, 'w')
        for key, (pos, siz) in self._index.items():
            f.write("%s, (%s, %s)\n" % (`key`, `pos`, `siz`))
        f.close()

    def __getitem__(self, key):
        # Look up the value's location, then read it from the data file.
        pos, siz = self._index[key]     # may raise KeyError
        f = _open(self._datfile, 'rb')
        f.seek(pos)
        dat = f.read(siz)
        f.close()
        return dat

    def _addval(self, val):
        # Append *val* at the next _BLOCKSIZE-aligned offset of the data
        # file and return its (pos, len) location.
        f = _open(self._datfile, 'rb+')
        f.seek(0, 2)
        pos = int(f.tell())
## Does not work under MW compiler
##              pos = ((pos + _BLOCKSIZE - 1) / _BLOCKSIZE) * _BLOCKSIZE
##              f.seek(pos)
        # Pad with NUL bytes up to the next block boundary instead of
        # seeking past EOF (workaround kept from the MW-compiler port).
        npos = ((pos + _BLOCKSIZE - 1) / _BLOCKSIZE) * _BLOCKSIZE
        f.write('\0'*(npos-pos))
        pos = npos

        f.write(val)
        f.close()
        return (pos, len(val))

    def _setval(self, pos, val):
        # Overwrite the value in place at *pos*; caller must ensure the
        # new value fits in the blocks already allocated there.
        f = _open(self._datfile, 'rb+')
        f.seek(pos)
        f.write(val)
        f.close()
        return (pos, len(val))

    def _addkey(self, key, (pos, siz)):
        # Record a brand-new key both in memory and appended to .dir.
        self._index[key] = (pos, siz)
        f = _open(self._dirfile, 'a')
        f.write("%s, (%s, %s)\n" % (`key`, `pos`, `siz`))
        f.close()

    def __setitem__(self, key, val):
        if not type(key) == type('') == type(val):
            raise TypeError, "keys and values must be strings"
        if not self._index.has_key(key):
            (pos, siz) = self._addval(val)
            self._addkey(key, (pos, siz))
        else:
            # Reuse the existing slot only if the new value still fits in
            # the same number of blocks; otherwise append (old space is
            # never reclaimed -- see the module docstring's TODO list).
            pos, siz = self._index[key]
            oldblocks = (siz + _BLOCKSIZE - 1) / _BLOCKSIZE
            newblocks = (len(val) + _BLOCKSIZE - 1) / _BLOCKSIZE
            if newblocks <= oldblocks:
                pos, siz = self._setval(pos, val)
                self._index[key] = pos, siz
            else:
                pos, siz = self._addval(val)
                self._index[key] = pos, siz

    def __delitem__(self, key):
        # Deleting only drops the index entry; the data bytes remain.
        del self._index[key]
        self._commit()

    def keys(self):
        return self._index.keys()

    def has_key(self, key):
        return self._index.has_key(key)

    def __len__(self):
        return len(self._index)

    def close(self):
        # Flush the index and drop references; further use will fail.
        self._commit()
        self._index = None
        self._datfile = self._dirfile = self._bakfile = None

    def __del__(self):
        # Best-effort flush if the caller forgot to close().
        if self._index is not None:
            self._commit()
def open(file, flag = None, mode = None):
    """Open (creating if necessary) the dumb database stored under *file*.

    *flag* and *mode* are accepted only for anydbm compatibility.
    """
    # flag, mode arguments are currently ignored
    return _Database(file)
| 28.298701 | 78 | 0.543139 | """A dumb and slow but simple dbm clone.
For database spam, spam.dir contains the index (a text file),
spam.bak *may* contain a backup of the index (also a text file),
while spam.dat contains the data (a binary file).
XXX TO DO:
- seems to contain a bug when updating...
- reclaim free space (currently, space once occupied by deleted or expanded
items is never reused)
- support concurrent access (currently, if two processes take turns making
updates, they can mess up the index)
- support efficient access to large databases (currently, the whole index
is read when the database is opened, and some updates rewrite the whole index)
- support opening for read-only (flag = 'm')
"""
_os = __import__('os')
import __builtin__
_open = __builtin__.open
_BLOCKSIZE = 512
error = IOError
class _Database:
def __init__(self, file):
if _os.sep == '.':
endsep = '/'
else:
endsep = '.'
self._dirfile = file + endsep + 'dir'
self._datfile = file + endsep + 'dat'
self._bakfile = file + endsep + 'bak'
try:
f = _open(self._datfile, 'r')
except IOError:
f = _open(self._datfile, 'w')
f.close()
self._update()
def _update(self):
self._index = {}
try:
f = _open(self._dirfile)
except IOError:
pass
else:
while 1:
line = f.readline().rstrip()
if not line: break
key, (pos, siz) = eval(line)
self._index[key] = (pos, siz)
f.close()
def _commit(self):
try: _os.unlink(self._bakfile)
except _os.error: pass
try: _os.rename(self._dirfile, self._bakfile)
except _os.error: pass
f = _open(self._dirfile, 'w')
for key, (pos, siz) in self._index.items():
f.write("%s, (%s, %s)\n" % (`key`, `pos`, `siz`))
f.close()
def __getitem__(self, key):
pos, siz = self._index[key]
f = _open(self._datfile, 'rb')
f.seek(pos)
dat = f.read(siz)
f.close()
return dat
def _addval(self, val):
f = _open(self._datfile, 'rb+')
f.seek(0, 2)
pos = int(f.tell())
f.write(val)
f.close()
return (pos, len(val))
def _setval(self, pos, val):
f = _open(self._datfile, 'rb+')
f.seek(pos)
f.write(val)
f.close()
return (pos, len(val))
def _addkey(self, key, (pos, siz)):
self._index[key] = (pos, siz)
f = _open(self._dirfile, 'a')
f.write("%s, (%s, %s)\n" % (`key`, `pos`, `siz`))
f.close()
def __setitem__(self, key, val):
if not type(key) == type('') == type(val):
raise TypeError, "keys and values must be strings"
if not self._index.has_key(key):
(pos, siz) = self._addval(val)
self._addkey(key, (pos, siz))
else:
pos, siz = self._index[key]
oldblocks = (siz + _BLOCKSIZE - 1) / _BLOCKSIZE
newblocks = (len(val) + _BLOCKSIZE - 1) / _BLOCKSIZE
if newblocks <= oldblocks:
pos, siz = self._setval(pos, val)
self._index[key] = pos, siz
else:
pos, siz = self._addval(val)
self._index[key] = pos, siz
def __delitem__(self, key):
del self._index[key]
self._commit()
def keys(self):
return self._index.keys()
def has_key(self, key):
return self._index.has_key(key)
def __len__(self):
return len(self._index)
def close(self):
self._commit()
self._index = None
self._datfile = self._dirfile = self._bakfile = None
def __del__(self):
if self._index is not None:
self._commit()
def open(file, flag = None, mode = None):
return _Database(file)
| false | true |
f71cb926199d235645c93f0a046fc2b7260452e8 | 1,138 | py | Python | machine-learning-pipeline/airflow/dags/train_simple_model.py | dataength/automating-your-data-pipeline-with-apache-airflow | 19b7fe4a41874708c5927b7c32f9840f4285090c | [
"MIT"
] | 30 | 2020-07-09T17:37:47.000Z | 2022-01-19T04:17:02.000Z | machine-learning-pipeline/airflow/dags/train_simple_model.py | mizzony/automating-your-data-pipeline-with-apache-airflow | 90a1351de6de78c0f0a6fb2e778e2ba3b7c78f5e | [
"MIT"
] | 38 | 2021-08-12T08:01:47.000Z | 2022-03-29T22:29:27.000Z | machine-learning-pipeline/airflow/dags/train_simple_model.py | mizzony/automating-your-data-pipeline-with-apache-airflow | 90a1351de6de78c0f0a6fb2e778e2ba3b7c78f5e | [
"MIT"
] | 22 | 2020-07-10T02:41:39.000Z | 2022-03-23T22:08:52.000Z | import pickle
from airflow import DAG
from airflow.hooks.postgres_hook import PostgresHook
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
from airflow.utils import timezone
from sklearn.ensemble import RandomForestClassifier
# Defaults applied to every task in this DAG.
default_args = {
    'owner': 'ODDS',
}

# Retrain every 15 minutes; catchup=False skips back-filling runs for
# the interval between start_date and now.
dag = DAG(
    'train_simple_model',
    schedule_interval='*/15 * * * *',
    default_args=default_args,
    start_date=timezone.datetime(2020, 8, 1),
    catchup=False
)

start = DummyOperator(task_id='start', dag=dag)
def train_func():
    """Fit a toy RandomForest on a fixed two-sample dataset and pickle it.

    Writes the fitted classifier to ``<MODEL_PATH>/models/clf.model`` so a
    downstream task can load it. The training data is intentionally
    trivial; this task only demonstrates a train-and-persist step.
    """
    import os

    clf = RandomForestClassifier(random_state=0)
    X = [[1, 2, 3],
         [11, 12, 13]]
    y = [0, 1]
    clf.fit(X, y)

    # NOTE(review): machine-specific absolute path -- consider moving this
    # to an Airflow Variable or an environment variable.
    MODEL_PATH = '/Users/zkan/Projects/dataength/' \
                 'automating-your-data-pipeline-with-apache-airflow/' \
                 'machine-learning-pipeline/airflow/dags'

    model_dir = os.path.join(MODEL_PATH, 'models')
    # Create the output directory up front so the task does not fail with
    # FileNotFoundError on a fresh checkout.
    os.makedirs(model_dir, exist_ok=True)
    with open(os.path.join(model_dir, 'clf.model'), 'wb') as outfile:
        pickle.dump(clf, outfile)
train = PythonOperator(
    task_id='train',
    python_callable=train_func,
    dag=dag,
)

end = DummyOperator(task_id='end', dag=dag)

# Linear pipeline: start -> train -> end.
start >> train >> end
| 22.76 | 65 | 0.692443 | import pickle
from airflow import DAG
from airflow.hooks.postgres_hook import PostgresHook
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
from airflow.utils import timezone
from sklearn.ensemble import RandomForestClassifier
default_args = {
'owner': 'ODDS',
}
dag = DAG(
'train_simple_model',
schedule_interval='*/15 * * * *',
default_args=default_args,
start_date=timezone.datetime(2020, 8, 1),
catchup=False
)
start = DummyOperator(task_id='start', dag=dag)
def train_func():
clf = RandomForestClassifier(random_state=0)
X = [[ 1, 2, 3],
[11, 12, 13]]
y = [0, 1]
clf.fit(X, y)
MODEL_PATH = '/Users/zkan/Projects/dataength/' \
'automating-your-data-pipeline-with-apache-airflow/' \
'machine-learning-pipeline/airflow/dags'
with open(f'{MODEL_PATH}/models/clf.model', 'wb') as outfile:
pickle.dump(clf, outfile)
train = PythonOperator(
task_id='train',
python_callable=train_func,
dag=dag,
)
end = DummyOperator(task_id='end', dag=dag)
start >> train >> end
| true | true |
f71cba9b88574b1dfb171079ea67df5863e28a5e | 1,843 | py | Python | nighteen_cpc.py | toddlerya/AnalyzeNPC | 5d16f994ec34300a3050463aad08ad3a1ec1eaba | [
"MIT"
] | 4 | 2018-09-15T02:43:04.000Z | 2022-02-11T01:56:49.000Z | nighteen_cpc.py | toddlerya/AnalyzeNPC | 5d16f994ec34300a3050463aad08ad3a1ec1eaba | [
"MIT"
] | null | null | null | nighteen_cpc.py | toddlerya/AnalyzeNPC | 5d16f994ec34300a3050463aad08ad3a1ec1eaba | [
"MIT"
] | 5 | 2018-03-12T10:01:48.000Z | 2021-11-05T05:34:48.000Z | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# author: toddler
import jieba
import re
import os
from collections import Counter
from wordcloud import WordCloud
import matplotlib.pyplot as plt
def cut_analyze(input_file):
    """Segment a Chinese text file and count word frequencies.

    :param input_file: path of the text file to segment
    :return: (words, counts) where ``words`` is the filtered token list
        and ``counts`` is a list of (word, frequency) tuples sorted by
        descending frequency.
    """
    cpc_dict_path = u'user_dict/cpc_dictionary.txt'
    stop_words_path = u'user_dict/stopword.txt'

    with open(input_file) as f:
        content = f.read()

    with open(stop_words_path) as sf:
        st_content = sf.readlines()

    jieba.load_userdict(cpc_dict_path)  # domain dictionary for the NPC corpus
    # Use a set for O(1) stop-word membership tests; decode to unicode so
    # comparisons against the segmented tokens work under Python 2.
    stop_words = set(line.strip().decode('utf-8') for line in st_content)

    seg_list = jieba.cut(content, cut_all=False)  # accurate (non-full) mode

    filter_seg_list = list()
    for seg in seg_list:
        # Keep only CJK characters; drops digits, latin text, punctuation.
        goal_word = ''.join(re.findall(u'[\u4e00-\u9fa5]+', seg)).strip()
        if goal_word and goal_word not in stop_words:
            filter_seg_list.append(goal_word)

    seg_counter_all = Counter(filter_seg_list).most_common()  # sort by frequency
    return filter_seg_list, seg_counter_all
def main():
    """Build and display a word cloud for the NPC report text."""
    _, frequency_pairs = cut_analyze(input_file=u'input_file/nighteen-cpc.txt')
    cloud = WordCloud(
        collocations=False,
        font_path=os.path.abspath('assets/msyh.ttf'),
        width=3600,
        height=3600,
        margin=2,
    )
    cloud.generate_from_frequencies(dict(frequency_pairs))
    plt.figure()
    plt.imshow(cloud)
    plt.axis('off')
    plt.show()
if __name__ == '__main__':
main() | 29.725806 | 96 | 0.688009 |
import jieba
import re
import os
from collections import Counter
from wordcloud import WordCloud
import matplotlib.pyplot as plt
def cut_analyze(input_file):
cpc_dict_path = u'user_dict/cpc_dictionary.txt'
stop_words_path = u'user_dict/stopword.txt'
with open(input_file) as f:
content = f.read()
with open(stop_words_path) as sf:
st_content = sf.readlines()
jieba.load_userdict(cpc_dict_path)
stop_words = [line.strip().decode('utf-8') for line in st_content]
seg_list = jieba.cut(content, cut_all=False)
filter_seg_list = list()
for seg in seg_list:
goal_word = ''.join(re.findall(u'[\u4e00-\u9fa5]+', seg)).strip()
if len(goal_word) != 0 and not stop_words.__contains__(goal_word):
            filter_seg_list.append(goal_word)
seg_counter_all = Counter(filter_seg_list).most_common()
return filter_seg_list, seg_counter_all
def main():
input_file_path = u'input_file/nighteen-cpc.txt'
cut_data, sort_data = cut_analyze(input_file=input_file_path)
font = os.path.abspath('assets/msyh.ttf')
wc = WordCloud(collocations=False, font_path=font, width=3600, height=3600, margin=2)
wc.generate_from_frequencies(dict(sort_data))
plt.figure()
plt.imshow(wc)
plt.axis('off')
plt.show()
if __name__ == '__main__':
main() | true | true |
f71cbafafa7b775082fc935301d70d2a60767f9b | 6,977 | py | Python | models/render.py | RichTeaMan/duck-game | b47db72e30767411251a43000a9afad7ee11f822 | [
"MIT"
] | null | null | null | models/render.py | RichTeaMan/duck-game | b47db72e30767411251a43000a9afad7ee11f822 | [
"MIT"
] | null | null | null | models/render.py | RichTeaMan/duck-game | b47db72e30767411251a43000a9afad7ee11f822 | [
"MIT"
] | null | null | null | import sys
import math
import pathlib
import bpy
import mathutils
from PIL import Image
modelDir = pathlib.Path(__file__).parent.absolute()
scn = bpy.context.scene
images_created = 0
def update_camera(camera, focus_point=mathutils.Vector((0.0, 0.0, 0.0)), distance=10.0):
    """
    Rotate *camera* so it faces *focus_point*, then force the X euler
    angle to the isometric-projection tilt (54.736 degrees).

    NOTE(review): the *distance* parameter is currently unused -- the line
    that would reposition the camera is commented out below; confirm
    whether it should be restored.

    :param camera: the camera object
    :type camera: bpy.types.object
    :param focus_point: the point to look at (default origin)
    :type focus_point: mathutils.Vector
    :param distance: intended distance to the focus point (unused)
    :type distance: float
    """
    looking_direction = camera.location - focus_point
    rot_quat = looking_direction.to_track_quat('Z', 'Y')

    camera.rotation_euler = rot_quat.to_euler()
    camera.rotation_euler[0] = math.radians(54.736) # angle for isometric projection
    #camera.location = rot_quat * mathutils.Vector((0.0, 0.0, distance))
# update_camera(bpy.data.objects['Camera'])
def render_direction(direction_name, camera_x, camera_y):
    """Render one frame from a compass-direction camera.

    Lazily creates (and reuses by name) an orthographic camera object
    ``CameraObj-<direction_name>`` at (camera_x, camera_y, 0.5), aims it
    at the origin, makes it the scene camera and renders to
    ``renders/<images_created>.png``. Increments the global frame counter
    and returns the written file path.
    """
    global images_created
    filepath = f"{modelDir}/renders/{images_created}.png"
    camera_object_name = f"CameraObj-{direction_name}"
    cam_obj = bpy.data.objects.get(camera_object_name)
    if (not cam_obj):
        # First use of this direction: build an orthographic camera.
        cam = bpy.data.cameras.new(f"Camera-{direction_name}")
        cam.lens = 18
        cam.type = 'ORTHO'
        cam.ortho_scale = 1.4

        # create the first camera object
        cam_obj = bpy.data.objects.new(camera_object_name, cam)
        cam_obj.location = (camera_x, camera_y, 0.5)
        cam_obj.rotation_euler = (0, 0, 0)
        scn.collection.objects.link(cam_obj)

    update_camera(cam_obj)

    scn.camera = cam_obj
    bpy.context.scene.render.filepath = filepath
    bpy.ops.render.render(animation=False, write_still=True,
                          use_viewport=False, layer='', scene='')
    images_created = images_created + 1
    return filepath
def render_frames(files):
    """Render one frame per compass direction (W..SW, clockwise order)
    and append the resulting file paths to *files*."""
    d = 0.4
    directions = (
        ("W", -d, 0),
        ("NW", -d, -d),
        ("N", 0, -d),
        ("NE", d, -d),
        ("E", d, 0),
        ("SE", d, d),
        ("S", 0, d),
        ("SW", -d, d),
    )
    for name, cam_x, cam_y in directions:
        files.append(render_direction(name, cam_x, cam_y))
def renderDuck(skin_name):
    """Render the full sprite sheet for one duck skin.

    Binds the skin's body and wing textures to the shared materials, then
    renders each animation pose from all 8 compass directions via
    render_frames(), and finally pastes the frames column-by-column (one
    column per pose, 8 direction rows) into a padded sprite sheet PNG.
    """
    # Attach this skin's body texture to the shared "duck-body" material.
    body_texture_image = bpy.data.images[f"duck-texture-{skin_name}"]
    body_material = bpy.data.materials.get("duck-body")
    body_bsdf = body_material.node_tree.nodes["Principled BSDF"]
    body_shader_node_texture_image = body_material.node_tree.nodes.new('ShaderNodeTexImage')
    body_shader_node_texture_image.image = body_texture_image
    body_material.node_tree.links.new(body_bsdf.inputs['Base Color'], body_shader_node_texture_image.outputs['Color'])

    # Same for the wing texture on the "duck-wing" material.
    wing_texture_image = bpy.data.images[f"duck-wing-texture-{skin_name}"]
    wing_material = bpy.data.materials.get("duck-wing")
    wing_bsdf = wing_material.node_tree.nodes["Principled BSDF"]
    wing_shader_node_texture_image = wing_material.node_tree.nodes.new('ShaderNodeTexImage')
    wing_shader_node_texture_image.image = wing_texture_image
    wing_material.node_tree.links.new(wing_bsdf.inputs['Base Color'], wing_shader_node_texture_image.outputs['Color'])

    files = []

    # tail wagging
    bpy.data.shape_keys["Key.001"].key_blocks["tail-right"].value = 1.0
    render_frames(files)
    bpy.data.shape_keys["Key.001"].key_blocks["tail-right"].value = 0.5
    render_frames(files)
    bpy.data.shape_keys["Key.001"].key_blocks["tail-right"].value = 0.0
    render_frames(files)
    bpy.data.shape_keys["Key.001"].key_blocks["tail-left"].value = 0.5
    render_frames(files)
    bpy.data.shape_keys["Key.001"].key_blocks["tail-left"].value = 1.0
    render_frames(files)
    bpy.data.shape_keys["Key.001"].key_blocks["tail-left"].value = 0.0

    # feeding
    render_frames(files) # wasted frame for laziness reasons
    bpy.data.shape_keys["Key.001"].key_blocks["feed"].value = 0.5
    render_frames(files)
    bpy.data.shape_keys["Key.001"].key_blocks["feed"].value = 1.0
    render_frames(files)
    bpy.data.shape_keys["Key.001"].key_blocks["feed"].value = 0.0

    # mouth
    render_frames(files) # wasted frame for laziness reasons
    bpy.data.shape_keys["Key.001"].key_blocks["mouth"].value = 0.5
    render_frames(files)
    bpy.data.shape_keys["Key.001"].key_blocks["mouth"].value = 1.0
    render_frames(files)
    bpy.data.shape_keys["Key.001"].key_blocks["mouth"].value = 0.0

    # swim flapping
    render_frames(files) # wasted frame for laziness reasons
    bpy.data.shape_keys["Key.001"].key_blocks["standing"].value = 0.5
    bpy.data.shape_keys["Key"].key_blocks["wing-standing"].value = 0.5
    bpy.data.shape_keys["Key"].key_blocks["standing-flap"].value = 0.5
    render_frames(files)
    bpy.data.shape_keys["Key.001"].key_blocks["standing"].value = 1.0
    bpy.data.shape_keys["Key"].key_blocks["wing-standing"].value = 1.0
    bpy.data.shape_keys["Key"].key_blocks["standing-flap"].value = 1.0
    render_frames(files)
    bpy.data.shape_keys["Key.001"].key_blocks["standing"].value = 1.0
    bpy.data.shape_keys["Key"].key_blocks["wing-standing"].value = 1.0
    bpy.data.shape_keys["Key"].key_blocks["standing-flap"].value = 1.0
    render_frames(files)
    bpy.data.shape_keys["Key"].key_blocks["standing-flap-up"].value = 1.0
    render_frames(files)
    bpy.data.shape_keys["Key"].key_blocks["standing-flap-up"].value = 0.0
    bpy.data.shape_keys["Key"].key_blocks["standing-flap-down"].value = 1.0
    render_frames(files)
    # Reset every shape key so the next skin starts from the rest pose.
    bpy.data.shape_keys["Key.001"].key_blocks["standing"].value = 0.0
    bpy.data.shape_keys["Key"].key_blocks["wing-standing"].value = 0.0
    bpy.data.shape_keys["Key"].key_blocks["standing-flap"].value = 0.0
    bpy.data.shape_keys["Key"].key_blocks["standing-flap-up"].value = 0.0
    bpy.data.shape_keys["Key"].key_blocks["standing-flap-down"].value = 0.0

    # Stitch the rendered frames into a sprite sheet: frames are pasted
    # top-to-bottom, 8 per column (one row per direction).
    images = [Image.open(x) for x in files]
    widths, heights = zip(*(i.size for i in images))  # NOTE(review): unused

    # sheet is padded
    total_width = 32 * 512
    total_height = 8 * 512
    new_im = Image.new('RGBA', (total_width, total_height))

    x_offset = 0
    y_offset = 0
    count = 0
    for im in images:
        new_im.paste(im, (x_offset, y_offset))
        count = count + 1
        if count % 8 == 0:
            # Column full: move to the top of the next column.
            y_offset = 0
            x_offset += im.size[0]
        else:
            y_offset += im.size[1]

    new_im.save(f"{modelDir}/../public/assets/duck-{skin_name}-spritesheet.png")
renderDuck("white")
renderDuck("mallard")
renderDuck("brown")
renderDuck("mandarin")
renderDuck("duckling")
print(f"Render complete. {images_created} images rendered.") | 39.642045 | 118 | 0.694138 | import sys
import math
import pathlib
import bpy
import mathutils
from PIL import Image
modelDir = pathlib.Path(__file__).parent.absolute()
scn = bpy.context.scene
images_created = 0
def update_camera(camera, focus_point=mathutils.Vector((0.0, 0.0, 0.0)), distance=10.0):
    """Aim `camera` at `focus_point`, then pin its tilt to a fixed angle.

    `distance` is currently unused — kept for callers that pass it; TODO confirm
    it can be dropped.
    """
    direction = camera.location - focus_point
    camera.rotation_euler = direction.to_track_quat('Z', 'Y').to_euler()
    # Force the X tilt to 54.736° regardless of where the camera sits.
    camera.rotation_euler[0] = math.radians(54.736)
def render_direction(direction_name, camera_x, camera_y):
    """Render one still of the current scene from the named compass direction.

    Creates (once) and reuses an orthographic camera per direction, renders to
    renders/<n>.png where n is the global frame counter, and returns the path.
    """
    global images_created
    filepath = f"{modelDir}/renders/{images_created}.png"
    camera_object_name = f"CameraObj-{direction_name}"
    cam_obj = bpy.data.objects.get(camera_object_name)
    if (not cam_obj):
        # Lazily create the per-direction camera on first use; later calls
        # for the same direction reuse the existing object unchanged.
        cam = bpy.data.cameras.new(f"Camera-{direction_name}")
        cam.lens = 18
        cam.type = 'ORTHO'
        cam.ortho_scale = 1.4
        cam_obj = bpy.data.objects.new(camera_object_name, cam)
        cam_obj.location = (camera_x, camera_y, 0.5)
        cam_obj.rotation_euler = (0, 0, 0)
        scn.collection.objects.link(cam_obj)
        update_camera(cam_obj)  # aim it at the origin with the fixed tilt
    scn.camera = cam_obj
    bpy.context.scene.render.filepath = filepath
    bpy.ops.render.render(animation=False, write_still=True,
                          use_viewport=False, layer='', scene='')
    images_created = images_created + 1
    return filepath
def render_frames(files):
    """Render the current pose from all eight compass directions.

    Appends each rendered frame's file path to `files`, in the fixed order
    W, NW, N, NE, E, SE, S, SW (the order the sprite sheet expects).
    """
    offset = 0.4
    directions = (
        ("W", -offset, 0),
        ("NW", -offset, -offset),
        ("N", 0, -offset),
        ("NE", offset, -offset),
        ("E", offset, 0),
        ("SE", offset, offset),
        ("S", 0, offset),
        ("SW", -offset, offset),
    )
    for name, dx, dy in directions:
        files.append(render_direction(name, dx, dy))
def _apply_skin_texture(material_name, image_name):
    """Wire the named image into a material's Principled BSDF base color."""
    material = bpy.data.materials.get(material_name)
    bsdf = material.node_tree.nodes["Principled BSDF"]
    texture_node = material.node_tree.nodes.new('ShaderNodeTexImage')
    texture_node.image = bpy.data.images[image_name]
    material.node_tree.links.new(bsdf.inputs['Base Color'], texture_node.outputs['Color'])


def _set_key(shape_key_name, block_name, value):
    """Set one shape-key block value ("Key.001" drives the body, "Key" the wing)."""
    bpy.data.shape_keys[shape_key_name].key_blocks[block_name].value = value


def renderDuck(skin_name):
    """Render the full animation sprite sheet for one duck skin.

    Applies the skin's body and wing textures, steps through every pose
    (tail wags, feeding, mouth, standing/flapping), renders each pose from
    8 directions, then stitches the 512px frames into a 32x8 sheet saved to
    public/assets/duck-<skin>-spritesheet.png.
    """
    # Swap in this skin's textures on the shared materials.
    _apply_skin_texture("duck-body", f"duck-texture-{skin_name}")
    _apply_skin_texture("duck-wing", f"duck-wing-texture-{skin_name}")

    files = []
    # Body-only poses: each (block, value) step is rendered in all 8 directions.
    body_pose_steps = [
        ("tail-right", 1.0), ("tail-right", 0.5), ("tail-right", 0.0),
        ("tail-left", 0.5), ("tail-left", 1.0), ("tail-left", 0.0),
        ("feed", 0.5), ("feed", 1.0), ("feed", 0.0),
        ("mouth", 0.5), ("mouth", 1.0), ("mouth", 0.0),
    ]
    for block, value in body_pose_steps:
        _set_key("Key.001", block, value)
        render_frames(files)

    # Standing/flapping drives body and wing keys together.  The 1.0 step is
    # rendered twice on purpose (the original kept a duplicate frame set to
    # preserve the sheet layout).
    for value in (0.5, 1.0, 1.0):
        _set_key("Key.001", "standing", value)
        _set_key("Key", "wing-standing", value)
        _set_key("Key", "standing-flap", value)
        render_frames(files)
    _set_key("Key", "standing-flap-up", 1.0)
    render_frames(files)
    _set_key("Key", "standing-flap-up", 0.0)
    _set_key("Key", "standing-flap-down", 1.0)
    render_frames(files)
    # Reset every pose key so the next skin starts from the rest pose.
    _set_key("Key.001", "standing", 0.0)
    _set_key("Key", "wing-standing", 0.0)
    _set_key("Key", "standing-flap", 0.0)
    _set_key("Key", "standing-flap-up", 0.0)
    _set_key("Key", "standing-flap-down", 0.0)

    # Stitch the frames column-major: 8 directions per 512px column,
    # one column per pose, padded out to a 32x8 sheet.
    images = [Image.open(path) for path in files]
    new_im = Image.new('RGBA', (32 * 512, 8 * 512))
    x_offset = 0
    y_offset = 0
    for count, im in enumerate(images, start=1):
        new_im.paste(im, (x_offset, y_offset))
        if count % 8 == 0:
            # Column full: move right and back to the top row.
            y_offset = 0
            x_offset += im.size[0]
        else:
            y_offset += im.size[1]
    new_im.save(f"{modelDir}/../public/assets/duck-{skin_name}-spritesheet.png")
# Render a complete sprite sheet for every available duck skin.
renderDuck("white")
renderDuck("mallard")
renderDuck("brown")
renderDuck("mandarin")
renderDuck("duckling")
print(f"Render complete. {images_created} images rendered.") | true | true |
f71cbc5a7db50b299b464568fe69775d801e45e9 | 1,650 | py | Python | concept_formation/tests/benchmark_cobweb.py | ThomasHoppe/concept_formation | 2468fea78ba46804bf44228519eb33ebc5780d31 | [
"MIT"
] | 47 | 2015-06-08T20:34:18.000Z | 2021-09-26T17:59:06.000Z | concept_formation/tests/benchmark_cobweb.py | ThomasHoppe/concept_formation | 2468fea78ba46804bf44228519eb33ebc5780d31 | [
"MIT"
] | 65 | 2015-07-27T18:16:31.000Z | 2021-10-04T14:02:51.000Z | concept_formation/tests/benchmark_cobweb.py | ThomasHoppe/concept_formation | 2468fea78ba46804bf44228519eb33ebc5780d31 | [
"MIT"
] | 13 | 2015-07-27T13:27:03.000Z | 2022-03-15T02:18:10.000Z | from random import randint
from timeit import timeit
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
def generate_dataset(n_inst, n_attr, n_val):
    """Create `n_inst` random training instances.

    Each instance maps the attribute names "0".."n_attr-1" to a random
    integer in [1, n_val] (inclusive).

    The original loop reused its index variable `i` as the instance dict
    (``for i in ...: i = {}``), which worked but shadowed the counter; the
    comprehension below is equivalent and unambiguous.
    """
    return [
        {str(attr): randint(1, n_val) for attr in range(n_attr)}
        for _ in range(n_inst)
    ]
def time(n_inst, n_attr, n_val):
    """Return the seconds one CobwebTree takes to fit a fresh random dataset.

    Dataset generation and tree construction happen inside the timeit
    `setup` string, so only the single `fit` call is measured (number=1).
    NOTE(review): this function shadows the stdlib `time` module name —
    harmless in this standalone benchmark, but rename it if the module
    ever needs `import time`.
    """
    return timeit('tree.fit(x)',
                  setup=('from __main__ import generate_dataset; '
                         'from concept_formation.cobweb import CobwebTree; '
                         'tree = CobwebTree(); '
                         'x = generate_dataset(%i, %i, %i)' % (n_inst, n_attr,
                                                               n_val)),
                  number=1)
if __name__ == "__main__":
    # Benchmark CobwebTree fit time against training-set size, one curve per
    # attribute count (all attributes draw from 5 possible values).
    sizes = [10, 30, 60, 120, 180, 220]
    curves = (
        (5, 'r', 'red'),
        (10, 'b', 'blue'),
        (20, 'g', 'green'),
    )
    legend_handles = []
    for n_attr, code, color_name in curves:
        times = [time(i, n_attr, 5) for i in sizes]
        plt.plot(sizes, times, code + 'o')
        plt.plot(sizes, times, code + '-')
        legend_handles.append(mpatches.Patch(color=color_name, label='# attr=%d' % n_attr))
    plt.legend(handles=legend_handles, loc=2)
    plt.xlabel('Number of training instances (5 possible values / attr)')
    plt.ylabel('Runtime in Seconds')
    plt.show()
| 31.132075 | 78 | 0.569697 | from random import randint
from timeit import timeit
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
def generate_dataset(n_inst, n_attr, n_val):
    """Create `n_inst` random training instances.

    Each instance maps the attribute names "0".."n_attr-1" to a random
    integer in [1, n_val] (inclusive).

    Rewritten as a comprehension: the original loop shadowed its index
    variable `i` with the instance dict it was building.
    """
    return [
        {str(attr): randint(1, n_val) for attr in range(n_attr)}
        for _ in range(n_inst)
    ]
def time(n_inst, n_attr, n_val):
    """Return the seconds one CobwebTree takes to fit a fresh random dataset.

    Dataset generation and tree construction happen inside the timeit
    `setup` string, so only the single `fit` call is measured (number=1).
    NOTE(review): shadows the stdlib `time` module name.
    """
    return timeit('tree.fit(x)',
                  setup=('from __main__ import generate_dataset; '
                         'from concept_formation.cobweb import CobwebTree; '
                         'tree = CobwebTree(); '
                         'x = generate_dataset(%i, %i, %i)' % (n_inst, n_attr,
                                                               n_val)),
                  number=1)
if __name__ == "__main__":
    # Benchmark CobwebTree fit time vs. training-set size; one curve per
    # attribute count (red=5, blue=10, green=20), 5 values per attribute.
    sizes = [10, 30, 60, 120, 180, 220]
    times = [time(i, 5, 5) for i in sizes]
    plt.plot(sizes, times, 'ro')
    plt.plot(sizes, times, 'r-')
    times = [time(i, 10, 5) for i in sizes]
    plt.plot(sizes, times, 'bo')
    plt.plot(sizes, times, 'b-')
    times = [time(i, 20, 5) for i in sizes]
    plt.plot(sizes, times, 'go')
    plt.plot(sizes, times, 'g-')
    # Manual legend patches because plt.plot was not given label= kwargs.
    red_patch = mpatches.Patch(color='red', label='# attr=5')
    blue_patch = mpatches.Patch(color='blue', label='# attr=10')
    green_patch = mpatches.Patch(color='green', label='# attr=20')
    plt.legend(handles=[red_patch, blue_patch, green_patch], loc=2)
    plt.xlabel('Number of training instances (5 possible values / attr)')
    plt.ylabel('Runtime in Seconds')
    plt.show()
| true | true |
f71cbc803e6c23ac267127d39b3cacff5df2afb2 | 1,759 | py | Python | ddtrace/contrib/starlette/patch.py | p7g/dd-trace-py | 141ac0ab6e9962e3b3bafc9de172076075289a19 | [
"Apache-2.0",
"BSD-3-Clause"
] | 308 | 2016-12-07T16:49:27.000Z | 2022-03-15T10:06:45.000Z | ddtrace/contrib/starlette/patch.py | p7g/dd-trace-py | 141ac0ab6e9962e3b3bafc9de172076075289a19 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1,928 | 2016-11-28T17:13:18.000Z | 2022-03-31T21:43:19.000Z | ddtrace/contrib/starlette/patch.py | p7g/dd-trace-py | 141ac0ab6e9962e3b3bafc9de172076075289a19 | [
"Apache-2.0",
"BSD-3-Clause"
] | 311 | 2016-11-27T03:01:49.000Z | 2022-03-18T21:34:03.000Z | import starlette
from starlette.middleware import Middleware
from starlette.routing import Match
from ddtrace import config
from ddtrace.contrib.asgi.middleware import TraceMiddleware
from ddtrace.internal.logger import get_logger
from ddtrace.internal.utils.wrappers import unwrap as _u
from ddtrace.vendor.wrapt import wrap_function_wrapper as _w
log = get_logger(__name__)
config._add(
"starlette",
dict(
_default_service="starlette",
request_span_name="starlette.request",
distributed_tracing=True,
aggregate_resources=True,
),
)
def get_resource(scope):
    """Resolve the route path template matching an ASGI scope.

    Returns the first fully matching route's path; failing that, the first
    partially matching one; otherwise None.
    """
    partial_path = None
    for route in scope["app"].routes:
        verdict, _ = route.matches(scope)
        if verdict == Match.FULL:
            return route.path
        if verdict == Match.PARTIAL and partial_path is None:
            partial_path = route.path
    return partial_path
def span_modifier(span, scope):
    """Rename the request span resource to "<METHOD> <route template>".

    Only applied when resource aggregation is enabled and a route matched.
    """
    path_template = get_resource(scope)
    if config.starlette["aggregate_resources"] and path_template:
        span.resource = "{} {}".format(scope["method"], path_template)
def traced_init(wrapped, instance, args, kwargs):
    """Wrapper for Starlette.__init__ that prepends the tracing middleware."""
    middlewares = kwargs.pop("middleware", [])
    tracing_middleware = Middleware(
        TraceMiddleware, integration_config=config.starlette, span_modifier=span_modifier
    )
    # First in the stack so the trace span wraps every other middleware.
    middlewares.insert(0, tracing_middleware)
    kwargs["middleware"] = middlewares
    wrapped(*args, **kwargs)
def patch():
    """Instrument starlette by wrapping Starlette.__init__ (idempotent)."""
    if getattr(starlette, "_datadog_patch", False):
        return  # already patched
    starlette._datadog_patch = True
    _w("starlette.applications", "Starlette.__init__", traced_init)
def unpatch():
    """Remove the Starlette.__init__ instrumentation (idempotent)."""
    if not getattr(starlette, "_datadog_patch", False):
        return  # nothing to undo
    starlette._datadog_patch = False
    _u(starlette.applications.Starlette, "__init__")
| 25.867647 | 111 | 0.69585 | import starlette
from starlette.middleware import Middleware
from starlette.routing import Match
from ddtrace import config
from ddtrace.contrib.asgi.middleware import TraceMiddleware
from ddtrace.internal.logger import get_logger
from ddtrace.internal.utils.wrappers import unwrap as _u
from ddtrace.vendor.wrapt import wrap_function_wrapper as _w
log = get_logger(__name__)
config._add(
"starlette",
dict(
_default_service="starlette",
request_span_name="starlette.request",
distributed_tracing=True,
aggregate_resources=True,
),
)
def get_resource(scope):
path = None
routes = scope["app"].routes
for route in routes:
match, _ = route.matches(scope)
if match == Match.FULL:
path = route.path
break
elif match == Match.PARTIAL and path is None:
path = route.path
return path
def span_modifier(span, scope):
resource = get_resource(scope)
if config.starlette["aggregate_resources"] and resource:
span.resource = "{} {}".format(scope["method"], resource)
def traced_init(wrapped, instance, args, kwargs):
mw = kwargs.pop("middleware", [])
mw.insert(0, Middleware(TraceMiddleware, integration_config=config.starlette, span_modifier=span_modifier))
kwargs.update({"middleware": mw})
wrapped(*args, **kwargs)
def patch():
if getattr(starlette, "_datadog_patch", False):
return
setattr(starlette, "_datadog_patch", True)
_w("starlette.applications", "Starlette.__init__", traced_init)
def unpatch():
if not getattr(starlette, "_datadog_patch", False):
return
setattr(starlette, "_datadog_patch", False)
_u(starlette.applications.Starlette, "__init__")
| true | true |
f71cbe052a1401c87b58ad7ee12061265e925398 | 3,707 | py | Python | locations/spiders/mcdonalds_hu.py | thismakessand/alltheplaces | b6116199844c9e88bff3a691290f07a7457470ba | [
"MIT"
] | 1 | 2019-08-19T10:00:55.000Z | 2019-08-19T10:00:55.000Z | locations/spiders/mcdonalds_hu.py | thismakessand/alltheplaces | b6116199844c9e88bff3a691290f07a7457470ba | [
"MIT"
] | null | null | null | locations/spiders/mcdonalds_hu.py | thismakessand/alltheplaces | b6116199844c9e88bff3a691290f07a7457470ba | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import scrapy
import json
import re
from locations.items import GeojsonPointItem
class McDonaldsHUSpider(scrapy.Spider):
    """Scrape McDonald's Hungary restaurant pages into GeojsonPointItems."""

    name = "mcdonalds_hu"
    allowed_domains = ["www.mcdonalds.hu"]
    start_urls = (
        'https://www.mcdonalds.hu/ettermeink',
    )

    def store_hours(self, data):
        """Build an OSM-style ``opening_hours`` string from a store page.

        The page lists one time range per weekday (Monday first).
        Consecutive days with identical hours are collapsed into ranges
        such as ``Mo-Fr 10:00-22:00``; a single all-week 24h range is
        reported as ``24/7``.  Returns ``None`` when no hours are found.
        """
        # Day codes in the same Monday-first order as the page listing.
        # Fixed: 'Tu' and 'Th' were swapped, mislabelling Tuesday and
        # Thursday hours in the output.
        weekdays = ['Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa', 'Su']
        day_groups = []
        this_day_group = {}
        day_hours = data.xpath('.//div[@class="grid__item one-half text--right"]//text()').extract()
        index = 0
        for day_hour in day_hours:
            day_hour = day_hour.strip()
            if index == 7:
                break  # only the first seven entries are weekday hours
            # Times are separated by an en dash on the page, e.g. "10:00–22:00".
            match = re.search(r'([0-9]{1,2}):([0-9]{1,2})–([0-9]{1,2}):([0-9]{1,2})', day_hour)
            if not match:
                hours = "off"
            else:
                sh, sm, eh, em = match.groups()
                # NOTE(review): a closing hour below 12 is assumed to be PM
                # and gets 12 added; confirm the site publishes 12-hour
                # times, otherwise an after-midnight close (e.g. 00:30)
                # would be corrupted.
                hours = '{}:{}-{}:{}'.format(sh, sm, int(eh) + 12 if int(eh) < 12 else int(eh), em)
            short_day = weekdays[index]
            if not this_day_group:
                this_day_group = {
                    'from_day': short_day,
                    'to_day': short_day,
                    'hours': hours,
                }
            elif hours == this_day_group['hours']:
                # Same hours as the running group: extend it to this day.
                this_day_group['to_day'] = short_day
            else:
                day_groups.append(this_day_group)
                this_day_group = {
                    'from_day': short_day,
                    'to_day': short_day,
                    'hours': hours,
                }
            index += 1
        if this_day_group:
            day_groups.append(this_day_group)
        if not day_groups:
            # Page had no parsable weekday entries (previously this case
            # crashed formatting an empty group).
            return None
        if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):
            return '24/7'
        opening_hours = ''
        for day_group in day_groups:
            if day_group['from_day'] == day_group['to_day']:
                opening_hours += '{from_day} {hours}; '.format(**day_group)
            else:
                opening_hours += '{from_day}-{to_day} {hours}; '.format(**day_group)
        return opening_hours[:-2]  # drop the trailing "; "

    def parse_latlon(self, data):
        """Extract ``(lat, lon)`` strings from the map link (``...loc:LAT,LON``)."""
        map_url = data.xpath('//a[@title="Mutatás a térképen"]/@href').extract_first().strip()
        lat_lon = map_url.split("loc:")[1]
        lat = lat_lon.split(",")[0]
        lon = lat_lon.split(",")[1]
        return lat, lon

    def parse_store(self, response):
        """Turn a single restaurant page into a GeojsonPointItem."""
        address = response.xpath('//h1[@class="text--uppercase"]/text()').extract_first()
        phone = response.xpath('//a[@title="Telefonszám"]/text()').extract_first()
        lat, lon = self.parse_latlon(response)
        properties = {
            'ref': response.meta['ref'],
            'phone': phone.strip() if phone else "",
            'lon': lon,
            'lat': lat,
            'name': "McDonald's",
            'addr_full': address.strip() if address else ""
        }
        opening_hours = self.store_hours(response)
        if opening_hours:
            properties['opening_hours'] = opening_hours
        yield GeojsonPointItem(**properties)

    def parse(self, response):
        """Follow every restaurant linked from the index page."""
        for item in response.xpath('//article'):
            ref_id = item.xpath('.//footer/a/@href').extract_first().strip()
            ref_id = ref_id.split("/")[2]
            yield scrapy.Request(response.urljoin('https://www.mcdonalds.hu/ettermeink/' + ref_id), meta={'ref': ref_id}, callback=self.parse_store)
| 34.324074 | 147 | 0.514702 |
import scrapy
import json
import re
from locations.items import GeojsonPointItem
class McDonaldsHUSpider(scrapy.Spider):
    """Scrape McDonald's Hungary restaurant pages into GeojsonPointItems."""

    name = "mcdonalds_hu"
    allowed_domains = ["www.mcdonalds.hu"]
    start_urls = (
        'https://www.mcdonalds.hu/ettermeink',
    )

    def store_hours(self, data):
        """Build an OSM-style ``opening_hours`` string from a store page.

        The page lists one time range per weekday (Monday first).
        Consecutive days with identical hours are collapsed into ranges
        such as ``Mo-Fr 10:00-22:00``; a single all-week 24h range is
        reported as ``24/7``.  Returns ``None`` when no hours are found.
        """
        # Day codes in the same Monday-first order as the page listing.
        # Fixed: 'Tu' and 'Th' were swapped, mislabelling Tuesday and
        # Thursday hours in the output.
        weekdays = ['Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa', 'Su']
        day_groups = []
        this_day_group = {}
        day_hours = data.xpath('.//div[@class="grid__item one-half text--right"]//text()').extract()
        index = 0
        for day_hour in day_hours:
            day_hour = day_hour.strip()
            if index == 7:
                break  # only the first seven entries are weekday hours
            # Times are separated by an en dash on the page, e.g. "10:00–22:00".
            match = re.search(r'([0-9]{1,2}):([0-9]{1,2})–([0-9]{1,2}):([0-9]{1,2})', day_hour)
            if not match:
                hours = "off"
            else:
                sh, sm, eh, em = match.groups()
                # NOTE(review): a closing hour below 12 is assumed to be PM
                # and gets 12 added; confirm the site publishes 12-hour
                # times, otherwise an after-midnight close (e.g. 00:30)
                # would be corrupted.
                hours = '{}:{}-{}:{}'.format(sh, sm, int(eh) + 12 if int(eh) < 12 else int(eh), em)
            short_day = weekdays[index]
            if not this_day_group:
                this_day_group = {
                    'from_day': short_day,
                    'to_day': short_day,
                    'hours': hours,
                }
            elif hours == this_day_group['hours']:
                # Same hours as the running group: extend it to this day.
                this_day_group['to_day'] = short_day
            else:
                day_groups.append(this_day_group)
                this_day_group = {
                    'from_day': short_day,
                    'to_day': short_day,
                    'hours': hours,
                }
            index += 1
        if this_day_group:
            day_groups.append(this_day_group)
        if not day_groups:
            # Page had no parsable weekday entries (previously this case
            # crashed formatting an empty group).
            return None
        if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):
            return '24/7'
        opening_hours = ''
        for day_group in day_groups:
            if day_group['from_day'] == day_group['to_day']:
                opening_hours += '{from_day} {hours}; '.format(**day_group)
            else:
                opening_hours += '{from_day}-{to_day} {hours}; '.format(**day_group)
        return opening_hours[:-2]  # drop the trailing "; "

    def parse_latlon(self, data):
        """Extract ``(lat, lon)`` strings from the map link (``...loc:LAT,LON``)."""
        map_url = data.xpath('//a[@title="Mutatás a térképen"]/@href').extract_first().strip()
        lat_lon = map_url.split("loc:")[1]
        lat = lat_lon.split(",")[0]
        lon = lat_lon.split(",")[1]
        return lat, lon

    def parse_store(self, response):
        """Turn a single restaurant page into a GeojsonPointItem."""
        address = response.xpath('//h1[@class="text--uppercase"]/text()').extract_first()
        phone = response.xpath('//a[@title="Telefonszám"]/text()').extract_first()
        lat, lon = self.parse_latlon(response)
        properties = {
            'ref': response.meta['ref'],
            'phone': phone.strip() if phone else "",
            'lon': lon,
            'lat': lat,
            'name': "McDonald's",
            'addr_full': address.strip() if address else ""
        }
        opening_hours = self.store_hours(response)
        if opening_hours:
            properties['opening_hours'] = opening_hours
        yield GeojsonPointItem(**properties)

    def parse(self, response):
        """Follow every restaurant linked from the index page."""
        for item in response.xpath('//article'):
            ref_id = item.xpath('.//footer/a/@href').extract_first().strip()
            ref_id = ref_id.split("/")[2]
            yield scrapy.Request(response.urljoin('https://www.mcdonalds.hu/ettermeink/' + ref_id), meta={'ref': ref_id}, callback=self.parse_store)
| true | true |
f71cbe39c1107e8c3db2f02071238dd85d13bb46 | 8,346 | py | Python | src/ml_rasa/scripts/preprocessors/check_english.py | GrigalashviliT/spoilerBlocker | 18a5e9689099d3b631a15ed20cc84a043f324055 | [
"MIT"
] | 5 | 2020-05-20T16:59:04.000Z | 2021-08-22T18:30:47.000Z | src/ml_rasa/scripts/preprocessors/check_english.py | GrigalashviliT/spoilerBlocker | 18a5e9689099d3b631a15ed20cc84a043f324055 | [
"MIT"
] | 10 | 2020-05-20T16:07:04.000Z | 2020-07-22T19:21:16.000Z | src/ml_rasa/scripts/preprocessors/check_english.py | GrigalashviliT/spoilerBlocker | 18a5e9689099d3b631a15ed20cc84a043f324055 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
from re import sub
from typing import Any, List, Text
from functools import reduce
from rasa.nlu.components import Component
from rasa.nlu.config import RasaNLUModelConfig
from rasa.nlu.training_data import Message
from rasa.nlu.training_data import TrainingData
import string
class CheckEnglish(Component):
provides = ["text"]
alphabet = ['a', 'b', 'c', 'd']
stopwords = ["a","about","above","after","again","against","ain","all","am","an","and","any","are","aren","aren't","as","at","be","because","been","before","being","below","between","both","but","by","can","couldn","couldn't","d","did","didn","didn't","do","does","doesn","doesn't","doing","don","don't","down","during","each","few","for","from","further","had","hadn","hadn't","has","hasn","hasn't","have","haven","haven't","having","he","her","here","hers","herself","him","himself","his","how","i","if","in","into","is","isn","isn't","it","it's","its","itself","just","ll","m","ma","me","mightn","mightn't","more","most","mustn","mustn't","my","myself","needn","needn't","no","nor","not","now","o","of","off","on","once","only","or","other","our","ours","ourselves","out","over","own","re","s","same","shan","shan't","she","she's","should","should've","shouldn","shouldn't","so","some","such","t","than","that","that'll","the","their","theirs","them","themselves","then","there","these","they","this","those","through","to","too","under","until","up","ve","very","was","wasn","wasn't","we","were","weren","weren't","what","when","where","which","while","who","whom","why","will","with","won","won't","wouldn","wouldn't","y","you","you'd","you'll","you're","you've","your","yours","yourself","yourselves","could","he'd","he'll","he's","here's","how's","i'd","i'll","i'm","i've","let's","ought","she'd","she'll","that's","there's","they'd","they'll","they're","they've","we'd","we'll","we're","we've","what's","when's","where's","who's","why's","would","able","abst","accordance","according","accordingly","across","act","actually","added","adj","affected","affecting","affects","afterwards","ah","almost","alone","along","already","also","although","always","among","amongst","announce","another","anybody","anyhow","anymore","anyone","anything","anyway","anyways","anywhere","apparently","approximately","arent","arise","around","aside","ask","asking","auth","available","away","awfully","b","back
","became","become","becomes","becoming","beforehand","begin","beginning","beginnings","begins","behind","believe","beside","besides","beyond","biol","brief","briefly","c","ca","came","cannot","can't","cause","causes","certain","certainly","co","com","come","comes","contain","containing","contains","couldnt","date","different","done","downwards","due","e","ed","edu","effect","eg","eight","eighty","either","else","elsewhere","end","ending","enough","especially","et","etc","even","ever","every","everybody","everyone","everything","everywhere","ex","except","f","far","ff","fifth","first","five","fix","followed","following","follows","former","formerly","forth","found","four","furthermore","g","gave","get","gets","getting","give","given","gives","giving","go","goes","gone","got","gotten","h","happens","hardly","hed","hence","hereafter","hereby","herein","heres","hereupon","hes","hi","hid","hither","home","howbeit","however","hundred","id","ie","im","immediate","immediately","importance","important","inc","indeed","index","information","instead","invention","inward","itd","it'll","j","k","keep","keeps","kept","kg","km","know","known","knows","l","largely","last","lately","later","latter","latterly","least","less","lest","let","lets","like","liked","likely","line","little","'ll","look","looking","looks","ltd","made","mainly","make","makes","many","may","maybe","mean","means","meantime","meanwhile","merely","mg","might","million","miss","ml","moreover","mostly","mr","mrs","much","mug","must","n","na","name","namely","nay","nd","near","nearly","necessarily","necessary","need","needs","neither","never","nevertheless","new","next","nine","ninety","nobody","non","none","nonetheless","noone","normally","nos","noted","nothing","nowhere","obtain","obtained","obviously","often","oh","ok","okay","old","omitted","one","ones","onto","ord","others","otherwise","outside","overall","owing","p","page","pages","part","particular","particularly","past","per","perhaps","placed","please","pl
us","poorly","possible","possibly","potentially","pp","predominantly","present","previously","primarily","probably","promptly","proud","provides","put","q","que","quickly","quite","qv","r","ran","rather","rd","readily","really","recent","recently","ref","refs","regarding","regardless","regards","related","relatively","research","respectively","resulted","resulting","results","right","run","said","saw","say","saying","says","sec","section","see","seeing","seem","seemed","seeming","seems","seen","self","selves","sent","seven","several","shall","shed","shes","show","showed","shown","showns","shows","significant","significantly","similar","similarly","since","six","slightly","somebody","somehow","someone","somethan","something","sometime","sometimes","somewhat","somewhere","soon","sorry","specifically","specified","specify","specifying","still","stop","strongly","sub","substantially","successfully","sufficiently","suggest","sup","sure","take","taken","taking","tell","tends","th","thank","thanks","thanx","thats","that've","thence","thereafter","thereby","thered","therefore","therein","there'll","thereof","therere","theres","thereto","thereupon","there've","theyd","theyre","think","thou","though","thoughh","thousand","throug","throughout","thru","thus","til","tip","together","took","toward","towards","tried","tries","truly","try","trying","ts","twice","two","u","un","unfortunately","unless","unlike","unlikely","unto","upon","ups","us","use","used","useful","usefully","usefulness","uses","using","usually","v","value","various","'ve","via","viz","vol","vols","vs","w","want","wants","wasnt","way","wed","welcome","went","werent","whatever","what'll","whats","whence","whenever","whereafter","whereas","whereby","wherein","wheres","whereupon","wherever","whether","whim","whither","whod","whoever","whole","who'll","whomever","whos","whose","widely","willing","wish","within","without","wont","words","world","wouldnt","www","x","yes","yet","youd","youre","z","zero","a's","ain't","a
llow","allows","apart","appear","appreciate","appropriate","associated","best","better","c'mon","c's","cant","changes","clearly","concerning","consequently","consider","considering","corresponding","course","currently","definitely","described","despite","entirely","exactly","example","going","greetings","hello","help","hopefully","ignored","inasmuch","indicate","indicated","indicates","inner","insofar","it'd","keep","keeps","novel","presumably","reasonably","second","secondly","sensible","serious","seriously","sure","t's","third","thorough","thoroughly","three","well","wonder"]
    def train(self, training_data, config, **kwargs):
        # type: (TrainingData, RasaNLUModelConfig, **Any) -> None
        """Strip non-English-looking words from every training example in place."""
        for example in training_data.training_examples:
            example.text = self.preprocess(example.text)
            # Keep the stored attribute in sync with the mutated field.
            example.set("text", example.text)
    def process(self, message, **kwargs):
        # type: (Message, **Any) -> None
        """Strip non-English-looking words from an incoming message in place."""
        message.text = self.preprocess(message.get('text'))
        # Keep the stored attribute in sync with the mutated field.
        message.set("text", message.text)
def english_word_count(self, word):
alph = list(string.ascii_lowercase)
count = 0
for ch in word:
if ch in alph:
count += 1
return count
def preprocess(self, text):
text = text.lower()
alph = list(string.ascii_lowercase)
new_text = ''
for word in text.split():
count = self.english_word_count(word)
if word in self.stopwords:
continue
if count / len(word) > 0.6:
new_text += word + ' '
return new_text[:-1] | 107 | 6,589 | 0.619458 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
from re import sub
from typing import Any, List, Text
from functools import reduce
from rasa.nlu.components import Component
from rasa.nlu.config import RasaNLUModelConfig
from rasa.nlu.training_data import Message
from rasa.nlu.training_data import TrainingData
import string
class CheckEnglish(Component):
provides = ["text"]
alphabet = ['a', 'b', 'c', 'd']
stopwords = ["a","about","above","after","again","against","ain","all","am","an","and","any","are","aren","aren't","as","at","be","because","been","before","being","below","between","both","but","by","can","couldn","couldn't","d","did","didn","didn't","do","does","doesn","doesn't","doing","don","don't","down","during","each","few","for","from","further","had","hadn","hadn't","has","hasn","hasn't","have","haven","haven't","having","he","her","here","hers","herself","him","himself","his","how","i","if","in","into","is","isn","isn't","it","it's","its","itself","just","ll","m","ma","me","mightn","mightn't","more","most","mustn","mustn't","my","myself","needn","needn't","no","nor","not","now","o","of","off","on","once","only","or","other","our","ours","ourselves","out","over","own","re","s","same","shan","shan't","she","she's","should","should've","shouldn","shouldn't","so","some","such","t","than","that","that'll","the","their","theirs","them","themselves","then","there","these","they","this","those","through","to","too","under","until","up","ve","very","was","wasn","wasn't","we","were","weren","weren't","what","when","where","which","while","who","whom","why","will","with","won","won't","wouldn","wouldn't","y","you","you'd","you'll","you're","you've","your","yours","yourself","yourselves","could","he'd","he'll","he's","here's","how's","i'd","i'll","i'm","i've","let's","ought","she'd","she'll","that's","there's","they'd","they'll","they're","they've","we'd","we'll","we're","we've","what's","when's","where's","who's","why's","would","able","abst","accordance","according","accordingly","across","act","actually","added","adj","affected","affecting","affects","afterwards","ah","almost","alone","along","already","also","although","always","among","amongst","announce","another","anybody","anyhow","anymore","anyone","anything","anyway","anyways","anywhere","apparently","approximately","arent","arise","around","aside","ask","asking","auth","available","away","awfully","b","back
","became","become","becomes","becoming","beforehand","begin","beginning","beginnings","begins","behind","believe","beside","besides","beyond","biol","brief","briefly","c","ca","came","cannot","can't","cause","causes","certain","certainly","co","com","come","comes","contain","containing","contains","couldnt","date","different","done","downwards","due","e","ed","edu","effect","eg","eight","eighty","either","else","elsewhere","end","ending","enough","especially","et","etc","even","ever","every","everybody","everyone","everything","everywhere","ex","except","f","far","ff","fifth","first","five","fix","followed","following","follows","former","formerly","forth","found","four","furthermore","g","gave","get","gets","getting","give","given","gives","giving","go","goes","gone","got","gotten","h","happens","hardly","hed","hence","hereafter","hereby","herein","heres","hereupon","hes","hi","hid","hither","home","howbeit","however","hundred","id","ie","im","immediate","immediately","importance","important","inc","indeed","index","information","instead","invention","inward","itd","it'll","j","k","keep","keeps","kept","kg","km","know","known","knows","l","largely","last","lately","later","latter","latterly","least","less","lest","let","lets","like","liked","likely","line","little","'ll","look","looking","looks","ltd","made","mainly","make","makes","many","may","maybe","mean","means","meantime","meanwhile","merely","mg","might","million","miss","ml","moreover","mostly","mr","mrs","much","mug","must","n","na","name","namely","nay","nd","near","nearly","necessarily","necessary","need","needs","neither","never","nevertheless","new","next","nine","ninety","nobody","non","none","nonetheless","noone","normally","nos","noted","nothing","nowhere","obtain","obtained","obviously","often","oh","ok","okay","old","omitted","one","ones","onto","ord","others","otherwise","outside","overall","owing","p","page","pages","part","particular","particularly","past","per","perhaps","placed","please","pl
us","poorly","possible","possibly","potentially","pp","predominantly","present","previously","primarily","probably","promptly","proud","provides","put","q","que","quickly","quite","qv","r","ran","rather","rd","readily","really","recent","recently","ref","refs","regarding","regardless","regards","related","relatively","research","respectively","resulted","resulting","results","right","run","said","saw","say","saying","says","sec","section","see","seeing","seem","seemed","seeming","seems","seen","self","selves","sent","seven","several","shall","shed","shes","show","showed","shown","showns","shows","significant","significantly","similar","similarly","since","six","slightly","somebody","somehow","someone","somethan","something","sometime","sometimes","somewhat","somewhere","soon","sorry","specifically","specified","specify","specifying","still","stop","strongly","sub","substantially","successfully","sufficiently","suggest","sup","sure","take","taken","taking","tell","tends","th","thank","thanks","thanx","thats","that've","thence","thereafter","thereby","thered","therefore","therein","there'll","thereof","therere","theres","thereto","thereupon","there've","theyd","theyre","think","thou","though","thoughh","thousand","throug","throughout","thru","thus","til","tip","together","took","toward","towards","tried","tries","truly","try","trying","ts","twice","two","u","un","unfortunately","unless","unlike","unlikely","unto","upon","ups","us","use","used","useful","usefully","usefulness","uses","using","usually","v","value","various","'ve","via","viz","vol","vols","vs","w","want","wants","wasnt","way","wed","welcome","went","werent","whatever","what'll","whats","whence","whenever","whereafter","whereas","whereby","wherein","wheres","whereupon","wherever","whether","whim","whither","whod","whoever","whole","who'll","whomever","whos","whose","widely","willing","wish","within","without","wont","words","world","wouldnt","www","x","yes","yet","youd","youre","z","zero","a's","ain't","a
llow","allows","apart","appear","appreciate","appropriate","associated","best","better","c'mon","c's","cant","changes","clearly","concerning","consequently","consider","considering","corresponding","course","currently","definitely","described","despite","entirely","exactly","example","going","greetings","hello","help","hopefully","ignored","inasmuch","indicate","indicated","indicates","inner","insofar","it'd","keep","keeps","novel","presumably","reasonably","second","secondly","sensible","serious","seriously","sure","t's","third","thorough","thoroughly","three","well","wonder"]
def train(self, training_data, config, **kwargs):
for example in training_data.training_examples:
example.text = self.preprocess(example.text)
example.set("text", example.text)
def process(self, message, **kwargs):
message.text = self.preprocess(message.get('text'))
message.set("text", message.text)
def english_word_count(self, word):
alph = list(string.ascii_lowercase)
count = 0
for ch in word:
if ch in alph:
count += 1
return count
def preprocess(self, text):
text = text.lower()
alph = list(string.ascii_lowercase)
new_text = ''
for word in text.split():
count = self.english_word_count(word)
if word in self.stopwords:
continue
if count / len(word) > 0.6:
new_text += word + ' '
return new_text[:-1] | true | true |
f71cbe9a6893b097ff92eef32e8a4f740fdc19a0 | 13,481 | py | Python | wheat/util/streamable.py | grayfallstown/wheat-blockchain | f391cdd30a0cbcdb2adf4439a25581fd28b42c1f | [
"Apache-2.0"
] | null | null | null | wheat/util/streamable.py | grayfallstown/wheat-blockchain | f391cdd30a0cbcdb2adf4439a25581fd28b42c1f | [
"Apache-2.0"
] | null | null | null | wheat/util/streamable.py | grayfallstown/wheat-blockchain | f391cdd30a0cbcdb2adf4439a25581fd28b42c1f | [
"Apache-2.0"
] | null | null | null | # flake8: noqa
# pylint: disable
from __future__ import annotations
import dataclasses
import io
import pprint
import sys
from enum import Enum
from typing import Any, BinaryIO, Dict, List, Tuple, Type, Callable, Optional, Iterator
from blspy import G1Element, G2Element, PrivateKey
from wheat.types.blockchain_format.program import Program, SerializedProgram
from wheat.types.blockchain_format.sized_bytes import bytes32
from wheat.util.byte_types import hexstr_to_bytes
from wheat.util.hash import std_hash
from wheat.util.ints import int64, int512, uint32, uint64, uint128
from wheat.util.type_checking import is_type_List, is_type_SpecificOptional, is_type_Tuple, strictdataclass
# typing.get_args only exists from Python 3.8; on older interpreters fall back
# to reading __args__ straight off the typing object.
if sys.version_info < (3, 8):
    def get_args(t: Type[Any]) -> Tuple[Any, ...]:
        return getattr(t, "__args__", ())
else:
    from typing import get_args
# Shared pretty-printer used by Streamable.__str__/__repr__.
pp = pprint.PrettyPrinter(indent=1, width=120, compact=True)
# TODO: Remove hack, this allows streaming these objects from binary
# Byte widths of fixed-size types that expose from_bytes() but no parse().
size_hints = {
    "PrivateKey": PrivateKey.PRIVATE_KEY_SIZE,
    "G1Element": G1Element.SIZE,
    "G2Element": G2Element.SIZE,
    "ConditionOpcode": 1,
}
# Types rendered as "0x..." hex strings when converting to/from JSON.
unhashable_types = [
    PrivateKey,
    G1Element,
    G2Element,
    Program,
    SerializedProgram,
]
# JSON does not support big ints, so these types must be serialized differently in JSON
big_ints = [uint64, int64, uint128, int512]
def dataclass_from_dict(klass, d):
    """
    Converts a dictionary based on a dataclass, into an instance of that dataclass.
    Recursively goes through lists, optionals, and dictionaries.
    """
    if is_type_SpecificOptional(klass):
        # Type is optional: only an explicit None (JSON null) means "absent".
        # The original tested `if not d`, which wrongly collapsed legitimate
        # falsy values (0, "", False, empty list) to None.
        if d is None:
            return None
        return dataclass_from_dict(get_args(klass)[0], d)
    elif is_type_Tuple(klass):
        # Type is tuple; each position may have a different inner type.
        return tuple(
            dataclass_from_dict(inner, item) for inner, item in zip(get_args(klass), d)
        )
    elif dataclasses.is_dataclass(klass):
        # Type is a dataclass, data is a dictionary
        fieldtypes = {f.name: f.type for f in dataclasses.fields(klass)}
        return klass(**{f: dataclass_from_dict(fieldtypes[f], d[f]) for f in d})
    elif is_type_List(klass):
        # Type is a list, data is a list
        return [dataclass_from_dict(get_args(klass)[0], item) for item in d]
    elif issubclass(klass, bytes):
        # Type is bytes, data is a hex string
        return klass(hexstr_to_bytes(d))
    elif klass in unhashable_types:
        # Type is unhashable (bls type), so cast from hex string
        return klass.from_bytes(hexstr_to_bytes(d))
    else:
        # Type is a primitive, cast with correct class
        return klass(d)
def recurse_jsonify(d):
    """
    Recursively convert `d` into JSON-friendly data: bytes and unhashable
    (bls/program) values become "0x..." hex strings, Enums become their names,
    and the fixed-width big-int types are downcast to plain ints.

    Note: lists/tuples are rebuilt into new lists, but dicts are mutated
    IN PLACE, and the (possibly same) object is returned.
    """
    if isinstance(d, list) or isinstance(d, tuple):
        new_list = []
        for item in d:
            # These checks run in sequence on the progressively transformed
            # `item`, so their order matters; each realistic input is expected
            # to match exactly one branch.
            if type(item) in unhashable_types or issubclass(type(item), bytes):
                item = f"0x{bytes(item).hex()}"
            if isinstance(item, dict):
                item = recurse_jsonify(item)
            if isinstance(item, list):
                item = recurse_jsonify(item)
            if isinstance(item, tuple):
                item = recurse_jsonify(item)
            if isinstance(item, Enum):
                item = item.name
            if isinstance(item, int) and type(item) in big_ints:
                item = int(item)
            new_list.append(item)
        d = new_list
    else:
        for key, value in d.items():
            # Unlike the list branch, every check here tests the ORIGINAL
            # `value`; only the stored d[key] is replaced.
            if type(value) in unhashable_types or issubclass(type(value), bytes):
                d[key] = f"0x{bytes(value).hex()}"
            if isinstance(value, dict):
                d[key] = recurse_jsonify(value)
            if isinstance(value, list):
                d[key] = recurse_jsonify(value)
            if isinstance(value, tuple):
                d[key] = recurse_jsonify(value)
            if isinstance(value, Enum):
                d[key] = value.name
            if isinstance(value, int) and type(value) in big_ints:
                d[key] = int(value)
    return d
# Maps each @streamable class to the ordered list of per-field parser
# functions, precomputed once at decoration time and consumed by parse().
PARSE_FUNCTIONS_FOR_STREAMABLE_CLASS = {}
def streamable(cls: Any):
    """
    This is a decorator for class definitions. It applies the strictdataclass decorator,
    which checks all types at construction. It also defines a simple serialization format,
    and adds parse, from bytes, stream, and __bytes__ methods.
    Serialization format:
    - Each field is serialized in order, by calling from_bytes/__bytes__.
    - For Lists, there is a 4 byte prefix for the list length.
    - For Optionals, there is a one byte prefix, 1 iff object is present, 0 iff not.
    All of the constituents must have parse/from_bytes, and stream/__bytes__ and therefore
    be of fixed size. For example, int cannot be a constituent since it is not a fixed size,
    whereas uint32 can be.
    Furthermore, a get_hash() member is added, which performs a serialization and a sha256.
    This class is used for deterministic serialization and hashing, for consensus critical
    objects such as the block header.
    Make sure to use the Streamable class as a parent class when using the streamable decorator,
    as it will allow linters to recognize the methods that are added by the decorator. Also,
    use the @dataclass(frozen=True) decorator as well, for linters to recognize constructor
    arguments.
    """
    cls1 = strictdataclass(cls)
    t = type(cls.__name__, (cls1, Streamable), {})
    # Mirror Streamable.parse(): fall back to an empty mapping when the class
    # has no __annotations__, instead of the original broad try/except.
    fields = getattr(cls1, "__annotations__", {})
    # Precompute one parser per annotated field, in declaration order.
    PARSE_FUNCTIONS_FOR_STREAMABLE_CLASS[t] = [
        cls.function_to_parse_one_item(f_type) for f_type in fields.values()
    ]
    return t
def parse_bool(f: BinaryIO) -> bool:
    """Read one byte from `f` and decode it: 0x00 -> False, 0x01 -> True."""
    raw = f.read(1)
    assert raw is not None and len(raw) == 1  # EOF guard
    if raw == b"\x00":
        return False
    if raw == b"\x01":
        return True
    raise ValueError("Bool byte must be 0 or 1")
def parse_optional(f: BinaryIO, parse_inner_type_f: Callable[[BinaryIO], Any]) -> Optional[Any]:
    """Read a one-byte presence flag; parse and return the inner value iff it is 1."""
    flag = f.read(1)
    assert flag is not None and len(flag) == 1  # EOF guard
    if flag == b"\x01":
        return parse_inner_type_f(f)
    if flag == b"\x00":
        return None
    raise ValueError("Optional must be 0 or 1")
def parse_bytes(f: BinaryIO) -> bytes:
    """Read a 4-byte big-endian length prefix, then exactly that many bytes."""
    prefix = f.read(4)
    assert prefix is not None and len(prefix) == 4  # EOF guard
    size: uint32 = uint32(int.from_bytes(prefix, "big"))
    payload = f.read(size)
    assert payload is not None and len(payload) == size
    return payload
def parse_list(f: BinaryIO, parse_inner_type_f: Callable[[BinaryIO], Any]) -> List[Any]:
    """Read a 4-byte big-endian element count, then parse that many elements
    from `f` with `parse_inner_type_f`, in order.
    """
    list_size_bytes = f.read(4)
    assert list_size_bytes is not None and len(list_size_bytes) == 4  # Checks for EOF
    list_size = uint32(int.from_bytes(list_size_bytes, "big"))
    # Comprehension replaces the manual append loop; the original loop index
    # was unused, and the stale commented-out assert has been removed.
    return [parse_inner_type_f(f) for _ in range(list_size)]
def parse_tuple(f: BinaryIO, list_parse_inner_type_f: List[Callable[[BinaryIO], Any]]) -> Tuple[Any, ...]:
    """Parse one value per provided parser, left to right, returned as a tuple."""
    # tuple() consumes the generator in order, so stream position advances
    # exactly as the original explicit loop did.
    return tuple(parse_one(f) for parse_one in list_parse_inner_type_f)
def parse_size_hints(f: BinaryIO, f_type: Type, bytes_to_read: int) -> Any:
    """Read exactly `bytes_to_read` bytes and deserialize via `f_type.from_bytes`."""
    raw = f.read(bytes_to_read)
    assert raw is not None and len(raw) == bytes_to_read  # EOF guard
    return f_type.from_bytes(raw)
def parse_str(f: BinaryIO) -> str:
    """Read a 4-byte big-endian length prefix, then that many UTF-8 bytes, decoded."""
    prefix = f.read(4)
    assert prefix is not None and len(prefix) == 4  # EOF guard
    size: uint32 = uint32(int.from_bytes(prefix, "big"))
    payload = f.read(size)
    assert payload is not None and len(payload) == size  # EOF guard
    return payload.decode("utf-8")
class Streamable:
    """Mixin attached by the @streamable decorator: deterministic binary
    (de)serialization, sha256 hashing, and JSON conversion for frozen
    dataclasses whose fields are fixed-size streamable types.
    """
    @classmethod
    def function_to_parse_one_item(cls: Type[cls.__name__], f_type: Type): # type: ignore
        """
        This function returns a function taking one argument `f: BinaryIO` that parses
        and returns a value of the given type.
        """
        inner_type: Type
        # bool must be handled before the generic hasattr fallbacks below.
        if f_type is bool:
            return parse_bool
        if is_type_SpecificOptional(f_type):
            inner_type = get_args(f_type)[0]
            parse_inner_type_f = cls.function_to_parse_one_item(inner_type)
            return lambda f: parse_optional(f, parse_inner_type_f)
        if hasattr(f_type, "parse"):
            return f_type.parse
        if f_type == bytes:
            return parse_bytes
        if is_type_List(f_type):
            inner_type = get_args(f_type)[0]
            parse_inner_type_f = cls.function_to_parse_one_item(inner_type)
            return lambda f: parse_list(f, parse_inner_type_f)
        if is_type_Tuple(f_type):
            inner_types = get_args(f_type)
            list_parse_inner_type_f = [cls.function_to_parse_one_item(_) for _ in inner_types]
            return lambda f: parse_tuple(f, list_parse_inner_type_f)
        # Fixed-width types (bls keys/signatures etc.) are read whole via
        # their byte size registered in size_hints.
        if hasattr(f_type, "from_bytes") and f_type.__name__ in size_hints:
            bytes_to_read = size_hints[f_type.__name__]
            return lambda f: parse_size_hints(f, f_type, bytes_to_read)
        if f_type is str:
            return parse_str
        raise NotImplementedError(f"Type {f_type} does not have parse")
    @classmethod
    def parse(cls: Type[cls.__name__], f: BinaryIO) -> cls.__name__: # type: ignore
        """Deserialize one instance of this class from the binary stream `f`."""
        # Create the object without calling __init__() to avoid unnecessary post-init checks in strictdataclass
        obj: Streamable = object.__new__(cls)
        fields: Iterator[str] = iter(getattr(cls, "__annotations__", {}))
        values: Iterator = (parse_f(f) for parse_f in PARSE_FUNCTIONS_FOR_STREAMABLE_CLASS[cls])
        for field, value in zip(fields, values):
            object.__setattr__(obj, field, value)
        # Use -1 as a sentinel value as it's not currently serializable
        if next(fields, -1) != -1:
            raise ValueError("Failed to parse incomplete Streamable object")
        if next(values, -1) != -1:
            raise ValueError("Failed to parse unknown data in Streamable object")
        return obj
    def stream_one_item(self, f_type: Type, item, f: BinaryIO) -> None:
        """Serialize one field value of declared type `f_type` into `f`
        (the write-side counterpart of function_to_parse_one_item).
        """
        inner_type: Type
        if is_type_SpecificOptional(f_type):
            # One presence byte, then the inner value only when present.
            inner_type = get_args(f_type)[0]
            if item is None:
                f.write(bytes([0]))
            else:
                f.write(bytes([1]))
                self.stream_one_item(inner_type, item, f)
        elif f_type == bytes:
            # 4-byte big-endian length prefix, then the raw bytes.
            f.write(uint32(len(item)).to_bytes(4, "big"))
            f.write(item)
        elif hasattr(f_type, "stream"):
            item.stream(f)
        elif hasattr(f_type, "__bytes__"):
            f.write(bytes(item))
        elif is_type_List(f_type):
            assert is_type_List(type(item))
            f.write(uint32(len(item)).to_bytes(4, "big"))
            inner_type = get_args(f_type)[0]
            # wjb assert inner_type != get_args(List)[0] # type: ignore
            for element in item:
                self.stream_one_item(inner_type, element, f)
        elif is_type_Tuple(f_type):
            inner_types = get_args(f_type)
            assert len(item) == len(inner_types)
            for i in range(len(item)):
                self.stream_one_item(inner_types[i], item[i], f)
        elif f_type is str:
            str_bytes = item.encode("utf-8")
            f.write(uint32(len(str_bytes)).to_bytes(4, "big"))
            f.write(str_bytes)
        elif f_type is bool:
            f.write(int(item).to_bytes(1, "big"))
        else:
            raise NotImplementedError(f"can't stream {item}, {f_type}")
    def stream(self, f: BinaryIO) -> None:
        """Serialize all annotated fields, in declaration order, into `f`."""
        try:
            fields = self.__annotations__ # pylint: disable=no-member
        except Exception:
            fields = {}
        for f_name, f_type in fields.items():
            self.stream_one_item(f_type, getattr(self, f_name), f)
    def get_hash(self) -> bytes32:
        """Return the sha256 hash of the canonical serialization."""
        return bytes32(std_hash(bytes(self)))
    @classmethod
    def from_bytes(cls: Any, blob: bytes) -> Any:
        """Deserialize from `blob`, requiring that every byte is consumed."""
        f = io.BytesIO(blob)
        parsed = cls.parse(f)
        # NOTE(review): assert is stripped under `python -O`; an explicit
        # raise would make the trailing-bytes check unconditional.
        assert f.read() == b""
        return parsed
    def __bytes__(self: Any) -> bytes:
        f = io.BytesIO()
        self.stream(f)
        return bytes(f.getvalue())
    def __str__(self: Any) -> str:
        return pp.pformat(recurse_jsonify(dataclasses.asdict(self)))
    def __repr__(self: Any) -> str:
        return pp.pformat(recurse_jsonify(dataclasses.asdict(self)))
    def to_json_dict(self) -> Dict:
        """Return a JSON-compatible dict (bytes as 0x-hex, big ints as plain ints)."""
        return recurse_jsonify(dataclasses.asdict(self))
    @classmethod
    def from_json_dict(cls: Any, json_dict: Dict) -> Any:
        """Inverse of to_json_dict(): rebuild an instance from JSON data."""
        return dataclass_from_dict(cls, json_dict)
| 37.551532 | 111 | 0.649952 |
from __future__ import annotations
import dataclasses
import io
import pprint
import sys
from enum import Enum
from typing import Any, BinaryIO, Dict, List, Tuple, Type, Callable, Optional, Iterator
from blspy import G1Element, G2Element, PrivateKey
from wheat.types.blockchain_format.program import Program, SerializedProgram
from wheat.types.blockchain_format.sized_bytes import bytes32
from wheat.util.byte_types import hexstr_to_bytes
from wheat.util.hash import std_hash
from wheat.util.ints import int64, int512, uint32, uint64, uint128
from wheat.util.type_checking import is_type_List, is_type_SpecificOptional, is_type_Tuple, strictdataclass
if sys.version_info < (3, 8):
def get_args(t: Type[Any]) -> Tuple[Any, ...]:
return getattr(t, "__args__", ())
else:
from typing import get_args
pp = pprint.PrettyPrinter(indent=1, width=120, compact=True)
size_hints = {
"PrivateKey": PrivateKey.PRIVATE_KEY_SIZE,
"G1Element": G1Element.SIZE,
"G2Element": G2Element.SIZE,
"ConditionOpcode": 1,
}
unhashable_types = [
PrivateKey,
G1Element,
G2Element,
Program,
SerializedProgram,
]
big_ints = [uint64, int64, uint128, int512]
def dataclass_from_dict(klass, d):
if is_type_SpecificOptional(klass):
if not d:
return None
return dataclass_from_dict(get_args(klass)[0], d)
elif is_type_Tuple(klass):
i = 0
klass_properties = []
for item in d:
klass_properties.append(dataclass_from_dict(klass.__args__[i], item))
i = i + 1
return tuple(klass_properties)
elif dataclasses.is_dataclass(klass):
fieldtypes = {f.name: f.type for f in dataclasses.fields(klass)}
return klass(**{f: dataclass_from_dict(fieldtypes[f], d[f]) for f in d})
elif is_type_List(klass):
return [dataclass_from_dict(get_args(klass)[0], item) for item in d]
elif issubclass(klass, bytes):
return klass(hexstr_to_bytes(d))
elif klass in unhashable_types:
return klass.from_bytes(hexstr_to_bytes(d))
else:
return klass(d)
def recurse_jsonify(d):
if isinstance(d, list) or isinstance(d, tuple):
new_list = []
for item in d:
if type(item) in unhashable_types or issubclass(type(item), bytes):
item = f"0x{bytes(item).hex()}"
if isinstance(item, dict):
item = recurse_jsonify(item)
if isinstance(item, list):
item = recurse_jsonify(item)
if isinstance(item, tuple):
item = recurse_jsonify(item)
if isinstance(item, Enum):
item = item.name
if isinstance(item, int) and type(item) in big_ints:
item = int(item)
new_list.append(item)
d = new_list
else:
for key, value in d.items():
if type(value) in unhashable_types or issubclass(type(value), bytes):
d[key] = f"0x{bytes(value).hex()}"
if isinstance(value, dict):
d[key] = recurse_jsonify(value)
if isinstance(value, list):
d[key] = recurse_jsonify(value)
if isinstance(value, tuple):
d[key] = recurse_jsonify(value)
if isinstance(value, Enum):
d[key] = value.name
if isinstance(value, int) and type(value) in big_ints:
d[key] = int(value)
return d
PARSE_FUNCTIONS_FOR_STREAMABLE_CLASS = {}
def streamable(cls: Any):
cls1 = strictdataclass(cls)
t = type(cls.__name__, (cls1, Streamable), {})
parse_functions = []
try:
fields = cls1.__annotations__
except Exception:
fields = {}
for _, f_type in fields.items():
parse_functions.append(cls.function_to_parse_one_item(f_type))
PARSE_FUNCTIONS_FOR_STREAMABLE_CLASS[t] = parse_functions
return t
def parse_bool(f: BinaryIO) -> bool:
bool_byte = f.read(1)
assert bool_byte is not None and len(bool_byte) == 1
if bool_byte == bytes([0]):
return False
elif bool_byte == bytes([1]):
return True
else:
raise ValueError("Bool byte must be 0 or 1")
def parse_optional(f: BinaryIO, parse_inner_type_f: Callable[[BinaryIO], Any]) -> Optional[Any]:
is_present_bytes = f.read(1)
assert is_present_bytes is not None and len(is_present_bytes) == 1
if is_present_bytes == bytes([0]):
return None
elif is_present_bytes == bytes([1]):
return parse_inner_type_f(f)
else:
raise ValueError("Optional must be 0 or 1")
def parse_bytes(f: BinaryIO) -> bytes:
list_size_bytes = f.read(4)
assert list_size_bytes is not None and len(list_size_bytes) == 4
list_size: uint32 = uint32(int.from_bytes(list_size_bytes, "big"))
bytes_read = f.read(list_size)
assert bytes_read is not None and len(bytes_read) == list_size
return bytes_read
def parse_list(f: BinaryIO, parse_inner_type_f: Callable[[BinaryIO], Any]) -> List[Any]:
full_list: List = []
list_size_bytes = f.read(4)
assert list_size_bytes is not None and len(list_size_bytes) == 4
list_size = uint32(int.from_bytes(list_size_bytes, "big"))
for list_index in range(list_size):
full_list.append(parse_inner_type_f(f))
return full_list
def parse_tuple(f: BinaryIO, list_parse_inner_type_f: List[Callable[[BinaryIO], Any]]) -> Tuple[Any, ...]:
full_list = []
for parse_f in list_parse_inner_type_f:
full_list.append(parse_f(f))
return tuple(full_list)
def parse_size_hints(f: BinaryIO, f_type: Type, bytes_to_read: int) -> Any:
bytes_read = f.read(bytes_to_read)
assert bytes_read is not None and len(bytes_read) == bytes_to_read
return f_type.from_bytes(bytes_read)
def parse_str(f: BinaryIO) -> str:
str_size_bytes = f.read(4)
assert str_size_bytes is not None and len(str_size_bytes) == 4
str_size: uint32 = uint32(int.from_bytes(str_size_bytes, "big"))
str_read_bytes = f.read(str_size)
assert str_read_bytes is not None and len(str_read_bytes) == str_size
return bytes.decode(str_read_bytes, "utf-8")
class Streamable:
@classmethod
def function_to_parse_one_item(cls: Type[cls.__name__], f_type: Type):
inner_type: Type
if f_type is bool:
return parse_bool
if is_type_SpecificOptional(f_type):
inner_type = get_args(f_type)[0]
parse_inner_type_f = cls.function_to_parse_one_item(inner_type)
return lambda f: parse_optional(f, parse_inner_type_f)
if hasattr(f_type, "parse"):
return f_type.parse
if f_type == bytes:
return parse_bytes
if is_type_List(f_type):
inner_type = get_args(f_type)[0]
parse_inner_type_f = cls.function_to_parse_one_item(inner_type)
return lambda f: parse_list(f, parse_inner_type_f)
if is_type_Tuple(f_type):
inner_types = get_args(f_type)
list_parse_inner_type_f = [cls.function_to_parse_one_item(_) for _ in inner_types]
return lambda f: parse_tuple(f, list_parse_inner_type_f)
if hasattr(f_type, "from_bytes") and f_type.__name__ in size_hints:
bytes_to_read = size_hints[f_type.__name__]
return lambda f: parse_size_hints(f, f_type, bytes_to_read)
if f_type is str:
return parse_str
raise NotImplementedError(f"Type {f_type} does not have parse")
@classmethod
def parse(cls: Type[cls.__name__], f: BinaryIO) -> cls.__name__:
obj: Streamable = object.__new__(cls)
fields: Iterator[str] = iter(getattr(cls, "__annotations__", {}))
values: Iterator = (parse_f(f) for parse_f in PARSE_FUNCTIONS_FOR_STREAMABLE_CLASS[cls])
for field, value in zip(fields, values):
object.__setattr__(obj, field, value)
if next(fields, -1) != -1:
raise ValueError("Failed to parse incomplete Streamable object")
if next(values, -1) != -1:
raise ValueError("Failed to parse unknown data in Streamable object")
return obj
def stream_one_item(self, f_type: Type, item, f: BinaryIO) -> None:
inner_type: Type
if is_type_SpecificOptional(f_type):
inner_type = get_args(f_type)[0]
if item is None:
f.write(bytes([0]))
else:
f.write(bytes([1]))
self.stream_one_item(inner_type, item, f)
elif f_type == bytes:
f.write(uint32(len(item)).to_bytes(4, "big"))
f.write(item)
elif hasattr(f_type, "stream"):
item.stream(f)
elif hasattr(f_type, "__bytes__"):
f.write(bytes(item))
elif is_type_List(f_type):
assert is_type_List(type(item))
f.write(uint32(len(item)).to_bytes(4, "big"))
inner_type = get_args(f_type)[0]
# wjb assert inner_type != get_args(List)[0] # type: ignore
for element in item:
self.stream_one_item(inner_type, element, f)
elif is_type_Tuple(f_type):
inner_types = get_args(f_type)
assert len(item) == len(inner_types)
for i in range(len(item)):
self.stream_one_item(inner_types[i], item[i], f)
elif f_type is str:
str_bytes = item.encode("utf-8")
f.write(uint32(len(str_bytes)).to_bytes(4, "big"))
f.write(str_bytes)
elif f_type is bool:
f.write(int(item).to_bytes(1, "big"))
else:
raise NotImplementedError(f"can't stream {item}, {f_type}")
def stream(self, f: BinaryIO) -> None:
try:
fields = self.__annotations__
except Exception:
fields = {}
for f_name, f_type in fields.items():
self.stream_one_item(f_type, getattr(self, f_name), f)
def get_hash(self) -> bytes32:
return bytes32(std_hash(bytes(self)))
@classmethod
def from_bytes(cls: Any, blob: bytes) -> Any:
f = io.BytesIO(blob)
parsed = cls.parse(f)
assert f.read() == b""
return parsed
def __bytes__(self: Any) -> bytes:
f = io.BytesIO()
self.stream(f)
return bytes(f.getvalue())
def __str__(self: Any) -> str:
return pp.pformat(recurse_jsonify(dataclasses.asdict(self)))
def __repr__(self: Any) -> str:
return pp.pformat(recurse_jsonify(dataclasses.asdict(self)))
def to_json_dict(self) -> Dict:
return recurse_jsonify(dataclasses.asdict(self))
@classmethod
def from_json_dict(cls: Any, json_dict: Dict) -> Any:
return dataclass_from_dict(cls, json_dict)
| true | true |
f71cbf4460d98bc10c011e9a945b70eb738776be | 853 | py | Python | setup.py | rwindsor1/DICOMcat | 1f6549882cce93f270ad24d4c4c4140d51536789 | [
"MIT"
] | 1 | 2021-08-09T15:50:53.000Z | 2021-08-09T15:50:53.000Z | setup.py | rwindsor1/DICOMcat | 1f6549882cce93f270ad24d4c4c4140d51536789 | [
"MIT"
] | null | null | null | setup.py | rwindsor1/DICOMcat | 1f6549882cce93f270ad24d4c4c4140d51536789 | [
"MIT"
from setuptools import setup
from os import path

# Read the long description from the adjacent README so PyPI renders it.
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(name='dicomcat',
      version='0.1',
      description='A simple python-based tool based on imgcat for displaying DICOM files in iTerm2.',
      long_description_content_type='text/markdown',
      long_description=long_description,
      url='https://github.com/rwindsor1/DICOMcat',
      author='Rhydian Windsor',
      author_email='windsorrhydian@gmail.com',
      license='MIT',
      packages=['dicomcat'],
      test_suite='nose.collector',
      tests_require=['nose'],
      entry_points={
          'console_scripts': ['dicomcat=dicomcat.cli:show_dicom']
      },
      include_package_data=True,
      # Fix: the original passed `ip_safe=False`, a typo setuptools ignores as
      # an unknown option; `zip_safe` is the intended flag.
      zip_safe=False)
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(name= 'dicomcat',
version= '0.1',
description='A simple python-based tool based on imgcat for displaying DICOM files in iTerm2.',
long_description_content_type='text/markdown',
long_description=long_description,
url='https://github.com/rwindsor1/DICOMcat',
author ='Rhydian Windsor',
author_email= 'windsorrhydian@gmail.com',
license= 'MIT',
packages=['dicomcat'],
test_suite='nose.collector',
tests_require=['nose'],
entry_points={
'console_scripts': ['dicomcat=dicomcat.cli:show_dicom']
},
include_package_data=True,
ip_safe=False)
| true | true |
f71cbfad5d23526173717cefa4699e471cc4b889 | 522 | py | Python | 16 Exception Handling/finallydemo.py | Himanshu44626748/Learn-Python | f3a4d997f2d29b146e5f7434f4801ae94bc3483f | [
"MIT"
] | 2 | 2020-03-16T14:57:44.000Z | 2020-11-29T07:45:54.000Z | 16 Exception Handling/finallydemo.py | Himanshu44626748/Learn-Python | f3a4d997f2d29b146e5f7434f4801ae94bc3483f | [
"MIT"
] | null | null | null | 16 Exception Handling/finallydemo.py | Himanshu44626748/Learn-Python | f3a4d997f2d29b146e5f7434f4801ae94bc3483f | [
"MIT"
# Demo of try/except/finally: `finally` runs whether or not an exception
# occurs, so it is the right place to release the file handle.
f = None  # lets the finally block tell whether open() ever succeeded
try:
    f = open("myfile", "w")
    a, b = [int(x) for x in input("Enter two numbers:").split()]
    c = a / b
    f.write("Writing %d into file" % c)
except ZeroDivisionError:
    print("Division by zero is not allowed")
    print("Please enter a non zero number")
finally:
    # Guarding on `f` fixes a NameError the original raised here whenever
    # open() itself failed (f was never bound, yet f.close() still ran).
    if f is not None:
        f.close()
        print("File Closed")
print("Code after that exception")
f = open("myfile","w")
a,b = [int(x) for x in input("Enter two numbers:").split()]
c = a/b
f.write("Writing %d into file" %c)
except ZeroDivisionError:
print("Division by zero is not allowed")
print("Please enter a non zero number")
finally:
f.close()
print("File Closed")
print("Code after that exception") | true | true |
f71cbfefe5963b92d2e1699d24dfdedb87ab4f03 | 1,191 | py | Python | benchmarks/python/microbench.py | cyntsh/dex-lang | 88a647c4b7347cc4124d9b03b90b4348c8125698 | [
"BSD-Source-Code"
] | 1,223 | 2019-10-25T12:35:46.000Z | 2022-03-30T02:08:54.000Z | benchmarks/python/microbench.py | cyntsh/dex-lang | 88a647c4b7347cc4124d9b03b90b4348c8125698 | [
"BSD-Source-Code"
] | 425 | 2019-10-27T21:12:15.000Z | 2022-03-31T17:47:57.000Z | benchmarks/python/microbench.py | cyntsh/dex-lang | 88a647c4b7347cc4124d9b03b90b4348c8125698 | [
"BSD-Source-Code"
] | 87 | 2019-10-26T17:41:23.000Z | 2022-02-05T23:32:04.000Z | # Copyright 2020 Google LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
import json
from functools import partial
import time
import jax.numpy as np
import jax.random as random
from jax import jit
from jax.config import config
# NOTE(review): enable_omnistaging() only exists in older JAX releases —
# omnistaging later became the default and the flag was removed. Confirm the
# pinned jax version before running.
config.enable_omnistaging()
# Warm up: the first JAX op pays one-time backend initialization, so run a
# trivial dot product before any timing starts.
np.dot(1.0, 1.0)
def benchit(bench_name, x, f):
    """JIT-compile `f`, run it twice on `x`, and print a JSON record with the
    estimated compile time and the steady-state run time.

    The first call includes tracing/compilation; the second measures cached
    execution, so compile_time = (first call) - (second call).
    Returns None; the result is the printed JSON line.
    """
    f_jitted = jit(f)
    # perf_counter() is monotonic and high-resolution, unlike the original
    # wall-clock time.time(), which can jump (NTP) mid-measurement.
    t0 = time.perf_counter()
    f_jitted(x).block_until_ready()  # block: JAX dispatch is asynchronous
    t1 = time.perf_counter()
    f_jitted(x).block_until_ready()
    t2 = time.perf_counter()
    run_time = t2 - t1
    compile_time = t1 - t0 - run_time
    print(json.dumps(
        {"bench_name": bench_name,
         "compile_time": compile_time,
         "run_time": run_time}))
# partial(benchit, "sum", 0) calls benchit with this function as `f` at
# decoration time, so the benchmark runs (and prints its JSON line) on import;
# `sum_bench` ends up bound to benchit's return value (None), not a callable.
@partial(benchit, "sum", 0)
def sum_bench(key):
    # 10k standard normals; broadcasting forms a 10k x 10k pairwise-sum matrix
    # which is then reduced over axis 0.
    xs = random.normal(random.PRNGKey(key), shape=(10000,))
    return np.sum(xs[:, None] + xs[None, :], axis=0)
# Decoration invokes benchit immediately, so this benchmark executes on import
# and `gaussian_bench` is bound to None afterwards.
@partial(benchit, "gaussian", 0)
def gaussian_bench(key):
    # Raw sampling throughput: 100M standard normals.
    return random.normal(random.PRNGKey(key), shape=(100000000,))
# Decoration invokes benchit immediately, so this benchmark executes on import
# and `matmul_bench` is bound to None afterwards.
@partial(benchit, "matmul", 0)
def matmul_bench(key):
    # Dense 1000x1000 matrix product.
    mat = random.normal(random.PRNGKey(key), shape=(1000, 1000))
    return np.dot(mat, mat)
| 24.8125 | 63 | 0.699412 |
import json
from functools import partial
import time
import jax.numpy as np
import jax.random as random
from jax import jit
from jax.config import config
config.enable_omnistaging()
np.dot(1.0, 1.0)
def benchit(bench_name, x, f):
f_jitted = jit(f)
t0 = time.time()
f_jitted(x).block_until_ready()
t1 = time.time()
f_jitted(x).block_until_ready()
t2 = time.time()
run_time = t2 - t1
compile_time = t1 - t0 - run_time
print(json.dumps(
{"bench_name" : bench_name,
"compile_time" : compile_time,
"run_time" : run_time}))
@partial(benchit, "sum", 0)
def sum_bench(key):
xs = random.normal(random.PRNGKey(key), shape=(10000,))
return np.sum(xs[:, None] + xs[None, :], axis=0)
@partial(benchit, "gaussian", 0)
def gaussian_bench(key):
return random.normal(random.PRNGKey(key), shape=(100000000,))
@partial(benchit, "matmul", 0)
def matmul_bench(key):
mat = random.normal(random.PRNGKey(key), shape=(1000, 1000))
return np.dot(mat, mat)
| true | true |
f71cc027fdd19119fb0399b5df5021a92a9837ac | 2,248 | py | Python | e3d/plugin_management/PluginHandlers.py | jr-garcia/Engendro3D | 93a6a6c26be2b9a8c1520e9d83516c39532ab1ed | [
"MIT"
] | 8 | 2017-04-19T03:59:43.000Z | 2020-04-29T00:29:12.000Z | e3d/plugin_management/PluginHandlers.py | jr-garcia/Engendro3D | 93a6a6c26be2b9a8c1520e9d83516c39532ab1ed | [
"MIT"
] | null | null | null | e3d/plugin_management/PluginHandlers.py | jr-garcia/Engendro3D | 93a6a6c26be2b9a8c1520e9d83516c39532ab1ed | [
"MIT"
] | 3 | 2018-04-26T16:57:46.000Z | 2021-03-01T05:48:06.000Z | from os import path
from shutil import make_archive
import os
from json import load, dump
# File extension given to packed plugin archives (a renamed .tar.gz).
PLUGINEXTENSION = '.epf'
# Base name (without .json extension) of the required plugin description file.
DESCRIPTIONNAME = 'description'
def packPluginFromFolder(folderPath):
    """Pack the plugin folder at `folderPath` into an archive next to it.

    The folder must exist, be a directory, and contain a description.json
    file. The archive is built as `<folder>.tar.gz` with shutil.make_archive
    and then renamed to `<folder>` + PLUGINEXTENSION.

    Raises:
        FileNotFoundError: if the folder or its description file is missing.
        NotADirectoryError: if `folderPath` is not a directory.
    """
    folderPath = path.abspath(folderPath)
    if not path.exists(folderPath):
        raise FileNotFoundError('the folder does not exist.')
    if not path.isdir(folderPath):
        raise NotADirectoryError('folderPath must be a directory with files.')
    descriptionPath = path.abspath(path.join(folderPath, DESCRIPTIONNAME + '.json'))
    if not path.exists(descriptionPath):
        raise FileNotFoundError('required plugin description file not found.')
    # make_archive always appends its own suffix, so create the archive first
    # and rename to the plugin extension afterwards. (Removed the unused
    # `parentFolder` local from the original.)
    zipTitle = folderPath
    finalName = zipTitle + PLUGINEXTENSION
    make_archive(zipTitle, 'gztar', folderPath, './')
    os.rename(zipTitle + '.tar.gz', finalName)
class PluginDescription(object):
    """Plugin metadata, (de)serializable as the plugin's description.json."""

    def __init__(self, name='', description='', authorName='', authorEmail=''):
        self.name = name
        self.description = description
        self.authorName = authorName
        self.authorEmail = authorEmail

    def __repr__(self):
        return self.name

    def _toDict(self):
        # Collect every public, non-callable attribute so subclasses that add
        # extra fields are serialized as well.
        d = dir(self)
        dd = {v: getattr(self, v) for v in d if not v.startswith('_') and not callable(getattr(self, v))}
        return dd

    def saveToDisk(self, destFolder):
        """Write this description as description.json inside `destFolder`.

        The original wrapped this in `try: ... except: raise`, which is a
        no-op (exceptions propagate unchanged either way), so it was removed.
        """
        finalPath = path.abspath(path.join(destFolder, DESCRIPTIONNAME + '.json'))
        with open(finalPath, 'w') as dest:
            dump(self._toDict(), dest, indent=4)

    @staticmethod
    def fromDisk(folderPath):
        """Load a PluginDescription from description.json in `folderPath`.

        Raises:
            FileNotFoundError: if the description file is missing.
        """
        descriptionPath = path.abspath(path.join(folderPath, DESCRIPTIONNAME + '.json'))
        if not path.exists(descriptionPath):
            raise FileNotFoundError('required plugin description file not found.')
        with open(descriptionPath) as desc:
            data = load(desc)
        description = PluginDescription(**data)
        return description
class _Plugin(object):
    """Internal record pairing a plugin's description with its entry class
    and on-disk location.
    """
    def __init__(self, description, mainClass, pluginPath):
        # Plain value object: just capture the constructor arguments.
        self.description, self.mainClass, self.pluginPath = description, mainClass, pluginPath
| 32.57971 | 105 | 0.666815 | from os import path
from shutil import make_archive
import os
from json import load, dump
PLUGINEXTENSION = '.epf'
DESCRIPTIONNAME = 'description'
def packPluginFromFolder(folderPath):
folderPath = path.abspath(folderPath)
if not path.exists(folderPath):
raise FileNotFoundError('the folder does not exist.')
if not path.isdir(folderPath):
raise NotADirectoryError('folderPath must be a directory with files.')
parentFolder = path.abspath(path.join(folderPath, path.pardir))
descriptionPath = path.abspath(path.join(folderPath, DESCRIPTIONNAME + '.json'))
if not path.exists(descriptionPath):
raise FileNotFoundError('required plugin description file not found.')
zipTitle = folderPath
finalName = zipTitle + PLUGINEXTENSION
make_archive(zipTitle, 'gztar', folderPath, './')
os.rename(zipTitle + '.tar.gz', finalName)
class PluginDescription(object):
def __init__(self, name='', description='', authorName='', authorEmail=''):
self.name = name
self.description = description
self.authorName = authorName
self.authorEmail = authorEmail
def __repr__(self):
return self.name
def _toDict(self):
d = dir(self)
dd = {v: getattr(self, v) for v in d if not v.startswith('_') and not callable(getattr(self, v))}
return dd
def saveToDisk(self, destFolder):
try:
finalPath = path.abspath(path.join(destFolder, DESCRIPTIONNAME + '.json'))
with open(finalPath, 'w') as dest:
dump(self._toDict(), dest, indent=4)
except:
raise
@staticmethod
def fromDisk(folderPath):
descriptionPath = path.abspath(path.join(folderPath, DESCRIPTIONNAME + '.json'))
if not path.exists(descriptionPath):
raise FileNotFoundError('required plugin description file not found.')
with open(descriptionPath) as desc:
data = load(desc)
description = PluginDescription(**data)
return description
class _Plugin(object):
def __init__(self, description, mainClass, pluginPath):
self.description = description
self.mainClass = mainClass
self.pluginPath = pluginPath
| true | true |
f71cc05cd87321ac0280e9c1dac9a793ff504e60 | 6,120 | py | Python | tests/test_enums.py | eerimoq/sython | 90937bf44b798b9c1ae0d18e31e11e95967b46c6 | [
"MIT"
] | null | null | null | tests/test_enums.py | eerimoq/sython | 90937bf44b798b9c1ae0d18e31e11e95967b46c6 | [
"MIT"
] | null | null | null | tests/test_enums.py | eerimoq/sython | 90937bf44b798b9c1ae0d18e31e11e95967b46c6 | [
"MIT"
] | null | null | null | from .utils import TestCase
from .utils import build_and_test_module
class Test(TestCase):
    """Tests for enum support in the transpiler.

    ``test_enums`` runs the positive end-to-end module test; every other
    method feeds a deliberately broken enum definition to the transpiler
    and asserts the exact CompileError text and caret position.
    """

    def test_enums(self):
        build_and_test_module('enums')

    def test_invalid_string_enum_member_value(self):
        self.assert_transpile_raises(
            '@enum\n'
            'class Foo:\n'
            '    A = "s"\n',
            '  File "", line 3\n'
            '        A = "s"\n'
            '        ^\n'
            "CompileError: invalid enum member value\n")

    def test_invalid_enum_member_name(self):
        self.assert_transpile_raises(
            '@enum\n'
            'class Foo:\n'
            '    V1, V2 = 1\n',
            '  File "", line 3\n'
            '        V1, V2 = 1\n'
            '        ^\n'
            "CompileError: invalid enum member syntax\n")

    def test_invalid_enum_member_value_plus_sign(self):
        self.assert_transpile_raises(
            '@enum\n'
            'class Foo:\n'
            '    A = +1\n',
            '  File "", line 3\n'
            '        A = +1\n'
            '        ^\n'
            "CompileError: invalid enum member value\n")

    def test_invalid_enum_member_value_variable(self):
        self.assert_transpile_raises(
            '@enum\n'
            'class Foo:\n'
            '    A = b\n',
            '  File "", line 3\n'
            '        A = b\n'
            '        ^\n'
            "CompileError: invalid enum member value\n")

    def test_non_pascal_case_enum_member_name(self):
        self.assert_transpile_raises(
            '@enum\n'
            'class Foo:\n'
            '    aB = 1\n',
            '  File "", line 3\n'
            '        aB = 1\n'
            '        ^\n'
            "CompileError: enum member names must be pascal case\n")

    def test_invalid_enum_member_syntax(self):
        self.assert_transpile_raises(
            '@enum\n'
            'class Foo:\n'
            '    1 + 1\n',
            '  File "", line 3\n'
            '        1 + 1\n'
            '        ^\n'
            "CompileError: invalid enum member syntax\n")

    def test_empty_enum_type(self):
        self.assert_transpile_raises(
            '@enum()\n'
            'class Foo:\n'
            '    Ab = 1\n',
            '  File "", line 1\n'
            '    @enum()\n'
            '    ^\n'
            "CompileError: one parameter expected, got 0\n")

    def test_bad_enum_type_f32(self):
        self.assert_transpile_raises(
            '@enum(f32)\n'
            'class Foo:\n'
            '    Ab = 1\n',
            '  File "", line 1\n'
            '    @enum(f32)\n'
            '    ^\n'
            "CompileError: integer type expected, not 'f32'\n")

    def test_enum_float_value(self):
        self.assert_transpile_raises(
            '@enum\n'
            'class Foo:\n'
            '    A = 1\n'
            'func foo():\n'
            '    print(Foo(0.0))\n',
            '  File "", line 5\n'
            '        print(Foo(0.0))\n'
            '        ^\n'
            "CompileError: cannot convert float to 'i64'\n")

    def test_enum_too_many_parameters(self):
        self.assert_transpile_raises(
            '@enum\n'
            'class Foo:\n'
            '    A = 1\n'
            'func foo():\n'
            '    print(Foo(1, 2))\n',
            '  File "", line 5\n'
            '        print(Foo(1, 2))\n'
            '        ^\n'
            "CompileError: expected 1 parameter, got 2\n")

    def test_not_enum(self):
        self.assert_transpile_raises(
            '@enum\n'
            'class Foo:\n'
            '    A = 1\n'
            'func foo():\n'
            '    print(not Foo.A)\n',
            '  File "", line 5\n'
            '        print(not Foo.A)\n'
            '        ^\n'
            "CompileError: expected a 'bool', got a 'foo.lib.Foo'\n")

    def test_enum_member_value_lower_than_previous_1(self):
        self.assert_transpile_raises(
            '@enum\n'
            'class Foo:\n'
            '    A = 0\n'
            '    B = -1\n',
            '  File "", line 4\n'
            '        B = -1\n'
            '        ^\n'
            "CompileError: enum member value lower than for previous member\n")

    def test_enum_member_value_lower_than_previous_2(self):
        self.assert_transpile_raises(
            '@enum\n'
            'class Foo:\n'
            '    A\n'
            '    B\n'
            '    C = 0\n',
            '  File "", line 5\n'
            '        C = 0\n'
            '        ^\n'
            "CompileError: enum member value lower than for previous member\n")

    def test_enum_pascal_case(self):
        self.assert_transpile_raises(
            '@enum\n'
            'class foo:\n'
            '    A\n',
            '  File "", line 2\n'
            '    class foo:\n'
            '    ^\n'
            "CompileError: enum names must be pascal case\n")

    def test_enum_bad_member_syntax(self):
        self.assert_transpile_raises(
            '@enum\n'
            'class Foo:\n'
            '    func a(self):\n'
            '        pass\n',
            '  File "", line 3\n'
            '        func a(self):\n'
            '        ^\n'
            "CompileError: invalid enum member syntax\n")

    def test_use_missing_enum_value_in_print(self):
        self.assert_transpile_raises(
            '@enum\n'
            'class Foo:\n'
            '    Apa = 1\n'
            'func foo():\n'
            '    print(Foo.APA)\n',
            '  File "", line 5\n'
            '        print(Foo.APA)\n'
            '        ^\n'
            "CompileError: enum has no member 'APA'\n")

    def test_use_missing_enum_value_in_comparision(self):
        self.assert_transpile_raises(
            '@enum\n'
            'class Foo:\n'
            '    Apa = 1\n'
            'func foo():\n'
            '    if Foo.APA == Foo.Apa:\n'
            '        pass\n',
            '  File "", line 5\n'
            '        if Foo.APA == Foo.Apa:\n'
            '        ^\n'
            "CompileError: enum has no member 'APA'\n")
from .utils import build_and_test_module
class Test(TestCase):
def test_enums(self):
build_and_test_module('enums')
def test_invalid_string_enum_member_value(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' A = "s"\n',
' File "", line 3\n'
' A = "s"\n'
' ^\n'
"CompileError: invalid enum member value\n")
def test_invalid_enum_member_name(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' V1, V2 = 1\n',
' File "", line 3\n'
' V1, V2 = 1\n'
' ^\n'
"CompileError: invalid enum member syntax\n")
def test_invalid_enum_member_value_plus_sign(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' A = +1\n',
' File "", line 3\n'
' A = +1\n'
' ^\n'
"CompileError: invalid enum member value\n")
def test_invalid_enum_member_value_variable(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' A = b\n',
' File "", line 3\n'
' A = b\n'
' ^\n'
"CompileError: invalid enum member value\n")
def test_non_pascal_case_enum_member_name(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' aB = 1\n',
' File "", line 3\n'
' aB = 1\n'
' ^\n'
"CompileError: enum member names must be pascal case\n")
def test_invalid_enum_member_syntax(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' 1 + 1\n',
' File "", line 3\n'
' 1 + 1\n'
' ^\n'
"CompileError: invalid enum member syntax\n")
def test_empty_enum_type(self):
self.assert_transpile_raises(
'@enum()\n'
'class Foo:\n'
' Ab = 1\n',
' File "", line 1\n'
' @enum()\n'
' ^\n'
"CompileError: one parameter expected, got 0\n")
def test_bad_enum_type_f32(self):
self.assert_transpile_raises(
'@enum(f32)\n'
'class Foo:\n'
' Ab = 1\n',
' File "", line 1\n'
' @enum(f32)\n'
' ^\n'
"CompileError: integer type expected, not 'f32'\n")
def test_enum_float_value(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' A = 1\n'
'func foo():\n'
' print(Foo(0.0))\n',
' File "", line 5\n'
' print(Foo(0.0))\n'
' ^\n'
"CompileError: cannot convert float to 'i64'\n")
def test_enum_too_many_parameters(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' A = 1\n'
'func foo():\n'
' print(Foo(1, 2))\n',
' File "", line 5\n'
' print(Foo(1, 2))\n'
' ^\n'
"CompileError: expected 1 parameter, got 2\n")
def test_not_enum(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' A = 1\n'
'func foo():\n'
' print(not Foo.A)\n',
' File "", line 5\n'
' print(not Foo.A)\n'
' ^\n'
"CompileError: expected a 'bool', got a 'foo.lib.Foo'\n")
def test_enum_member_value_lower_than_previous_1(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' A = 0\n'
' B = -1\n',
' File "", line 4\n'
' B = -1\n'
' ^\n'
"CompileError: enum member value lower than for previous member\n")
def test_enum_member_value_lower_than_previous_2(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' A\n'
' B\n'
' C = 0\n',
' File "", line 5\n'
' C = 0\n'
' ^\n'
"CompileError: enum member value lower than for previous member\n")
def test_enum_pascal_case(self):
self.assert_transpile_raises(
'@enum\n'
'class foo:\n'
' A\n',
' File "", line 2\n'
' class foo:\n'
' ^\n'
"CompileError: enum names must be pascal case\n")
def test_enum_bad_member_syntax(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' func a(self):\n'
' pass\n',
' File "", line 3\n'
' func a(self):\n'
' ^\n'
"CompileError: invalid enum member syntax\n")
def test_use_missing_enum_value_in_print(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' Apa = 1\n'
'func foo():\n'
' print(Foo.APA)\n',
' File "", line 5\n'
' print(Foo.APA)\n'
' ^\n'
"CompileError: enum has no member 'APA'\n")
def test_use_missing_enum_value_in_comparision(self):
self.assert_transpile_raises(
'@enum\n'
'class Foo:\n'
' Apa = 1\n'
'func foo():\n'
' if Foo.APA == Foo.Apa:\n'
' pass\n',
' File "", line 5\n'
' if Foo.APA == Foo.Apa:\n'
' ^\n'
"CompileError: enum has no member 'APA'\n")
| true | true |
f71cc0cae73f084599395e8d8ba1c44ef7ba93fe | 1,764 | py | Python | LogFileSetup.py | skw32/DefectCorrectionsNotebook | 7342bc6cafa4c19c774d48c4f68b02db7d2e2eb1 | [
"BSD-3-Clause"
] | 4 | 2019-03-05T01:04:30.000Z | 2020-05-19T13:07:20.000Z | LogFileSetup.py | lxf-gzu/DefectCorrectionsNotebook | fef2ede0afb27e35d8e69c1d8aa759df284dc149 | [
"BSD-3-Clause"
] | 1 | 2019-06-01T18:07:53.000Z | 2019-06-01T18:07:53.000Z | LogFileSetup.py | lxf-gzu/DefectCorrectionsNotebook | fef2ede0afb27e35d8e69c1d8aa759df284dc149 | [
"BSD-3-Clause"
] | 6 | 2019-03-26T18:38:23.000Z | 2020-05-21T07:07:33.000Z | import logging
def configure_logging(logfile_path):
    """Configure the root logger for notebook use and return it.

    Installs two INFO-level handlers on the root logger, replacing any
    handlers/filters installed previously:

    * a ``FileHandler`` writing ``<logfile_path>.info`` (truncated on each
      call) that stores intermediate results of the defect analysis;
    * a ``StreamHandler`` that shows messages inline in the notebook with
      a ``[LEVEL] [Cell line num: N] message`` format.

    Usage::

        import LogFileSetup as lfs
        logger = lfs.configure_logging(os.path.join(LOG_DIR, "log"))
        logger.info("MESSAGE")

    :param logfile_path: base path of the log file; ``.info`` is appended.
    :return: the configured root logger.
    """
    # Format for in-notebook messages only; the file keeps the logging
    # module's default record format.
    notebook_formatter = logging.Formatter(
        "[%(levelname)s] [Cell line num: %(lineno)s] %(message)s")
    defect_logger = logging.getLogger()
    # log.info file handler: truncate on every run.
    info_file_handler = logging.FileHandler(logfile_path + ".info", mode='w')
    info_file_handler.setLevel(logging.INFO)
    # In-notebook handler.
    notebook_handler = logging.StreamHandler()
    notebook_handler.setLevel(logging.INFO)
    notebook_handler.setFormatter(notebook_formatter)
    # Remove default/previous handlers and filters, then install ours.
    for handler in defect_logger.handlers[:]:
        defect_logger.removeHandler(handler)
    for log_filter in defect_logger.filters[:]:
        defect_logger.removeFilter(log_filter)
    defect_logger.setLevel(logging.INFO)
    defect_logger.addHandler(info_file_handler)
    defect_logger.addHandler(notebook_handler)
    return defect_logger
| 46.421053 | 132 | 0.740363 | import logging
def configure_logging(logfile_path):
notebook_formatter = logging.Formatter("[%(levelname)s] [Cell line num: %(lineno)s] %(message)s")
defect_logger = logging.getLogger()
info_file_handler = logging.FileHandler(logfile_path + ".info", mode='w')
info_file_handler.setLevel(logging.INFO)
notebook_handler = logging.StreamHandler()
notebook_handler.setLevel(logging.INFO)
notebook_handler.setFormatter(notebook_formatter)
list(map(defect_logger.removeHandler, defect_logger.handlers[:]))
list(map(defect_logger.removeFilter, defect_logger.filters[:]))
defect_logger.setLevel(logging.INFO)
defect_logger.addHandler(info_file_handler)
defect_logger.addHandler(notebook_handler)
return defect_logger
| true | true |
f71cc1efb366d21efb50b72a9d38ce6d8c3b520d | 1,439 | py | Python | workers/clustering_worker/setup.py | hsh3n3/augur | bb65774a0884fd82ec7799f33ac87997268d5a5f | [
"MIT"
] | 1 | 2020-12-21T23:39:27.000Z | 2020-12-21T23:39:27.000Z | workers/clustering_worker/setup.py | hsh3n3/augur | bb65774a0884fd82ec7799f33ac87997268d5a5f | [
"MIT"
] | 2 | 2021-12-10T01:45:26.000Z | 2021-12-10T01:58:04.000Z | workers/clustering_worker/setup.py | hsh3n3/augur | bb65774a0884fd82ec7799f33ac87997268d5a5f | [
"MIT"
] | 1 | 2019-05-20T15:30:40.000Z | 2019-05-20T15:30:40.000Z | import io
import os
import re
from setuptools import find_packages
from setuptools import setup
def read(filename):
    """Read *filename* (relative to this file) as UTF-8 text.

    Sphinx/reST roles such as ``:func:`~pkg.fn``` are rewritten to plain
    literals (````pkg.fn````) so the text renders cleanly as a long
    description on PyPI.
    """
    filename = os.path.join(os.path.dirname(__file__), filename)
    with io.open(filename, mode="r", encoding='utf-8') as fd:
        # The Python-2 ``text_type = type(u"")`` shim was removed; on
        # Python 3 both pattern and replacement are plain str literals.
        return re.sub(r':[a-z]+:`~?(.*?)`', r'``\1``', fd.read())
# Package metadata and install configuration for the Augur clustering worker.
setup(
    name="clustering_worker",
    version="0.0.1",
    url="https://github.com/chaoss/augur",
    license='MIT',
    author="Sarit Adhikari",
    author_email="sarit.adhikari@gmail.com",
    description="worker to cluster repository based on messages on issues and pull requests ",
    packages=find_packages(),
    # Pinned runtime dependencies: Flask web stack, Postgres driver, and
    # the ML/plotting stack used for clustering.
    install_requires=[
        'Flask==1.1.4',
        'Flask-Cors==3.0.10',
        'Flask-Login==0.5.0',
        'Flask-WTF==0.14.3',
        'requests==2.22.0',
        'psycopg2-binary==2.8.6',
        'sklearn==0.0',
        'numpy==1.19.5',
        'nltk==3.5',
        'seaborn==0.11.1',
        'pandas==1.1.3',
        'matplotlib==3.3.4'
    ],
    # Console entry point that launches the worker process.
    entry_points={
        'console_scripts': [
            'clustering_worker_start=workers.clustering_worker.runtime:main',
        ],
    },
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.7',
    ]
)
| 28.78 | 94 | 0.579569 | import io
import os
import re
from setuptools import find_packages
from setuptools import setup
def read(filename):
filename = os.path.join(os.path.dirname(__file__), filename)
text_type = type(u"")
with io.open(filename, mode="r", encoding='utf-8') as fd:
return re.sub(text_type(r':[a-z]+:`~?(.*?)`'), text_type(r'``\1``'), fd.read())
setup(
name="clustering_worker",
version="0.0.1",
url="https://github.com/chaoss/augur",
license='MIT',
author="Sarit Adhikari",
author_email="sarit.adhikari@gmail.com",
description="worker to cluster repository based on messages on issues and pull requests ",
packages=find_packages(),
install_requires=[
'Flask==1.1.4',
'Flask-Cors==3.0.10',
'Flask-Login==0.5.0',
'Flask-WTF==0.14.3',
'requests==2.22.0',
'psycopg2-binary==2.8.6',
'sklearn==0.0',
'numpy==1.19.5',
'nltk==3.5',
'seaborn==0.11.1',
'pandas==1.1.3',
'matplotlib==3.3.4'
],
entry_points={
'console_scripts': [
'clustering_worker_start=workers.clustering_worker.runtime:main',
],
},
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
]
)
| true | true |
f71cc343a74ac1d719f7021173dac1e468df922f | 2,529 | py | Python | src/main/python/baselines/dsl/search/indexer.py | sgottsch/Tab2KG | 5c749ae6056a8c9b6a23674a7bf9d8a3cc7b8530 | [
"MIT"
] | null | null | null | src/main/python/baselines/dsl/search/indexer.py | sgottsch/Tab2KG | 5c749ae6056a8c9b6a23674a7bf9d8a3cc7b8530 | [
"MIT"
] | null | null | null | src/main/python/baselines/dsl/search/indexer.py | sgottsch/Tab2KG | 5c749ae6056a8c9b6a23674a7bf9d8a3cc7b8530 | [
"MIT"
] | null | null | null | # This is an edited version of https://github.com/minhptx/iswc-2016-semantic-labeling, which was edited to use it as a baseline for Tab2KG (https://github.com/sgottsch/Tab2KG).
import logging
from elasticsearch.exceptions import RequestError
from elasticsearch.helpers import scan, bulk
from lib.utils import get_index_name
__author__ = "minh"
class Indexer:
def __init__(self, es):
self.es = es
def init_analyzers(self, index_config):
print("init_analyzers")
print(index_config)
print(get_index_name(index_config))
if(self.es.indices.exists(get_index_name(index_config))):
self.es.indices.delete(index=get_index_name(index_config))
self.es.indices.create(index=get_index_name(index_config))
def index_column(self, column, source_name, index_config):
body = column.to_json()
body['source'] = source_name
try:
self.es.index(index=get_index_name(index_config), doc_type="service",
body=body)
return True
except RequestError:
print("Error")
return False
def index_source(self, source, index_config):
# self.es.indices.put_mapping(index=get_index_name(index_config), doc_type="service", body={
# "service": {
# "properties": {
# "source": {
# "type": "string",
# "index": "not_analyzed"
# }
# }
# }
# })
for column in source.column_map.values():
if column.semantic_type:
if len(column.value_list) > 0:
successful = self.index_column(column, source.index_name, index_config)
if(not successful):
return False
else:
logging.warning("Indexer: IGNORE COLUMN `%s` in source `%s` because of empty values", column.name, source.name)
return True
def delete_column(self, attr_name, source_name, index_config):
bulk_deletes = []
for result in scan(self.es, query={
"query": {
"match": {
"name": attr_name,
}
}
}, index=get_index_name(index_config), doc_type="service", _source=False,
track_scores=False, scroll='5m'):
result['_op_type'] = 'delete'
bulk_deletes.append(result)
bulk(self.es, bulk_deletes)
| 35.125 | 176 | 0.572558 |
import logging
from elasticsearch.exceptions import RequestError
from elasticsearch.helpers import scan, bulk
from lib.utils import get_index_name
__author__ = "minh"
class Indexer:
def __init__(self, es):
self.es = es
def init_analyzers(self, index_config):
print("init_analyzers")
print(index_config)
print(get_index_name(index_config))
if(self.es.indices.exists(get_index_name(index_config))):
self.es.indices.delete(index=get_index_name(index_config))
self.es.indices.create(index=get_index_name(index_config))
def index_column(self, column, source_name, index_config):
body = column.to_json()
body['source'] = source_name
try:
self.es.index(index=get_index_name(index_config), doc_type="service",
body=body)
return True
except RequestError:
print("Error")
return False
def index_source(self, source, index_config):
for column in source.column_map.values():
if column.semantic_type:
if len(column.value_list) > 0:
successful = self.index_column(column, source.index_name, index_config)
if(not successful):
return False
else:
logging.warning("Indexer: IGNORE COLUMN `%s` in source `%s` because of empty values", column.name, source.name)
return True
def delete_column(self, attr_name, source_name, index_config):
bulk_deletes = []
for result in scan(self.es, query={
"query": {
"match": {
"name": attr_name,
}
}
}, index=get_index_name(index_config), doc_type="service", _source=False,
track_scores=False, scroll='5m'):
result['_op_type'] = 'delete'
bulk_deletes.append(result)
bulk(self.es, bulk_deletes)
| true | true |
f71cc3aaeaa11d3ebb6404eeec7169f405f64fce | 1,731 | py | Python | rkqc/tools/lnn_optimization.py | ah744/ScaffCC_RKQC | b7f9adb330f667c007ee9de6c3c1319c55ccc65b | [
"BSD-2-Clause"
] | 1 | 2022-02-05T23:28:35.000Z | 2022-02-05T23:28:35.000Z | rkqc/tools/lnn_optimization.py | ah744/ScaffCC_RKQC | b7f9adb330f667c007ee9de6c3c1319c55ccc65b | [
"BSD-2-Clause"
] | null | null | null | rkqc/tools/lnn_optimization.py | ah744/ScaffCC_RKQC | b7f9adb330f667c007ee9de6c3c1319c55ccc65b | [
"BSD-2-Clause"
] | 1 | 2022-02-05T23:42:06.000Z | 2022-02-05T23:42:06.000Z | #!/home/adam/Documents/revkit-1.3/python
#!/usr/bin/python
# RevKit: A Toolkit for Reversible Circuit Design (www.revkit.org)
# Copyright (C) 2009-2011 The RevKit Developers <revkit@informatik.uni-bremen.de>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys , os
sys.path.append(os.path.dirname(sys.path[0]))
from revkit import *
# RevKit command-line driver (Python 2): applies linear-nearest-neighbor
# optimization to a reversible circuit read from a .real file.
opts = program_options()
opts.add_read_realization_option()
opts.add_write_realization_option()
opts.add_option( "reordering", 0, "0: Naive algorithm\n1: Local reordering\n2: Global reordering" )
opts.parse( sys.argv )

# Abort with usage text if required options are missing/invalid.
if not opts.good():
    print opts
    exit( 1 )

circ = circuit()
read_realization( circ, opts.read_realization_filename() ) or sys.exit ("Cannot read " + opts.read_realization_filename())

newcirc = circuit()
# Third argument selects the reordering strategy (see option help above).
r = lnn_optimization( newcirc, circ, opts["reordering"] )

# On success lnn_optimization returns a dict with run metadata (including
# "runtime"); otherwise it returns an error message.
if type(r) == dict:
    if opts.is_write_realization_filename_set():
        write_realization( newcirc, opts.write_realization_filename() )
    print "Original Circuit:"
    print_statistics( circ )
    print
    print "Optimized Circuit:"
    print_statistics( newcirc, r["runtime"] )
else:
    print r
import sys , os
sys.path.append(os.path.dirname(sys.path[0]))
from revkit import *
opts = program_options()
opts.add_read_realization_option()
opts.add_write_realization_option()
opts.add_option( "reordering", 0, "0: Naive algorithm\n1: Local reordering\n2: Global reordering" )
opts.parse( sys.argv )
if not opts.good():
print opts
exit( 1 )
circ = circuit()
read_realization( circ, opts.read_realization_filename() ) or sys.exit ("Cannot read " + opts.read_realization_filename())
newcirc = circuit()
r = lnn_optimization( newcirc, circ, opts["reordering"] )
if type(r) == dict:
if opts.is_write_realization_filename_set():
write_realization( newcirc, opts.write_realization_filename() )
print "Original Circuit:"
print_statistics( circ )
print
print "Optimized Circuit:"
print_statistics( newcirc, r["runtime"] )
else:
print r
| false | true |
f71cc45987d40f97d2107f51052d959e0ffc1f6c | 10,055 | py | Python | crawler/crawler.py | thelumberjhack/corpusgen | 8ff1045e5b884991903697e2567a2ba67f37060f | [
"MIT"
] | 2 | 2021-01-01T12:20:39.000Z | 2021-05-10T23:33:27.000Z | crawler/crawler.py | thelumberjhack/corpusgen | 8ff1045e5b884991903697e2567a2ba67f37060f | [
"MIT"
] | null | null | null | crawler/crawler.py | thelumberjhack/corpusgen | 8ff1045e5b884991903697e2567a2ba67f37060f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# This code greatly inspires itself from http://aosabook.org/en/500L/a-web-crawler-with-asyncio-coroutines.html
import cgi
from collections import namedtuple
import os
import re
import logging
import urllib
import asyncio
import aiohttp
from asyncio import Queue
import time
LOGGER = logging.getLogger(__name__)

# Per-fetch result record: the fetched URL, the redirect target (if any),
# HTTP status, exception raised (if any), body size, content type and
# encoding, plus counts of links found and of links not seen before.
FetchStatistic = namedtuple(
    'FetchStatistic', [
        'url',
        'next_url',
        'status',
        'exception',
        'size',
        'content_type',
        'encoding',
        'num_urls',
        'num_new_urls'
    ]
)
class Crawler(object):
    """ Crawls a set of urls.
    """

    def __init__(self, roots, exclude=None, strict=True, max_redirect=10, max_tries=3, max_tasks=10, *, loop=None,
                 max_size=1024**2, file_type=None):
        self.loop = loop or asyncio.get_event_loop()
        self.roots = roots
        self.exclude = exclude              # regex of URLs to skip, or None
        self.strict = strict                # strict vs. lenient host matching
        self.max_redirect = max_redirect    # redirects to follow per URL
        self.max_tries = max_tries          # network retries per URL
        self.max_tasks = max_tasks          # number of concurrent workers
        self.queue = Queue(loop=self.loop)  # (url, remaining_redirects) items
        self.seen_urls = set()              # every URL ever enqueued
        self.done = []                      # FetchStatistic per completed fetch
        self.session = aiohttp.ClientSession(loop=self.loop)
        self.root_domains = set()
        # NOTE(review): stored but only read by size_ok(), which is not
        # implemented and has no caller in this class — confirm intent.
        self.max_file_size = max_size
        # Normalize the wanted extension to a leading-dot form.
        if file_type.startswith("."):
            self.file_type = file_type
        else:
            self.file_type = "." + file_type
        # Derive the set of crawlable root domains from the seed URLs.
        for root in roots:
            parts = urllib.parse.urlparse(root)
            # NOTE(review): urllib.parse.splitport is an undocumented,
            # deprecated helper — confirm against the aiohttp/python
            # versions this targets.
            host, port = urllib.parse.splitport(parts.netloc)
            if not host:
                continue
            if re.match(r'\A[\d\.]*\Z', host):
                # Bare IP address: keep verbatim.
                self.root_domains.add(host)
            else:
                host = host.lower()
                if self.strict:
                    self.root_domains.add(host)
                else:
                    self.root_domains.add(self.lenient_host(host))
        # Seed the work queue with the root URLs.
        for root in roots:
            self.add_url(root)
        self.t0 = time.time()   # crawl start time; reset again in crawl()
        self.t1 = None          # crawl end time, set when the queue drains

    @staticmethod
    def lenient_host(host):
        """Collapse a hostname to its last two labels joined without the dot."""
        parts = host.split('.')[-2:]
        return ''.join(parts)

    @staticmethod
    def is_redirect(response):
        """True if the HTTP status is one of the redirect codes handled here."""
        return response.status in (300, 301, 302, 303, 307)

    def close(self):
        """ Close resources
        :return: None
        """
        self.session.close()

    def host_ok(self, host):
        """ Can this host be crawled?
        :param host:
        :return:
        """
        host = host.lower()
        if host in self.root_domains:
            return True
        # Never wander onto bare IP addresses that are not explicit roots.
        if re.match(r'\A[\d\.]*\Z', host):
            return False
        if self.strict:
            return self.host_ok_strict(host)
        else:
            return self.host_ok_lenient(host)

    def host_ok_strict(self, host):
        # Accept the host if toggling a leading "www." makes it a root
        # domain (exact-match roots were already accepted in host_ok).
        if host.startswith("www."):
            host = host[4:]
        else:
            host = "www." + host
        return host in self.root_domains

    def host_ok_lenient(self, host):
        # Lenient match: compare only the collapsed last two labels.
        return self.lenient_host(host) in self.root_domains

    def record_statistic(self, fetch_statistic):
        # Append one FetchStatistic to the crawl report.
        self.done.append(fetch_statistic)

    @asyncio.coroutine
    def parse_links(self, response):
        """ Return a FetchStatistic and list of links.
        :param response:
        :return: FetchStatistic and links.
        """
        links = set()
        content_type = None
        encoding = None
        body = yield from response.read()

        if response.status == 200:
            content_type = response.headers.get("content-type")
            pdict = {}

            if content_type:
                content_type, pdict = cgi.parse_header(content_type)

            encoding = pdict.get("charset", "utf-8")
            # Only HTML/XML bodies are scanned for links.
            if content_type in ("text/html", "application/xml"):
                text = yield from response.text()

                # get all urls links
                urls = set(re.findall(r'''(?i)href=["']([^\s"'<>]+)''', text))
                if urls:
                    LOGGER.info("got {} distinct urls from {}".format(len(urls), response.url))

                for url in urls:
                    # Resolve relative links and drop fragments before
                    # the crawl-policy filter.
                    normalized = urllib.parse.urljoin(response.url, url)
                    defragmented, frag = urllib.parse.urldefrag(normalized)
                    if self.url_allowed(defragmented):
                        links.add(defragmented)

        stat = FetchStatistic(
            url=response.url,
            next_url=None,
            status=response.status,
            exception=None,
            size=len(body),
            content_type=content_type,
            encoding=encoding,
            num_urls=len(links),
            num_new_urls=len(links - self.seen_urls)
        )
        return stat, links

    @asyncio.coroutine
    def fetch(self, url, max_redirect):
        """ Fetch one url.
        :param url:
        :param max_redirect:
        :return:
        """
        tries = 0
        exception = None
        # Retry loop: only aiohttp client errors count as a retryable try.
        while tries < self.max_tries:
            try:
                response = yield from self.session.get(url, allow_redirects=False)

                if tries > 1:
                    LOGGER.info("try {} for {} success".format(tries, url))
                break
            except aiohttp.ClientError as client_error:
                LOGGER.info("try {} for {} raised {}".format(tries, url, client_error))
                exception = client_error

            tries += 1
        else:
            # we never broke out of the loop: all tries failed
            LOGGER.error("{} failed after {} tries".format(url, self.max_tries))
            self.record_statistic(
                FetchStatistic(
                    url=url,
                    next_url=None,
                    status=None,
                    exception=exception,
                    size=0,
                    content_type=None,
                    encoding=None,
                    num_urls=0,
                    num_new_urls=0
                )
            )
            return

        try:
            if self.is_redirect(response):
                # Record the redirect and re-enqueue the target with one
                # fewer redirect allowance, unless it was already seen.
                location = response.headers['location']
                next_url = urllib.parse.urljoin(url, location)
                self.record_statistic(
                    FetchStatistic(
                        url=url,
                        next_url=next_url,
                        status=response.status,
                        exception=None,
                        size=0,
                        content_type=None,
                        encoding=None,
                        num_urls=0,
                        num_new_urls=0
                    )
                )

                if next_url in self.seen_urls:
                    return
                if max_redirect > 0:
                    LOGGER.info("redirect to {} from {}".format(next_url, url))
                    self.add_url(next_url, max_redirect - 1)
                else:
                    LOGGER.error("redirect limit reached for {} from {}".format(next_url, url))
            else:
                # Normal response: harvest links and enqueue the new ones.
                stat, links = yield from self.parse_links(response)
                self.record_statistic(stat)
                for link in links.difference(self.seen_urls):
                    self.queue.put_nowait((link, self.max_redirect))
                self.seen_urls.update(links)
        finally:
            # Always return the connection to the pool.
            yield from response.release()

    @asyncio.coroutine
    def work(self):
        """ Process Queue items forever.
        :return: None
        """
        try:
            while True:
                url, max_redirect = yield from self.queue.get()
                assert url in self.seen_urls
                yield from self.fetch(url, max_redirect)
                self.queue.task_done()
        except asyncio.CancelledError as cancelled:
            # Workers are cancelled by crawl() once the queue drains;
            # swallowing the cancellation ends this task quietly.
            pass

    def url_allowed(self, url):
        """ Is url http or https format. Also checks the pointed url file type and size.
        :param url: given url
        :return: True if all conditions are met. False otherwise.
        """
        if self.exclude and re.search(self.exclude, url):
            return False
        parts = urllib.parse.urlparse(url)
        if parts.scheme not in ("http", "https"):
            LOGGER.debug("skipping non-http scheme in {}".format(url))
            return False
        host, port = urllib.parse.splitport(parts.netloc)
        if not self.host_ok(host):
            LOGGER.debug("skipping non-root host in {}".format(url))
            return False
        # check file type
        if not self.file_ok(url):
            LOGGER.debug("skipping non {} files".format(self.file_type))
            return False
        return True

    def add_url(self, url, max_redirect=None):
        """ Adds url to the queue if not seen before.
        :param url:
        :param max_redirect:
        :return: None
        """
        if max_redirect is None:
            max_redirect = self.max_redirect
        LOGGER.debug("adding {} {}".format(url, max_redirect))
        self.seen_urls.add(url)
        self.queue.put_nowait((url, max_redirect))

    @asyncio.coroutine
    def crawl(self):
        """ Run the crawler until all finished.
        :return: None
        """
        workers = [asyncio.Task(self.work(), loop=self.loop) for _ in range(self.max_tasks)]
        self.t0 = time.time()
        # Block until every queued URL has been processed, then stop the
        # (otherwise endless) worker tasks.
        yield from self.queue.join()
        self.t1 = time.time()
        for w in workers:
            w.cancel()

    def file_ok(self, url):
        """ Is the url pointing to the correct file type? Is its size OK?
        :param url:
        :return: True if file is from a type the user requested. False otherwise.
        """
        # Only the extension of the URL path is checked here; size is not
        # (see size_ok below).
        href_path = urllib.parse.urlparse(url).path
        extension = os.path.splitext(href_path)[1]
        return extension == self.file_type

    def size_ok(self, response):
        """ Check if file size <= MAX_SIZE before downloading.
        :param response:
        :return:
        """
        # NOTE(review): unimplemented and not called anywhere in this
        # class, so max_file_size is currently unenforced — confirm.
        raise NotImplementedError
| 30.014925 | 114 | 0.529687 |
import cgi
from collections import namedtuple
import os
import re
import logging
import urllib
import asyncio
import aiohttp
from asyncio import Queue
import time
LOGGER = logging.getLogger(__name__)
FetchStatistic = namedtuple(
'FetchStatistic', [
'url',
'next_url',
'status',
'exception',
'size',
'content_type',
'encoding',
'num_urls',
'num_new_urls'
]
)
class Crawler(object):
def __init__(self, roots, exclude=None, strict=True, max_redirect=10, max_tries=3, max_tasks=10, *, loop=None,
max_size=1024**2, file_type=None):
self.loop = loop or asyncio.get_event_loop()
self.roots = roots
self.exclude = exclude
self.strict = strict
self.max_redirect = max_redirect
self.max_tries = max_tries
self.max_tasks = max_tasks
self.queue = Queue(loop=self.loop)
self.seen_urls = set()
self.done = []
self.session = aiohttp.ClientSession(loop=self.loop)
self.root_domains = set()
self.max_file_size = max_size
if file_type.startswith("."):
self.file_type = file_type
else:
self.file_type = "." + file_type
for root in roots:
parts = urllib.parse.urlparse(root)
host, port = urllib.parse.splitport(parts.netloc)
if not host:
continue
if re.match(r'\A[\d\.]*\Z', host):
self.root_domains.add(host)
else:
host = host.lower()
if self.strict:
self.root_domains.add(host)
else:
self.root_domains.add(self.lenient_host(host))
for root in roots:
self.add_url(root)
self.t0 = time.time()
self.t1 = None
@staticmethod
def lenient_host(host):
parts = host.split('.')[-2:]
return ''.join(parts)
@staticmethod
def is_redirect(response):
return response.status in (300, 301, 302, 303, 307)
def close(self):
self.session.close()
def host_ok(self, host):
host = host.lower()
if host in self.root_domains:
return True
if re.match(r'\A[\d\.]*\Z', host):
return False
if self.strict:
return self.host_ok_strict(host)
else:
return self.host_ok_lenient(host)
def host_ok_strict(self, host):
if host.startswith("www."):
host = host[4:]
else:
host = "www." + host
return host in self.root_domains
def host_ok_lenient(self, host):
return self.lenient_host(host) in self.root_domains
def record_statistic(self, fetch_statistic):
self.done.append(fetch_statistic)
@asyncio.coroutine
def parse_links(self, response):
links = set()
content_type = None
encoding = None
body = yield from response.read()
if response.status == 200:
content_type = response.headers.get("content-type")
pdict = {}
if content_type:
content_type, pdict = cgi.parse_header(content_type)
encoding = pdict.get("charset", "utf-8")
if content_type in ("text/html", "application/xml"):
text = yield from response.text()
urls = set(re.findall(r'''(?i)href=["']([^\s"'<>]+)''', text))
if urls:
LOGGER.info("got {} distinct urls from {}".format(len(urls), response.url))
for url in urls:
normalized = urllib.parse.urljoin(response.url, url)
defragmented, frag = urllib.parse.urldefrag(normalized)
if self.url_allowed(defragmented):
links.add(defragmented)
stat = FetchStatistic(
url=response.url,
next_url=None,
status=response.status,
exception=None,
size=len(body),
content_type=content_type,
encoding=encoding,
num_urls=len(links),
num_new_urls=len(links - self.seen_urls)
)
return stat, links
    @asyncio.coroutine
    def fetch(self, url, max_redirect):
        """Fetch one URL, retrying on client errors and queueing redirects.

        Redirects are followed manually (``allow_redirects=False``) so the
        redirect budget and seen-URL bookkeeping stay under our control.
        """
        tries = 0
        exception = None
        while tries < self.max_tries:
            try:
                response = yield from self.session.get(url, allow_redirects=False)
                if tries > 1:
                    LOGGER.info("try {} for {} success".format(tries, url))
                break
            except aiohttp.ClientError as client_error:
                LOGGER.info("try {} for {} raised {}".format(tries, url, client_error))
                exception = client_error
            # Incremented only on failure, so `tries` counts prior failures.
            tries += 1
        else:
            # We never broke out of the loop: every attempt failed.
            LOGGER.error("{} failed after {} tries".format(url, self.max_tries))
            self.record_statistic(
                FetchStatistic(
                    url=url,
                    next_url=None,
                    status=None,
                    exception=exception,
                    size=0,
                    content_type=None,
                    encoding=None,
                    num_urls=0,
                    num_new_urls=0
                )
            )
            return
        try:
            if self.is_redirect(response):
                location = response.headers['location']
                next_url = urllib.parse.urljoin(url, location)
                self.record_statistic(
                    FetchStatistic(
                        url=url,
                        next_url=next_url,
                        status=response.status,
                        exception=None,
                        size=0,
                        content_type=None,
                        encoding=None,
                        num_urls=0,
                        num_new_urls=0
                    )
                )
                if next_url in self.seen_urls:
                    return
                if max_redirect > 0:
                    LOGGER.info("redirect to {} from {}".format(next_url, url))
                    # Spend one unit of the redirect budget.
                    self.add_url(next_url, max_redirect - 1)
                else:
                    LOGGER.error("redirect limit reached for {} from {}".format(next_url, url))
            else:
                stat, links = yield from self.parse_links(response)
                self.record_statistic(stat)
                # Enqueue only links we have not seen before.
                for link in links.difference(self.seen_urls):
                    self.queue.put_nowait((link, self.max_redirect))
                self.seen_urls.update(links)
        finally:
            # Always return the connection to the pool.
            yield from response.release()
@asyncio.coroutine
def work(self):
try:
while True:
url, max_redirect = yield from self.queue.get()
assert url in self.seen_urls
yield from self.fetch(url, max_redirect)
self.queue.task_done()
except asyncio.CancelledError as cancelled:
pass
    def url_allowed(self, url):
        """Decide whether *url* is in scope for this crawl (scheme, host, type)."""
        if self.exclude and re.search(self.exclude, url):
            return False
        parts = urllib.parse.urlparse(url)
        if parts.scheme not in ("http", "https"):
            LOGGER.debug("skipping non-http scheme in {}".format(url))
            return False
        # NOTE(review): urllib.parse.splitport is deprecated; urlsplit().hostname
        # is the documented replacement -- confirm the target Python version.
        host, port = urllib.parse.splitport(parts.netloc)
        if not self.host_ok(host):
            LOGGER.debug("skipping non-root host in {}".format(url))
            return False
        if not self.file_ok(url):
            LOGGER.debug("skipping non {} files".format(self.file_type))
            return False
        return True
def add_url(self, url, max_redirect=None):
if max_redirect is None:
max_redirect = self.max_redirect
LOGGER.debug("adding {} {}".format(url, max_redirect))
self.seen_urls.add(url)
self.queue.put_nowait((url, max_redirect))
    @asyncio.coroutine
    def crawl(self):
        """Run the crawl: spawn workers, wait for the queue to drain, stop them."""
        workers = [asyncio.Task(self.work(), loop=self.loop) for _ in range(self.max_tasks)]
        self.t0 = time.time()
        yield from self.queue.join()
        self.t1 = time.time()
        for w in workers:
            w.cancel()
def file_ok(self, url):
href_path = urllib.parse.urlparse(url).path
extension = os.path.splitext(href_path)[1]
return extension == self.file_type
    def size_ok(self, response):
        """Hook for subclasses to veto responses by size; unimplemented here."""
        raise NotImplementedError
| true | true |
f71cc717d2a50c2a2eac3e063f01eef3d43d7dc5 | 1,914 | py | Python | code/venv/lib/python3.8/site-packages/datadog_api_client/v1/model/hourly_usage_attribution_response.py | Valisback/hiring-engineers | 7196915dd5a429ae27c21fa43d527f0332e662ed | [
"Apache-2.0"
] | null | null | null | code/venv/lib/python3.8/site-packages/datadog_api_client/v1/model/hourly_usage_attribution_response.py | Valisback/hiring-engineers | 7196915dd5a429ae27c21fa43d527f0332e662ed | [
"Apache-2.0"
] | null | null | null | code/venv/lib/python3.8/site-packages/datadog_api_client/v1/model/hourly_usage_attribution_response.py | Valisback/hiring-engineers | 7196915dd5a429ae27c21fa43d527f0332e662ed | [
"Apache-2.0"
] | null | null | null | # Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from datadog_api_client.v1.model_utils import (
ModelNormal,
cached_property,
)
def lazy_import():
    """Import the referenced model classes on first use and publish them module-wide."""
    from datadog_api_client.v1.model.hourly_usage_attribution_body import HourlyUsageAttributionBody
    from datadog_api_client.v1.model.hourly_usage_attribution_metadata import HourlyUsageAttributionMetadata
    module_globals = globals()
    module_globals["HourlyUsageAttributionBody"] = HourlyUsageAttributionBody
    module_globals["HourlyUsageAttributionMetadata"] = HourlyUsageAttributionMetadata
class HourlyUsageAttributionResponse(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    # No extra value constraints beyond the declared types.
    validations = {}
    @cached_property
    def openapi_types():
        # Deferred import keeps module load cheap and avoids import cycles.
        lazy_import()
        return {
            "metadata": (HourlyUsageAttributionMetadata,),
            "usage": ([HourlyUsageAttributionBody],),
        }
    # Maps Python attribute names to their JSON wire names (identical here).
    attribute_map = {
        "metadata": "metadata",
        "usage": "usage",
    }
    read_only_vars = {}
    def __init__(self, *args, **kwargs):
        """HourlyUsageAttributionResponse - a model defined in OpenAPI
        Keyword Args:
            metadata (HourlyUsageAttributionMetadata): [optional]
            usage ([HourlyUsageAttributionBody]): [optional] Get the hourly usage attribution by tag(s).
        """
        super().__init__(kwargs)
        self._check_pos_args(args)
    @classmethod
    def _from_openapi_data(cls, *args, **kwargs):
        """Helper creating a new instance from a response."""
        self = super(HourlyUsageAttributionResponse, cls)._from_openapi_data(kwargs)
        self._check_pos_args(args)
        return self
| 29.90625 | 108 | 0.698537 |
from datadog_api_client.v1.model_utils import (
ModelNormal,
cached_property,
)
def lazy_import():
from datadog_api_client.v1.model.hourly_usage_attribution_body import HourlyUsageAttributionBody
from datadog_api_client.v1.model.hourly_usage_attribution_metadata import HourlyUsageAttributionMetadata
globals()["HourlyUsageAttributionBody"] = HourlyUsageAttributionBody
globals()["HourlyUsageAttributionMetadata"] = HourlyUsageAttributionMetadata
class HourlyUsageAttributionResponse(ModelNormal):
validations = {}
@cached_property
def openapi_types():
lazy_import()
return {
"metadata": (HourlyUsageAttributionMetadata,),
"usage": ([HourlyUsageAttributionBody],),
}
attribute_map = {
"metadata": "metadata",
"usage": "usage",
}
read_only_vars = {}
def __init__(self, *args, **kwargs):
super().__init__(kwargs)
self._check_pos_args(args)
@classmethod
def _from_openapi_data(cls, *args, **kwargs):
self = super(HourlyUsageAttributionResponse, cls)._from_openapi_data(kwargs)
self._check_pos_args(args)
return self
| true | true |
f71cc725c05458f3a9369d780bd91d3992785579 | 5,283 | py | Python | ecommerce/admin.py | Wassaf-Shahzad/micromasters | b1340a8c233499b1d8d22872a6bc1fe7f49fd323 | [
"BSD-3-Clause"
] | 32 | 2016-03-25T01:03:13.000Z | 2022-01-15T19:35:42.000Z | ecommerce/admin.py | Wassaf-Shahzad/micromasters | b1340a8c233499b1d8d22872a6bc1fe7f49fd323 | [
"BSD-3-Clause"
] | 4,858 | 2016-03-03T13:48:30.000Z | 2022-03-29T22:09:51.000Z | ecommerce/admin.py | umarmughal824/micromasters | ea92d3bcea9be4601150fc497302ddacc1161622 | [
"BSD-3-Clause"
] | 20 | 2016-08-18T22:07:44.000Z | 2021-11-15T13:35:35.000Z | """
Admin views for ecommerce models
"""
from django.contrib import admin
from ecommerce.models import (
Coupon,
CouponAudit,
CouponInvoice,
CouponInvoiceAudit,
Line,
Order,
OrderAudit,
Receipt,
RedeemedCoupon,
RedeemedCouponAudit,
UserCoupon,
UserCouponAudit,
)
from micromasters.utils import get_field_names
class LineAdmin(admin.ModelAdmin):
    """Admin for Line: read-only view; lines may not be created or deleted."""
    model = Line
    # Every field is immutable from the admin.
    readonly_fields = get_field_names(Line)
    def has_add_permission(self, request):
        return False
    def has_delete_permission(self, request, obj=None):
        return False
class OrderAdmin(admin.ModelAdmin):
    """Admin for Order: only `status` is editable; add/delete are blocked."""
    model = Order
    list_filter = ('status',)
    list_display = ('id', 'user', 'status', 'created_at', 'course_key',)
    search_fields = (
        'user__username',
        'user__email',
    )
    # Everything except 'status' is immutable from the admin.
    readonly_fields = [name for name in get_field_names(Order) if name != 'status']
    def course_key(self, obj):
        """
        returns first course key associated with order
        """
        line = obj.line_set.first()
        # An order can exist without lines; returning None instead of
        # dereferencing avoids an AttributeError (500) in the changelist.
        return line.course_key if line is not None else None
    def has_add_permission(self, request):
        return False
    def has_delete_permission(self, request, obj=None):
        return False
    def save_model(self, request, obj, form, change):
        """
        Saves object and logs change to object
        """
        obj.save_and_log(request.user)
class OrderAuditAdmin(admin.ModelAdmin):
    """Admin for OrderAudit"""
    model = OrderAudit
    # Audit rows are immutable: all fields read-only, no add/delete.
    readonly_fields = get_field_names(OrderAudit)
    def has_add_permission(self, request):
        return False
    def has_delete_permission(self, request, obj=None):
        return False
class ReceiptAdmin(admin.ModelAdmin):
    """Admin for Receipt"""
    model = Receipt
    # Receipts are immutable records of payment processing.
    readonly_fields = get_field_names(Receipt)
    def has_add_permission(self, request):
        return False
    def has_delete_permission(self, request, obj=None):
        return False
class CouponInvoiceAdmin(admin.ModelAdmin):
    """Admin for CouponInvoice"""
    model = CouponInvoice
    def save_model(self, request, obj, form, change):
        """
        Saves object and logs change to object
        """
        obj.save_and_log(request.user)
class CouponInvoiceAuditAdmin(admin.ModelAdmin):
    """Admin for CouponInvoiceAudit"""
    model = CouponInvoiceAudit
    readonly_fields = get_field_names(CouponInvoiceAudit)
    def has_add_permission(self, request):
        return False
    def has_delete_permission(self, request, obj=None):
        return False
class CouponAdmin(admin.ModelAdmin):
    """Admin for Coupon: searchable by code and invoice, audited on save."""
    model = Coupon
    search_fields = (
        'coupon_code',
        'invoice__invoice_number',
        'invoice__description',
    )
    list_filter = [
        'invoice',
        'enabled',
        'coupon_type',
        'amount_type',
    ]
    def save_model(self, request, obj, form, change):
        """
        Saves object and logs change to object
        """
        obj.save_and_log(request.user)
class CouponAuditAdmin(admin.ModelAdmin):
    """Admin for CouponAudit"""
    model = CouponAudit
    # Audit rows are immutable: all fields read-only, no add/delete.
    readonly_fields = get_field_names(CouponAudit)
    def has_add_permission(self, request):
        return False
    def has_delete_permission(self, request, obj=None):
        return False
class RedeemedCouponAdmin(admin.ModelAdmin):
    """Admin for RedeemedCoupon"""
    model = RedeemedCoupon
    # All fields are read-only; save_model remains for audited saves.
    readonly_fields = get_field_names(RedeemedCoupon)
    def save_model(self, request, obj, form, change):
        """
        Saves object and logs change to object
        """
        obj.save_and_log(request.user)
class RedeemedCouponAuditAdmin(admin.ModelAdmin):
    """Admin for RedeemedCouponAudit"""
    model = RedeemedCouponAudit
    readonly_fields = get_field_names(RedeemedCouponAudit)
    def has_add_permission(self, request):
        return False
    def has_delete_permission(self, request, obj=None):
        return False
class UserCouponAdmin(admin.ModelAdmin):
    """Admin for UserCoupon"""
    model = UserCoupon
    readonly_fields = get_field_names(UserCoupon)
    def save_model(self, request, obj, form, change):
        """
        Saves object and logs change to object
        """
        obj.save_and_log(request.user)
class UserCouponAuditAdmin(admin.ModelAdmin):
    """Admin for UserCouponAudit"""
    model = UserCouponAudit
    readonly_fields = get_field_names(UserCouponAudit)
    def has_add_permission(self, request):
        return False
    def has_delete_permission(self, request, obj=None):
        return False
# Register every ecommerce model with its admin class defined above.
admin.site.register(CouponInvoice, CouponInvoiceAdmin)
admin.site.register(CouponInvoiceAudit, CouponInvoiceAuditAdmin)
admin.site.register(Coupon, CouponAdmin)
admin.site.register(CouponAudit, CouponAuditAdmin)
admin.site.register(Line, LineAdmin)
admin.site.register(Order, OrderAdmin)
admin.site.register(OrderAudit, OrderAuditAdmin)
admin.site.register(RedeemedCoupon, RedeemedCouponAdmin)
admin.site.register(RedeemedCouponAudit, RedeemedCouponAuditAdmin)
admin.site.register(Receipt, ReceiptAdmin)
admin.site.register(UserCoupon, UserCouponAdmin)
admin.site.register(UserCouponAudit, UserCouponAuditAdmin)
| 25.157143 | 83 | 0.68692 |
from django.contrib import admin
from ecommerce.models import (
Coupon,
CouponAudit,
CouponInvoice,
CouponInvoiceAudit,
Line,
Order,
OrderAudit,
Receipt,
RedeemedCoupon,
RedeemedCouponAudit,
UserCoupon,
UserCouponAudit,
)
from micromasters.utils import get_field_names
class LineAdmin(admin.ModelAdmin):
model = Line
readonly_fields = get_field_names(Line)
def has_add_permission(self, request):
return False
def has_delete_permission(self, request, obj=None):
return False
class OrderAdmin(admin.ModelAdmin):
model = Order
list_filter = ('status',)
list_display = ('id', 'user', 'status', 'created_at', 'course_key',)
search_fields = (
'user__username',
'user__email',
)
readonly_fields = [name for name in get_field_names(Order) if name != 'status']
def course_key(self, obj):
line = obj.line_set.first()
return line.course_key
def has_add_permission(self, request):
return False
def has_delete_permission(self, request, obj=None):
return False
def save_model(self, request, obj, form, change):
obj.save_and_log(request.user)
class OrderAuditAdmin(admin.ModelAdmin):
model = OrderAudit
readonly_fields = get_field_names(OrderAudit)
def has_add_permission(self, request):
return False
def has_delete_permission(self, request, obj=None):
return False
class ReceiptAdmin(admin.ModelAdmin):
model = Receipt
readonly_fields = get_field_names(Receipt)
def has_add_permission(self, request):
return False
def has_delete_permission(self, request, obj=None):
return False
class CouponInvoiceAdmin(admin.ModelAdmin):
model = CouponInvoice
def save_model(self, request, obj, form, change):
obj.save_and_log(request.user)
class CouponInvoiceAuditAdmin(admin.ModelAdmin):
model = CouponInvoiceAudit
readonly_fields = get_field_names(CouponInvoiceAudit)
def has_add_permission(self, request):
return False
def has_delete_permission(self, request, obj=None):
return False
class CouponAdmin(admin.ModelAdmin):
model = Coupon
search_fields = (
'coupon_code',
'invoice__invoice_number',
'invoice__description',
)
list_filter = [
'invoice',
'enabled',
'coupon_type',
'amount_type',
]
def save_model(self, request, obj, form, change):
obj.save_and_log(request.user)
class CouponAuditAdmin(admin.ModelAdmin):
model = CouponAudit
readonly_fields = get_field_names(CouponAudit)
def has_add_permission(self, request):
return False
def has_delete_permission(self, request, obj=None):
return False
class RedeemedCouponAdmin(admin.ModelAdmin):
model = RedeemedCoupon
readonly_fields = get_field_names(RedeemedCoupon)
def save_model(self, request, obj, form, change):
obj.save_and_log(request.user)
class RedeemedCouponAuditAdmin(admin.ModelAdmin):
model = RedeemedCouponAudit
readonly_fields = get_field_names(RedeemedCouponAudit)
def has_add_permission(self, request):
return False
def has_delete_permission(self, request, obj=None):
return False
class UserCouponAdmin(admin.ModelAdmin):
model = UserCoupon
readonly_fields = get_field_names(UserCoupon)
def save_model(self, request, obj, form, change):
obj.save_and_log(request.user)
class UserCouponAuditAdmin(admin.ModelAdmin):
model = UserCouponAudit
readonly_fields = get_field_names(UserCouponAudit)
def has_add_permission(self, request):
return False
def has_delete_permission(self, request, obj=None):
return False
admin.site.register(CouponInvoice, CouponInvoiceAdmin)
admin.site.register(CouponInvoiceAudit, CouponInvoiceAuditAdmin)
admin.site.register(Coupon, CouponAdmin)
admin.site.register(CouponAudit, CouponAuditAdmin)
admin.site.register(Line, LineAdmin)
admin.site.register(Order, OrderAdmin)
admin.site.register(OrderAudit, OrderAuditAdmin)
admin.site.register(RedeemedCoupon, RedeemedCouponAdmin)
admin.site.register(RedeemedCouponAudit, RedeemedCouponAuditAdmin)
admin.site.register(Receipt, ReceiptAdmin)
admin.site.register(UserCoupon, UserCouponAdmin)
admin.site.register(UserCouponAudit, UserCouponAuditAdmin)
| true | true |
f71cc7626802c7caa73aac783baedbb65798da02 | 3,272 | py | Python | pylint_plugins/api_models.py | FairwindsOps/st2 | 2b76ca740c4af0d6b2c1d1ba5534ce4133fd16fa | [
"Apache-2.0"
] | 1 | 2021-04-08T03:21:49.000Z | 2021-04-08T03:21:49.000Z | pylint_plugins/api_models.py | FairwindsOps/st2 | 2b76ca740c4af0d6b2c1d1ba5534ce4133fd16fa | [
"Apache-2.0"
] | null | null | null | pylint_plugins/api_models.py | FairwindsOps/st2 | 2b76ca740c4af0d6b2c1d1ba5534ce4133fd16fa | [
"Apache-2.0"
] | null | null | null | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Plugin which tells Pylint how to handle classes which define attributes using jsonschema
in "schema" class attribute.
Those classes dyamically assign attributes defined in the schema on the class inside the
constructor.
"""
import six
from astroid import MANAGER
from astroid import nodes
from astroid import scoped_nodes
# A list of class names for which we want to skip the checks
CLASS_NAME_BLACKLIST = [
'ExecutionSpecificationAPI'
]
def register(linter):
    # Required pylint plugin entry point; all work happens via the astroid
    # transform registered at module import time below.
    pass
def transform(cls):
    # Teach astroid about attributes that *API classes assign dynamically in
    # their constructors from the jsonschema stored in the "schema" class var.
    if cls.name in CLASS_NAME_BLACKLIST:
        return
    if cls.name.endswith('API') or 'schema' in cls.locals:
        # This is a class which defines attributes in "schema" variable using json schema.
        # Those attributes are then assigned during run time inside the constructor
        fqdn = cls.qname()
        module_name, class_name = fqdn.rsplit('.', 1)
        # Import the real class so we can read its actual schema dict.
        module = __import__(module_name, fromlist=[class_name])
        actual_cls = getattr(module, class_name)
        schema = actual_cls.schema
        if not isinstance(schema, dict):
            # Not a class we are interested in
            return
        properties = schema.get('properties', {})
        for property_name, property_data in six.iteritems(properties):
            property_name = property_name.replace('-', '_') # Note: We do the same in Python code
            property_type = property_data.get('type', None)
            if isinstance(property_type, (list, tuple)):
                # Hack for attributes with multiple types (e.g. string, null)
                property_type = property_type[0]
            # Map each jsonschema type to a representative astroid node so
            # pylint infers a sensible Python type for the attribute.
            if property_type == 'object':
                node = nodes.Dict()
            elif property_type == 'array':
                node = nodes.List()
            elif property_type == 'integer':
                node = scoped_nodes.builtin_lookup('int')[1][0]
            elif property_type == 'number':
                node = scoped_nodes.builtin_lookup('float')[1][0]
            elif property_type == 'string':
                node = scoped_nodes.builtin_lookup('str')[1][0]
            elif property_type == 'boolean':
                node = scoped_nodes.builtin_lookup('bool')[1][0]
            elif property_type == 'null':
                node = scoped_nodes.builtin_lookup('None')[1][0]
            else:
                # NOTE(review): scoped_nodes.Class is the pre-astroid-2.x name
                # (ClassDef in newer releases) -- confirm the pinned version.
                node = scoped_nodes.Class(property_name, None)
            cls.locals[property_name] = [node]
# Apply the transform to every class node astroid builds.
MANAGER.register_transform(scoped_nodes.Class, transform)
| 36.764045 | 98 | 0.663814 |
import six
from astroid import MANAGER
from astroid import nodes
from astroid import scoped_nodes
CLASS_NAME_BLACKLIST = [
'ExecutionSpecificationAPI'
]
def register(linter):
pass
def transform(cls):
if cls.name in CLASS_NAME_BLACKLIST:
return
if cls.name.endswith('API') or 'schema' in cls.locals:
fqdn = cls.qname()
module_name, class_name = fqdn.rsplit('.', 1)
module = __import__(module_name, fromlist=[class_name])
actual_cls = getattr(module, class_name)
schema = actual_cls.schema
if not isinstance(schema, dict):
return
properties = schema.get('properties', {})
for property_name, property_data in six.iteritems(properties):
property_name = property_name.replace('-', '_')
property_type = property_data.get('type', None)
if isinstance(property_type, (list, tuple)):
property_type = property_type[0]
if property_type == 'object':
node = nodes.Dict()
elif property_type == 'array':
node = nodes.List()
elif property_type == 'integer':
node = scoped_nodes.builtin_lookup('int')[1][0]
elif property_type == 'number':
node = scoped_nodes.builtin_lookup('float')[1][0]
elif property_type == 'string':
node = scoped_nodes.builtin_lookup('str')[1][0]
elif property_type == 'boolean':
node = scoped_nodes.builtin_lookup('bool')[1][0]
elif property_type == 'null':
node = scoped_nodes.builtin_lookup('None')[1][0]
else:
node = scoped_nodes.Class(property_name, None)
cls.locals[property_name] = [node]
MANAGER.register_transform(scoped_nodes.Class, transform)
| true | true |
f71cc7dd876392c1eb5462cd7fd83e0f8c22bec2 | 19,039 | py | Python | convlab2/policy/larl/multiwoz/latent_dialog/enc2dec/decoders.py | ljw23/ConvLab-2 | 13d48ea0e441701bd66100689b6c25b561f15525 | [
"Apache-2.0"
] | 339 | 2020-03-04T09:43:22.000Z | 2022-03-26T17:27:38.000Z | convlab2/policy/larl/multiwoz/latent_dialog/enc2dec/decoders.py | ljw23/ConvLab-2 | 13d48ea0e441701bd66100689b6c25b561f15525 | [
"Apache-2.0"
] | 122 | 2020-04-12T04:19:06.000Z | 2022-03-23T14:20:57.000Z | convlab2/policy/larl/multiwoz/latent_dialog/enc2dec/decoders.py | ljw23/ConvLab-2 | 13d48ea0e441701bd66100689b6c25b561f15525 | [
"Apache-2.0"
] | 138 | 2020-02-18T16:48:04.000Z | 2022-03-26T17:27:43.000Z | import torch as th
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import numpy as np
from convlab2.policy.larl.multiwoz.latent_dialog.enc2dec.base_modules import BaseRNN
from convlab2.policy.larl.multiwoz.latent_dialog.utils import cast_type, LONG, FLOAT
from convlab2.policy.larl.multiwoz.latent_dialog.corpora import DECODING_MASKED_TOKENS, EOS
# Decoding-mode flags. forward() below branches on TEACH_FORCE (feed gold
# tokens every step) and GEN (free running); TEACH_GEN and GEN_VALID are not
# referenced in this module -- presumably used by callers; TODO confirm.
TEACH_FORCE = 'teacher_forcing'
TEACH_GEN = 'teacher_gen'
GEN = 'gen'
GEN_VALID = 'gen_valid'
class Attention(nn.Module):
    """Attention over encoder context vectors.

    Scoring modes:
      * ``dot``     -- raw dot product between decoder output and context
      * ``general`` -- decoder output linearly mapped to the context size first
      * ``cat``     -- additive scoring through a tanh MLP

    When ``project`` is True the concatenated [context; output] vector is
    projected back to ``dec_cell_size`` through a tanh layer; otherwise the
    raw concatenation is returned.

    Fix: ``F.tanh`` is deprecated (and removed in modern PyTorch); replaced
    with the numerically identical ``th.tanh``.
    """
    def __init__(self, dec_cell_size, ctx_cell_size, attn_mode, project):
        super(Attention, self).__init__()
        self.dec_cell_size = dec_cell_size
        self.ctx_cell_size = ctx_cell_size
        self.attn_mode = attn_mode
        if project:
            self.linear_out = nn.Linear(
                dec_cell_size+ctx_cell_size, dec_cell_size)
        else:
            self.linear_out = None
        if attn_mode == 'general':
            self.dec_w = nn.Linear(dec_cell_size, ctx_cell_size)
        elif attn_mode == 'cat':
            self.dec_w = nn.Linear(dec_cell_size, dec_cell_size)
            self.attn_w = nn.Linear(ctx_cell_size, dec_cell_size)
            self.query_w = nn.Linear(dec_cell_size, 1)

    def forward(self, output, context):
        """Attend *output* over *context*.

        Args:
            output: (batch_size, output_seq_len, dec_cell_size)
            context: (batch_size, max_ctx_len, ctx_cell_size)
        Returns:
            (combined, attn) -- combined is either the raw concatenation
            (dec+ctx size) or its tanh projection (dec size); attn is
            (batch_size, output_seq_len, max_ctx_len), rows summing to 1.
        """
        batch_size = output.size(0)
        max_ctx_len = context.size(1)
        if self.attn_mode == 'dot':
            # (batch_size, output_seq_len, max_ctx_len)
            attn = th.bmm(output, context.transpose(1, 2))
        elif self.attn_mode == 'general':
            # (batch_size, output_seq_len, ctx_cell_size)
            mapped_output = self.dec_w(output)
            attn = th.bmm(mapped_output, context.transpose(1, 2))
        elif self.attn_mode == 'cat':
            mapped_output = self.dec_w(output)
            mapped_attn = self.attn_w(context)
            # (batch_size, output_seq_len, max_ctx_len, dec_cell_size)
            tiled_output = mapped_output.unsqueeze(
                2).repeat(1, 1, max_ctx_len, 1)
            tiled_attn = mapped_attn.unsqueeze(1)
            # th.tanh replaces deprecated F.tanh
            fc1 = th.tanh(tiled_output+tiled_attn)
            # (batch_size, output_seq_len, max_ctx_len)
            attn = self.query_w(fc1).squeeze(-1)
        else:
            raise ValueError('Unknown attention mode')
        # TODO mask padded context positions before the softmax
        attn = F.softmax(attn.view(-1, max_ctx_len),
                         dim=1).view(batch_size, -1, max_ctx_len)
        # (batch_size, output_seq_len, ctx_cell_size)
        mix = th.bmm(attn, context)
        # (batch_size, output_seq_len, dec_cell_size+ctx_cell_size)
        combined = th.cat((mix, output), dim=2)
        if self.linear_out is None:
            return combined, attn
        else:
            output = th.tanh(
                self.linear_out(combined.view(-1, self.dec_cell_size+self.ctx_cell_size))).view(
                batch_size, -1, self.dec_cell_size)  # (batch_size, output_seq_len, dec_cell_size)
            return output, attn
class DecoderRNN(BaseRNN):
    def __init__(self, input_dropout_p, rnn_cell, input_size, hidden_size, num_layers, output_dropout_p,
                 bidirectional, vocab_size, use_attn, ctx_cell_size, attn_mode, sys_id, eos_id, use_gpu,
                 max_dec_len, embedding=None):
        """Decoder RNN with optional attention and goal conditioning.

        ``embedding`` may be shared with an encoder; when None a fresh
        nn.Embedding(vocab_size, input_size) is created. ``sys_id`` is the
        BOS token id used to seed generation, ``eos_id`` the end token.
        """
        super(DecoderRNN, self).__init__(input_dropout_p=input_dropout_p,
                                         rnn_cell=rnn_cell,
                                         input_size=input_size,
                                         hidden_size=hidden_size,
                                         num_layers=num_layers,
                                         output_dropout_p=output_dropout_p,
                                         bidirectional=bidirectional)
        # TODO embedding is None or not
        if embedding is None:
            self.embedding = nn.Embedding(vocab_size, input_size)
        else:
            self.embedding = embedding
        # share parameters between encoder and decoder
        # self.rnn = ctx_encoder.rnn
        # self.FC = nn.Linear(input_size, utt_encoder.output_size)
        self.use_attn = use_attn
        if self.use_attn:
            self.attention = Attention(dec_cell_size=hidden_size,
                                       ctx_cell_size=ctx_cell_size,
                                       attn_mode=attn_mode,
                                       project=True)
        self.dec_cell_size = hidden_size
        self.output_size = vocab_size
        # Projects RNN output to vocabulary logits.
        self.project = nn.Linear(self.dec_cell_size, self.output_size)
        self.log_softmax = F.log_softmax
        self.sys_id = sys_id
        self.eos_id = eos_id
        self.use_gpu = use_gpu
        self.max_dec_len = max_dec_len
    def forward(self, batch_size, dec_inputs, dec_init_state, attn_context, mode, gen_type, beam_size, goal_hid=None):
        """Run the decoder in teacher-forcing (TEACH_FORCE) or free-running mode.

        Returns (prob_outputs, decoder_hidden_state, ret_dict); see the shape
        comments at the end of the method for the returned structures.
        """
        # dec_inputs: (batch_size, response_size-1)
        # attn_context: (batch_size, max_ctx_len, ctx_cell_size)
        # goal_hid: (batch_size, goal_nhid)
        ret_dict = dict()
        if self.use_attn:
            ret_dict[DecoderRNN.KEY_ATTN_SCORE] = list()
        if mode == GEN:
            dec_inputs = None
        if gen_type != 'beam':
            beam_size = 1
        if dec_inputs is not None:
            decoder_input = dec_inputs
        else:
            # prepare the BOS inputs
            with th.no_grad():
                bos_var = Variable(th.LongTensor([self.sys_id]))
            bos_var = cast_type(bos_var, LONG, self.use_gpu)
            decoder_input = bos_var.expand(
                batch_size*beam_size, 1) # (batch_size, 1)
        if mode == GEN and gen_type == 'beam':
            # TODO if beam search, repeat the initial states of the RNN
            pass
        else:
            decoder_hidden_state = dec_init_state
        # list of logprob | max_dec_len*(batch_size, 1, vocab_size)
        prob_outputs = []
        symbol_outputs = [] # list of word ids | max_dec_len*(batch_size, 1)
        # back_pointers = []
        # lengths = blabla...
        def decode(step, cum_sum, step_output, step_attn):
            # One free-running step: record logprobs/attention and pick the
            # next input token.
            prob_outputs.append(step_output)
            step_output_slice = step_output.squeeze(
                1) # (batch_size, vocab_size)
            if self.use_attn:
                ret_dict[DecoderRNN.KEY_ATTN_SCORE].append(step_attn)
            if gen_type == 'greedy':
                _, symbols = step_output_slice.topk(1) # (batch_size, 1)
            elif gen_type == 'sample':
                # TODO FIXME
                # symbols = self.gumbel_max(step_output_slice)
                pass
            elif gen_type == 'beam':
                # TODO
                pass
            else:
                raise ValueError('Unsupported decoding mode')
            # NOTE(review): 'sample' and 'beam' fall through without binding
            # `symbols`, so reaching them raises UnboundLocalError here --
            # only 'greedy' is implemented in this path.
            symbol_outputs.append(symbols)
            return cum_sum, symbols
        if mode == TEACH_FORCE:
            prob_outputs, decoder_hidden_state, attn = self.forward_step(
                input_var=decoder_input, hidden_state=decoder_hidden_state, encoder_outputs=attn_context, goal_hid=goal_hid)
        else:
            # do free running here
            cum_sum = None
            for step in range(self.max_dec_len):
                # Input:
                # decoder_input: (batch_size, 1)
                # decoder_hidden_state: tuple: (h, c)
                # attn_context: (batch_size, max_ctx_len, ctx_cell_size)
                # goal_hid: (batch_size, goal_nhid)
                # Output:
                # decoder_output: (batch_size, 1, vocab_size)
                # decoder_hidden_state: tuple: (h, c)
                # step_attn: (batch_size, 1, max_ctx_len)
                decoder_output, decoder_hidden_state, step_attn = self.forward_step(
                    decoder_input, decoder_hidden_state, attn_context, goal_hid=goal_hid)
                cum_sum, symbols = decode(
                    step, cum_sum, decoder_output, step_attn)
                decoder_input = symbols
            # (batch_size, max_dec_len, vocab_size)
            prob_outputs = th.cat(prob_outputs, dim=1)
        # back tracking to recover the 1-best in beam search
        # if gen_type == 'beam':
        ret_dict[DecoderRNN.KEY_SEQUENCE] = symbol_outputs
        # prob_outputs: (batch_size, max_dec_len, vocab_size)
        # decoder_hidden_state: tuple: (h, c)
        # ret_dict[DecoderRNN.KEY_ATTN_SCORE]: max_dec_len*(batch_size, 1, max_ctx_len)
        # ret_dict[DecoderRNN.KEY_SEQUENCE]: max_dec_len*(batch_size, 1)
        return prob_outputs, decoder_hidden_state, ret_dict
    def forward_step(self, input_var, hidden_state, encoder_outputs, goal_hid):
        """One batch of decoding steps: embed (+goal), RNN, attend, project.

        Returns (log_softmax_predictions, new_hidden_state, attn_weights);
        attn is None when attention is disabled.
        """
        # input_var: (batch_size, response_size-1 i.e. output_seq_len)
        # hidden_state: tuple: (h, c)
        # encoder_outputs: (batch_size, max_ctx_len, ctx_cell_size)
        # goal_hid: (batch_size, goal_nhid)
        batch_size, output_seq_len = input_var.size()
        # (batch_size, output_seq_len, embedding_dim)
        embedded = self.embedding(input_var)
        # add goals
        if goal_hid is not None:
            # (batch_size, 1, goal_nhid)
            goal_hid = goal_hid.view(goal_hid.size(0), 1, goal_hid.size(1))
            # (batch_size, output_seq_len, goal_nhid)
            goal_rep = goal_hid.repeat(1, output_seq_len, 1)
            # (batch_size, output_seq_len, embedding_dim+goal_nhid)
            embedded = th.cat([embedded, goal_rep], dim=2)
        embedded = self.input_dropout(embedded)
        # ############
        # embedded = self.FC(embedded.view(-1, embedded.size(-1))).view(batch_size, output_seq_len, -1)
        # output: (batch_size, output_seq_len, dec_cell_size)
        # hidden: tuple: (h, c)
        output, hidden_s = self.rnn(embedded, hidden_state)
        attn = None
        if self.use_attn:
            # output: (batch_size, output_seq_len, dec_cell_size)
            # encoder_outputs: (batch_size, max_ctx_len, ctx_cell_size)
            # attn: (batch_size, output_seq_len, max_ctx_len)
            output, attn = self.attention(output, encoder_outputs)
        # (batch_size*output_seq_len, vocab_size)
        logits = self.project(output.contiguous().view(-1, self.dec_cell_size))
        prediction = self.log_softmax(logits, dim=logits.dim(
        )-1).view(batch_size, output_seq_len, -1) # (batch_size, output_seq_len, vocab_size)
        return prediction, hidden_s, attn
    # special for rl
    def _step(self, input_var, hidden_state, encoder_outputs, goal_hid):
        """Single RL decoding step.

        Mirrors forward_step for a 1x1 input, but returns RAW logits (no
        log-softmax) so the caller can mask/temperature-scale before sampling.
        """
        # input_var: (1, 1)
        # hidden_state: tuple: (h, c)
        # encoder_outputs: (1, max_dlg_len, dlg_cell_size)
        # goal_hid: (1, goal_nhid)
        batch_size, output_seq_len = input_var.size()
        embedded = self.embedding(input_var) # (1, 1, embedding_dim)
        if goal_hid is not None:
            goal_hid = goal_hid.view(goal_hid.size(
                0), 1, goal_hid.size(1)) # (1, 1, goal_nhid)
            goal_rep = goal_hid.repeat(
                1, output_seq_len, 1) # (1, 1, goal_nhid)
            # (1, 1, embedding_dim+goal_nhid)
            embedded = th.cat([embedded, goal_rep], dim=2)
        embedded = self.input_dropout(embedded)
        # ############
        # embedded = self.FC(embedded.view(-1, embedded.size(-1))).view(batch_size, output_seq_len, -1)
        # output: (1, 1, dec_cell_size)
        # hidden: tuple: (h, c)
        output, hidden_s = self.rnn(embedded, hidden_state)
        attn = None
        if self.use_attn:
            # output: (1, 1, dec_cell_size)
            # encoder_outputs: (1, max_dlg_len, dlg_cell_size)
            # attn: (1, 1, max_dlg_len)
            output, attn = self.attention(output, encoder_outputs)
        # (1*1, vocab_size)
        logits = self.project(output.view(-1, self.dec_cell_size))
        prediction = logits.view(
            batch_size, output_seq_len, -1) # (1, 1, vocab_size)
        # prediction = self.log_softmax(logits, dim=logits.dim()-1).view(batch_size, output_seq_len, -1) # (batch_size, output_seq_len, vocab_size)
        return prediction, hidden_s
    # special for rl
    def write(self, input_var, hidden_state, encoder_outputs, max_words, vocab, stop_tokens, goal_hid=None, mask=True,
              decoding_masked_tokens=DECODING_MASKED_TOKENS):
        """Sample a response token-by-token for an RL rollout.

        Returns (logprob_list, symbol_list): per-token log-probability tensors
        of the sampled words and the sampled word ids. Stops after *max_words*
        tokens or on the first token found in *stop_tokens*. When *mask* is
        True, tokens in *decoding_masked_tokens* get a -999 logit penalty.
        """
        # input_var: (1, 1)
        # hidden_state: tuple: (h, c)
        # encoder_outputs: max_dlg_len*(1, 1, dlg_cell_size)
        # goal_hid: (1, goal_nhid)
        logprob_outputs = [] # list of logprob | max_dec_len*(1, )
        symbol_outputs = [] # list of word ids | max_dec_len*(1, )
        decoder_input = input_var
        decoder_hidden_state = hidden_state
        if type(encoder_outputs) is list:
            # (1, max_dlg_len, dlg_cell_size)
            encoder_outputs = th.cat(encoder_outputs, 1)
        # print('encoder_outputs.size() = {}'.format(encoder_outputs.size()))
        if mask:
            special_token_mask = Variable(th.FloatTensor(
                [-999. if token in decoding_masked_tokens else 0. for token in vocab]))
            special_token_mask = cast_type(
                special_token_mask, FLOAT, self.use_gpu) # (vocab_size, )
        def _sample(dec_output, num_i):
            # Sample from temperature-sharpened softmax (T=0.6) but report the
            # log-probability under the un-tempered distribution.
            # dec_output: (1, 1, vocab_size), need to softmax and log_softmax
            dec_output = dec_output.view(-1) # (vocab_size, )
            # TODO temperature
            prob = F.softmax(dec_output/0.6, dim=0) # (vocab_size, )
            logprob = F.log_softmax(dec_output, dim=0) # (vocab_size, )
            symbol = prob.multinomial(num_samples=1).detach() # (1, )
            # _, symbol = prob.topk(1) # (1, )
            _, tmp_symbol = prob.topk(1) # (1, )
            # print('multinomial symbol = {}, prob = {}'.format(symbol, prob[symbol.item()]))
            # print('topk symbol = {}, prob = {}'.format(tmp_symbol, prob[tmp_symbol.item()]))
            logprob = logprob.gather(0, symbol) # (1, )
            return logprob, symbol
        for i in range(max_words):
            decoder_output, decoder_hidden_state = self._step(
                decoder_input, decoder_hidden_state, encoder_outputs, goal_hid)
            # disable special tokens from being generated in a normal turn
            if mask:
                decoder_output += special_token_mask.expand(1, 1, -1)
            logprob, symbol = _sample(decoder_output, i)
            logprob_outputs.append(logprob)
            symbol_outputs.append(symbol)
            decoder_input = symbol.view(1, -1)
            if vocab[symbol.item()] in stop_tokens:
                break
        assert len(logprob_outputs) == len(symbol_outputs)
        # logprob_list = [t.item() for t in logprob_outputs]
        logprob_list = logprob_outputs
        symbol_list = [t.item() for t in symbol_outputs]
        return logprob_list, symbol_list
# For MultiWoz RL
def forward_rl(self, batch_size, dec_init_state, attn_context, vocab, max_words, goal_hid=None, mask=True, temp=0.1):
    """
    Sample one response per batch element, token by token, for RL training.

    Tokens are drawn from a temperature-sharpened softmax (``temp``) while the
    returned log-probabilities come from the untempered distribution.
    Decoding stops once every sample has emitted EOS, or after ``max_words``
    steps.  Returns ``(logprob_list, symbol_list)``; per sample, each list is
    truncated at the first non-initial EOS.  For ``batch_size == 1`` the outer
    batch nesting is stripped for backward compatibility.
    """
    # prepare the BOS inputs
    with th.no_grad():
        bos_var = Variable(th.LongTensor([self.sys_id]))
    bos_var = cast_type(bos_var, LONG, self.use_gpu)
    decoder_input = bos_var.expand(batch_size, 1)  # (batch_size, 1)
    decoder_hidden_state = dec_init_state  # tuple: (h, c)
    encoder_outputs = attn_context  # (1, ctx_len, ctx_cell_size)

    logprob_outputs = []  # list of logprob | max_dec_len*(1, )
    symbol_outputs = []  # list of word ids | max_dec_len*(1, )

    if mask:
        # Large negative logits make masked tokens effectively unsampleable.
        special_token_mask = Variable(th.FloatTensor(
            [-999. if token in DECODING_MASKED_TOKENS else 0. for token in vocab]))
        special_token_mask = cast_type(
            special_token_mask, FLOAT, self.use_gpu)  # (vocab_size, )

    def _sample(dec_output, num_i):
        # dec_output: (1, 1, vocab_size), need to softmax and log_softmax
        dec_output = dec_output.view(batch_size, -1)  # (batch_size, vocab_size)
        # sample from the tempered distribution...
        prob = F.softmax(dec_output / temp, dim=1)  # (batch_size, vocab_size)
        # ...but score with the untempered one.
        logprob = F.log_softmax(dec_output, dim=1)  # (batch_size, vocab_size)
        symbol = prob.multinomial(
            num_samples=1).detach()  # (batch_size, 1)
        # tmp_symbol is the argmax pick; kept only for (commented) debugging.
        _, tmp_symbol = prob.topk(1)  # (batch_size, 1)
        # print('multinomial symbol = {}, prob = {}'.format(symbol, prob[symbol.item()]))
        # print('topk symbol = {}, prob = {}'.format(tmp_symbol, prob[tmp_symbol.item()]))
        logprob = logprob.gather(1, symbol)  # (batch_size, 1)
        return logprob, symbol

    stopped_samples = set()  # batch indices that have already emitted EOS
    for i in range(max_words):
        decoder_output, decoder_hidden_state = self._step(
            decoder_input, decoder_hidden_state, encoder_outputs, goal_hid)
        # disable special tokens from being generated in a normal turn
        if mask:
            decoder_output += special_token_mask.expand(1, 1, -1)
        logprob, symbol = _sample(decoder_output, i)
        logprob_outputs.append(logprob)
        symbol_outputs.append(symbol)
        decoder_input = symbol.view(batch_size, -1)
        for b_id in range(batch_size):
            if vocab[symbol[b_id].item()] == EOS:
                stopped_samples.add(b_id)

        # Stop early only when every sample in the batch has finished.
        if len(stopped_samples) == batch_size:
            break

    assert len(logprob_outputs) == len(symbol_outputs)
    # (batch_size, n_steps) nested python lists of token ids
    symbol_outputs = th.cat(
        symbol_outputs, dim=1).cpu().data.numpy().tolist()
    logprob_outputs = th.cat(logprob_outputs, dim=1)  # (batch_size, n_steps)
    logprob_list = []
    symbol_list = []
    for b_id in range(batch_size):
        b_logprob = []
        b_symbol = []
        for t_id in range(logprob_outputs.shape[1]):
            symbol = symbol_outputs[b_id][t_id]
            # Truncate at the first EOS (a leading EOS at t_id == 0 is kept).
            if vocab[symbol] == EOS and t_id != 0:
                break
            b_symbol.append(symbol_outputs[b_id][t_id])
            b_logprob.append(logprob_outputs[b_id][t_id])

        logprob_list.append(b_logprob)
        symbol_list.append(b_symbol)

    # TODO backward compatible, if batch_size == 1, we remove the nested structure
    if batch_size == 1:
        logprob_list = logprob_list[0]
        symbol_list = symbol_list[0]

    return logprob_list, symbol_list
| 43.270455 | 147 | 0.59625 | import torch as th
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import numpy as np
from convlab2.policy.larl.multiwoz.latent_dialog.enc2dec.base_modules import BaseRNN
from convlab2.policy.larl.multiwoz.latent_dialog.utils import cast_type, LONG, FLOAT
from convlab2.policy.larl.multiwoz.latent_dialog.corpora import DECODING_MASKED_TOKENS, EOS
TEACH_FORCE = 'teacher_forcing'
TEACH_GEN = 'teacher_gen'
GEN = 'gen'
GEN_VALID = 'gen_valid'
class Attention(nn.Module):
    """
    Attention over a context sequence, with a selectable scoring function.

    ``attn_mode`` selects how decoder output and context are scored:
      - 'dot':     score = output . context
      - 'general': score = (dec_w @ output) . context
      - 'cat':     score = query_w . tanh(dec_w @ output + attn_w @ context)
    When ``project`` is True, the concatenated [context-mix; output] is mapped
    back to ``dec_cell_size`` through a tanh-activated linear layer in
    ``forward``; otherwise the raw concatenation is returned.
    """

    def __init__(self, dec_cell_size, ctx_cell_size, attn_mode, project):
        super(Attention, self).__init__()
        self.dec_cell_size = dec_cell_size
        self.ctx_cell_size = ctx_cell_size
        self.attn_mode = attn_mode
        if project:
            self.linear_out = nn.Linear(
                dec_cell_size+ctx_cell_size, dec_cell_size)
        else:
            self.linear_out = None
        if attn_mode == 'general':
            self.dec_w = nn.Linear(dec_cell_size, ctx_cell_size)
        elif attn_mode == 'cat':
            self.dec_w = nn.Linear(dec_cell_size, dec_cell_size)
            self.attn_w = nn.Linear(ctx_cell_size, dec_cell_size)
            self.query_w = nn.Linear(dec_cell_size, 1)

    def forward(self, output, context):
        # output: (batch, out_len, dec_cell_size)
        # context: (batch, ctx_len, ctx_cell_size)
        batch_size = output.size(0)
        max_ctx_len = context.size(1)

        if self.attn_mode == 'dot':
            attn = th.bmm(output, context.transpose(1, 2))
        elif self.attn_mode == 'general':
            mapped_output = self.dec_w(output)
            attn = th.bmm(mapped_output, context.transpose(1, 2))
        elif self.attn_mode == 'cat':
            mapped_output = self.dec_w(output)
            mapped_attn = self.attn_w(context)
            # Broadcast output over the context axis, then additively combine.
            tiled_output = mapped_output.unsqueeze(
                2).repeat(1, 1, max_ctx_len, 1)
            tiled_attn = mapped_attn.unsqueeze(1)
            fc1 = F.tanh(tiled_output+tiled_attn)
            attn = self.query_w(fc1).squeeze(-1)
        else:
            raise ValueError('Unknown attention mode')

        # Normalise scores over the context axis.
        attn = F.softmax(attn.view(-1, max_ctx_len),
                         dim=1).view(batch_size, -1, max_ctx_len)
        # Attention-weighted context mix: (batch, out_len, ctx_cell_size).
        mix = th.bmm(attn, context)
        combined = th.cat((mix, output), dim=2)
        if self.linear_out is None:
            return combined, attn
        else:
            output = F.tanh(
                self.linear_out(combined.view(-1, self.dec_cell_size+self.ctx_cell_size))).view(
                batch_size, -1, self.dec_cell_size)
            return output, attn
class DecoderRNN(BaseRNN):
def __init__(self, input_dropout_p, rnn_cell, input_size, hidden_size, num_layers, output_dropout_p,
bidirectional, vocab_size, use_attn, ctx_cell_size, attn_mode, sys_id, eos_id, use_gpu,
max_dec_len, embedding=None):
super(DecoderRNN, self).__init__(input_dropout_p=input_dropout_p,
rnn_cell=rnn_cell,
input_size=input_size,
hidden_size=hidden_size,
num_layers=num_layers,
output_dropout_p=output_dropout_p,
bidirectional=bidirectional)
if embedding is None:
self.embedding = nn.Embedding(vocab_size, input_size)
else:
self.embedding = embedding
self.use_attn = use_attn
if self.use_attn:
self.attention = Attention(dec_cell_size=hidden_size,
ctx_cell_size=ctx_cell_size,
attn_mode=attn_mode,
project=True)
self.dec_cell_size = hidden_size
self.output_size = vocab_size
self.project = nn.Linear(self.dec_cell_size, self.output_size)
self.log_softmax = F.log_softmax
self.sys_id = sys_id
self.eos_id = eos_id
self.use_gpu = use_gpu
self.max_dec_len = max_dec_len
def forward(self, batch_size, dec_inputs, dec_init_state, attn_context, mode, gen_type, beam_size, goal_hid=None):
ret_dict = dict()
if self.use_attn:
ret_dict[DecoderRNN.KEY_ATTN_SCORE] = list()
if mode == GEN:
dec_inputs = None
if gen_type != 'beam':
beam_size = 1
if dec_inputs is not None:
decoder_input = dec_inputs
else:
with th.no_grad():
bos_var = Variable(th.LongTensor([self.sys_id]))
bos_var = cast_type(bos_var, LONG, self.use_gpu)
decoder_input = bos_var.expand(
batch_size*beam_size, 1)
if mode == GEN and gen_type == 'beam':
pass
else:
decoder_hidden_state = dec_init_state
prob_outputs = []
symbol_outputs = []
def decode(step, cum_sum, step_output, step_attn):
prob_outputs.append(step_output)
step_output_slice = step_output.squeeze(
1)
if self.use_attn:
ret_dict[DecoderRNN.KEY_ATTN_SCORE].append(step_attn)
if gen_type == 'greedy':
_, symbols = step_output_slice.topk(1)
elif gen_type == 'sample':
pass
elif gen_type == 'beam':
pass
else:
raise ValueError('Unsupported decoding mode')
symbol_outputs.append(symbols)
return cum_sum, symbols
if mode == TEACH_FORCE:
prob_outputs, decoder_hidden_state, attn = self.forward_step(
input_var=decoder_input, hidden_state=decoder_hidden_state, encoder_outputs=attn_context, goal_hid=goal_hid)
else:
cum_sum = None
for step in range(self.max_dec_len):
decoder_output, decoder_hidden_state, step_attn = self.forward_step(
decoder_input, decoder_hidden_state, attn_context, goal_hid=goal_hid)
cum_sum, symbols = decode(
step, cum_sum, decoder_output, step_attn)
decoder_input = symbols
prob_outputs = th.cat(prob_outputs, dim=1)
ret_dict[DecoderRNN.KEY_SEQUENCE] = symbol_outputs
return prob_outputs, decoder_hidden_state, ret_dict
def forward_step(self, input_var, hidden_state, encoder_outputs, goal_hid):
batch_size, output_seq_len = input_var.size()
embedded = self.embedding(input_var)
if goal_hid is not None:
goal_hid = goal_hid.view(goal_hid.size(0), 1, goal_hid.size(1))
goal_rep = goal_hid.repeat(1, output_seq_len, 1)
embedded = th.cat([embedded, goal_rep], dim=2)
embedded = self.input_dropout(embedded)
den_state)
attn = None
if self.use_attn:
output, attn = self.attention(output, encoder_outputs)
logits = self.project(output.contiguous().view(-1, self.dec_cell_size))
prediction = self.log_softmax(logits, dim=logits.dim(
)-1).view(batch_size, output_seq_len, -1)
return prediction, hidden_s, attn
def _step(self, input_var, hidden_state, encoder_outputs, goal_hid):
batch_size, output_seq_len = input_var.size()
embedded = self.embedding(input_var)
if goal_hid is not None:
goal_hid = goal_hid.view(goal_hid.size(
0), 1, goal_hid.size(1))
goal_rep = goal_hid.repeat(
1, output_seq_len, 1)
embedded = th.cat([embedded, goal_rep], dim=2)
embedded = self.input_dropout(embedded)
den_state)
attn = None
if self.use_attn:
output, attn = self.attention(output, encoder_outputs)
logits = self.project(output.view(-1, self.dec_cell_size))
prediction = logits.view(
batch_size, output_seq_len, -1)
def write(self, input_var, hidden_state, encoder_outputs, max_words, vocab, stop_tokens, goal_hid=None, mask=True,
decoding_masked_tokens=DECODING_MASKED_TOKENS):
logprob_outputs = []
symbol_outputs = []
decoder_input = input_var
decoder_hidden_state = hidden_state
if type(encoder_outputs) is list:
encoder_outputs = th.cat(encoder_outputs, 1)
if mask:
special_token_mask = Variable(th.FloatTensor(
[-999. if token in decoding_masked_tokens else 0. for token in vocab]))
special_token_mask = cast_type(
special_token_mask, FLOAT, self.use_gpu)
def _sample(dec_output, num_i):
dec_output = dec_output.view(-1)
prob = F.softmax(dec_output/0.6, dim=0)
logprob = F.log_softmax(dec_output, dim=0)
symbol = prob.multinomial(num_samples=1).detach()
_, tmp_symbol = prob.topk(1)
logprob = logprob.gather(0, symbol)
return logprob, symbol
for i in range(max_words):
decoder_output, decoder_hidden_state = self._step(
decoder_input, decoder_hidden_state, encoder_outputs, goal_hid)
if mask:
decoder_output += special_token_mask.expand(1, 1, -1)
logprob, symbol = _sample(decoder_output, i)
logprob_outputs.append(logprob)
symbol_outputs.append(symbol)
decoder_input = symbol.view(1, -1)
if vocab[symbol.item()] in stop_tokens:
break
assert len(logprob_outputs) == len(symbol_outputs)
logprob_list = logprob_outputs
symbol_list = [t.item() for t in symbol_outputs]
return logprob_list, symbol_list
def forward_rl(self, batch_size, dec_init_state, attn_context, vocab, max_words, goal_hid=None, mask=True, temp=0.1):
with th.no_grad():
bos_var = Variable(th.LongTensor([self.sys_id]))
bos_var = cast_type(bos_var, LONG, self.use_gpu)
decoder_input = bos_var.expand(batch_size, 1)
decoder_hidden_state = dec_init_state
encoder_outputs = attn_context
logprob_outputs = []
symbol_outputs = []
if mask:
special_token_mask = Variable(th.FloatTensor(
[-999. if token in DECODING_MASKED_TOKENS else 0. for token in vocab]))
special_token_mask = cast_type(
special_token_mask, FLOAT, self.use_gpu)
def _sample(dec_output, num_i):
dec_output = dec_output.view(batch_size, -1)
prob = F.softmax(dec_output/temp, dim=1)
logprob = F.log_softmax(dec_output, dim=1)
symbol = prob.multinomial(
num_samples=1).detach()
_, tmp_symbol = prob.topk(1)
logprob = logprob.gather(1, symbol)
return logprob, symbol
stopped_samples = set()
for i in range(max_words):
decoder_output, decoder_hidden_state = self._step(
decoder_input, decoder_hidden_state, encoder_outputs, goal_hid)
if mask:
decoder_output += special_token_mask.expand(1, 1, -1)
logprob, symbol = _sample(decoder_output, i)
logprob_outputs.append(logprob)
symbol_outputs.append(symbol)
decoder_input = symbol.view(batch_size, -1)
for b_id in range(batch_size):
if vocab[symbol[b_id].item()] == EOS:
stopped_samples.add(b_id)
if len(stopped_samples) == batch_size:
break
assert len(logprob_outputs) == len(symbol_outputs)
symbol_outputs = th.cat(
symbol_outputs, dim=1).cpu().data.numpy().tolist()
logprob_outputs = th.cat(logprob_outputs, dim=1)
logprob_list = []
symbol_list = []
for b_id in range(batch_size):
b_logprob = []
b_symbol = []
for t_id in range(logprob_outputs.shape[1]):
symbol = symbol_outputs[b_id][t_id]
if vocab[symbol] == EOS and t_id != 0:
break
b_symbol.append(symbol_outputs[b_id][t_id])
b_logprob.append(logprob_outputs[b_id][t_id])
logprob_list.append(b_logprob)
symbol_list.append(b_symbol)
if batch_size == 1:
logprob_list = logprob_list[0]
symbol_list = symbol_list[0]
return logprob_list, symbol_list
| true | true |
f71cc8be7733ef8a045954aeb9adff342d2082ce | 8,657 | py | Python | src/diamond/handler/graphite.py | biancalana/Diamond | e044ec49f5bf1f70cb2c3ddffe38f347db7c9bce | [
"MIT"
] | 2 | 2016-10-24T02:51:32.000Z | 2021-01-09T20:49:44.000Z | src/diamond/handler/graphite.py | biancalana/Diamond | e044ec49f5bf1f70cb2c3ddffe38f347db7c9bce | [
"MIT"
] | 1 | 2016-04-06T00:00:35.000Z | 2016-04-06T00:00:35.000Z | src/diamond/handler/graphite.py | biancalana/Diamond | e044ec49f5bf1f70cb2c3ddffe38f347db7c9bce | [
"MIT"
] | 2 | 2016-05-12T08:01:52.000Z | 2016-05-16T20:39:47.000Z | # coding=utf-8
"""
Send metrics to a [graphite](http://graphite.wikidot.com/) using the default
interface.
Graphite is an enterprise-scale monitoring tool that runs well on cheap
hardware. It was originally designed and written by Chris Davis at Orbitz in
2006 as side project that ultimately grew to be a foundational monitoring tool.
In 2008, Orbitz allowed Graphite to be released under the open source Apache
2.0 license. Since then Chris has continued to work on Graphite and has
deployed it at other companies including Sears, where it serves as a pillar of
the e-commerce monitoring system. Today many
[large companies](http://graphite.readthedocs.org/en/latest/who-is-using.html)
use it.
"""
from Handler import Handler
import socket
class GraphiteHandler(Handler):
    """
    Implements the abstract Handler class, sending data to graphite.

    Metrics are buffered up to ``batch`` entries and then written to the
    carbon line-protocol socket in one send; on socket failure the backlog
    is retained (and trimmed if it grows past
    ``batch * max_backlog_multiplier``) so metrics survive short outages.
    """

    def __init__(self, config=None):
        """
        Create a new instance of the GraphiteHandler class
        """
        # Initialize Handler
        Handler.__init__(self, config)

        # Initialize Data
        self.socket = None

        # Initialize Options
        self.proto = self.config['proto'].lower().strip()
        self.host = self.config['host']
        self.port = int(self.config['port'])
        self.timeout = float(self.config['timeout'])
        self.keepalive = bool(self.config['keepalive'])
        self.keepaliveinterval = int(self.config['keepaliveinterval'])
        self.batch_size = int(self.config['batch'])
        self.max_backlog_multiplier = int(
            self.config['max_backlog_multiplier'])
        self.trim_backlog_multiplier = int(
            self.config['trim_backlog_multiplier'])
        self.flow_info = self.config['flow_info']
        self.scope_id = self.config['scope_id']
        self.metrics = []

        # Connect
        self._connect()

    def get_default_config_help(self):
        """
        Returns the help text for the configuration options for this handler
        """
        config = super(GraphiteHandler, self).get_default_config_help()

        config.update({
            'host': 'Hostname',
            'port': 'Port',
            'proto': 'udp, udp4, udp6, tcp, tcp4, or tcp6',
            'timeout': '',
            'batch': 'How many to store before sending to the graphite server',
            'max_backlog_multiplier': 'how many batches to store before trimming',  # NOQA
            'trim_backlog_multiplier': 'Trim down how many batches',
            'keepalive': 'Enable keepalives for tcp streams',
            'keepaliveinterval': 'How frequently to send keepalives',
            'flow_info': 'IPv6 Flow Info',
            'scope_id': 'IPv6 Scope ID',
        })

        return config

    def get_default_config(self):
        """
        Return the default config for the handler
        """
        config = super(GraphiteHandler, self).get_default_config()

        config.update({
            'host': 'localhost',
            'port': 2003,
            'proto': 'tcp',
            'timeout': 15,
            'batch': 1,
            'max_backlog_multiplier': 5,
            'trim_backlog_multiplier': 4,
            'keepalive': 0,
            'keepaliveinterval': 10,
            'flow_info': 0,
            'scope_id': 0,
        })

        return config

    def __del__(self):
        """
        Destroy instance of the GraphiteHandler class
        """
        self._close()

    def process(self, metric):
        """
        Process a metric by sending it to graphite
        """
        # Append the data to the array as a string
        self.metrics.append(str(metric))

        if len(self.metrics) >= self.batch_size:
            self._send()

    def flush(self):
        """Flush metrics in queue"""
        self._send()

    def _send_data(self, data):
        """
        Try to send all data in buffer; on failure reconnect once and retry.
        """
        try:
            self.socket.sendall(data)
            self._reset_errors()
        except Exception:
            # Narrowed from a bare ``except:`` so SystemExit and
            # KeyboardInterrupt are not swallowed here.
            self._close()
            self._throttle_error("GraphiteHandler: Socket error, "
                                 "trying reconnect.")
            self._connect()
            try:
                self.socket.sendall(data)
            except Exception:
                # Second attempt failed too; give up, caller keeps backlog.
                return
            self._reset_errors()

    def _send(self):
        """
        Send data to graphite. Data that can not be sent will be queued.
        """
        # Check to see if we have a valid socket. If not, try to connect.
        try:
            try:
                if self.socket is None:
                    self.log.debug("GraphiteHandler: Socket is not connected. "
                                   "Reconnecting.")
                    self._connect()
                if self.socket is None:
                    self.log.debug("GraphiteHandler: Reconnect failed.")
                else:
                    # Send data to socket
                    self._send_data(''.join(self.metrics))
                    self.metrics = []
            except Exception:
                self._close()
                self._throttle_error("GraphiteHandler: Error sending metrics.")
                raise
        finally:
            # Whatever happened above, cap the backlog so memory stays bounded.
            if len(self.metrics) >= (
                    self.batch_size * self.max_backlog_multiplier):
                trim_offset = (self.batch_size *
                               self.trim_backlog_multiplier * -1)
                self.log.warn('GraphiteHandler: Trimming backlog. Removing' +
                              ' oldest %d and keeping newest %d metrics',
                              len(self.metrics) - abs(trim_offset),
                              abs(trim_offset))
                self.metrics = self.metrics[trim_offset:]

    def _connect(self):
        """
        Connect to the graphite server
        """
        if (self.proto == 'udp'):
            stream = socket.SOCK_DGRAM
        else:
            stream = socket.SOCK_STREAM

        if (self.proto[-1] == '4'):
            family = socket.AF_INET
            connection_struct = (self.host, self.port)
        elif (self.proto[-1] == '6'):
            family = socket.AF_INET6
            connection_struct = (self.host, self.port,
                                 self.flow_info, self.scope_id)
        else:
            connection_struct = (self.host, self.port)
            try:
                addrinfo = socket.getaddrinfo(self.host, self.port, 0, stream)
            except socket.gaierror as ex:
                self.log.error("GraphiteHandler: Error looking up graphite host"
                               " '%s' - %s",
                               self.host, ex)
                return
            if (len(addrinfo) > 0):
                family = addrinfo[0][0]
                if (family == socket.AF_INET6):
                    connection_struct = (self.host, self.port,
                                         self.flow_info, self.scope_id)
            else:
                family = socket.AF_INET

        # Create socket
        self.socket = socket.socket(family, stream)
        if self.socket is None:
            # Log Error
            self.log.error("GraphiteHandler: Unable to create socket.")
            # Close Socket
            self._close()
            return

        # Enable keepalives?
        if self.proto != 'udp' and self.keepalive:
            # Informational message; was incorrectly logged at error level.
            self.log.debug("GraphiteHandler: Setting socket keepalives...")
            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
            self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
                                   self.keepaliveinterval)
            self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL,
                                   self.keepaliveinterval)
            self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 3)

        # Set socket timeout
        self.socket.settimeout(self.timeout)

        # Connect to graphite server
        try:
            self.socket.connect(connection_struct)
            # Log
            self.log.debug("GraphiteHandler: Established connection to "
                           "graphite server %s:%d.",
                           self.host, self.port)
        except Exception as ex:
            # Log Error
            self._throttle_error("GraphiteHandler: Failed to connect to "
                                 "%s:%i. %s.", self.host, self.port, ex)
            # Close Socket
            self._close()
            return

    def _close(self):
        """
        Close the socket
        """
        if self.socket is not None:
            self.socket.close()
        self.socket = None
| 35.479508 | 90 | 0.541065 |
"""
Send metrics to a [graphite](http://graphite.wikidot.com/) using the default
interface.
Graphite is an enterprise-scale monitoring tool that runs well on cheap
hardware. It was originally designed and written by Chris Davis at Orbitz in
2006 as side project that ultimately grew to be a foundational monitoring tool.
In 2008, Orbitz allowed Graphite to be released under the open source Apache
2.0 license. Since then Chris has continued to work on Graphite and has
deployed it at other companies including Sears, where it serves as a pillar of
the e-commerce monitoring system. Today many
[large companies](http://graphite.readthedocs.org/en/latest/who-is-using.html)
use it.
"""
from Handler import Handler
import socket
class GraphiteHandler(Handler):
"""
Implements the abstract Handler class, sending data to graphite
"""
def __init__(self, config=None):
"""
Create a new instance of the GraphiteHandler class
"""
Handler.__init__(self, config)
self.socket = None
self.proto = self.config['proto'].lower().strip()
self.host = self.config['host']
self.port = int(self.config['port'])
self.timeout = float(self.config['timeout'])
self.keepalive = bool(self.config['keepalive'])
self.keepaliveinterval = int(self.config['keepaliveinterval'])
self.batch_size = int(self.config['batch'])
self.max_backlog_multiplier = int(
self.config['max_backlog_multiplier'])
self.trim_backlog_multiplier = int(
self.config['trim_backlog_multiplier'])
self.flow_info = self.config['flow_info']
self.scope_id = self.config['scope_id']
self.metrics = []
self._connect()
def get_default_config_help(self):
"""
Returns the help text for the configuration options for this handler
"""
config = super(GraphiteHandler, self).get_default_config_help()
config.update({
'host': 'Hostname',
'port': 'Port',
'proto': 'udp, udp4, udp6, tcp, tcp4, or tcp6',
'timeout': '',
'batch': 'How many to store before sending to the graphite server',
'max_backlog_multiplier': 'how many batches to store before trimming',
'trim_backlog_multiplier': 'Trim down how many batches',
'keepalive': 'Enable keepalives for tcp streams',
'keepaliveinterval': 'How frequently to send keepalives',
'flow_info': 'IPv6 Flow Info',
'scope_id': 'IPv6 Scope ID',
})
return config
def get_default_config(self):
"""
Return the default config for the handler
"""
config = super(GraphiteHandler, self).get_default_config()
config.update({
'host': 'localhost',
'port': 2003,
'proto': 'tcp',
'timeout': 15,
'batch': 1,
'max_backlog_multiplier': 5,
'trim_backlog_multiplier': 4,
'keepalive': 0,
'keepaliveinterval': 10,
'flow_info': 0,
'scope_id': 0,
})
return config
def __del__(self):
"""
Destroy instance of the GraphiteHandler class
"""
self._close()
def process(self, metric):
"""
Process a metric by sending it to graphite
"""
self.metrics.append(str(metric))
if len(self.metrics) >= self.batch_size:
self._send()
def flush(self):
"""Flush metrics in queue"""
self._send()
def _send_data(self, data):
"""
Try to send all data in buffer.
"""
try:
self.socket.sendall(data)
self._reset_errors()
except:
self._close()
self._throttle_error("GraphiteHandler: Socket error, "
"trying reconnect.")
self._connect()
try:
self.socket.sendall(data)
except:
return
self._reset_errors()
def _send(self):
"""
Send data to graphite. Data that can not be sent will be queued.
"""
try:
try:
if self.socket is None:
self.log.debug("GraphiteHandler: Socket is not connected. "
"Reconnecting.")
self._connect()
if self.socket is None:
self.log.debug("GraphiteHandler: Reconnect failed.")
else:
self._send_data(''.join(self.metrics))
self.metrics = []
except Exception:
self._close()
self._throttle_error("GraphiteHandler: Error sending metrics.")
raise
finally:
if len(self.metrics) >= (
self.batch_size * self.max_backlog_multiplier):
trim_offset = (self.batch_size *
self.trim_backlog_multiplier * -1)
self.log.warn('GraphiteHandler: Trimming backlog. Removing' +
' oldest %d and keeping newest %d metrics',
len(self.metrics) - abs(trim_offset),
abs(trim_offset))
self.metrics = self.metrics[trim_offset:]
def _connect(self):
"""
Connect to the graphite server
"""
if (self.proto == 'udp'):
stream = socket.SOCK_DGRAM
else:
stream = socket.SOCK_STREAM
if (self.proto[-1] == '4'):
family = socket.AF_INET
connection_struct = (self.host, self.port)
elif (self.proto[-1] == '6'):
family = socket.AF_INET6
connection_struct = (self.host, self.port,
self.flow_info, self.scope_id)
else:
connection_struct = (self.host, self.port)
try:
addrinfo = socket.getaddrinfo(self.host, self.port, 0, stream)
except socket.gaierror, ex:
self.log.error("GraphiteHandler: Error looking up graphite host"
" '%s' - %s",
self.host, ex)
return
if (len(addrinfo) > 0):
family = addrinfo[0][0]
if (family == socket.AF_INET6):
connection_struct = (self.host, self.port,
self.flow_info, self.scope_id)
else:
family = socket.AF_INET
self.socket = socket.socket(family, stream)
if self.socket is None:
self.log.error("GraphiteHandler: Unable to create socket.")
self._close()
return
if self.proto != 'udp' and self.keepalive:
self.log.error("GraphiteHandler: Setting socket keepalives...")
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
self.keepaliveinterval)
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL,
self.keepaliveinterval)
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 3)
self.socket.settimeout(self.timeout)
try:
self.socket.connect(connection_struct)
self.log.debug("GraphiteHandler: Established connection to "
"graphite server %s:%d.",
self.host, self.port)
except Exception, ex:
self._throttle_error("GraphiteHandler: Failed to connect to "
"%s:%i. %s.", self.host, self.port, ex)
self._close()
return
def _close(self):
"""
Close the socket
"""
if self.socket is not None:
self.socket.close()
self.socket = None
| false | true |
f71cc8fe00c0b5c8796f9ec6ac11c85930433e40 | 4,318 | py | Python | WebMirror/management/UrlManage.py | awesome-archive/ReadableWebProxy | 360104694a21bc14c7756f29205c95823387e30b | [
"BSD-3-Clause"
] | 193 | 2016-08-02T22:04:35.000Z | 2022-03-09T20:45:41.000Z | WebMirror/management/UrlManage.py | awesome-archive/ReadableWebProxy | 360104694a21bc14c7756f29205c95823387e30b | [
"BSD-3-Clause"
] | 533 | 2016-08-23T20:48:23.000Z | 2022-03-28T15:55:13.000Z | WebMirror/management/UrlManage.py | awesome-archive/ReadableWebProxy | 360104694a21bc14c7756f29205c95823387e30b | [
"BSD-3-Clause"
] | 19 | 2015-08-13T18:01:08.000Z | 2021-07-12T17:13:09.000Z |
import calendar
import datetime
import json
import os
import os.path
import shutil
import tqdm
import traceback
from concurrent.futures import ThreadPoolExecutor
import urllib.error
import urllib.parse
from sqlalchemy import and_
from sqlalchemy import or_
import sqlalchemy.exc
if __name__ == "__main__":
import logSetup
logSetup.initLogging()
import common.database as db
import common.Exceptions
import common.management.util
import common.management.file_cleanup
import common.management.WebMirrorManage
import WebMirror.processor.RssProcessor
import flags
import pprint
import config
from config import C_RAW_RESOURCE_DIR
import WebMirror.OutputFilters.rss.FeedDataParser
import WebMirror.OutputFilters.util.feedNameLut
import WebRequest
import WebMirror.Engine
def exposed_fix_lndb_urls():
    '''
    Fetch every scraped WebPages row for the ``lndb.info`` netloc and print
    the resulting list.

    Despite the name, this currently fixes nothing: it is a quick inspection
    helper for eyeballing what lndb.info content is present in the database.
    (The previous docstring described an unrelated qidian-feed scan and has
    been corrected; the dead commented-out feed-processing code was removed.)
    '''
    with db.session_context() as sess:
        pages = sess.query(db.WebPages) \
            .filter(db.WebPages.netloc == "lndb.info") \
            .all()
        print(pages)
def exposed_fix_nu_duplicate_url_segments():
    '''
    So the novelupdate scrape borked somewhere, and is generating duplicate URLs.
    Gah.

    Rows whose outbound_wrapper contains "http" more than once are assumed to
    be two URLs fused together.  The leading (clean) URL is recovered and the
    row is either rewritten in place, or — when a row for the clean URL
    already exists — its children are repointed and the duplicate is deleted.
    '''
    with db.session_context() as sess:
        print("Querying for rows")
        res = sess.query(db.NuReleaseItem.id, db.NuReleaseItem.outbound_wrapper).all()
        print("Found %s nu outbound wrappers" % len(res))

    total = 0
    bad = 0
    urlmap = {}    # clean url -> canonical row id
    fix_list = []  # (row id, mangled url) pairs needing repair
    print("Parsing URLs")
    for dbid, url in res:
        total += 1
        if url.count("http") > 1:
            bad += 1
            fix_list.append((dbid, url))
            # print(dbid, url)
        else:
            urlmap[url] = dbid

    print("Found %s links, %s of which are invalid" % (total, bad))

    count = 0
    with db.session_context() as sess:
        for dbid, url in tqdm.tqdm(fix_list, desc="Fixing duplicate NU urls."):
            # NOTE(review): assumes the mangled value is exactly
            # "<clean-url>http://<junk>" with a single "http://" substring;
            # a value containing two "http://" occurrences would make this
            # two-target unpack raise ValueError.  TODO confirm against data.
            actual_url, _ = url.split("http://")
            if actual_url in urlmap:
                # A row for the clean URL already exists: repoint children
                # to the canonical row and drop the duplicate.
                res = sess.query(db.NuResolvedOutbound).filter_by(parent=dbid).update({"parent" : urlmap[actual_url]})
                res = sess.query(db.NuReleaseItem).filter_by(id=dbid).delete()
            else:
                # No clean row yet: fix this row in place and remember it.
                res = sess.query(db.NuReleaseItem).filter_by(id=dbid).update({"outbound_wrapper" : actual_url})
                urlmap[url] = dbid
            count += 1
            # Commit every ~2500 updates so the transaction stays small.
            if count > 2500:
                count = 0
                sess.commit()
                # res = sess.query(db.NuReleaseItem.id, db.NuReleaseItem.outbound_wrapper).all()

        sess.commit()
        # print(dbid, curl)
def exposed_scan_for_masked_urls():
    '''
    Stream every row in the WebPages table through the URL filtering
    mechanism, collect the (netloc, url, id) tuples that the filter rejects,
    and dump them to ``delete_netlocs.json`` for later cleanup.
    '''
    archiver = WebMirror.Engine.SiteArchiver(None, None, None)

    with db.session_context() as sess:
        print("Querying for rows")

        row_iter = sess.query(db.WebPages.id, db.WebPages.url, db.WebPages.netloc) \
            .order_by(db.WebPages.netloc) \
            .yield_per(1000)

        total_rows = 0
        rejected = []
        for row_id, row_url, row_netloc in tqdm.tqdm(row_iter):
            total_rows += 1
            # Rows the link-check refuses are candidates for deletion.
            if not archiver.external_link_check(row_netloc, row_url):
                rejected.append((row_netloc, row_url, row_id))

        print("Found %s rows. Saving" % total_rows)
        with open("delete_netlocs.json", "w") as fp:
            json.dump(rejected, fp, indent=4)

        print("Saved to output json")
| 23.988889 | 112 | 0.684576 |
import calendar
import datetime
import json
import os
import os.path
import shutil
import tqdm
import traceback
from concurrent.futures import ThreadPoolExecutor
import urllib.error
import urllib.parse
from sqlalchemy import and_
from sqlalchemy import or_
import sqlalchemy.exc
if __name__ == "__main__":
import logSetup
logSetup.initLogging()
import common.database as db
import common.Exceptions
import common.management.util
import common.management.file_cleanup
import common.management.WebMirrorManage
import WebMirror.processor.RssProcessor
import flags
import pprint
import config
from config import C_RAW_RESOURCE_DIR
import WebMirror.OutputFilters.rss.FeedDataParser
import WebMirror.OutputFilters.util.feedNameLut
import WebRequest
import WebMirror.Engine
def exposed_fix_lndb_urls():
with db.session_context() as sess:
pages = sess.query(db.WebPages) \
.filter(db.WebPages.netloc == "lndb.info") \
.all()
print(pages)
def exposed_fix_nu_duplicate_url_segments():
	"""
	Repair NovelUpdates release rows whose outbound_wrapper URL has a second
	URL concatenated onto it (detected as "http" occurring more than once).

	If the truncated URL already exists on another row, the resolved-outbound
	children are re-parented onto that row and the duplicate row is deleted;
	otherwise the wrapper URL is rewritten in place.
	"""
	with db.session_context() as sess:
		print("Querying for rows")
		res = sess.query(db.NuReleaseItem.id, db.NuReleaseItem.outbound_wrapper).all()
		print("Found %s nu outbound wrappers" % len(res))
		total = 0
		bad = 0
		# Map of known-good wrapper URL -> row id, used to find merge targets.
		urlmap = {}
		fix_list = []
		print("Parsing URLs")
		for dbid, url in res:
			total += 1
			# NOTE(review): substring check also matches "https", so any URL
			# containing "http" twice (scheme or path) lands in fix_list.
			if url.count("http") > 1:
				bad += 1
				fix_list.append((dbid, url))
			else:
				urlmap[url] = dbid
		print("Found %s links, %s of which are invalid" % (total, bad))
		count = 0
	with db.session_context() as sess:
		for dbid, url in tqdm.tqdm(fix_list, desc="Fixing duplicate NU urls."):
			# NOTE(review): assumes "http://" occurs exactly once in the URL;
			# a wrapper like "http://ahttp://b" splits into 3 parts and this
			# unpacking raises ValueError -- confirm against real data.
			actual_url, _ = url.split("http://")
			if actual_url in urlmap:
				# Truncated URL already present on another row: move the
				# children there and drop the duplicate row.
				res = sess.query(db.NuResolvedOutbound).filter_by(parent=dbid).update({"parent" : urlmap[actual_url]})
				res = sess.query(db.NuReleaseItem).filter_by(id=dbid).delete()
			else:
				res = sess.query(db.NuReleaseItem).filter_by(id=dbid).update({"outbound_wrapper" : actual_url})
				urlmap[url] = dbid
			count += 1
			# Commit periodically to bound transaction size.
			if count > 2500:
				count = 0
				sess.commit()
		sess.commit()
def exposed_scan_for_masked_urls():
	"""
	Do a streaming iteration over the rows in the database, and run them
	through the url filtering mechanism to see if any are actually not
	wanted. Rejected rows are saved to delete_netlocs.json.
	"""
	engine = WebMirror.Engine.SiteArchiver(None, None, None)
	with db.session_context() as sess:
		print("Querying for rows")
		# yield_per() streams results instead of loading every row at once.
		iterable = sess.query(db.WebPages.id, db.WebPages.url, db.WebPages.netloc) \
			.order_by(db.WebPages.netloc) \
			.yield_per(1000)
		rows = 0
		skipped = []
		for rid, url, netloc in tqdm.tqdm(iterable):
			# Falsey return means the filter rejects this URL.
			ret = engine.external_link_check(netloc, url)
			if not ret:
				skipped.append((netloc, url, rid))
			rows += 1
		print("Found %s rows. Saving" % rows)
		# Persist the rejected rows for offline review/deletion.
		with open("delete_netlocs.json", "w") as fp:
			json.dump(skipped, fp, indent=4)
		print("Saved to output json")
| true | true |
f71cc93f4fb121302f3d3b609755c636dc186814 | 53,235 | py | Python | zerver/views/message_fetch.py | CatarinaSMorais/zulip | e943d717b84291397328bd4dc578c04eed21885e | [
"Apache-2.0"
] | 1 | 2021-08-10T07:31:27.000Z | 2021-08-10T07:31:27.000Z | zerver/views/message_fetch.py | CatarinaSMorais/zulip | e943d717b84291397328bd4dc578c04eed21885e | [
"Apache-2.0"
] | 1 | 2021-08-05T14:46:02.000Z | 2021-08-05T14:46:02.000Z | zerver/views/message_fetch.py | CatarinaSMorais/zulip | e943d717b84291397328bd4dc578c04eed21885e | [
"Apache-2.0"
] | 1 | 2021-08-05T14:27:13.000Z | 2021-08-05T14:27:13.000Z | import re
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import orjson
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.exceptions import ValidationError
from django.db import connection
from django.http import HttpRequest, HttpResponse
from django.utils.html import escape as escape_html
from django.utils.translation import gettext as _
from sqlalchemy import func
from sqlalchemy.dialects import postgresql
from sqlalchemy.engine import Connection, RowProxy
from sqlalchemy.sql import (
ClauseElement,
ColumnElement,
FromClause,
Select,
alias,
and_,
column,
join,
literal,
literal_column,
not_,
or_,
select,
table,
union_all,
)
from sqlalchemy.types import Boolean, Integer, Text
from zerver.context_processors import get_valid_realm_from_request
from zerver.decorator import REQ, has_request_variables
from zerver.lib.actions import recipient_for_user_profiles
from zerver.lib.addressee import get_user_profiles, get_user_profiles_by_ids
from zerver.lib.exceptions import ErrorCode, JsonableError, MissingAuthenticationError
from zerver.lib.message import get_first_visible_message_id, messages_for_ids
from zerver.lib.narrow import is_web_public_compatible, is_web_public_narrow
from zerver.lib.response import json_error, json_success
from zerver.lib.sqlalchemy_utils import get_sqlalchemy_connection
from zerver.lib.streams import (
can_access_stream_history_by_id,
can_access_stream_history_by_name,
get_public_streams_queryset,
get_stream_by_narrow_operand_access_unchecked,
get_web_public_streams_queryset,
)
from zerver.lib.topic import DB_TOPIC_NAME, MATCH_TOPIC, topic_column_sa, topic_match_sa
from zerver.lib.topic_mutes import exclude_topic_mutes
from zerver.lib.types import Validator
from zerver.lib.utils import statsd
from zerver.lib.validator import (
check_bool,
check_dict,
check_int,
check_list,
check_required_string,
check_string,
check_string_or_int,
check_string_or_int_list,
to_non_negative_int,
)
from zerver.models import (
Realm,
Recipient,
Stream,
Subscription,
UserMessage,
UserProfile,
get_active_streams,
get_user_by_id_in_realm_including_cross_realm,
get_user_including_cross_realm,
)
# An artificial message id guaranteed to be larger than any real message id;
# used as the anchor value for "newest" and as the first-unread anchor when
# there are no unread messages (see parse_anchor_value/find_first_unread_anchor).
LARGER_THAN_MAX_MESSAGE_ID = 10000000000000000
# Upper bound on num_before + num_after in a single GET /messages request.
MAX_MESSAGES_PER_FETCH = 5000
class BadNarrowOperator(JsonableError):
    """Raised when a narrow term's operator or operand cannot be handled."""

    code = ErrorCode.BAD_NARROW
    data_fields = ["desc"]

    def __init__(self, desc: str) -> None:
        # Human-readable description of the bad term, interpolated into
        # msg_format below.
        self.desc: str = desc

    @staticmethod
    def msg_format() -> str:
        return _("Invalid narrow operator: {desc}")
# Transform applied to a SQLAlchemy condition before it is attached to the
# query; in practice either not_ (for negated terms) or the identity.
ConditionTransform = Callable[[ClauseElement], ClauseElement]

# A narrow is a list of {"operator": ..., "operand": ..., "negated": ...} dicts
# (see narrow_parameter), or None for the empty narrow.
OptionalNarrowListT = Optional[List[Dict[str, Any]]]

# These delimiters will not appear in rendered messages or HTML-escaped topics.
TS_START = "<ts-match>"
TS_STOP = "</ts-match>"
def ts_locs_array(
    config: "ColumnElement[str]",
    text: "ColumnElement[str]",
    tsquery: "ColumnElement[object]",
) -> "ColumnElement[List[List[int]]]":
    """Build a SQL expression computing the [offset, length] pairs of the
    tsquery matches within *text*, using ts_headline with the TS_START /
    TS_STOP delimiters to mark match boundaries."""
    options = f"HighlightAll = TRUE, StartSel = {TS_START}, StopSel = {TS_STOP}"
    delimited = func.ts_headline(config, text, tsquery, options)
    # Split on the start delimiter; every piece after the first begins with a
    # match, whose end is marked by TS_STOP inside the piece.
    parts = func.unnest(func.string_to_array(delimited, TS_START)).alias()
    part = column(parts.name, Text)
    part_len = func.length(part) - len(TS_STOP)
    # Window sum over the preceding pieces yields each match's offset in the
    # original (undelimited) text.
    match_pos = func.sum(part_len).over(rows=(None, -1)) + len(TS_STOP)
    match_len = func.strpos(part, TS_STOP) - 1
    ret = func.array(
        select(
            [
                postgresql.array([match_pos, match_len]),  # type: ignore[call-overload] # https://github.com/dropbox/sqlalchemy-stubs/issues/188
            ]
        )
        .select_from(parts)
        .offset(1)
        .as_scalar(),
    )
    return ret
# When you add a new operator to this, also update zerver/lib/narrow.py
class NarrowBuilder:
    """
    Build up a SQLAlchemy query to find messages matching a narrow.
    """

    # This class has an important security invariant:
    #
    #   None of these methods ever *add* messages to a query's result.
    #
    # That is, the `add_term` method, and its helpers the `by_*` methods,
    # are passed a Select object representing a query for messages; they may
    # call some methods on it, and then they return a resulting Select
    # object.  Things these methods may do to the queries they handle
    # include
    #  * add conditions to filter out rows (i.e., messages), with `query.where`
    #  * add columns for more information on the same message, with `query.column`
    #  * add a join for more information on the same message
    #
    # Things they may not do include
    #  * anything that would pull in additional rows, or information on
    #    other messages.

    def __init__(
        self,
        user_profile: Optional[UserProfile],
        msg_id_column: "ColumnElement[int]",
        realm: Realm,
        is_web_public_query: bool = False,
    ) -> None:
        """Record the context the by_* methods need.

        msg_id_column is the column holding the message id in the query being
        narrowed; it differs depending on whether the base query selects from
        Message directly or via UserMessage (see get_base_query_for_search).
        user_profile is None only for web-public queries.
        """
        self.user_profile = user_profile
        self.msg_id_column = msg_id_column
        self.realm = realm
        self.is_web_public_query = is_web_public_query

    def add_term(self, query: Select, term: Dict[str, Any]) -> Select:
        """
        Extend the given query to one narrowed by the given term, and return the result.

        This method satisfies an important security property: the returned
        query never includes a message that the given query didn't.  In
        particular, if the given query will only find messages that a given
        user can legitimately see, then so will the returned query.
        """
        # To maintain the security property, we hold all the `by_*`
        # methods to the same criterion.  See the class's block comment
        # for details.

        # We have to be careful here because we're letting users call a method
        # by name! The prefix 'by_' prevents it from colliding with builtin
        # Python __magic__ stuff.
        operator = term["operator"]
        operand = term["operand"]

        negated = term.get("negated", False)

        method_name = "by_" + operator.replace("-", "_")
        method = getattr(self, method_name, None)
        if method is None:
            raise BadNarrowOperator("unknown operator " + operator)

        if negated:
            maybe_negate = not_
        else:
            maybe_negate = lambda cond: cond

        return method(query, operand, maybe_negate)

    def by_has(self, query: Select, operand: str, maybe_negate: ConditionTransform) -> Select:
        """Filter on the has_attachment/has_image/has_link boolean columns."""
        if operand not in ["attachment", "image", "link"]:
            raise BadNarrowOperator("unknown 'has' operand " + operand)
        col_name = "has_" + operand
        cond = column(col_name, Boolean)
        return query.where(maybe_negate(cond))

    def by_in(self, query: Select, operand: str, maybe_negate: ConditionTransform) -> Select:
        """Filter to the user's home view ("home"); "all" leaves the query unchanged."""
        # This operator does not support is_web_public_query.
        assert not self.is_web_public_query
        assert self.user_profile is not None

        if operand == "home":
            conditions = exclude_muting_conditions(self.user_profile, [])
            return query.where(and_(*conditions))
        elif operand == "all":
            return query

        raise BadNarrowOperator("unknown 'in' operand " + operand)

    def by_is(self, query: Select, operand: str, maybe_negate: ConditionTransform) -> Select:
        """Filter on per-user UserMessage flags (private/starred/unread/mentioned/alerted)."""
        # This operator class does not support is_web_public_query.
        assert not self.is_web_public_query
        assert self.user_profile is not None

        if operand == "private":
            cond = column("flags", Integer).op("&")(UserMessage.flags.is_private.mask) != 0
            return query.where(maybe_negate(cond))
        elif operand == "starred":
            cond = column("flags", Integer).op("&")(UserMessage.flags.starred.mask) != 0
            return query.where(maybe_negate(cond))
        elif operand == "unread":
            cond = column("flags", Integer).op("&")(UserMessage.flags.read.mask) == 0
            return query.where(maybe_negate(cond))
        elif operand == "mentioned":
            # Either a direct or a wildcard mention counts.
            cond1 = column("flags", Integer).op("&")(UserMessage.flags.mentioned.mask) != 0
            cond2 = column("flags", Integer).op("&")(UserMessage.flags.wildcard_mentioned.mask) != 0
            cond = or_(cond1, cond2)
            return query.where(maybe_negate(cond))
        elif operand == "alerted":
            cond = column("flags", Integer).op("&")(UserMessage.flags.has_alert_word.mask) != 0
            return query.where(maybe_negate(cond))
        raise BadNarrowOperator("unknown 'is' operand " + operand)

    _alphanum = frozenset("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")

    def _pg_re_escape(self, pattern: str) -> str:
        """
        Escape user input to place in a regex

        Python's re.escape escapes Unicode characters in a way which PostgreSQL
        fails on, '\u03bb' to '\\\u03bb'. This function will correctly escape
        them for PostgreSQL, '\u03bb' to '\\u03bb'.
        """
        s = list(pattern)
        for i, c in enumerate(s):
            if c not in self._alphanum:
                if ord(c) >= 128:
                    # convert the character to hex PostgreSQL regex will take
                    # \uXXXX
                    s[i] = f"\\u{ord(c):0>4x}"
                else:
                    s[i] = "\\" + c
        return "".join(s)

    def by_stream(
        self, query: Select, operand: Union[str, int], maybe_negate: ConditionTransform
    ) -> Select:
        """Filter to one stream (with special-case pattern handling for Zephyr realms)."""
        try:
            # Because you can see your own message history for
            # private streams you are no longer subscribed to, we
            # need get_stream_by_narrow_operand_access_unchecked here.
            stream = get_stream_by_narrow_operand_access_unchecked(operand, self.realm)

            if self.is_web_public_query and not stream.is_web_public:
                raise BadNarrowOperator("unknown web-public stream " + str(operand))
        except Stream.DoesNotExist:
            raise BadNarrowOperator("unknown stream " + str(operand))

        if self.realm.is_zephyr_mirror_realm:
            # MIT users expect narrowing to "social" to also show messages to
            # /^(un)*social(.d)*$/ (unsocial, ununsocial, social.d, ...).

            # In `ok_to_include_history`, we assume that a non-negated
            # `stream` term for a public stream will limit the query to
            # that specific stream.  So it would be a bug to hit this
            # codepath after relying on this term there.  But all streams in
            # a Zephyr realm are private, so that doesn't happen.
            assert not stream.is_public()

            m = re.search(r"^(?:un)*(.+?)(?:\.d)*$", stream.name, re.IGNORECASE)
            # Since the regex has a `.+` in it and "" is invalid as a
            # stream name, this will always match
            assert m is not None
            base_stream_name = m.group(1)

            matching_streams = get_active_streams(self.realm).filter(
                name__iregex=fr"^(un)*{self._pg_re_escape(base_stream_name)}(\.d)*$"
            )
            recipient_ids = [matching_stream.recipient_id for matching_stream in matching_streams]
            cond = column("recipient_id", Integer).in_(recipient_ids)
            return query.where(maybe_negate(cond))

        recipient = stream.recipient
        cond = column("recipient_id", Integer) == recipient.id
        return query.where(maybe_negate(cond))

    def by_streams(self, query: Select, operand: str, maybe_negate: ConditionTransform) -> Select:
        """Filter to all public ("public") or all web-public ("web-public") streams."""
        if operand == "public":
            # Get all both subscribed and non subscribed public streams
            # but exclude any private subscribed streams.
            recipient_queryset = get_public_streams_queryset(self.realm)
        elif operand == "web-public":
            recipient_queryset = get_web_public_streams_queryset(self.realm)
        else:
            raise BadNarrowOperator("unknown streams operand " + operand)

        recipient_ids = recipient_queryset.values_list("recipient_id", flat=True).order_by("id")
        cond = column("recipient_id", Integer).in_(recipient_ids)
        return query.where(maybe_negate(cond))

    def by_topic(self, query: Select, operand: str, maybe_negate: ConditionTransform) -> Select:
        """Filter to a topic (with Zephyr-realm .d-suffix equivalences)."""
        if self.realm.is_zephyr_mirror_realm:
            # MIT users expect narrowing to topic "foo" to also show messages to /^foo(.d)*$/
            # (foo, foo.d, foo.d.d, etc)
            m = re.search(r"^(.*?)(?:\.d)*$", operand, re.IGNORECASE)
            # Since the regex has a `.*` in it, this will always match
            assert m is not None
            base_topic = m.group(1)

            # Additionally, MIT users expect the empty instance and
            # instance "personal" to be the same.
            if base_topic in ("", "personal", '(instance "")'):
                cond: ClauseElement = or_(
                    topic_match_sa(""),
                    topic_match_sa(".d"),
                    topic_match_sa(".d.d"),
                    topic_match_sa(".d.d.d"),
                    topic_match_sa(".d.d.d.d"),
                    topic_match_sa("personal"),
                    topic_match_sa("personal.d"),
                    topic_match_sa("personal.d.d"),
                    topic_match_sa("personal.d.d.d"),
                    topic_match_sa("personal.d.d.d.d"),
                    topic_match_sa('(instance "")'),
                    topic_match_sa('(instance "").d'),
                    topic_match_sa('(instance "").d.d'),
                    topic_match_sa('(instance "").d.d.d'),
                    topic_match_sa('(instance "").d.d.d.d'),
                )
            else:
                # We limit `.d` counts, since PostgreSQL has much better
                # query planning for this than they do for a regular
                # expression (which would sometimes table scan).
                cond = or_(
                    topic_match_sa(base_topic),
                    topic_match_sa(base_topic + ".d"),
                    topic_match_sa(base_topic + ".d.d"),
                    topic_match_sa(base_topic + ".d.d.d"),
                    topic_match_sa(base_topic + ".d.d.d.d"),
                )
            return query.where(maybe_negate(cond))

        cond = topic_match_sa(operand)
        return query.where(maybe_negate(cond))

    def by_sender(
        self, query: Select, operand: Union[str, int], maybe_negate: ConditionTransform
    ) -> Select:
        """Filter to messages sent by the given user (email or user id)."""
        try:
            if isinstance(operand, str):
                sender = get_user_including_cross_realm(operand, self.realm)
            else:
                sender = get_user_by_id_in_realm_including_cross_realm(operand, self.realm)
        except UserProfile.DoesNotExist:
            raise BadNarrowOperator("unknown user " + str(operand))

        cond = column("sender_id", Integer) == literal(sender.id)
        return query.where(maybe_negate(cond))

    def by_near(self, query: Select, operand: str, maybe_negate: ConditionTransform) -> Select:
        """No-op: the "near" operand does not restrict the query server-side."""
        return query

    def by_id(
        self, query: Select, operand: Union[int, str], maybe_negate: ConditionTransform
    ) -> Select:
        """Filter to a single message by id."""
        if not str(operand).isdigit():
            raise BadNarrowOperator("Invalid message ID")
        cond = self.msg_id_column == literal(operand)
        return query.where(maybe_negate(cond))

    def by_pm_with(
        self, query: Select, operand: Union[str, Iterable[int]], maybe_negate: ConditionTransform
    ) -> Select:
        """Filter to the private-message conversation with the given user(s)."""
        # This operator does not support is_web_public_query.
        assert not self.is_web_public_query
        assert self.user_profile is not None

        try:
            if isinstance(operand, str):
                email_list = operand.split(",")
                user_profiles = get_user_profiles(
                    emails=email_list,
                    realm=self.realm,
                )
            else:
                """
                This is where we handle passing a list of user IDs for the narrow, which is the
                preferred/cleaner API.
                """
                user_profiles = get_user_profiles_by_ids(
                    user_ids=operand,
                    realm=self.realm,
                )

            recipient = recipient_for_user_profiles(
                user_profiles=user_profiles,
                forwarded_mirror_message=False,
                forwarder_user_profile=None,
                sender=self.user_profile,
                allow_deactivated=True,
            )
        except (JsonableError, ValidationError):
            raise BadNarrowOperator("unknown user in " + str(operand))

        # Group DM
        if recipient.type == Recipient.HUDDLE:
            cond = column("recipient_id", Integer) == recipient.id
            return query.where(maybe_negate(cond))

        # 1:1 PM
        other_participant = None

        # Find if another person is in PM
        for user in user_profiles:
            if user.id != self.user_profile.id:
                other_participant = user

        # PM with another person
        if other_participant:
            # We need bidirectional messages PM with another person.
            # But Recipient.PERSONAL objects only encode the person who
            # received the message, and not the other participant in
            # the thread (the sender), we need to do a somewhat
            # complex query to get messages between these two users
            # with either of them as the sender.
            self_recipient_id = self.user_profile.recipient_id
            cond = or_(
                and_(
                    column("sender_id", Integer) == other_participant.id,
                    column("recipient_id", Integer) == self_recipient_id,
                ),
                and_(
                    column("sender_id", Integer) == self.user_profile.id,
                    column("recipient_id", Integer) == recipient.id,
                ),
            )
            return query.where(maybe_negate(cond))

        # PM with self
        cond = and_(
            column("sender_id", Integer) == self.user_profile.id,
            column("recipient_id", Integer) == recipient.id,
        )
        return query.where(maybe_negate(cond))

    def by_group_pm_with(
        self, query: Select, operand: Union[str, int], maybe_negate: ConditionTransform
    ) -> Select:
        """Filter to huddles that include both the requesting user and the given user."""
        # This operator does not support is_web_public_query.
        assert not self.is_web_public_query
        assert self.user_profile is not None

        try:
            if isinstance(operand, str):
                narrow_profile = get_user_including_cross_realm(operand, self.realm)
            else:
                narrow_profile = get_user_by_id_in_realm_including_cross_realm(operand, self.realm)
        except UserProfile.DoesNotExist:
            raise BadNarrowOperator("unknown user " + str(operand))

        self_recipient_ids = [
            recipient_tuple["recipient_id"]
            for recipient_tuple in Subscription.objects.filter(
                user_profile=self.user_profile,
                recipient__type=Recipient.HUDDLE,
            ).values("recipient_id")
        ]
        narrow_recipient_ids = [
            recipient_tuple["recipient_id"]
            for recipient_tuple in Subscription.objects.filter(
                user_profile=narrow_profile,
                recipient__type=Recipient.HUDDLE,
            ).values("recipient_id")
        ]

        # Huddles both users belong to.
        recipient_ids = set(self_recipient_ids) & set(narrow_recipient_ids)
        cond = column("recipient_id", Integer).in_(recipient_ids)
        return query.where(maybe_negate(cond))

    def by_search(self, query: Select, operand: str, maybe_negate: ConditionTransform) -> Select:
        """Dispatch full-text search to the PGroonga or tsearch implementation."""
        if settings.USING_PGROONGA:
            return self._by_search_pgroonga(query, operand, maybe_negate)
        else:
            return self._by_search_tsearch(query, operand, maybe_negate)

    def _by_search_pgroonga(
        self, query: Select, operand: str, maybe_negate: ConditionTransform
    ) -> Select:
        """PGroonga-backed search: adds content/topic match-position columns."""
        match_positions_character = func.pgroonga_match_positions_character
        query_extract_keywords = func.pgroonga_query_extract_keywords
        operand_escaped = func.escape_html(operand)
        keywords = query_extract_keywords(operand_escaped)
        query = query.column(
            match_positions_character(column("rendered_content", Text), keywords).label(
                "content_matches"
            )
        )
        query = query.column(
            match_positions_character(func.escape_html(topic_column_sa()), keywords).label(
                "topic_matches"
            )
        )
        condition = column("search_pgroonga").op("&@~")(operand_escaped)
        return query.where(maybe_negate(condition))

    def _by_search_tsearch(
        self, query: Select, operand: str, maybe_negate: ConditionTransform
    ) -> Select:
        """PostgreSQL tsearch-backed search: adds content/topic match-location columns."""
        tsquery = func.plainto_tsquery(literal("zulip.english_us_search"), literal(operand))
        query = query.column(
            ts_locs_array(
                literal("zulip.english_us_search"), column("rendered_content", Text), tsquery
            ).label("content_matches")
        )
        # We HTML-escape the topic in PostgreSQL to avoid doing a server round-trip
        query = query.column(
            ts_locs_array(
                literal("zulip.english_us_search"), func.escape_html(topic_column_sa()), tsquery
            ).label("topic_matches")
        )

        # Do quoted string matching.  We really want phrase
        # search here so we can ignore punctuation and do
        # stemming, but there isn't a standard phrase search
        # mechanism in PostgreSQL
        for term in re.findall(r'"[^"]+"|\S+', operand):
            if term[0] == '"' and term[-1] == '"':
                term = term[1:-1]
                term = "%" + connection.ops.prep_for_like_query(term) + "%"
                cond = or_(column("content", Text).ilike(term), topic_column_sa().ilike(term))
                query = query.where(maybe_negate(cond))

        cond = column("search_tsvector", postgresql.TSVECTOR).op("@@")(tsquery)
        return query.where(maybe_negate(cond))
def highlight_string(text: str, locs: Iterable[Tuple[int, int]]) -> str:
    """Wrap each match location in *text* with highlight span markup.

    Args:
        text: HTML text to highlight.
        locs: (offset, length) pairs, in increasing offset order, marking
            the substrings to highlight.

    Returns:
        The text with each match wrapped in <span class="highlight">...</span>,
        except matches that end inside an HTML tag, which are left unwrapped
        so we never inject markup into the middle of a tag.
    """
    highlight_start = '<span class="highlight">'
    highlight_stop = "</span>"
    pos = 0
    # Collect fragments and join once at the end; repeated `result += ...`
    # string concatenation is quadratic in the worst case.
    fragments: List[str] = []
    in_tag = False

    for offset, length in locs:
        prefix = text[pos:offset]
        match = text[offset : offset + length]

        # Track whether we are inside an HTML tag at the end of the match;
        # the flag deliberately persists across iterations.
        for character in prefix + match:
            if character == "<":
                in_tag = True
            elif character == ">":
                in_tag = False
        if in_tag:
            # Match ends inside a tag: emit it unhighlighted.
            fragments.append(prefix)
            fragments.append(match)
        else:
            fragments.append(prefix)
            fragments.append(highlight_start)
            fragments.append(match)
            fragments.append(highlight_stop)
        pos = offset + length

    fragments.append(text[pos:])
    return "".join(fragments)
def get_search_fields(
    rendered_content: str,
    topic_name: str,
    content_matches: Iterable[Tuple[int, int]],
    topic_matches: Iterable[Tuple[int, int]],
) -> Dict[str, str]:
    """Assemble the highlighted match_content/topic fields for a search result."""
    highlighted_content = highlight_string(rendered_content, content_matches)
    # Topics are stored unescaped, so escape before highlighting.
    highlighted_topic = highlight_string(escape_html(topic_name), topic_matches)
    return {
        "match_content": highlighted_content,
        MATCH_TOPIC: highlighted_topic,
    }
def narrow_parameter(json: str) -> OptionalNarrowListT:
    """Parse and validate the JSON-encoded "narrow" request parameter.

    Returns a list of canonicalized {operator, operand, negated} dicts, or
    None for an empty narrow.  Raises ValueError on malformed structure and
    JsonableError on terms that fail validation.
    """
    data = orjson.loads(json)
    if not isinstance(data, list):
        raise ValueError("argument is not a list")
    if len(data) == 0:
        # The "empty narrow" should be None, and not []
        return None

    def convert_term(elem: Union[Dict[str, Any], List[str]]) -> Dict[str, Any]:

        # We have to support a legacy tuple format.
        if isinstance(elem, list):
            if len(elem) != 2 or any(not isinstance(x, str) for x in elem):
                raise ValueError("element is not a string pair")
            return dict(operator=elem[0], operand=elem[1])

        if isinstance(elem, dict):
            # Make sure to sync this list to frontend also when adding a new operator.
            # that supports user IDs. Relevant code is located in static/js/message_fetch.js
            # in handle_operators_supporting_id_based_api function where you will need to update
            # operators_supporting_id, or operators_supporting_ids array.
            operators_supporting_id = ["sender", "group-pm-with", "stream"]
            operators_supporting_ids = ["pm-with"]
            operators_non_empty_operand = {"search"}

            operator = elem.get("operator", "")
            # Pick the operand validator matching what this operator accepts.
            if operator in operators_supporting_id:
                operand_validator: Validator[object] = check_string_or_int
            elif operator in operators_supporting_ids:
                operand_validator = check_string_or_int_list
            elif operator in operators_non_empty_operand:
                operand_validator = check_required_string
            else:
                operand_validator = check_string

            validator = check_dict(
                required_keys=[
                    ("operator", check_string),
                    ("operand", operand_validator),
                ],
                optional_keys=[
                    ("negated", check_bool),
                ],
            )

            try:
                validator("elem", elem)
            except ValidationError as error:
                raise JsonableError(error.message)

            # whitelist the fields we care about for now
            return dict(
                operator=elem["operator"],
                operand=elem["operand"],
                negated=elem.get("negated", False),
            )

        raise ValueError("element is not a dictionary")

    return list(map(convert_term, data))
def ok_to_include_history(
    narrow: OptionalNarrowListT, user_profile: Optional[UserProfile], is_web_public_query: bool
) -> bool:
    """Decide whether the query may include Message rows that have no
    corresponding UserMessage row (i.e. public stream history).

    Security-critical: we must only return True when the narrow is
    guaranteed to restrict the query to public stream content the user is
    allowed to see; otherwise results could leak messages from other
    streams or realms.
    """
    # Web-public queries always return history; the analogous stream
    # access checks (is_web_public, banned "is" operators) are enforced
    # directly in NarrowBuilder for that code path.
    if is_web_public_query:
        assert user_profile is None
        return True

    assert user_profile is not None
    if narrow is None:
        return False

    include_history = False
    for term in narrow:
        negated = term.get("negated", False)
        if term["operator"] == "stream" and not negated:
            # Narrowing to a specific stream: history is OK exactly when
            # the user can access that stream's history.
            operand: Union[str, int] = term["operand"]
            if isinstance(operand, str):
                include_history = can_access_stream_history_by_name(user_profile, operand)
            else:
                include_history = can_access_stream_history_by_id(user_profile, operand)
        elif (
            term["operator"] == "streams"
            and term["operand"] == "public"
            and not negated
            and user_profile.can_access_public_streams()
        ):
            include_history = True

    # Narrowing on any UserMessage property ("is" operators) can never
    # match historical messages, so disable history in that case.
    if any(term["operator"] == "is" for term in narrow):
        include_history = False

    return include_history
def get_stream_from_narrow_access_unchecked(
    narrow: OptionalNarrowListT, realm: Realm
) -> Optional[Stream]:
    """Return the stream named by the first 'stream' term in the narrow, if
    any, without checking the user's access to it."""
    if narrow is None:
        return None
    for term in narrow:
        if term["operator"] == "stream":
            return get_stream_by_narrow_operand_access_unchecked(term["operand"], realm)
    return None
def exclude_muting_conditions(
    user_profile: UserProfile, narrow: OptionalNarrowListT
) -> List[ClauseElement]:
    """Build the SQL conditions that exclude muted streams/topics for
    *user_profile*, given the narrow being queried."""
    conditions: List[ClauseElement] = []
    stream_id = None
    try:
        # Access is deliberately unchecked here: the stream id is only ever
        # used to exclude data, never to include results.
        stream = get_stream_from_narrow_access_unchecked(narrow, user_profile.realm)
        if stream is not None:
            stream_id = stream.id
    except Stream.DoesNotExist:
        pass

    # Stream-level muting only applies to multi-stream views; when browsing
    # a single (muted) stream, the user should still see its messages.
    if stream_id is None:
        muted_sub_rows = Subscription.objects.filter(
            user_profile=user_profile,
            active=True,
            is_muted=True,
            recipient__type=Recipient.STREAM,
        ).values("recipient_id")
        muted_recipient_ids = [row["recipient_id"] for row in muted_sub_rows]
        if muted_recipient_ids:
            # Only add the condition when there is something to exclude, to
            # keep the query simple and avoid warnings.
            conditions.append(not_(column("recipient_id", Integer).in_(muted_recipient_ids)))

    conditions = exclude_topic_mutes(conditions, user_profile, stream_id)

    # Muted-*user* filtering is done entirely client-side by design, so the
    # UI can hint that hidden messages exist; no server-side condition is
    # added for muted users here.
    return conditions
def get_base_query_for_search(
    user_profile: Optional[UserProfile], need_message: bool, need_user_message: bool
) -> Tuple[Select, "ColumnElement[int]"]:
    """Return the base message query and the column holding the message id.

    The caller declares whether it needs columns from Message and/or
    UserMessage; the returned Select reads from the appropriate table(s),
    and the returned column is the message id expressed in terms of the
    chosen FROM clause (so later conditions/ordering reference the right
    table).
    """
    # Handle the simple case where user_message isn't involved first.
    if not need_user_message:
        assert need_message
        query = select([column("id", Integer).label("message_id")], None, table("zerver_message"))
        inner_msg_id_col: ColumnElement[int]
        inner_msg_id_col = literal_column("zerver_message.id", Integer)  # type: ignore[assignment] # https://github.com/dropbox/sqlalchemy-stubs/pull/189
        return (query, inner_msg_id_col)

    assert user_profile is not None
    if need_message:
        # Need both tables: join UserMessage (for this user's rows/flags)
        # to Message on the message id.
        query = select(
            [column("message_id"), column("flags", Integer)],
            column("user_profile_id") == literal(user_profile.id),
            join(
                table("zerver_usermessage"),
                table("zerver_message"),
                literal_column("zerver_usermessage.message_id", Integer)
                == literal_column("zerver_message.id", Integer),
            ),
        )
        inner_msg_id_col = column("message_id", Integer)
        return (query, inner_msg_id_col)

    # UserMessage only: message id and flags for this user's rows.
    query = select(
        [column("message_id"), column("flags", Integer)],
        column("user_profile_id") == literal(user_profile.id),
        table("zerver_usermessage"),
    )
    inner_msg_id_col = column("message_id", Integer)
    return (query, inner_msg_id_col)
def add_narrow_conditions(
    user_profile: Optional[UserProfile],
    inner_msg_id_col: "ColumnElement[int]",
    query: Select,
    narrow: OptionalNarrowListT,
    is_web_public_query: bool,
    realm: Realm,
) -> Tuple[Select, bool]:
    """Apply every narrow term to *query* via NarrowBuilder.

    Returns the extended query plus a flag indicating whether this narrow
    involves full-text search (which adds match-highlighting columns).
    """
    if narrow is None:
        return (query, False)

    builder = NarrowBuilder(user_profile, inner_msg_id_col, realm, is_web_public_query)

    # The builder handles every term directly except "search"; search
    # operands are collected and combined into one term afterwards.
    search_operands = []
    for term in narrow:
        if term["operator"] == "search":
            search_operands.append(term["operand"])
        else:
            query = builder.add_term(query, term)

    if not search_operands:
        return (query, False)

    # Searching needs the topic and rendered content columns so that match
    # highlighting can be computed for the results.
    query = query.column(topic_column_sa()).column(column("rendered_content", Text))
    combined_search_term = dict(
        operator="search",
        operand=" ".join(search_operands),
    )
    query = builder.add_term(query, combined_search_term)
    return (query, True)
def find_first_unread_anchor(
    sa_conn: Connection, user_profile: Optional[UserProfile], narrow: OptionalNarrowListT
) -> int:
    """Return the id of the user's oldest unread message in this narrow, or
    LARGER_THAN_MAX_MESSAGE_ID if everything is read."""
    # Anonymous web users have no unread state; treat all messages as read.
    if user_profile is None:
        return LARGER_THAN_MAX_MESSAGE_ID

    # We always need UserMessage (it carries the per-user unread flag), and
    # we include Message because exclude_muting_conditions may reference it.
    # Skipping the join for users with no mutes would be a possible, but
    # low-priority, optimization.
    query, inner_msg_id_col = get_base_query_for_search(
        user_profile=user_profile,
        need_message=True,
        need_user_message=True,
    )

    query, is_search = add_narrow_conditions(
        user_profile=user_profile,
        inner_msg_id_col=inner_msg_id_col,
        query=query,
        narrow=narrow,
        is_web_public_query=False,
        realm=user_profile.realm,
    )

    unread_condition = column("flags", Integer).op("&")(UserMessage.flags.read.mask) == 0

    # Messages on muted topics are skipped when locating the first unread
    # message in this narrow.
    muting_conditions = exclude_muting_conditions(user_profile, narrow)
    if muting_conditions:
        unread_condition = and_(unread_condition, *muting_conditions)

    first_unread_query = query.where(unread_condition)
    first_unread_query = first_unread_query.order_by(inner_msg_id_col.asc()).limit(1)
    result_rows = list(sa_conn.execute(first_unread_query).fetchall())
    if result_rows:
        return result_rows[0][0]
    return LARGER_THAN_MAX_MESSAGE_ID
def parse_anchor_value(anchor_val: Optional[str], use_first_unread_anchor: bool) -> Optional[int]:
    """Determine which anchor the client requested.

    Returns a message-id anchor, or None to encode "first unread".  Handles
    backwards-compatibility with the legacy use_first_unread_anchor flag and
    the special string-valued anchors.
    """
    if use_first_unread_anchor:
        # Backwards-compatibility: before the string-typed anchors existed,
        # clients passed anchor=None with use_first_unread_anchor=True to
        # express what is now anchor="first_unread".
        return None

    if anchor_val is None:
        # Neither an anchor argument nor use_first_unread_anchor was given.
        raise JsonableError(_("Missing 'anchor' argument."))

    if anchor_val == "oldest":
        return 0
    if anchor_val == "newest":
        return LARGER_THAN_MAX_MESSAGE_ID
    if anchor_val == "first_unread":
        return None

    # Numeric anchors.  We deliberately avoid .isnumeric() so negative
    # values still parse: they were historically accepted (and used by the
    # web app), so we keep supporting them by clamping into range.
    try:
        anchor = int(anchor_val)
    except ValueError:
        raise JsonableError(_("Invalid anchor"))

    if anchor < 0:
        return 0
    if anchor > LARGER_THAN_MAX_MESSAGE_ID:
        return LARGER_THAN_MAX_MESSAGE_ID
    return anchor
@has_request_variables
def get_messages_backend(
    request: HttpRequest,
    maybe_user_profile: Union[UserProfile, AnonymousUser],
    anchor_val: Optional[str] = REQ("anchor", default=None),
    num_before: int = REQ(converter=to_non_negative_int),
    num_after: int = REQ(converter=to_non_negative_int),
    narrow: OptionalNarrowListT = REQ("narrow", converter=narrow_parameter, default=None),
    use_first_unread_anchor_val: bool = REQ(
        "use_first_unread_anchor", json_validator=check_bool, default=False
    ),
    client_gravatar: bool = REQ(json_validator=check_bool, default=False),
    apply_markdown: bool = REQ(json_validator=check_bool, default=True),
) -> HttpResponse:
    """Fetch a window of message history around `anchor`, optionally
    filtered by `narrow` terms.

    Returns up to num_before messages older and num_after messages newer
    than the anchor (plus the anchor row itself, if it exists), along with
    flags describing whether the window reached the oldest/newest message.
    Unauthenticated requests are supported only for narrows limited to
    web-public streams.
    """
    anchor = parse_anchor_value(anchor_val, use_first_unread_anchor_val)
    if num_before + num_after > MAX_MESSAGES_PER_FETCH:
        return json_error(
            _("Too many messages requested (maximum {}).").format(
                MAX_MESSAGES_PER_FETCH,
            )
        )

    if not maybe_user_profile.is_authenticated:
        # If user is not authenticated, clients must include
        # `streams:web-public` in their narrow query to indicate this
        # is a web-public query.  This helps differentiate between
        # cases of web-public queries (where we should return the
        # web-public results only) and clients with buggy
        # authentication code (where we should return an auth error).
        if not is_web_public_narrow(narrow):
            raise MissingAuthenticationError()
        assert narrow is not None
        if not is_web_public_compatible(narrow):
            raise MissingAuthenticationError()

        realm = get_valid_realm_from_request(request)
        # We use None to indicate unauthenticated requests as it's more
        # readable than using AnonymousUser, and the lack of Django
        # stubs means that mypy can't check AnonymousUser well.
        user_profile: Optional[UserProfile] = None
        is_web_public_query = True
    else:
        assert isinstance(maybe_user_profile, UserProfile)
        user_profile = maybe_user_profile
        assert user_profile is not None
        realm = user_profile.realm
        is_web_public_query = False

    assert realm is not None

    if (
        is_web_public_query
        or realm.email_address_visibility != Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE
    ):
        # If email addresses are only available to administrators,
        # clients cannot compute gravatars, so we force-set it to false.
        client_gravatar = False

    include_history = ok_to_include_history(narrow, user_profile, is_web_public_query)
    if include_history:
        # The initial query in this case doesn't use `zerver_usermessage`,
        # and isn't yet limited to messages the user is entitled to see!
        #
        # This is OK only because we've made sure this is a narrow that
        # will cause us to limit the query appropriately elsewhere.
        # See `ok_to_include_history` for details.
        #
        # Note that is_web_public_query=True goes here, since
        # include_history is semantically correct for is_web_public_query.
        need_message = True
        need_user_message = False
    elif narrow is None:
        # We need to limit to messages the user has received, but we don't actually
        # need any fields from Message
        need_message = False
        need_user_message = True
    else:
        need_message = True
        need_user_message = True

    query: FromClause
    query, inner_msg_id_col = get_base_query_for_search(
        user_profile=user_profile,
        need_message=need_message,
        need_user_message=need_user_message,
    )

    query, is_search = add_narrow_conditions(
        user_profile=user_profile,
        inner_msg_id_col=inner_msg_id_col,
        query=query,
        narrow=narrow,
        realm=realm,
        is_web_public_query=is_web_public_query,
    )

    if narrow is not None:
        # Add some metadata to our logging data for narrows
        verbose_operators = []
        for term in narrow:
            if term["operator"] == "is":
                verbose_operators.append("is:" + term["operand"])
            else:
                verbose_operators.append(term["operator"])
        request._log_data["extra"] = "[{}]".format(",".join(verbose_operators))

    sa_conn = get_sqlalchemy_connection()

    if anchor is None:
        # `anchor=None` corresponds to the anchor="first_unread" parameter.
        anchor = find_first_unread_anchor(
            sa_conn,
            user_profile,
            narrow,
        )

    anchored_to_left = anchor == 0

    # Set value that will be used to short circuit the after_query
    # altogether and avoid needless conditions in the before_query.
    anchored_to_right = anchor >= LARGER_THAN_MAX_MESSAGE_ID
    if anchored_to_right:
        num_after = 0

    first_visible_message_id = get_first_visible_message_id(realm)
    query = limit_query_to_range(
        query=query,
        num_before=num_before,
        num_after=num_after,
        anchor=anchor,
        anchored_to_left=anchored_to_left,
        anchored_to_right=anchored_to_right,
        id_col=inner_msg_id_col,
        first_visible_message_id=first_visible_message_id,
    )

    main_query = alias(query)
    query = select(main_query.c, None, main_query).order_by(column("message_id", Integer).asc())
    # This is a hack to tag the query we use for testing
    query = query.prefix_with("/* get_messages */")
    rows = list(sa_conn.execute(query).fetchall())

    query_info = post_process_limited_query(
        rows=rows,
        num_before=num_before,
        num_after=num_after,
        anchor=anchor,
        anchored_to_left=anchored_to_left,
        anchored_to_right=anchored_to_right,
        first_visible_message_id=first_visible_message_id,
    )

    rows = query_info["rows"]

    # The following is a little messy, but ensures that the code paths
    # are similar regardless of the value of include_history.  The
    # 'user_messages' dictionary maps each message to the user's
    # UserMessage object for that message, which we will attach to the
    # rendered message dict before returning it.  We attempt to
    # bulk-fetch rendered message dicts from remote cache using the
    # 'messages' list.
    message_ids: List[int] = []
    user_message_flags: Dict[int, List[str]] = {}
    if is_web_public_query:
        # For web-public users, we treat all historical messages as read.
        for row in rows:
            message_id = row[0]
            message_ids.append(message_id)
            user_message_flags[message_id] = ["read"]
    elif include_history:
        assert user_profile is not None
        message_ids = [row[0] for row in rows]

        # TODO: This could be done with an outer join instead of two queries
        um_rows = UserMessage.objects.filter(user_profile=user_profile, message_id__in=message_ids)
        user_message_flags = {um.message_id: um.flags_list() for um in um_rows}

        # Messages the user never received get synthetic flags.
        for message_id in message_ids:
            if message_id not in user_message_flags:
                user_message_flags[message_id] = ["read", "historical"]
    else:
        for row in rows:
            message_id = row[0]
            flags = row[1]
            user_message_flags[message_id] = UserMessage.flags_list_for_flags(flags)
            message_ids.append(message_id)

    search_fields: Dict[int, Dict[str, str]] = {}
    if is_search:
        for row in rows:
            message_id = row[0]
            # The search columns are appended by add_narrow_conditions,
            # so they are the last four columns of each row.
            (topic_name, rendered_content, content_matches, topic_matches) = row[-4:]
            try:
                search_fields[message_id] = get_search_fields(
                    rendered_content, topic_name, content_matches, topic_matches
                )
            except UnicodeDecodeError as err:  # nocoverage
                # No coverage for this block since it should be
                # impossible, and we plan to remove it once we've
                # debugged the case that makes it happen.
                raise Exception(str(err), message_id, narrow)

    message_list = messages_for_ids(
        message_ids=message_ids,
        user_message_flags=user_message_flags,
        search_fields=search_fields,
        apply_markdown=apply_markdown,
        client_gravatar=client_gravatar,
        allow_edit_history=realm.allow_edit_history,
    )

    statsd.incr("loaded_old_messages", len(message_list))

    ret = dict(
        messages=message_list,
        result="success",
        msg="",
        found_anchor=query_info["found_anchor"],
        found_oldest=query_info["found_oldest"],
        found_newest=query_info["found_newest"],
        history_limited=query_info["history_limited"],
        anchor=anchor,
    )
    return json_success(ret)
def limit_query_to_range(
    query: Select,
    num_before: int,
    num_after: int,
    anchor: int,
    anchored_to_left: bool,
    anchored_to_right: bool,
    id_col: "ColumnElement[int]",
    first_visible_message_id: int,
) -> FromClause:
    """
    Restrict `query` to a window of at most num_before rows below the
    anchor and num_after rows above it (plus the anchor row itself).

    This code is actually generic enough that we could move it to a
    library, but our only caller for now is message search.
    """
    need_before_query = (not anchored_to_left) and (num_before > 0)
    need_after_query = (not anchored_to_right) and (num_after > 0)

    need_both_sides = need_before_query and need_after_query

    # The semantics of our flags are as follows:
    #
    # num_before = number of rows < anchor
    # num_after = number of rows > anchor
    #
    # But we also want the row where id == anchor (if it exists),
    # and we don't want to union up to 3 queries.  So in some cases
    # we do things like `after_limit = num_after + 1` to grab the
    # anchor row in the "after" query.
    #
    # Note that in some cases, if the anchor row isn't found, we
    # actually may fetch an extra row at one of the extremes.
    #
    # NOTE: before_anchor/after_anchor and before_limit/after_limit are
    # only assigned on the branches where the corresponding query is
    # built below, so they are never read while unbound.
    if need_both_sides:
        before_anchor = anchor - 1
        after_anchor = max(anchor, first_visible_message_id)
        before_limit = num_before
        after_limit = num_after + 1
    elif need_before_query:
        before_anchor = anchor
        before_limit = num_before
        if not anchored_to_right:
            # Leave headroom for the anchor row itself.
            before_limit += 1
    elif need_after_query:
        after_anchor = max(anchor, first_visible_message_id)
        after_limit = num_after + 1

    if need_before_query:
        before_query = query

        if not anchored_to_right:
            before_query = before_query.where(id_col <= before_anchor)

        before_query = before_query.order_by(id_col.desc())
        before_query = before_query.limit(before_limit)

    if need_after_query:
        after_query = query

        if not anchored_to_left:
            after_query = after_query.where(id_col >= after_anchor)

        after_query = after_query.order_by(id_col.asc())
        after_query = after_query.limit(after_limit)

    if need_both_sides:
        return union_all(before_query.self_group(), after_query.self_group())
    elif need_before_query:
        return before_query
    elif need_after_query:
        return after_query
    else:
        # If we don't have either a before_query or after_query, it's because
        # some combination of num_before/num_after/anchor are zero or
        # use_first_unread_anchor logic found no unread messages.
        #
        # The most likely reason is somebody is doing an id search, so searching
        # for something like `message_id = 42` is exactly what we want.  In other
        # cases, which could possibly be buggy API clients, at least we will
        # return at most one row here.
        return query.where(id_col == anchor)
def post_process_limited_query(
    rows: Sequence[Union[RowProxy, Sequence[Any]]],
    num_before: int,
    num_after: int,
    anchor: int,
    anchored_to_left: bool,
    anchored_to_right: bool,
    first_visible_message_id: int,
) -> Dict[str, Any]:
    """Trim the "headroom" rows that limit_query_to_range may have
    over-fetched, and compute the found_anchor/found_oldest/found_newest/
    history_limited flags telling clients whether they got complete
    results.
    """
    # Drop rows hidden by the realm's first_visible_message_id cutoff,
    # remembering whether anything was actually dropped.
    if first_visible_message_id > 0:
        visible: Sequence[Union[RowProxy, Sequence[Any]]] = [
            row for row in rows if row[0] >= first_visible_message_id
        ]
    else:
        visible = rows
    rows_limited = len(visible) != len(rows)

    if anchored_to_right:
        # Anchored at the newest edge: every fetched row counts as "before".
        num_after = 0
        before_rows = visible[:]
        anchor_rows: List[Union[RowProxy, Sequence[Any]]] = []
        after_rows: List[Union[RowProxy, Sequence[Any]]] = []
    else:
        before_rows = [row for row in visible if row[0] < anchor]
        anchor_rows = [row for row in visible if row[0] == anchor]
        after_rows = [row for row in visible if row[0] > anchor]

    # Truncate back down to the requested window sizes.
    if num_before:
        before_rows = before_rows[-num_before:]
    if num_after:
        after_rows = after_rows[:num_after]

    trimmed_rows = [*before_rows, *anchor_rows, *after_rows]

    found_anchor = len(anchor_rows) == 1
    found_oldest = anchored_to_left or (len(before_rows) < num_before)
    found_newest = anchored_to_right or (len(after_rows) < num_after)
    # BUG: history_limited is incorrectly False in the event that we had
    # to bump `anchor` up due to first_visible_message_id and there were
    # actually older messages.  This can only happen in one-sided queries
    # with no num_before (see tests tagged BUG in PostProcessTest), which
    # the UI doesn't generally issue, so this might be OK for now.  A
    # correct fix probably involves issuing a `before_query` whenever we
    # increase `anchor`, just to confirm whether messages were hidden.
    history_limited = rows_limited and found_oldest

    return dict(
        rows=trimmed_rows,
        found_anchor=found_anchor,
        found_newest=found_newest,
        found_oldest=found_oldest,
        history_limited=history_limited,
    )
@has_request_variables
def messages_in_narrow_backend(
    request: HttpRequest,
    user_profile: UserProfile,
    msg_ids: List[int] = REQ(json_validator=check_list(check_int)),
    narrow: OptionalNarrowListT = REQ(converter=narrow_parameter),
) -> HttpResponse:
    """Given message IDs the user received, return the search-highlighting
    fields (match_content / match topic) for the subset of those messages
    that match `narrow`, keyed by stringified message ID.
    """
    first_visible_message_id = get_first_visible_message_id(user_profile.realm)
    # Silently drop IDs hidden by the realm's message-history cutoff.
    msg_ids = [message_id for message_id in msg_ids if message_id >= first_visible_message_id]
    # This query is limited to messages the user has access to because they
    # actually received them, as reflected in `zerver_usermessage`.
    query = select(
        [column("message_id", Integer), topic_column_sa(), column("rendered_content", Text)],
        and_(
            column("user_profile_id", Integer) == literal(user_profile.id),
            column("message_id", Integer).in_(msg_ids),
        ),
        join(
            table("zerver_usermessage"),
            table("zerver_message"),
            literal_column("zerver_usermessage.message_id", Integer)
            == literal_column("zerver_message.id", Integer),
        ),
    )

    builder = NarrowBuilder(user_profile, column("message_id", Integer), user_profile.realm)
    if narrow is not None:
        for term in narrow:
            query = builder.add_term(query, term)

    sa_conn = get_sqlalchemy_connection()
    query_result = list(sa_conn.execute(query).fetchall())

    search_fields = {}
    for row in query_result:
        message_id = row["message_id"]
        topic_name = row[DB_TOPIC_NAME]
        rendered_content = row["rendered_content"]
        # Rows only carry match columns when the narrow included a
        # full-text search term; otherwise there is nothing to highlight.
        if "content_matches" in row:
            content_matches = row["content_matches"]
            topic_matches = row["topic_matches"]
        else:
            content_matches = topic_matches = []
        search_fields[str(message_id)] = get_search_fields(
            rendered_content,
            topic_name,
            content_matches,
            topic_matches,
        )

    return json_success({"messages": search_fields})
import re
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import orjson
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.exceptions import ValidationError
from django.db import connection
from django.http import HttpRequest, HttpResponse
from django.utils.html import escape as escape_html
from django.utils.translation import gettext as _
from sqlalchemy import func
from sqlalchemy.dialects import postgresql
from sqlalchemy.engine import Connection, RowProxy
from sqlalchemy.sql import (
ClauseElement,
ColumnElement,
FromClause,
Select,
alias,
and_,
column,
join,
literal,
literal_column,
not_,
or_,
select,
table,
union_all,
)
from sqlalchemy.types import Boolean, Integer, Text
from zerver.context_processors import get_valid_realm_from_request
from zerver.decorator import REQ, has_request_variables
from zerver.lib.actions import recipient_for_user_profiles
from zerver.lib.addressee import get_user_profiles, get_user_profiles_by_ids
from zerver.lib.exceptions import ErrorCode, JsonableError, MissingAuthenticationError
from zerver.lib.message import get_first_visible_message_id, messages_for_ids
from zerver.lib.narrow import is_web_public_compatible, is_web_public_narrow
from zerver.lib.response import json_error, json_success
from zerver.lib.sqlalchemy_utils import get_sqlalchemy_connection
from zerver.lib.streams import (
can_access_stream_history_by_id,
can_access_stream_history_by_name,
get_public_streams_queryset,
get_stream_by_narrow_operand_access_unchecked,
get_web_public_streams_queryset,
)
from zerver.lib.topic import DB_TOPIC_NAME, MATCH_TOPIC, topic_column_sa, topic_match_sa
from zerver.lib.topic_mutes import exclude_topic_mutes
from zerver.lib.types import Validator
from zerver.lib.utils import statsd
from zerver.lib.validator import (
check_bool,
check_dict,
check_int,
check_list,
check_required_string,
check_string,
check_string_or_int,
check_string_or_int_list,
to_non_negative_int,
)
from zerver.models import (
Realm,
Recipient,
Stream,
Subscription,
UserMessage,
UserProfile,
get_active_streams,
get_user_by_id_in_realm_including_cross_realm,
get_user_including_cross_realm,
)
# An ID guaranteed to be larger than any real message ID; used to encode
# the "newest" anchor and to short-circuit after-queries.
LARGER_THAN_MAX_MESSAGE_ID = 10000000000000000

# Upper bound on num_before + num_after in a single GET /messages request.
MAX_MESSAGES_PER_FETCH = 5000
class BadNarrowOperator(JsonableError):
    """Raised when a narrow term has an operator or operand we don't understand."""

    code = ErrorCode.BAD_NARROW
    data_fields = ["desc"]

    def __init__(self, desc: str) -> None:
        # Human-readable description of what was wrong with the term;
        # interpolated into msg_format below.
        self.desc: str = desc

    @staticmethod
    def msg_format() -> str:
        return _("Invalid narrow operator: {desc}")
# A callable that either wraps a SQL condition in NOT(...) (for negated
# narrow terms) or returns it unchanged.
ConditionTransform = Callable[[ClauseElement], ClauseElement]

# A parsed narrow: a list of {operator, operand, negated} dicts, or None
# for the empty narrow.
OptionalNarrowListT = Optional[List[Dict[str, Any]]]

# Sentinel delimiters we ask ts_headline to wrap matches in, so that
# ts_locs_array can recover match offsets; they are stripped from output.
TS_START = "<ts-match>"
TS_STOP = "</ts-match>"
def ts_locs_array(
    config: "ColumnElement[str]",
    text: "ColumnElement[str]",
    tsquery: "ColumnElement[object]",
) -> "ColumnElement[List[List[int]]]":
    """Return a SQL expression computing the [offset, length] pairs of
    full-text-search matches of `tsquery` within `text`.

    We have ts_headline wrap each match in the TS_START/TS_STOP sentinels,
    split the result on TS_START, and use a running sum over the piece
    lengths to recover each match's character offset; strpos against
    TS_STOP gives the match length.
    """
    options = f"HighlightAll = TRUE, StartSel = {TS_START}, StopSel = {TS_STOP}"
    delimited = func.ts_headline(config, text, tsquery, options)
    parts = func.unnest(func.string_to_array(delimited, TS_START)).alias()
    part = column(parts.name, Text)
    part_len = func.length(part) - len(TS_STOP)
    # Running total of preceding piece lengths = offset of this match.
    match_pos = func.sum(part_len).over(rows=(None, -1)) + len(TS_STOP)
    match_len = func.strpos(part, TS_STOP) - 1
    # BUGFIX: the select(...) expression here was syntactically broken
    # (a dangling `.offset(1)` with no select_from); reconstructed the
    # aggregation over the unnested parts.  The .offset(1) skips the text
    # before the first match, which is not itself a match.
    ret = func.array(
        select([postgresql.array([match_pos, match_len])])
        .select_from(parts)
        .offset(1)
        .as_scalar()
    )
    return ret
class NarrowBuilder:
    """
    Build up a SQLAlchemy query to find messages matching a narrow.

    The core interface is `add_term`, which dispatches each narrow term to
    the `by_*` helper for its operator.  Each helper is passed a Select
    representing a query for messages; it may call some methods on it, and
    then it returns the resulting Select.  Things these methods may do to
    the queries they handle include:

    * add conditions to filter out rows (i.e., messages), with `query.where`
    * add columns for more information on the same message, with `query.column`
    * add a join for more information on the same message

    Things they may not do include anything that would pull in additional
    rows, or information on other messages.
    """

    def __init__(
        self,
        user_profile: Optional[UserProfile],
        msg_id_column: "ColumnElement[int]",
        realm: Realm,
        is_web_public_query: bool = False,
    ) -> None:
        # user_profile is None iff is_web_public_query is True (the
        # operators that need a user assert on these).
        self.user_profile = user_profile
        self.msg_id_column = msg_id_column
        self.realm = realm
        self.is_web_public_query = is_web_public_query

    def add_term(self, query: Select, term: Dict[str, Any]) -> Select:
        """Extend `query` with the condition for one narrow term."""
        # To maintain the security property, we hold all the `by_*`
        # methods to the same criterion.  See the class docstring.

        # We are looking up a method by user-provided name here!  The
        # 'by_' prefix prevents collisions with builtin __magic__ stuff.
        operator = term["operator"]
        operand = term["operand"]

        negated = term.get("negated", False)

        method_name = "by_" + operator.replace("-", "_")
        method = getattr(self, method_name, None)
        if method is None:
            raise BadNarrowOperator("unknown operator " + operator)

        if negated:
            maybe_negate = not_
        else:
            maybe_negate = lambda cond: cond

        return method(query, operand, maybe_negate)

    def by_has(self, query: Select, operand: str, maybe_negate: ConditionTransform) -> Select:
        """Match messages that have an attachment, image, or link."""
        if operand not in ["attachment", "image", "link"]:
            raise BadNarrowOperator("unknown 'has' operand " + operand)
        col_name = "has_" + operand
        cond = column(col_name, Boolean)
        return query.where(maybe_negate(cond))

    def by_in(self, query: Select, operand: str, maybe_negate: ConditionTransform) -> Select:
        """Restrict to the user's home view ("home") or leave unrestricted ("all")."""
        # This operator does not support is_web_public_query.
        assert not self.is_web_public_query
        assert self.user_profile is not None

        if operand == "home":
            conditions = exclude_muting_conditions(self.user_profile, [])
            return query.where(and_(*conditions))
        elif operand == "all":
            return query

        raise BadNarrowOperator("unknown 'in' operand " + operand)

    def by_is(self, query: Select, operand: str, maybe_negate: ConditionTransform) -> Select:
        """Filter on UserMessage flag bits: private/starred/unread/mentioned/alerted."""
        # This operator class does not support is_web_public_query.
        assert not self.is_web_public_query
        assert self.user_profile is not None

        if operand == "private":
            cond = column("flags", Integer).op("&")(UserMessage.flags.is_private.mask) != 0
            return query.where(maybe_negate(cond))
        elif operand == "starred":
            cond = column("flags", Integer).op("&")(UserMessage.flags.starred.mask) != 0
            return query.where(maybe_negate(cond))
        elif operand == "unread":
            cond = column("flags", Integer).op("&")(UserMessage.flags.read.mask) == 0
            return query.where(maybe_negate(cond))
        elif operand == "mentioned":
            # A message counts as mentioned for either a direct or a
            # wildcard (@all/@everyone) mention.
            cond1 = column("flags", Integer).op("&")(UserMessage.flags.mentioned.mask) != 0
            cond2 = column("flags", Integer).op("&")(UserMessage.flags.wildcard_mentioned.mask) != 0
            cond = or_(cond1, cond2)
            return query.where(maybe_negate(cond))
        elif operand == "alerted":
            cond = column("flags", Integer).op("&")(UserMessage.flags.has_alert_word.mask) != 0
            return query.where(maybe_negate(cond))
        raise BadNarrowOperator("unknown 'is' operand " + operand)

    # Characters that need no escaping in a PostgreSQL regex.
    _alphanum = frozenset("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789")

    def _pg_re_escape(self, pattern: str) -> str:
        """Escape `pattern` for literal use inside a PostgreSQL regex."""
        s = list(pattern)
        for i, c in enumerate(s):
            if c not in self._alphanum:
                if ord(c) >= 128:
                    # convert the character to hex PostgreSQL regex will take
                    # \uXXXX
                    s[i] = f"\\u{ord(c):0>4x}"
                else:
                    s[i] = "\\" + c
        return "".join(s)

    def by_stream(
        self, query: Select, operand: Union[str, int], maybe_negate: ConditionTransform
    ) -> Select:
        """Match messages sent to the given stream (by name or ID)."""
        try:
            # Because you can see your own message history for
            # private streams you are no longer subscribed to, we
            # need get_stream_by_narrow_operand_access_unchecked here.
            stream = get_stream_by_narrow_operand_access_unchecked(operand, self.realm)

            if self.is_web_public_query and not stream.is_web_public:
                raise BadNarrowOperator("unknown web-public stream " + str(operand))
        except Stream.DoesNotExist:
            raise BadNarrowOperator("unknown stream " + str(operand))

        if self.realm.is_zephyr_mirror_realm:
            # MIT users expect narrowing to "social" to also show messages to
            # /^(un)*social(.d)*$/ (unsocial, ununsocial, social.d, ...).

            # In `ok_to_include_history`, we assume that a non-negated
            # `stream` term for a public stream will limit the query to
            # that specific stream.  So it would be a bug to hit this
            # codepath after relying on this term there.  But all streams in
            # a Zephyr realm are private, so that doesn't happen.
            assert not stream.is_public()

            m = re.search(r"^(?:un)*(.+?)(?:\.d)*$", stream.name, re.IGNORECASE)
            assert m is not None
            base_stream_name = m.group(1)

            matching_streams = get_active_streams(self.realm).filter(
                name__iregex=fr"^(un)*{self._pg_re_escape(base_stream_name)}(\.d)*$"
            )
            recipient_ids = [matching_stream.recipient_id for matching_stream in matching_streams]
            cond = column("recipient_id", Integer).in_(recipient_ids)
            return query.where(maybe_negate(cond))

        recipient = stream.recipient
        cond = column("recipient_id", Integer) == recipient.id
        return query.where(maybe_negate(cond))

    def by_streams(self, query: Select, operand: str, maybe_negate: ConditionTransform) -> Select:
        """Match messages in all public (or all web-public) streams."""
        if operand == "public":
            # Get all both subscribed and non-subscribed public streams
            # but exclude any private subscribed streams.
            recipient_queryset = get_public_streams_queryset(self.realm)
        elif operand == "web-public":
            recipient_queryset = get_web_public_streams_queryset(self.realm)
        else:
            raise BadNarrowOperator("unknown streams operand " + operand)

        recipient_ids = recipient_queryset.values_list("recipient_id", flat=True).order_by("id")
        cond = column("recipient_id", Integer).in_(recipient_ids)
        return query.where(maybe_negate(cond))

    def by_topic(self, query: Select, operand: str, maybe_negate: ConditionTransform) -> Select:
        """Match messages in the given topic (with Zephyr-realm .d suffix handling)."""
        if self.realm.is_zephyr_mirror_realm:
            # MIT users expect narrowing to topic "foo" to also show
            # messages to /^foo(.d)*$/ (foo.d, foo.d.d, etc).
            m = re.search(r"^(.*?)(?:\.d)*$", operand, re.IGNORECASE)
            assert m is not None
            base_topic = m.group(1)

            # Additionally, MIT users expect the empty instance and
            # instance "personal" to be the same.
            if base_topic in ("", "personal", '(instance "")'):
                cond: ClauseElement = or_(
                    topic_match_sa(""),
                    topic_match_sa(".d"),
                    topic_match_sa(".d.d"),
                    topic_match_sa(".d.d.d"),
                    topic_match_sa(".d.d.d.d"),
                    topic_match_sa("personal"),
                    topic_match_sa("personal.d"),
                    topic_match_sa("personal.d.d"),
                    topic_match_sa("personal.d.d.d"),
                    topic_match_sa("personal.d.d.d.d"),
                    topic_match_sa('(instance "")'),
                    topic_match_sa('(instance "").d'),
                    topic_match_sa('(instance "").d.d'),
                    topic_match_sa('(instance "").d.d.d'),
                    topic_match_sa('(instance "").d.d.d.d'),
                )
            else:
                # We limit `.d` counts, since PostgreSQL has much better
                # query planning for this than they do for a regex (which
                # would sometimes table scan).
                cond = or_(
                    topic_match_sa(base_topic),
                    topic_match_sa(base_topic + ".d"),
                    topic_match_sa(base_topic + ".d.d"),
                    topic_match_sa(base_topic + ".d.d.d"),
                    topic_match_sa(base_topic + ".d.d.d.d"),
                )
            return query.where(maybe_negate(cond))

        cond = topic_match_sa(operand)
        return query.where(maybe_negate(cond))

    def by_sender(
        self, query: Select, operand: Union[str, int], maybe_negate: ConditionTransform
    ) -> Select:
        """Match messages sent by the given user (by email or user ID)."""
        try:
            if isinstance(operand, str):
                sender = get_user_including_cross_realm(operand, self.realm)
            else:
                sender = get_user_by_id_in_realm_including_cross_realm(operand, self.realm)
        except UserProfile.DoesNotExist:
            raise BadNarrowOperator("unknown user " + str(operand))

        cond = column("sender_id", Integer) == literal(sender.id)
        return query.where(maybe_negate(cond))

    def by_near(self, query: Select, operand: str, maybe_negate: ConditionTransform) -> Select:
        """No-op: 'near' does not restrict which messages match."""
        return query

    def by_id(
        self, query: Select, operand: Union[int, str], maybe_negate: ConditionTransform
    ) -> Select:
        """Match exactly the message with the given ID."""
        if not str(operand).isdigit():
            raise BadNarrowOperator("Invalid message ID")
        cond = self.msg_id_column == literal(operand)
        return query.where(maybe_negate(cond))

    def by_pm_with(
        self, query: Select, operand: Union[str, Iterable[int]], maybe_negate: ConditionTransform
    ) -> Select:
        """Match the private-message conversation with the given user(s),
        passed as a comma-separated email string or a list of user IDs."""
        # This operator does not support is_web_public_query.
        assert not self.is_web_public_query
        assert self.user_profile is not None

        try:
            if isinstance(operand, str):
                email_list = operand.split(",")
                user_profiles = get_user_profiles(
                    emails=email_list,
                    realm=self.realm,
                )
            else:
                """
                This is where we handle passing a list of user IDs for the narrow, which is the
                preferred/cleaner API.
                """
                user_profiles = get_user_profiles_by_ids(
                    user_ids=operand,
                    realm=self.realm,
                )

            recipient = recipient_for_user_profiles(
                user_profiles=user_profiles,
                forwarded_mirror_message=False,
                forwarder_user_profile=None,
                sender=self.user_profile,
                allow_deactivated=True,
            )
        except (JsonableError, ValidationError):
            raise BadNarrowOperator("unknown user in " + str(operand))

        # Group direct message
        if recipient.type == Recipient.HUDDLE:
            cond = column("recipient_id", Integer) == recipient.id
            return query.where(maybe_negate(cond))

        # 1:1 direct message with another user: match messages in either
        # direction between the two participants.
        other_participant = None

        # Find if another person is in direct message
        for user in user_profiles:
            if user.id != self.user_profile.id:
                other_participant = user

        # Direct message with another person
        if other_participant:
            # We need bidirectional direct messages with another person.
            self_recipient_id = self.user_profile.recipient_id
            cond = or_(
                and_(
                    column("sender_id", Integer) == other_participant.id,
                    column("recipient_id", Integer) == self_recipient_id,
                ),
                and_(
                    column("sender_id", Integer) == self.user_profile.id,
                    column("recipient_id", Integer) == recipient.id,
                ),
            )
            return query.where(maybe_negate(cond))

        # Direct message with self
        cond = and_(
            column("sender_id", Integer) == self.user_profile.id,
            column("recipient_id", Integer) == recipient.id,
        )
        return query.where(maybe_negate(cond))

    def by_group_pm_with(
        self, query: Select, operand: Union[str, int], maybe_negate: ConditionTransform
    ) -> Select:
        """Match group private messages that include both the current user
        and the given user (intersection of their huddle recipients)."""
        # This operator does not support is_web_public_query.
        assert not self.is_web_public_query
        assert self.user_profile is not None

        try:
            if isinstance(operand, str):
                narrow_profile = get_user_including_cross_realm(operand, self.realm)
            else:
                narrow_profile = get_user_by_id_in_realm_including_cross_realm(operand, self.realm)
        except UserProfile.DoesNotExist:
            raise BadNarrowOperator("unknown user " + str(operand))

        self_recipient_ids = [
            recipient_tuple["recipient_id"]
            for recipient_tuple in Subscription.objects.filter(
                user_profile=self.user_profile,
                recipient__type=Recipient.HUDDLE,
            ).values("recipient_id")
        ]
        narrow_recipient_ids = [
            recipient_tuple["recipient_id"]
            for recipient_tuple in Subscription.objects.filter(
                user_profile=narrow_profile,
                recipient__type=Recipient.HUDDLE,
            ).values("recipient_id")
        ]

        recipient_ids = set(self_recipient_ids) & set(narrow_recipient_ids)
        cond = column("recipient_id", Integer).in_(recipient_ids)
        return query.where(maybe_negate(cond))

    def by_search(self, query: Select, operand: str, maybe_negate: ConditionTransform) -> Select:
        """Full-text search, using PGroonga when enabled, else tsearch."""
        if settings.USING_PGROONGA:
            return self._by_search_pgroonga(query, operand, maybe_negate)
        else:
            return self._by_search_tsearch(query, operand, maybe_negate)

    def _by_search_pgroonga(
        self, query: Select, operand: str, maybe_negate: ConditionTransform
    ) -> Select:
        """PGroonga-backed search; adds content_matches/topic_matches columns."""
        match_positions_character = func.pgroonga_match_positions_character
        query_extract_keywords = func.pgroonga_query_extract_keywords
        operand_escaped = func.escape_html(operand)
        keywords = query_extract_keywords(operand_escaped)
        query = query.column(
            match_positions_character(column("rendered_content", Text), keywords).label(
                "content_matches"
            )
        )
        query = query.column(
            match_positions_character(func.escape_html(topic_column_sa()), keywords).label(
                "topic_matches"
            )
        )
        condition = column("search_pgroonga").op("&@~")(operand_escaped)
        return query.where(maybe_negate(condition))

    def _by_search_tsearch(
        self, query: Select, operand: str, maybe_negate: ConditionTransform
    ) -> Select:
        """PostgreSQL tsearch-backed search; adds content_matches/topic_matches columns."""
        tsquery = func.plainto_tsquery(literal("zulip.english_us_search"), literal(operand))
        query = query.column(
            ts_locs_array(
                literal("zulip.english_us_search"), column("rendered_content", Text), tsquery
            ).label("content_matches")
        )
        # We HTML-escape the topic in PostgreSQL to avoid doing a server round-trip
        query = query.column(
            ts_locs_array(
                literal("zulip.english_us_search"), func.escape_html(topic_column_sa()), tsquery
            ).label("topic_matches")
        )

        # Quoted strings in the operand are matched as literal substrings
        # with ILIKE, since there is no standard phrase-search
        # mechanism in PostgreSQL
        for term in re.findall(r'"[^"]+"|\S+', operand):
            if term[0] == '"' and term[-1] == '"':
                term = term[1:-1]
                term = "%" + connection.ops.prep_for_like_query(term) + "%"
                cond = or_(column("content", Text).ilike(term), topic_column_sa().ilike(term))
                query = query.where(maybe_negate(cond))

        cond = column("search_tsvector", postgresql.TSVECTOR).op("@@")(tsquery)
        return query.where(maybe_negate(cond))
def highlight_string(text: str, locs: Iterable[Tuple[int, int]]) -> str:
    """Wrap each (offset, length) region of `text` in a highlight span.

    Regions that would land inside an HTML tag are emitted verbatim, since
    inserting a span there would corrupt the markup.
    """
    open_span = '<span class="highlight">'
    close_span = "</span>"

    pieces = []
    cursor = 0
    inside_tag = False
    for offset, length in locs:
        prefix = text[cursor:offset]
        match = text[offset : offset + length]

        # Track tag state across the text consumed so far; note that the
        # state deliberately persists from one match region to the next.
        for character in prefix + match:
            if character == "<":
                inside_tag = True
            elif character == ">":
                inside_tag = False

        if inside_tag:
            pieces.append(prefix + match)
        else:
            pieces.append(prefix)
            pieces.append(open_span)
            pieces.append(match)
            pieces.append(close_span)
        cursor = offset + length

    pieces.append(text[cursor:])
    return "".join(pieces)
def get_search_fields(
    rendered_content: str,
    topic_name: str,
    content_matches: Iterable[Tuple[int, int]],
    topic_matches: Iterable[Tuple[int, int]],
) -> Dict[str, str]:
    """Build the match_content / match-topic fields attached to search results."""
    highlighted_content = highlight_string(rendered_content, content_matches)
    # Topic names are plain text, so escape them before highlighting.
    highlighted_topic = highlight_string(escape_html(topic_name), topic_matches)
    return {
        "match_content": highlighted_content,
        MATCH_TOPIC: highlighted_topic,
    }
def narrow_parameter(json: str) -> OptionalNarrowListT:
    """Parse and validate the JSON-encoded `narrow` request parameter into
    a list of {operator, operand, negated} dicts (or None for the empty
    narrow).  Raises ValueError or JsonableError on malformed input.
    """
    data = orjson.loads(json)
    if not isinstance(data, list):
        raise ValueError("argument is not a list")
    if len(data) == 0:
        # The "empty narrow" should be None, and not []
        return None

    def convert_term(elem: Union[Dict[str, Any], List[str]]) -> Dict[str, Any]:

        # We have to support a legacy tuple format.
        if isinstance(elem, list):
            if len(elem) != 2 or any(not isinstance(x, str) for x in elem):
                raise ValueError("element is not a string pair")
            return dict(operator=elem[0], operand=elem[1])

        if isinstance(elem, dict):
            # Make sure to sync this list to frontend also when adding a new operator
            # that supports user IDs.  Relevant code is located in static/js/message_fetch.js
            # in handle_operators_supporting_id_based_api function where you will need to update
            # operators_supporting_id, or operators_supporting_ids array.
            operators_supporting_id = ["sender", "group-pm-with", "stream"]
            operators_supporting_ids = ["pm-with"]
            operators_non_empty_operand = {"search"}

            operator = elem.get("operator", "")
            if operator in operators_supporting_id:
                operand_validator: Validator[object] = check_string_or_int
            elif operator in operators_supporting_ids:
                operand_validator = check_string_or_int_list
            elif operator in operators_non_empty_operand:
                operand_validator = check_required_string
            else:
                operand_validator = check_string

            validator = check_dict(
                required_keys=[
                    ("operator", check_string),
                    ("operand", operand_validator),
                ],
                optional_keys=[
                    ("negated", check_bool),
                ],
            )

            try:
                validator("elem", elem)
            except ValidationError as error:
                raise JsonableError(error.message)

            # whitelist the fields we care about for now
            return dict(
                operator=elem["operator"],
                operand=elem["operand"],
                negated=elem.get("negated", False),
            )

        raise ValueError("element is not a dictionary")

    return list(map(convert_term, data))
def ok_to_include_history(
    narrow: OptionalNarrowListT, user_profile: Optional[UserProfile], is_web_public_query: bool
) -> bool:
    """Decide whether the query may include Message rows that have no
    corresponding UserMessage row for this user (i.e. "historical"
    messages the user never received but is allowed to see).

    Returns True only when the narrow guarantees that the query will be
    restricted to a single accessible public stream (or web-public
    streams), so that including history cannot leak messages.
    """
    # There are occasions where we need to find Message rows that
    # have no corresponding UserMessage row, because the user is
    # reading a public stream that might include messages that
    # were sent while the user was not subscribed, but which they are
    # allowed to see. We have to be very careful about constructing
    # queries in those situations, so this function should return True
    # only if we are 100% sure that we're gonna add a clause to the
    # query that narrows to a particular public stream on the user's realm.
    # If we screw this up, then we can get into a nasty situation of
    # polluting our narrow results with messages from other realms.
    # For web-public queries, we are always returning history. The
    # analogues of the below stream access checks for whether streams
    # have is_web_public set and banning is operators in this code
    # path are done directly in NarrowBuilder.
    if is_web_public_query:
        assert user_profile is None
        return True

    assert user_profile is not None

    include_history = False
    if narrow is not None:
        for term in narrow:
            if term["operator"] == "stream" and not term.get("negated", False):
                operand: Union[str, int] = term["operand"]
                if isinstance(operand, str):
                    include_history = can_access_stream_history_by_name(user_profile, operand)
                else:
                    include_history = can_access_stream_history_by_id(user_profile, operand)
            elif (
                term["operator"] == "streams"
                and term["operand"] == "public"
                and not term.get("negated", False)
                and user_profile.can_access_public_streams()
            ):
                include_history = True
        # Disable historical messages if the user is narrowing on anything
        # that's a property on the UserMessage table. There cannot be
        # historical messages in these cases anyway.
        for term in narrow:
            if term["operator"] == "is":
                include_history = False

    return include_history
def get_stream_from_narrow_access_unchecked(
    narrow: OptionalNarrowListT, realm: Realm
) -> Optional[Stream]:
    """Return the Stream named by the first 'stream' term of the narrow,
    or None when the narrow is absent or has no stream term.

    No access check is performed; callers must only use the result in
    ways that cannot leak data (e.g. to *exclude* rows from a query).
    """
    if narrow is None:
        return None
    for term in narrow:
        if term["operator"] == "stream":
            return get_stream_by_narrow_operand_access_unchecked(term["operand"], realm)
    return None
def exclude_muting_conditions(
    user_profile: UserProfile, narrow: OptionalNarrowListT
) -> List[ClauseElement]:
    """Build SQLAlchemy WHERE clauses that exclude messages the user has
    muted: muted streams (only when the narrow is not already limited to
    a single stream) and muted topics.

    Returns a (possibly empty) list of clause elements to AND into the
    message query.
    """
    conditions = []
    stream_id = None
    try:
        # Note: It is okay here to not check access to stream
        # because we are only using the stream id to exclude data,
        # not to include results.
        stream = get_stream_from_narrow_access_unchecked(narrow, user_profile.realm)
        if stream is not None:
            stream_id = stream.id
    except Stream.DoesNotExist:
        pass

    # Stream-level muting only applies when looking at views that
    # include multiple streams, since we do want users to be able to
    # browser messages within a muted stream.
    if stream_id is None:
        rows = Subscription.objects.filter(
            user_profile=user_profile,
            active=True,
            is_muted=True,
            recipient__type=Recipient.STREAM,
        ).values("recipient_id")
        muted_recipient_ids = [row["recipient_id"] for row in rows]
        if len(muted_recipient_ids) > 0:
            # Only add the condition if we have muted streams to simplify/avoid warnings.
            condition = not_(column("recipient_id", Integer).in_(muted_recipient_ids))
            conditions.append(condition)

    conditions = exclude_topic_mutes(conditions, user_profile, stream_id)

    # Muted user logic for hiding messages is implemented entirely
    # client-side. This is by design, as it allows UI to hint that
    # muted messages exist where their absence might make conversation
    # difficult to understand. As a result, we do not need to consider
    # muted users in this server-side logic for returning messages to
    # clients. (We could in theory exclude PMs from muted users, but
    # they're likely to be sufficiently rare to not be worth extra
    # logic/testing here).

    return conditions
def get_base_query_for_search(
    user_profile: Optional[UserProfile], need_message: bool, need_user_message: bool
) -> Tuple[Select, "ColumnElement[int]"]:
    """Construct the base SELECT for a message search, plus the column
    expression that yields the message ID in that query.

    Three shapes are possible depending on the flags:
    - message only: select from zerver_message (no per-user data);
    - both: join zerver_usermessage to zerver_message for this user;
    - user_message only: select from zerver_usermessage for this user.
    """
    # Handle the simple case where user_message isn't involved first.
    if not need_user_message:
        assert need_message
        query = select([column("id", Integer).label("message_id")], None, table("zerver_message"))
        inner_msg_id_col: ColumnElement[int]
        inner_msg_id_col = literal_column("zerver_message.id", Integer)  # type: ignore[assignment] # https://github.com/dropbox/sqlalchemy-stubs/pull/189
        return (query, inner_msg_id_col)

    assert user_profile is not None
    if need_message:
        query = select(
            [column("message_id"), column("flags", Integer)],
            column("user_profile_id") == literal(user_profile.id),
            join(
                table("zerver_usermessage"),
                table("zerver_message"),
                literal_column("zerver_usermessage.message_id", Integer)
                == literal_column("zerver_message.id", Integer),
            ),
        )
        inner_msg_id_col = column("message_id", Integer)
        return (query, inner_msg_id_col)

    query = select(
        [column("message_id"), column("flags", Integer)],
        column("user_profile_id") == literal(user_profile.id),
        table("zerver_usermessage"),
    )
    inner_msg_id_col = column("message_id", Integer)
    return (query, inner_msg_id_col)
def add_narrow_conditions(
    user_profile: Optional[UserProfile],
    inner_msg_id_col: "ColumnElement[int]",
    query: Select,
    narrow: OptionalNarrowListT,
    is_web_public_query: bool,
    realm: Realm,
) -> Tuple[Select, bool]:
    """Apply every narrow term to the query via NarrowBuilder.

    Non-search terms are applied one by one; all 'search' operands are
    collected and applied as a single combined full-text search term.
    Returns (query, is_search), where is_search indicates that search
    columns (topic, rendered_content) were added to the select list.
    """
    is_search = False  # for now

    if narrow is None:
        return (query, is_search)

    # Build the query for the narrow
    builder = NarrowBuilder(user_profile, inner_msg_id_col, realm, is_web_public_query)
    search_operands = []

    # As we loop through terms, builder does most of the work to extend
    # our query, but we need to collect the search operands and handle
    # them after the loop.
    for term in narrow:
        if term["operator"] == "search":
            search_operands.append(term["operand"])
        else:
            query = builder.add_term(query, term)

    if search_operands:
        is_search = True
        query = query.column(topic_column_sa()).column(column("rendered_content", Text))
        search_term = dict(
            operator="search",
            operand=" ".join(search_operands),
        )
        query = builder.add_term(query, search_term)

    return (query, is_search)
def find_first_unread_anchor(
    sa_conn: Connection, user_profile: Optional[UserProfile], narrow: OptionalNarrowListT
) -> int:
    """Return the ID of the oldest unread, non-muted message matching the
    narrow, or LARGER_THAN_MAX_MESSAGE_ID if there is none.

    Used to implement the anchor="first_unread" fetch semantics.
    """
    # For anonymous web users, all messages are treated as read, and so
    # always return LARGER_THAN_MAX_MESSAGE_ID.
    if user_profile is None:
        return LARGER_THAN_MAX_MESSAGE_ID

    # We always need UserMessage in our query, because it has the unread
    # flag for the user.
    need_user_message = True

    # Because we will need to call exclude_muting_conditions, unless
    # the user hasn't muted anything, we will need to include Message
    # in our query.  It may be worth eventually adding an optimization
    # for the case of a user who hasn't muted anything to avoid the
    # join in that case, but it's low priority.
    need_message = True

    query, inner_msg_id_col = get_base_query_for_search(
        user_profile=user_profile,
        need_message=need_message,
        need_user_message=need_user_message,
    )

    query, is_search = add_narrow_conditions(
        user_profile=user_profile,
        inner_msg_id_col=inner_msg_id_col,
        query=query,
        narrow=narrow,
        is_web_public_query=False,
        realm=user_profile.realm,
    )

    # "Unread" means the read bit is not set in the UserMessage flags.
    condition = column("flags", Integer).op("&")(UserMessage.flags.read.mask) == 0

    # We exclude messages on muted topics when finding the first unread
    # message in this narrow
    muting_conditions = exclude_muting_conditions(user_profile, narrow)
    if muting_conditions:
        condition = and_(condition, *muting_conditions)

    first_unread_query = query.where(condition)
    first_unread_query = first_unread_query.order_by(inner_msg_id_col.asc()).limit(1)
    first_unread_result = list(sa_conn.execute(first_unread_query).fetchall())
    if len(first_unread_result) > 0:
        anchor = first_unread_result[0][0]
    else:
        anchor = LARGER_THAN_MAX_MESSAGE_ID
    return anchor
def parse_anchor_value(anchor_val: Optional[str], use_first_unread_anchor: bool) -> Optional[int]:
    """Translate the client-supplied anchor parameter into a message ID.

    Returns None to signal "first unread", 0 for the oldest message, and
    LARGER_THAN_MAX_MESSAGE_ID for the newest.  Raises JsonableError for
    a missing or malformed anchor.
    """
    if use_first_unread_anchor:
        # Backwards-compatibility: before the special string-typed anchor
        # values existed, clients passed anchor=None together with
        # use_first_unread_anchor=True to express what is now
        # anchor="first_unread".
        return None
    if anchor_val is None:
        # Neither an anchor argument nor use_first_unread_anchor was given.
        raise JsonableError(_("Missing 'anchor' argument."))

    special_anchors = {
        "oldest": 0,
        "newest": LARGER_THAN_MAX_MESSAGE_ID,
        "first_unread": None,
    }
    if anchor_val in special_anchors:
        return special_anchors[anchor_val]

    try:
        # Negative numbers are accepted (hence no `.isnumeric()` check):
        # the API doesn't recommend them (use 0 or 1 for the very first
        # message), but the web app historically sent them, so we keep
        # supporting them for backwards-compatibility.
        anchor = int(anchor_val)
    except ValueError:
        raise JsonableError(_("Invalid anchor"))
    if anchor < 0:
        return 0
    if anchor > LARGER_THAN_MAX_MESSAGE_ID:
        return LARGER_THAN_MAX_MESSAGE_ID
    return anchor
@has_request_variables
def get_messages_backend(
    request: HttpRequest,
    maybe_user_profile: Union[UserProfile, AnonymousUser],
    anchor_val: Optional[str] = REQ("anchor", default=None),
    num_before: int = REQ(converter=to_non_negative_int),
    num_after: int = REQ(converter=to_non_negative_int),
    narrow: OptionalNarrowListT = REQ("narrow", converter=narrow_parameter, default=None),
    use_first_unread_anchor_val: bool = REQ(
        "use_first_unread_anchor", json_validator=check_bool, default=False
    ),
    client_gravatar: bool = REQ(json_validator=check_bool, default=False),
    apply_markdown: bool = REQ(json_validator=check_bool, default=True),
) -> HttpResponse:
    """Main GET /messages endpoint: fetch up to num_before + num_after
    messages around an anchor, optionally filtered by a narrow.

    Supports authenticated users as well as unauthenticated web-public
    queries (which must narrow to streams:web-public).  Returns message
    dicts plus found_anchor/found_oldest/found_newest/history_limited
    metadata describing the completeness of the result window.
    """
    anchor = parse_anchor_value(anchor_val, use_first_unread_anchor_val)
    if num_before + num_after > MAX_MESSAGES_PER_FETCH:
        return json_error(
            _("Too many messages requested (maximum {}).").format(
                MAX_MESSAGES_PER_FETCH,
            )
        )

    if not maybe_user_profile.is_authenticated:
        # If user is not authenticated, clients must include
        # `streams:web-public` in their narrow query to indicate this
        # is a web-public query.  This helps differentiate between
        # cases of web-public queries (where we should return the
        # web-public results only) and clients with buggy
        # authentication code (where we should return an auth error).
        if not is_web_public_narrow(narrow):
            raise MissingAuthenticationError()
        assert narrow is not None
        if not is_web_public_compatible(narrow):
            raise MissingAuthenticationError()

        realm = get_valid_realm_from_request(request)
        # We use None to indicate unauthenticated requests as it's more
        # readable than using AnonymousUser, and the lack of Django
        # stubs means that mypy can't check AnonymousUser well.
        user_profile: Optional[UserProfile] = None
        is_web_public_query = True
    else:
        assert isinstance(maybe_user_profile, UserProfile)
        user_profile = maybe_user_profile
        assert user_profile is not None
        realm = user_profile.realm
        is_web_public_query = False

    assert realm is not None

    if (
        is_web_public_query
        or realm.email_address_visibility != Realm.EMAIL_ADDRESS_VISIBILITY_EVERYONE
    ):
        # If email addresses are only available to administrators,
        # clients cannot compute gravatars, so we force-set it to false.
        client_gravatar = False

    include_history = ok_to_include_history(narrow, user_profile, is_web_public_query)
    if include_history:
        # The initial query in this case doesn't use `zerver_usermessage`,
        # and isn't yet limited to messages the user is entitled to see!
        #
        # This is OK only because we've made sure this is a narrow that
        # will cause us to limit the query appropriately elsewhere.
        # See `ok_to_include_history` for details.
        #
        # Note that is_web_public_query=True goes here, since
        # include_history is semantically correct for is_web_public_query.
        need_message = True
        need_user_message = False
    elif narrow is None:
        # We need to limit to messages the user has received, but we don't actually
        # need any fields from Message
        need_message = False
        need_user_message = True
    else:
        need_message = True
        need_user_message = True

    query: FromClause
    query, inner_msg_id_col = get_base_query_for_search(
        user_profile=user_profile,
        need_message=need_message,
        need_user_message=need_user_message,
    )

    query, is_search = add_narrow_conditions(
        user_profile=user_profile,
        inner_msg_id_col=inner_msg_id_col,
        query=query,
        narrow=narrow,
        realm=realm,
        is_web_public_query=is_web_public_query,
    )

    if narrow is not None:
        # Add some metadata to our logging data for narrows
        verbose_operators = []
        for term in narrow:
            if term["operator"] == "is":
                verbose_operators.append("is:" + term["operand"])
            else:
                verbose_operators.append(term["operator"])
        request._log_data["extra"] = "[{}]".format(",".join(verbose_operators))

    sa_conn = get_sqlalchemy_connection()

    if anchor is None:
        # `anchor=None` corresponds to the anchor="first_unread" parameter.
        anchor = find_first_unread_anchor(
            sa_conn,
            user_profile,
            narrow,
        )

    anchored_to_left = anchor == 0

    # Set value that will be used to short circuit the after_query
    # altogether and avoid needless conditions in the before_query.
    anchored_to_right = anchor >= LARGER_THAN_MAX_MESSAGE_ID
    if anchored_to_right:
        num_after = 0

    first_visible_message_id = get_first_visible_message_id(realm)
    query = limit_query_to_range(
        query=query,
        num_before=num_before,
        num_after=num_after,
        anchor=anchor,
        anchored_to_left=anchored_to_left,
        anchored_to_right=anchored_to_right,
        id_col=inner_msg_id_col,
        first_visible_message_id=first_visible_message_id,
    )

    main_query = alias(query)
    query = select(main_query.c, None, main_query).order_by(column("message_id", Integer).asc())
    # This is a hack to tag the query we use for testing
    query = query.prefix_with("/* get_messages */")
    rows = list(sa_conn.execute(query).fetchall())

    query_info = post_process_limited_query(
        rows=rows,
        num_before=num_before,
        num_after=num_after,
        anchor=anchor,
        anchored_to_left=anchored_to_left,
        anchored_to_right=anchored_to_right,
        first_visible_message_id=first_visible_message_id,
    )

    rows = query_info["rows"]

    # The following is a little messy, but ensures that the code paths
    # are similar regardless of the value of include_history.  The
    # 'user_messages' dictionary maps each message to the user's
    # UserMessage object for that message, which we will attach to the
    # rendered message dict before returning it.  We attempt to
    # bulk-fetch rendered message dicts from remote cache using the
    # 'messages' list.
    message_ids: List[int] = []
    user_message_flags: Dict[int, List[str]] = {}
    if is_web_public_query:
        # For web-public users, we treat all historical messages as read.
        for row in rows:
            message_id = row[0]
            message_ids.append(message_id)
            user_message_flags[message_id] = ["read"]
    elif include_history:
        assert user_profile is not None
        message_ids = [row[0] for row in rows]

        # TODO: This could be done with an outer join instead of two queries
        um_rows = UserMessage.objects.filter(user_profile=user_profile, message_id__in=message_ids)
        user_message_flags = {um.message_id: um.flags_list() for um in um_rows}

        # Messages the user never received get synthetic read/historical flags.
        for message_id in message_ids:
            if message_id not in user_message_flags:
                user_message_flags[message_id] = ["read", "historical"]
    else:
        for row in rows:
            message_id = row[0]
            flags = row[1]
            user_message_flags[message_id] = UserMessage.flags_list_for_flags(flags)
            message_ids.append(message_id)

    search_fields: Dict[int, Dict[str, str]] = {}
    if is_search:
        for row in rows:
            message_id = row[0]
            # The last four columns are the search columns added by
            # add_narrow_conditions plus the match metadata.
            (topic_name, rendered_content, content_matches, topic_matches) = row[-4:]
            try:
                search_fields[message_id] = get_search_fields(
                    rendered_content, topic_name, content_matches, topic_matches
                )
            except UnicodeDecodeError as err:  # nocoverage
                # No coverage for this block since it should be
                # impossible, and we plan to remove it once we've
                # debugged the case that makes it happen.
                raise Exception(str(err), message_id, narrow)

    message_list = messages_for_ids(
        message_ids=message_ids,
        user_message_flags=user_message_flags,
        search_fields=search_fields,
        apply_markdown=apply_markdown,
        client_gravatar=client_gravatar,
        allow_edit_history=realm.allow_edit_history,
    )

    statsd.incr("loaded_old_messages", len(message_list))

    ret = dict(
        messages=message_list,
        result="success",
        msg="",
        found_anchor=query_info["found_anchor"],
        found_oldest=query_info["found_oldest"],
        found_newest=query_info["found_newest"],
        history_limited=query_info["history_limited"],
        anchor=anchor,
    )
    return json_success(ret)
def limit_query_to_range(
    query: Select,
    num_before: int,
    num_after: int,
    anchor: int,
    anchored_to_left: bool,
    anchored_to_right: bool,
    id_col: "ColumnElement[int]",
    first_visible_message_id: int,
) -> FromClause:
    """Restrict the message query to a window of rows around the anchor.

    Builds at most two limited sub-queries (rows before the anchor,
    rows at-or-after the anchor) and unions them; the anchor row itself
    is fetched via the "after" side's +1 headroom.
    """
    need_before_query = (not anchored_to_left) and (num_before > 0)
    need_after_query = (not anchored_to_right) and (num_after > 0)

    need_both_sides = need_before_query and need_after_query

    # The semantics of our flags are as follows:
    #
    # num_before = number of rows < anchor
    # num_after = number of rows > anchor
    #
    # But we also want the row where id == anchor (if it exists),
    # and we don't want to union up to 3 queries.  So in some cases
    # we do things like `after_limit = num_after + 1` to grab the
    # anchor row in the "after" query.
    #
    # Note that in some cases, if the anchor row isn't found, we
    # actually may fetch an extra row at one of the extremes.
    if need_both_sides:
        before_anchor = anchor - 1
        after_anchor = max(anchor, first_visible_message_id)
        before_limit = num_before
        after_limit = num_after + 1
    elif need_before_query:
        before_anchor = anchor
        before_limit = num_before
        if not anchored_to_right:
            before_limit += 1
    elif need_after_query:
        after_anchor = max(anchor, first_visible_message_id)
        after_limit = num_after + 1

    if need_before_query:
        before_query = query

        if not anchored_to_right:
            before_query = before_query.where(id_col <= before_anchor)

        before_query = before_query.order_by(id_col.desc())
        before_query = before_query.limit(before_limit)

    if need_after_query:
        after_query = query

        if not anchored_to_left:
            after_query = after_query.where(id_col >= after_anchor)

        after_query = after_query.order_by(id_col.asc())
        after_query = after_query.limit(after_limit)

    if need_both_sides:
        return union_all(before_query.self_group(), after_query.self_group())
    elif need_before_query:
        return before_query
    elif need_after_query:
        return after_query
    else:
        # If we don't have either a before_query or after_query, it's because
        # some combination of num_before/num_after/anchor are zero or
        # use_first_unread_anchor logic found no unread messages.
        #
        # The most likely reason is somebody is doing an id search, so searching
        # for something like `message_id = 42` is exactly what we want.  In other
        # cases, which could possibly be buggy API clients, at least we will
        # return at most one row here.
        return query.where(id_col == anchor)
def post_process_limited_query(
    rows: "Sequence[Union[RowProxy, Sequence[Any]]]",
    num_before: int,
    num_after: int,
    anchor: int,
    anchored_to_left: bool,
    anchored_to_right: bool,
    first_visible_message_id: int,
) -> Dict[str, Any]:
    """Trim over-fetched rows down to the requested window and compute
    completeness metadata.

    The limit queries may add "headroom" rows beyond num_before/num_after;
    this truncates them and reports found_anchor, found_oldest,
    found_newest, and history_limited so clients know whether they got
    complete results.  Each row's first column is the message ID.
    """
    # Drop rows hidden by the realm's message-retention cutoff, tracking
    # whether anything was actually filtered out.
    if first_visible_message_id > 0:
        visible_rows: "Sequence[Union[RowProxy, Sequence[Any]]]" = [
            row for row in rows if row[0] >= first_visible_message_id
        ]
    else:
        visible_rows = rows

    rows_limited = len(visible_rows) != len(rows)

    if anchored_to_right:
        # Everything counts as "before" when anchored past the newest message.
        num_after = 0
        before_rows = visible_rows[:]
        anchor_rows = []
        after_rows = []
    else:
        before_rows = [row for row in visible_rows if row[0] < anchor]
        anchor_rows = [row for row in visible_rows if row[0] == anchor]
        after_rows = [row for row in visible_rows if row[0] > anchor]

    # Truncate the headroom: keep the num_before rows closest to the anchor
    # and the first num_after rows past it.
    if num_before:
        before_rows = before_rows[-num_before:]
    if num_after:
        after_rows = after_rows[:num_after]

    visible_rows = [*before_rows, *anchor_rows, *after_rows]

    found_anchor = len(anchor_rows) == 1
    found_oldest = anchored_to_left or (len(before_rows) < num_before)
    found_newest = anchored_to_right or (len(after_rows) < num_after)

    # BUG: history_limited is incorrectly False in the event that we had
    # to bump `anchor` up due to first_visible_message_id, and there
    # were actually older messages.  This may be a rare event in the
    # context where history_limited is relevant, because it can only
    # happen in one-sided queries with no num_before (see tests tagged
    # BUG in PostProcessTest for examples), and we don't generally do
    # those from the UI, so this might be OK for now.
    #
    # The correct fix for this probably involves e.g. making a
    # `before_query` when we increase `anchor` just to confirm whether
    # messages were hidden.
    history_limited = rows_limited and found_oldest

    return dict(
        rows=visible_rows,
        found_anchor=found_anchor,
        found_newest=found_newest,
        found_oldest=found_oldest,
        history_limited=history_limited,
    )
@has_request_variables
def messages_in_narrow_backend(
    request: HttpRequest,
    user_profile: UserProfile,
    msg_ids: List[int] = REQ(json_validator=check_list(check_int)),
    narrow: OptionalNarrowListT = REQ(converter=narrow_parameter),
) -> HttpResponse:
    """Return, for the subset of msg_ids that match the narrow, rendered
    search-highlight fields keyed by message ID (as a string).

    Only messages the user actually received (have a UserMessage row)
    and that are not hidden by the realm's first-visible-message cutoff
    are considered.
    """
    first_visible_message_id = get_first_visible_message_id(user_profile.realm)
    msg_ids = [message_id for message_id in msg_ids if message_id >= first_visible_message_id]
    # This query is limited to messages the user has access to because they
    # actually received them, as reflected in `zerver_usermessage`.
    query = select(
        [column("message_id", Integer), topic_column_sa(), column("rendered_content", Text)],
        and_(
            column("user_profile_id", Integer) == literal(user_profile.id),
            column("message_id", Integer).in_(msg_ids),
        ),
        join(
            table("zerver_usermessage"),
            table("zerver_message"),
            literal_column("zerver_usermessage.message_id", Integer)
            == literal_column("zerver_message.id", Integer),
        ),
    )

    builder = NarrowBuilder(user_profile, column("message_id", Integer), user_profile.realm)
    if narrow is not None:
        for term in narrow:
            query = builder.add_term(query, term)

    sa_conn = get_sqlalchemy_connection()
    query_result = list(sa_conn.execute(query).fetchall())

    search_fields = {}
    for row in query_result:
        message_id = row["message_id"]
        topic_name = row[DB_TOPIC_NAME]
        rendered_content = row["rendered_content"]
        if "content_matches" in row:
            content_matches = row["content_matches"]
            topic_matches = row["topic_matches"]
        else:
            # No search term in the narrow: nothing to highlight.
            content_matches = topic_matches = []
        search_fields[str(message_id)] = get_search_fields(
            rendered_content,
            topic_name,
            content_matches,
            topic_matches,
        )

    return json_success({"messages": search_fields})
| true | true |
f71ccbfa3508cc2de142272d5a5eb12f86208da2 | 2,165 | py | Python | Packs/FeedCyjax/Integrations/FeedCyjax/test_data/indicators.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
] | 799 | 2016-08-02T06:43:14.000Z | 2022-03-31T11:10:11.000Z | Packs/FeedCyjax/Integrations/FeedCyjax/test_data/indicators.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
] | 9,317 | 2016-08-07T19:00:51.000Z | 2022-03-31T21:56:04.000Z | Packs/FeedCyjax/Integrations/FeedCyjax/test_data/indicators.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
] | 1,297 | 2016-08-04T13:59:00.000Z | 2022-03-31T23:43:06.000Z | mocked_indicators = [
{
"type": "URL",
"industry_type": [
"IT",
"online gaming",
"Military"
],
"value": "https://test.domainos.com?test=true&id=32423",
"handling_condition": "GREEN",
"discovered_at": "2020-12-31T14:18:26+0000",
"description": "Incident report with some test iocs",
"source": "https://website.cyjax.com/report/incident/view?id=68646",
"ttp": [
"Remote Access Software",
"Download New Code at Runtime",
],
"geoip": {
"city_name": "Donetsk",
"location": {
"lon": 37.7759,
"lat": 47.9917
},
"country_code2": "UA",
"country_name": "Ukraine"
},
},
{
"type": "FileHash-SHA1",
"industry_type": [
"IT",
"online gaming",
"Military"
],
"value": "1f49429f805663702acf221177dd0e99f6ba3f46",
"handling_condition": "GREEN",
"discovered_at": "2020-12-31T14:18:26+0000",
"description": "Incident report with some test iocs",
"source": "https://website.cyjax.com/report/incident/view?id=68646"
},
{
"type": "FileHash-SSDEEP",
"industry_type": [
"IT",
"online gaming",
"Military"
],
"value": "3072:Rl0zyy95JFokb1sUUBTHxg1htzj5hZrUrYq2r5HsBnWR0:Rl0Lrh1sBS1hLhZrVh5HsNWO",
"handling_condition": "GREEN",
"discovered_at": "2020-12-31T14:18:26+0000",
"description": "Incident report with some test iocs",
"source": "https://website.com/report/incident/view?id=68646"
},
{
"type": "IPv6",
"industry_type": [
"IT",
"online gaming",
"Military"
],
"value": "2001:da8:8000:6300:1c22:6545:295d:f55c",
"handling_condition": "GREEN",
"discovered_at": "2021-12-31T22:00:32+0000",
"description": "Incident report with some test iocs",
"source": "https://website.com/report/incident/view?id=68646"
},
] | 32.313433 | 95 | 0.513164 | mocked_indicators = [
{
"type": "URL",
"industry_type": [
"IT",
"online gaming",
"Military"
],
"value": "https://test.domainos.com?test=true&id=32423",
"handling_condition": "GREEN",
"discovered_at": "2020-12-31T14:18:26+0000",
"description": "Incident report with some test iocs",
"source": "https://website.cyjax.com/report/incident/view?id=68646",
"ttp": [
"Remote Access Software",
"Download New Code at Runtime",
],
"geoip": {
"city_name": "Donetsk",
"location": {
"lon": 37.7759,
"lat": 47.9917
},
"country_code2": "UA",
"country_name": "Ukraine"
},
},
{
"type": "FileHash-SHA1",
"industry_type": [
"IT",
"online gaming",
"Military"
],
"value": "1f49429f805663702acf221177dd0e99f6ba3f46",
"handling_condition": "GREEN",
"discovered_at": "2020-12-31T14:18:26+0000",
"description": "Incident report with some test iocs",
"source": "https://website.cyjax.com/report/incident/view?id=68646"
},
{
"type": "FileHash-SSDEEP",
"industry_type": [
"IT",
"online gaming",
"Military"
],
"value": "3072:Rl0zyy95JFokb1sUUBTHxg1htzj5hZrUrYq2r5HsBnWR0:Rl0Lrh1sBS1hLhZrVh5HsNWO",
"handling_condition": "GREEN",
"discovered_at": "2020-12-31T14:18:26+0000",
"description": "Incident report with some test iocs",
"source": "https://website.com/report/incident/view?id=68646"
},
{
"type": "IPv6",
"industry_type": [
"IT",
"online gaming",
"Military"
],
"value": "2001:da8:8000:6300:1c22:6545:295d:f55c",
"handling_condition": "GREEN",
"discovered_at": "2021-12-31T22:00:32+0000",
"description": "Incident report with some test iocs",
"source": "https://website.com/report/incident/view?id=68646"
},
] | true | true |
f71ccc3dfa925bc05b65ff8afe4da56a24f1736f | 245 | py | Python | Python Programming/06. Classes/01-Classes.py | luckyrabbit85/Python | ed134fd70b4a7b84b183b87b85ad5190f54c9526 | [
"MIT"
] | 1 | 2021-07-15T18:40:26.000Z | 2021-07-15T18:40:26.000Z | Python Programming/06. Classes/01-Classes.py | luckyrabbit85/Python | ed134fd70b4a7b84b183b87b85ad5190f54c9526 | [
"MIT"
] | null | null | null | Python Programming/06. Classes/01-Classes.py | luckyrabbit85/Python | ed134fd70b4a7b84b183b87b85ad5190f54c9526 | [
"MIT"
] | null | null | null | # Class: blueprint for creating new objects
# Object: instances of a class
# Class: Human
# Objects: John, Mary, Jack
class Point:
    """Toy example class: a blueprint for creating point objects."""

    def draw(self):
        """Render the point (toy implementation: just prints a marker)."""
        print("draw")
# Instantiate the class and inspect the resulting object.
point = Point()
# type() reports the instance's class; isinstance() checks membership.
print(type(point))
print(isinstance(point, Point))
| 15.3125 | 43 | 0.677551 |
# Duplicate of the toy example above (dataset residue kept intact).
class Point:
    def draw(self):
        # Toy behavior: print a marker to show the method ran.
        print("draw")
# Instantiate the class and inspect the resulting object.
point = Point()
print(type(point))
print(isinstance(point, Point))
| true | true |
f71cccaab8c8334d17849d7af7fa89ed4b6eaf3b | 7,967 | py | Python | magnum/tests/unit/api/test_attr_validator.py | MatMaul/magnum | 4d5fd80d89e38e98aff24f01b967a57d0adcd191 | [
"Apache-2.0"
] | null | null | null | magnum/tests/unit/api/test_attr_validator.py | MatMaul/magnum | 4d5fd80d89e38e98aff24f01b967a57d0adcd191 | [
"Apache-2.0"
] | null | null | null | magnum/tests/unit/api/test_attr_validator.py | MatMaul/magnum | 4d5fd80d89e38e98aff24f01b967a57d0adcd191 | [
"Apache-2.0"
] | 1 | 2020-09-09T14:35:08.000Z | 2020-09-09T14:35:08.000Z | # Copyright 2015 EasyStack, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from glanceclient import exc as glance_exception
import mock
from novaclient import exceptions as nova_exc
from magnum.api import attr_validator
from magnum.common import exception
from magnum.tests import base
class TestAttrValidator(base.BaseTestCase):
    def test_validate_flavor_with_vaild_flavor(self):
        """validate_flavor succeeds when the flavor name is in Nova's list."""
        mock_flavor = mock.MagicMock()
        mock_flavor.name = 'test_flavor'
        mock_flavor.id = 'test_flavor_id'
        mock_flavors = [mock_flavor]
        mock_nova = mock.MagicMock()
        mock_nova.flavors.list.return_value = mock_flavors
        mock_os_cli = mock.MagicMock()
        mock_os_cli.nova.return_value = mock_nova
        attr_validator.validate_flavor(mock_os_cli, 'test_flavor')
        self.assertTrue(mock_nova.flavors.list.called)
    def test_validate_flavor_with_invaild_flavor(self):
        """validate_flavor raises FlavorNotFound when no flavor matches."""
        mock_flavor = mock.MagicMock()
        mock_flavor.name = 'test_flavor_not_equal'
        mock_flavor.id = 'test_flavor_id_not_equal'
        mock_flavors = [mock_flavor]
        mock_nova = mock.MagicMock()
        mock_nova.flavors.list.return_value = mock_flavors
        mock_os_cli = mock.MagicMock()
        mock_os_cli.nova.return_value = mock_nova
        self.assertRaises(exception.FlavorNotFound,
                          attr_validator.validate_flavor,
                          mock_os_cli, 'test_flavor')
    def test_validate_external_network_with_valid_network(self):
        """validate_external_network succeeds when Neutron lists the network."""
        mock_networks = {'networks': [{'name': 'test_ext_net',
                                       'id': 'test_ext_net_id'}]}
        mock_neutron = mock.MagicMock()
        mock_neutron.list_networks.return_value = mock_networks
        mock_os_cli = mock.MagicMock()
        mock_os_cli.neutron.return_value = mock_neutron
        attr_validator.validate_external_network(mock_os_cli, 'test_ext_net')
        self.assertTrue(mock_neutron.list_networks.called)
    def test_validate_external_network_with_invalid_network(self):
        """validate_external_network raises NetworkNotFound on no match."""
        mock_networks = {'networks': [{'name': 'test_ext_net_not_equal',
                                       'id': 'test_ext_net_id_not_equal'}]}
        mock_neutron = mock.MagicMock()
        mock_neutron.list_networks.return_value = mock_networks
        mock_os_cli = mock.MagicMock()
        mock_os_cli.neutron.return_value = mock_neutron
        self.assertRaises(exception.NetworkNotFound,
                          attr_validator.validate_external_network,
                          mock_os_cli, 'test_ext_net')
    def test_validate_keypair_with_valid_keypair(self):
        """validate_keypair succeeds when Nova can fetch the keypair."""
        mock_keypair = mock.MagicMock()
        mock_keypair.id = 'test-keypair'
        mock_nova = mock.MagicMock()
        mock_nova.keypairs.get.return_value = mock_keypair
        mock_os_cli = mock.MagicMock()
        mock_os_cli.nova.return_value = mock_nova
        attr_validator.validate_keypair(mock_os_cli, 'test-keypair')
    def test_validate_keypair_with_invalid_keypair(self):
        """validate_keypair maps Nova's NotFound to KeyPairNotFound."""
        mock_nova = mock.MagicMock()
        mock_nova.keypairs.get.side_effect = nova_exc.NotFound('test-keypair')
        mock_os_cli = mock.MagicMock()
        mock_os_cli.nova.return_value = mock_nova
        self.assertRaises(exception.KeyPairNotFound,
                          attr_validator.validate_keypair,
                          mock_os_cli, 'test_keypair')
    @mock.patch('magnum.api.utils.get_openstack_resource')
    def test_validate_image_with_valid_image_by_name(self, mock_os_res):
        """validate_image succeeds for a name lookup with an os_distro set."""
        mock_image = {'name': 'fedora-21-atomic-5',
                      'id': 'e33f0988-1730-405e-8401-30cbc8535302',
                      'os_distro': 'fedora-atomic'}
        mock_os_res.return_value = mock_image
        mock_os_cli = mock.MagicMock()
        attr_validator.validate_image(mock_os_cli, 'fedora-21-atomic-5')
        self.assertTrue(mock_os_res.called)
    @mock.patch('magnum.api.utils.get_openstack_resource')
    def test_validate_image_with_valid_image_by_id(self, mock_os_res):
        """validate_image succeeds for a UUID lookup with an os_distro set."""
        mock_image = {'name': 'fedora-21-atomic-5',
                      'id': 'e33f0988-1730-405e-8401-30cbc8535302',
                      'os_distro': 'fedora-atomic'}
        mock_os_res.return_value = mock_image
        mock_os_cli = mock.MagicMock()
        attr_validator.validate_image(mock_os_cli,
                                      'e33f0988-1730-405e-8401-30cbc8535302')
        self.assertTrue(mock_os_res.called)
    @mock.patch('magnum.api.utils.get_openstack_resource')
    def test_validate_image_with_nonexist_image_by_name(self, mock_os_res):
        """validate_image maps ResourceNotFound (name lookup) to ImageNotFound."""
        mock_os_res.side_effect = exception.ResourceNotFound
        mock_os_cli = mock.MagicMock()
        self.assertRaises(exception.ImageNotFound,
                          attr_validator.validate_image,
                          mock_os_cli, 'fedora-21-atomic-5')
@mock.patch('magnum.api.utils.get_openstack_resource')
def test_validate_image_with_nonexist_image_by_id(self, mock_os_res):
mock_os_res.side_effect = glance_exception.NotFound
mock_os_cli = mock.MagicMock()
self.assertRaises(exception.ImageNotFound,
attr_validator.validate_image,
mock_os_cli, 'fedora-21-atomic-5')
@mock.patch('magnum.api.utils.get_openstack_resource')
def test_validate_image_with_multi_images_same_name(self, mock_os_res):
mock_os_res.side_effect = exception.Conflict
mock_os_cli = mock.MagicMock()
self.assertRaises(exception.Conflict,
attr_validator.validate_image,
mock_os_cli, 'fedora-21-atomic-5')
@mock.patch('magnum.api.utils.get_openstack_resource')
def test_validate_image_without_os_distro(self, mock_os_res):
mock_image = {'name': 'fedora-21-atomic-5',
'id': 'e33f0988-1730-405e-8401-30cbc8535302'}
mock_os_res.return_value = mock_image
mock_os_cli = mock.MagicMock()
self.assertRaises(exception.OSDistroFieldNotFound,
attr_validator.validate_image,
mock_os_cli, 'fedora-21-atomic-5')
@mock.patch('magnum.api.utils.get_openstack_resource')
def test_validate_image_with_empty_os_distro(self, mock_os_res):
mock_image = {'name': 'fedora-21-atomic-5',
'id': 'e33f0988-1730-405e-8401-30cbc8535302',
'os_distro': ''}
mock_os_res.return_value = mock_image
mock_os_cli = mock.MagicMock()
self.assertRaises(exception.OSDistroFieldNotFound,
attr_validator.validate_image,
mock_os_cli, 'fedora-21-atomic-5')
@mock.patch('magnum.common.clients.OpenStackClients')
def test_validate_os_resources_with_invalid_flavor(self,
mock_os_cli):
mock_baymodel = {'flavor_id': 'test_flavor'}
mock_flavor = mock.MagicMock()
mock_flavor.name = 'test_flavor_not_equal'
mock_flavor.id = 'test_flavor_id_not_equal'
mock_flavors = [mock_flavor]
mock_nova = mock.MagicMock()
mock_nova.flavors.list.return_value = mock_flavors
mock_os_cli.nova.return_value = mock_nova
mock_context = mock.MagicMock()
self.assertRaises(exception.FlavorNotFound,
attr_validator.validate_os_resources,
mock_context, mock_baymodel)
| 46.319767 | 78 | 0.667629 |
from glanceclient import exc as glance_exception
import mock
from novaclient import exceptions as nova_exc
from magnum.api import attr_validator
from magnum.common import exception
from magnum.tests import base
class TestAttrValidator(base.BaseTestCase):
    """Unit tests for magnum.api.attr_validator.

    Each test stubs the OpenStack service clients with mocks and checks
    that the validator either accepts the resource or raises the
    expected magnum exception.
    """

    def _fake_os_clients(self, service, client):
        # Build an OpenStackClients mock whose <service>() accessor
        # returns the supplied fake service client.
        os_clients = mock.MagicMock()
        getattr(os_clients, service).return_value = client
        return os_clients

    def test_validate_flavor_with_vaild_flavor(self):
        flavor = mock.MagicMock()
        flavor.name = 'test_flavor'
        flavor.id = 'test_flavor_id'
        nova_client = mock.MagicMock()
        nova_client.flavors.list.return_value = [flavor]
        os_clients = self._fake_os_clients('nova', nova_client)
        attr_validator.validate_flavor(os_clients, 'test_flavor')
        self.assertTrue(nova_client.flavors.list.called)

    def test_validate_flavor_with_invaild_flavor(self):
        flavor = mock.MagicMock()
        flavor.name = 'test_flavor_not_equal'
        flavor.id = 'test_flavor_id_not_equal'
        nova_client = mock.MagicMock()
        nova_client.flavors.list.return_value = [flavor]
        os_clients = self._fake_os_clients('nova', nova_client)
        self.assertRaises(exception.FlavorNotFound,
                          attr_validator.validate_flavor,
                          os_clients, 'test_flavor')

    def test_validate_external_network_with_valid_network(self):
        neutron_client = mock.MagicMock()
        neutron_client.list_networks.return_value = {
            'networks': [{'name': 'test_ext_net', 'id': 'test_ext_net_id'}]}
        os_clients = self._fake_os_clients('neutron', neutron_client)
        attr_validator.validate_external_network(os_clients, 'test_ext_net')
        self.assertTrue(neutron_client.list_networks.called)

    def test_validate_external_network_with_invalid_network(self):
        neutron_client = mock.MagicMock()
        neutron_client.list_networks.return_value = {
            'networks': [{'name': 'test_ext_net_not_equal',
                          'id': 'test_ext_net_id_not_equal'}]}
        os_clients = self._fake_os_clients('neutron', neutron_client)
        self.assertRaises(exception.NetworkNotFound,
                          attr_validator.validate_external_network,
                          os_clients, 'test_ext_net')

    def test_validate_keypair_with_valid_keypair(self):
        keypair = mock.MagicMock()
        keypair.id = 'test-keypair'
        nova_client = mock.MagicMock()
        nova_client.keypairs.get.return_value = keypair
        os_clients = self._fake_os_clients('nova', nova_client)
        attr_validator.validate_keypair(os_clients, 'test-keypair')

    def test_validate_keypair_with_invalid_keypair(self):
        nova_client = mock.MagicMock()
        nova_client.keypairs.get.side_effect = nova_exc.NotFound('test-keypair')
        os_clients = self._fake_os_clients('nova', nova_client)
        self.assertRaises(exception.KeyPairNotFound,
                          attr_validator.validate_keypair,
                          os_clients, 'test_keypair')

    @mock.patch('magnum.api.utils.get_openstack_resource')
    def test_validate_image_with_valid_image_by_name(self, mock_os_res):
        mock_os_res.return_value = {
            'name': 'fedora-21-atomic-5',
            'id': 'e33f0988-1730-405e-8401-30cbc8535302',
            'os_distro': 'fedora-atomic'}
        attr_validator.validate_image(mock.MagicMock(), 'fedora-21-atomic-5')
        self.assertTrue(mock_os_res.called)

    @mock.patch('magnum.api.utils.get_openstack_resource')
    def test_validate_image_with_valid_image_by_id(self, mock_os_res):
        mock_os_res.return_value = {
            'name': 'fedora-21-atomic-5',
            'id': 'e33f0988-1730-405e-8401-30cbc8535302',
            'os_distro': 'fedora-atomic'}
        attr_validator.validate_image(mock.MagicMock(),
                                      'e33f0988-1730-405e-8401-30cbc8535302')
        self.assertTrue(mock_os_res.called)

    @mock.patch('magnum.api.utils.get_openstack_resource')
    def test_validate_image_with_nonexist_image_by_name(self, mock_os_res):
        mock_os_res.side_effect = exception.ResourceNotFound
        self.assertRaises(exception.ImageNotFound,
                          attr_validator.validate_image,
                          mock.MagicMock(), 'fedora-21-atomic-5')

    @mock.patch('magnum.api.utils.get_openstack_resource')
    def test_validate_image_with_nonexist_image_by_id(self, mock_os_res):
        mock_os_res.side_effect = glance_exception.NotFound
        self.assertRaises(exception.ImageNotFound,
                          attr_validator.validate_image,
                          mock.MagicMock(), 'fedora-21-atomic-5')

    @mock.patch('magnum.api.utils.get_openstack_resource')
    def test_validate_image_with_multi_images_same_name(self, mock_os_res):
        mock_os_res.side_effect = exception.Conflict
        self.assertRaises(exception.Conflict,
                          attr_validator.validate_image,
                          mock.MagicMock(), 'fedora-21-atomic-5')

    @mock.patch('magnum.api.utils.get_openstack_resource')
    def test_validate_image_without_os_distro(self, mock_os_res):
        mock_os_res.return_value = {
            'name': 'fedora-21-atomic-5',
            'id': 'e33f0988-1730-405e-8401-30cbc8535302'}
        self.assertRaises(exception.OSDistroFieldNotFound,
                          attr_validator.validate_image,
                          mock.MagicMock(), 'fedora-21-atomic-5')

    @mock.patch('magnum.api.utils.get_openstack_resource')
    def test_validate_image_with_empty_os_distro(self, mock_os_res):
        mock_os_res.return_value = {
            'name': 'fedora-21-atomic-5',
            'id': 'e33f0988-1730-405e-8401-30cbc8535302',
            'os_distro': ''}
        self.assertRaises(exception.OSDistroFieldNotFound,
                          attr_validator.validate_image,
                          mock.MagicMock(), 'fedora-21-atomic-5')

    @mock.patch('magnum.common.clients.OpenStackClients')
    def test_validate_os_resources_with_invalid_flavor(self, mock_os_cli):
        flavor = mock.MagicMock()
        flavor.name = 'test_flavor_not_equal'
        flavor.id = 'test_flavor_id_not_equal'
        nova_client = mock.MagicMock()
        nova_client.flavors.list.return_value = [flavor]
        mock_os_cli.nova.return_value = nova_client
        context = mock.MagicMock()
        self.assertRaises(exception.FlavorNotFound,
                          attr_validator.validate_os_resources,
                          context, {'flavor_id': 'test_flavor'})
| true | true |
f71ccd96515f76c1d80e0d8132600385ef3f08bf | 3,311 | py | Python | huaweicloud-sdk-osm/huaweicloudsdkosm/v2/model/list_case_labels_response.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 64 | 2020-06-12T07:05:07.000Z | 2022-03-30T03:32:50.000Z | huaweicloud-sdk-osm/huaweicloudsdkosm/v2/model/list_case_labels_response.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 11 | 2020-07-06T07:56:54.000Z | 2022-01-11T11:14:40.000Z | huaweicloud-sdk-osm/huaweicloudsdkosm/v2/model/list_case_labels_response.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 24 | 2020-06-08T11:42:13.000Z | 2022-03-04T06:44:08.000Z | # coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListCaseLabelsResponse(SdkResponse):
    """Response model for the ListCaseLabels API.

    Attributes:
        openapi_types (dict): The key is attribute name
                            and the value is attribute type.
        attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    # Attribute names whose values must be masked in to_dict() output.
    sensitive_list = []

    openapi_types = {
        'case_label_list': 'list[CaseLabelInfo]'
    }

    attribute_map = {
        'case_label_list': 'case_label_list'
    }

    def __init__(self, case_label_list=None):
        """ListCaseLabelsResponse - a model defined in huaweicloud sdk"""
        super(ListCaseLabelsResponse, self).__init__()
        self._case_label_list = None
        self.discriminator = None
        if case_label_list is not None:
            self.case_label_list = case_label_list

    @property
    def case_label_list(self):
        """Gets the case_label_list of this ListCaseLabelsResponse.

        工单关联的标签列表

        :return: The case_label_list of this ListCaseLabelsResponse.
        :rtype: list[CaseLabelInfo]
        """
        return self._case_label_list

    @case_label_list.setter
    def case_label_list(self, case_label_list):
        """Sets the case_label_list of this ListCaseLabelsResponse.

        工单关联的标签列表

        :param case_label_list: The case_label_list of this ListCaseLabelsResponse.
        :type: list[CaseLabelInfo]
        """
        self._case_label_list = case_label_list

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict")
                                else item for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: v.to_dict() if hasattr(v, "to_dict") else v
                                for k, v in value.items()}
            elif attr in self.sensitive_list:
                result[attr] = "****"
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if isinstance(other, ListCaseLabelsResponse):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self.__eq__(other)
| 28.791304 | 83 | 0.583812 |
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListCaseLabelsResponse(SdkResponse):
    """Response model for the ListCaseLabels API (huaweicloud sdk)."""
    # Attribute names whose values are masked as "****" in to_dict().
    sensitive_list = []
    # Attribute name -> attribute type, per the OpenAPI definition.
    openapi_types = {
        'case_label_list': 'list[CaseLabelInfo]'
    }
    # Attribute name -> JSON key in the API payload.
    attribute_map = {
        'case_label_list': 'case_label_list'
    }
    def __init__(self, case_label_list=None):
        """ListCaseLabelsResponse - a model defined in huaweicloud sdk"""
        super(ListCaseLabelsResponse, self).__init__()
        self._case_label_list = None
        self.discriminator = None
        if case_label_list is not None:
            self.case_label_list = case_label_list
    @property
    def case_label_list(self):
        """Gets the case_label_list of this ListCaseLabelsResponse.

        :rtype: list[CaseLabelInfo]
        """
        return self._case_label_list
    @case_label_list.setter
    def case_label_list(self, case_label_list):
        """Sets the case_label_list of this ListCaseLabelsResponse.

        :type: list[CaseLabelInfo]
        """
        self._case_label_list = case_label_list
    def to_dict(self):
        """Returns the model properties as a dict."""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize nested model objects in lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Serialize nested model objects stored as dict values.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Scalars: mask sensitive attributes, pass others through.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the JSON string representation of the model."""
        import simplejson as json
        if six.PY2:
            # Python 2 only: force a UTF-8 default encoding for dumps.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`."""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal."""
        if not isinstance(other, ListCaseLabelsResponse):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        return not self == other
| true | true |
f71cce801e6baa714b9a75a9300c23ab0372565e | 21,083 | py | Python | deepfence_backend/tasks/task_scheduler.py | Jramirezg/ThreatMapper | af5fda3ff585f8728a7a0b48ae6818ed189e4dbf | [
"Apache-2.0"
] | null | null | null | deepfence_backend/tasks/task_scheduler.py | Jramirezg/ThreatMapper | af5fda3ff585f8728a7a0b48ae6818ed189e4dbf | [
"Apache-2.0"
] | null | null | null | deepfence_backend/tasks/task_scheduler.py | Jramirezg/ThreatMapper | af5fda3ff585f8728a7a0b48ae6818ed189e4dbf | [
"Apache-2.0"
] | null | null | null | import arrow
from config.app import celery_app, app
from models.container_image_registry import RegistryCredential
from models.scheduler import Scheduler
from models.setting import Setting
from croniter import croniter
from utils import constants
import time
from datetime import datetime
from utils.helper import websocketio_channel_name_format, get_image_cve_status
from config.redisconfig import redis
from utils.esconn import ESConn
from resource_models.node import Node
from utils.reports import prepare_report_download, prepare_report_email_body
from utils.response import set_response
from flask import make_response
import json
import uuid
from copy import deepcopy
from utils.helper import get_all_scanned_node, get_all_scanned_images
import pandas as pd
import re
@celery_app.task
def task_scheduler():
    """Dispatch every enabled scheduled task whose cron expression matches now.

    Runs inside the Flask app context. The comparison time is the current
    UTC hour with minutes/seconds/microseconds zeroed, so matching is done
    on hour boundaries.
    """
    with app.app_context():
        now = arrow.now(tz="+00:00").datetime
        now = now.replace(minute=0, second=0, microsecond=0)
        for task in Scheduler.query.filter_by(is_enabled=True).all():
            if croniter.match(task.cron_expr, now):
                run_node_task(task.action, task.nodes, task.id, task.cron_expr)
def run_node_task(action, node_action_details, scheduler_id=None, cron_expr=None):
    """Execute a node-level action: start/stop CVE scans or send/download a report.

    :param action: one of the constants.NODE_ACTION_* values handled below.
    :param node_action_details: dict describing the targets; always carries
        "node_type", plus either "registry_images" (registry-image scans) or
        "node_id_list" (hosts/containers), and action-specific options.
    :param scheduler_id: optional Scheduler row id; when given, the row's
        last_ran_at/status fields are updated as the task progresses.
    :param cron_expr: cron expression of the owning schedule; used to derive
        the look-back window for "only_new_images" registry scans.
    """
    with app.app_context():
        curr_time = arrow.now(tz="+00:00").datetime
        # Mark the owning schedule as running before doing any work.
        if scheduler_id:
            try:
                scheduled_task = Scheduler.query.get(scheduler_id)
                scheduled_task.last_ran_at = curr_time
                scheduled_task.status = "running"
                scheduled_task.save()
            except Exception as ex:
                app.logger.error(ex)
                return
        def save_scheduled_task_status(status):
            # Best-effort status update on the Scheduler row; no-op when
            # this run was not triggered by a schedule.
            if scheduler_id:
                try:
                    scheduled_task = Scheduler.query.get(scheduler_id)
                    scheduled_task.status = status
                    scheduled_task.save()
                except Exception as ex:
                    app.logger.error(ex)
        save_scheduled_task_status("In Progress")
        node_type = node_action_details["node_type"]
        df_id_to_scope_id_map = {}
        topology_data_df_format = {}
        registry_credential = None
        if node_type == constants.NODE_TYPE_REGISTRY_IMAGE:
            # Registry scans need the stored registry credential.
            try:
                registry_credential = RegistryCredential.query.get(
                    node_action_details["registry_images"]["registry_id"])
            except Exception as ex:
                save_scheduled_task_status("Error: " + str(ex))
                app.logger.error(ex)
                return
        else:
            # Host/container actions need the cached topology maps from
            # redis; retry up to 3 times while the cache warms up.
            if not node_action_details.get("node_id_list"):
                node_action_details["node_id_list"] = []
            for i in range(3):
                try:
                    redis_pipe = redis.pipeline()
                    redis_pipe.hgetall(constants.DF_ID_TO_SCOPE_ID_REDIS_KEY_PREFIX + node_type.upper())
                    redis_pipe.get(websocketio_channel_name_format(node_type + "?format=deepfence")[1])
                    redis_resp = redis_pipe.execute()
                    df_id_to_scope_id_map = redis_resp[0]
                    if redis_resp[1]:
                        topology_data_df_format = json.loads(redis_resp[1])
                    if topology_data_df_format and df_id_to_scope_id_map:
                        break
                    else:
                        app.logger.error("topology data is empty, retrying")
                        time.sleep(10)
                except Exception as ex:
                    app.logger.error(ex)
                    time.sleep(10)
        if action in [constants.NODE_ACTION_CVE_SCAN_START, constants.NODE_ACTION_SCHEDULE_CVE_SCAN]:
            if node_type == constants.NODE_TYPE_REGISTRY_IMAGE:
                from config.app import celery_app
                # Per-image redis INCR keys act as locks so each image is
                # only queued once even across concurrent runs.
                redis_lock_keys = []
                redis_pipe = redis.pipeline()
                image_list_details_str = redis.get("{0}:{1}".format(constants.REGISTRY_IMAGES_CACHE_KEY_PREFIX,
                                                                    node_action_details["registry_images"][
                                                                        "registry_id"]))
                if image_list_details_str:
                    # Resolve which image:tag names to scan from the cached
                    # registry catalogue, depending on the selection mode.
                    if node_action_details["registry_images"].get("all_registry_images", False):
                        image_dict = json.loads(image_list_details_str)
                        image_df = pd.DataFrame(image_dict['image_list'])
                        image_df['timestamp'] = pd.to_datetime(image_df.pushed_at)
                        sorted_df = image_df.sort_values(by=['timestamp'], ascending=False)
                        df_unique_list = sorted_df["image_tag"].unique()
                        df_unique = pd.DataFrame(data=df_unique_list, columns=["image_tag"])
                        sorted_df_by_image_tag = image_df.sort_values("image_tag")
                        images_by_tags = df_unique.merge(sorted_df_by_image_tag, on=["image_tag"], how="outer")[
                            "image_name_with_tag"]
                        node_action_details["registry_images"]["image_name_with_tag_list"] = images_by_tags
                    elif node_action_details["registry_images"].get("only_new_images", False):
                        image_dict = json.loads(image_list_details_str)
                        all_registry_images = set([image["image_name_with_tag"] for image in image_dict['image_list']])
                        # Derive the look-back window (days) from the cron
                        # day-interval, e.g. "0 0 */3 * *" -> 3.
                        if cron_expr:
                            pattern = '^0.*?\*/(\d).*?$'
                            match = re.search(pattern, cron_expr)
                            if match:
                                days_interval = int(match.group(1))
                            else:
                                days_interval = 1
                        # NOTE(review): days_interval is unbound when
                        # cron_expr is falsy - potential NameError; verify
                        # callers always pass cron_expr for this mode.
                        images_need_to_be_scanned = all_registry_images - get_all_scanned_images(days_interval)
                        node_action_details["registry_images"]["image_name_with_tag_list"] = list(
                            images_need_to_be_scanned)
                    elif node_action_details["registry_images"].get("registry_scan_type", None) == "latest_timestamp":
                        # Only the most recently pushed tag of each image.
                        image_dict = json.loads(image_list_details_str)
                        image_df = pd.DataFrame(image_dict['image_list'])
                        image_df['timestamp'] = pd.to_datetime(image_df.pushed_at)
                        grouped = image_df.groupby(['image_name']).agg({"timestamp": max}).reset_index()
                        latest_images_by_tags = image_df.merge(grouped, on=["image_name", "timestamp"], how="inner")[
                            'image_name_with_tag']
                        node_action_details["registry_images"]["image_name_with_tag_list"] = latest_images_by_tags
                    elif node_action_details["registry_images"].get("registry_scan_type", None) == "image_tags":
                        # Only images carrying one of the requested tags.
                        if node_action_details["registry_images"].get("image_tags", []):
                            image_tags = node_action_details["registry_images"].get("image_tags", [])
                            image_dict = json.loads(image_list_details_str)
                            image_df = pd.DataFrame(image_dict['image_list'])
                            images_by_tags = image_df[image_df["image_tag"].isin(image_tags)]["image_name_with_tag"]
                            node_action_details["registry_images"]["image_name_with_tag_list"] = images_by_tags
                else:
                    node_action_details["registry_images"]["image_name_with_tag_list"] = []
                # Acquire the per-image locks in one pipelined round trip.
                for image_name_with_tag in node_action_details["registry_images"]["image_name_with_tag_list"]:
                    lock_key = "{0}:{1}".format(constants.NODE_ACTION_CVE_SCAN_START, image_name_with_tag)
                    redis_pipe.incr(lock_key)
                    redis_lock_keys.append(lock_key)
                redis_resp = redis_pipe.execute()
                time.sleep(1)
                image_cve_status = get_image_cve_status()
                for i, image_name_with_tag in enumerate(
                        node_action_details["registry_images"]["image_name_with_tag_list"]):
                    try:
                        # INCR returned > 1 => someone else holds the lock.
                        if redis_resp[i] != 1:
                            continue
                        cve_status = image_cve_status.get(image_name_with_tag, {}).get("action", "")
                        if cve_status:
                            # Skip images already queued or being scanned.
                            if cve_status == constants.CVE_SCAN_STATUS_QUEUED or cve_status in constants.CVE_SCAN_RUNNING_STATUS:
                                continue
                        datetime_now = datetime.now()
                        scan_id = image_name_with_tag + "_" + datetime_now.strftime("%Y-%m-%dT%H:%M:%S") + ".000"
                        # Record the QUEUED state in elasticsearch before
                        # dispatching the scan to celery.
                        body = {
                            "masked": "false", "type": constants.CVE_SCAN_LOGS_INDEX, "scan_id": scan_id, "host": "",
                            "@timestamp": datetime_now.strftime("%Y-%m-%dT%H:%M:%S.%fZ"), "cve_scan_message": "",
                            "action": constants.CVE_SCAN_STATUS_QUEUED, "host_name": "", "node_id": image_name_with_tag,
                            "time_stamp": int(time.time() * 1000.0), "node_type": constants.NODE_TYPE_CONTAINER_IMAGE
                        }
                        ESConn.create_doc(constants.CVE_SCAN_LOGS_INDEX, body)
                        scan_details = {
                            "cve_node_id": image_name_with_tag, "scan_types": node_action_details["scan_type"],
                            "registry_type": registry_credential.registry_type, "scan_id": scan_id,
                            "credential_id": registry_credential.id}
                        celery_task_id = "cve_scan:" + scan_id
                        if node_action_details["registry_images"].get("priority", False):
                            celery_app.send_task('tasks.vulnerability_scan_worker.vulnerability_scan', args=(),
                                                 task_id=celery_task_id, kwargs={"scan_details": scan_details},
                                                 queue=constants.VULNERABILITY_SCAN_PRIORITY_QUEUE)
                        else:
                            celery_app.send_task('tasks.vulnerability_scan_worker.vulnerability_scan', args=(),
                                                 task_id=celery_task_id, kwargs={"scan_details": scan_details},
                                                 queue=constants.VULNERABILITY_SCAN_QUEUE)
                    except Exception as ex:
                        save_scheduled_task_status("Error: " + str(ex))
                        app.logger.error(ex)
                time.sleep(2)
                # Release the locks.
                # NOTE(review): deletes go through redis directly while an
                # empty pipeline is built and executed - redis_pipe.delete
                # was probably intended here.
                redis_pipe = redis.pipeline()
                for lock_key in redis_lock_keys:
                    redis.delete(lock_key)
                redis_pipe.execute()
            else:
                # Host/container CVE scan: resolve each node id via the
                # topology maps and lock per host name / image tag.
                node_list = []
                redis_lock_keys = []
                redis_pipe = redis.pipeline()
                for node_id in node_action_details["node_id_list"]:
                    try:
                        node = Node(node_id, df_id_to_scope_id_map=df_id_to_scope_id_map,
                                    topology_data_df_format=topology_data_df_format)
                        if node.type == constants.NODE_TYPE_HOST:
                            lock_key = "{0}:{1}".format(constants.NODE_ACTION_CVE_SCAN_START, node.host_name)
                        else:
                            if not node.image_name_tag:
                                continue
                            lock_key = "{0}:{1}".format(constants.NODE_ACTION_CVE_SCAN_START, node.image_name_tag)
                        if lock_key in redis_lock_keys:
                            # If same image, different container, already selected, don't scan again
                            continue
                        redis_lock_keys.append(lock_key)
                        redis_pipe.incr(lock_key)
                        node_list.append(node)
                    except Exception as ex:
                        save_scheduled_task_status("Error: " + str(ex))
                        app.logger.error(ex)
                if not node_list:
                    error_message = "No node available for scan"
                    save_scheduled_task_status("Error: " + error_message)
                    app.logger.error(error_message)
                    return
                redis_resp = redis_pipe.execute()
                for i, node in enumerate(node_list):
                    # Skip nodes whose lock was already held elsewhere.
                    if redis_resp[i] != 1:
                        continue
                    try:
                        node.cve_scan_start(node_action_details["scan_type"],
                                            priority=node_action_details.get("priority", False))
                    except Exception as ex:
                        save_scheduled_task_status("Error: " + str(ex))
                        app.logger.error(ex)
                time.sleep(1)
                # Release the locks (same empty-pipeline pattern as above).
                redis_pipe = redis.pipeline()
                for lock_key in redis_lock_keys:
                    redis.delete(lock_key)
                redis_pipe.execute()
        elif action == constants.NODE_ACTION_CVE_SCAN_STOP:
            if node_type == constants.NODE_TYPE_REGISTRY_IMAGE:
                from config.app import celery_app
                if node_action_details["registry_images"].get("all_registry_images", False):
                    image_list_details_str = redis.get("{0}:{1}".format(constants.REGISTRY_IMAGES_CACHE_KEY_PREFIX,
                                                                        node_action_details["registry_images"][
                                                                            "registry_id"]))
                    image_dict = json.loads(image_list_details_str)
                    node_action_details["registry_images"]["image_name_with_tag_list"] = [image["image_name_with_tag"]
                                                                                          for image in
                                                                                          image_dict['image_list']]
                for image_name_with_tag in node_action_details["registry_images"]["image_name_with_tag_list"]:
                    try:
                        # Fetch the most recent scan-log doc for this image.
                        es_response = ESConn.search_by_and_clause(constants.CVE_SCAN_LOGS_INDEX,
                                                                  {"node_id": image_name_with_tag}, 0, size=1)
                        latest_cve_scan_doc = {}
                        cve_scan_list = es_response.get("hits", [])
                        if cve_scan_list:
                            cve_scan_doc = cve_scan_list[0]
                            latest_cve_scan_doc = cve_scan_doc.get('_source', {})
                            latest_cve_scan_doc.update({'_id': cve_scan_doc.get('_id', "")})
                        if latest_cve_scan_doc:
                            status = latest_cve_scan_doc.get("action", "")
                            scan_id = latest_cve_scan_doc.get("scan_id", "")
                            if (status in constants.CVE_SCAN_NOT_RUNNING_STATUS) or (not scan_id):
                                continue
                            elif status != constants.CVE_SCAN_STATUS_QUEUED:
                                # Only QUEUED scans can be revoked here.
                                continue
                            celery_task_id = "cve_scan:" + scan_id
                            celery_app.control.revoke(celery_task_id, terminate=False)
                            body = {
                                "masked": "false", "type": constants.CVE_SCAN_LOGS_INDEX, "scan_id": scan_id,
                                "cve_scan_message": "Scan stopped by user", "time_stamp": int(time.time() * 1000.0),
                                "@timestamp": datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ"), "host": "",
                                "action": constants.CVE_SCAN_STATUS_STOPPED, "host_name": "",
                                "node_id": latest_cve_scan_doc.get("node_id", ""),
                                "node_type": constants.NODE_TYPE_CONTAINER_IMAGE
                            }
                            ESConn.create_doc(constants.CVE_SCAN_LOGS_INDEX, body)
                    except Exception as ex:
                        save_scheduled_task_status("Error: " + str(ex))
                        app.logger.error(ex)
            else:
                for node_id in node_action_details["node_id_list"]:
                    try:
                        node = Node(node_id, df_id_to_scope_id_map=df_id_to_scope_id_map,
                                    topology_data_df_format=topology_data_df_format)
                        node.cve_scan_stop()
                    except Exception as ex:
                        save_scheduled_task_status("Error: " + str(ex))
                        app.logger.error(ex)
        elif action == constants.NODE_ACTION_SCHEDULE_SEND_REPORT:
            # Emit a report-generation task; the report is emailed when done.
            domain_name = ""
            console_url_setting = Setting.query.filter_by(key="console_url").one_or_none()
            if console_url_setting and console_url_setting.value:
                domain_name = console_url_setting.value.get("value")
            report_id = uuid.uuid4()
            body = {
                "type": constants.REPORT_INDEX,
                "report_id": report_id,
                "status": "started",
                "masked": "false",
                "@timestamp": datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
            }
            ESConn.create_doc(constants.REPORT_INDEX, body, refresh="wait_for")
            if node_action_details.get('include_dead_nodes') is True:
                if node_type == 'host':
                    if len(node_action_details['filters'].get('host_name', [])) == 0:
                        node_action_details['filters']['host_name'] = get_all_scanned_node()
            from config.app import celery_app
            celery_app.send_task(
                'tasks.common_worker.generate_report', args=(),
                kwargs={"report_id": report_id, "filters": node_action_details.get("filters", {}),
                        "lucene_query_string": "",
                        "number": node_action_details.get("duration", {}).get("number", 0),
                        "time_unit": node_action_details.get("duration", {}).get("time_unit", "day"),
                        "domain_name": domain_name, "resources": node_action_details.get("resources", {}),
                        "file_type": node_action_details.get("file_type", "xlsx"), "node_type": node_type,
                        "include_dead_nodes": node_action_details.get("include_dead_nodes", False),
                        "report_email": node_action_details["report_email"]})
            return set_response(data="Started")
        elif action == constants.NODE_ACTION_DOWNLOAD_REPORT:
            # Same as SEND_REPORT but with no email recipient; note the
            # default time_unit differs here ("d" vs "day" above).
            domain_name = ""
            console_url_setting = Setting.query.filter_by(key="console_url").one_or_none()
            if console_url_setting and console_url_setting.value:
                domain_name = console_url_setting.value.get("value")
            report_id = uuid.uuid4()
            body = {
                "type": constants.REPORT_INDEX,
                "report_id": report_id,
                "status": "started",
                "masked": "false",
                "duration": "",
                "@timestamp": datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
            }
            ESConn.create_doc(constants.REPORT_INDEX, body, refresh="wait_for")
            if node_action_details.get('include_dead_nodes') is True:
                if node_type == 'host':
                    if len(node_action_details['filters'].get('host_name', [])) == 0:
                        node_action_details['filters']['host_name'] = get_all_scanned_node()
            from config.app import celery_app
            celery_app.send_task(
                'tasks.common_worker.generate_report', args=(),
                kwargs={"report_id": report_id, "filters": node_action_details.get("filters", {}),
                        "lucene_query_string": "",
                        "number": node_action_details.get("duration", {}).get("number", 0),
                        "time_unit": node_action_details.get("duration", {}).get("time_unit", "d"),
                        "domain_name": domain_name, "resources": node_action_details.get("resources", {}),
                        "file_type": node_action_details.get("file_type", "xlsx"), "node_type": node_type,
                        "include_dead_nodes": node_action_details.get("include_dead_nodes", False),
                        "report_email": ""})
            return set_response(data="Started")
        save_scheduled_task_status("Success")
| 61.287791 | 129 | 0.538206 | import arrow
from config.app import celery_app, app
from models.container_image_registry import RegistryCredential
from models.scheduler import Scheduler
from models.setting import Setting
from croniter import croniter
from utils import constants
import time
from datetime import datetime
from utils.helper import websocketio_channel_name_format, get_image_cve_status
from config.redisconfig import redis
from utils.esconn import ESConn
from resource_models.node import Node
from utils.reports import prepare_report_download, prepare_report_email_body
from utils.response import set_response
from flask import make_response
import json
import uuid
from copy import deepcopy
from utils.helper import get_all_scanned_node, get_all_scanned_images
import pandas as pd
import re
@celery_app.task
def task_scheduler():
    """Dispatch every enabled scheduled task whose cron expression matches now.

    Runs inside the Flask app context; the current UTC hour (minutes,
    seconds and microseconds zeroed) is matched against each task's cron
    expression and matching tasks are forwarded to run_node_task.
    """
    with app.app_context():
        now = arrow.now(tz="+00:00").datetime
        now = now.replace(minute=0, second=0, microsecond=0)
        for task in Scheduler.query.filter_by(is_enabled=True).all():
            if croniter.match(task.cron_expr, now):
                run_node_task(task.action, task.nodes, task.id, task.cron_expr)
def run_node_task(action, node_action_details, scheduler_id=None, cron_expr=None):
    """Execute a (possibly scheduled) bulk action on a set of nodes.

    Supported actions: start / stop CVE (vulnerability) scans on hosts,
    containers or registry images, and generate scheduled or on-demand
    reports.  When ``scheduler_id`` is given, progress is persisted on the
    corresponding ``Scheduler`` row.

    :param action: one of the ``constants.NODE_ACTION_*`` values
    :param node_action_details: dict describing the targets; must contain
        ``node_type`` plus either ``node_id_list`` or a ``registry_images``
        dict (for registry-image scans)
    :param scheduler_id: optional ``Scheduler`` primary key for status updates
    :param cron_expr: optional cron expression of the schedule; used to derive
        the look-back interval for ``only_new_images`` registry scans
    """
    with app.app_context():
        curr_time = arrow.now(tz="+00:00").datetime
        if scheduler_id:
            # Mark the schedule entry as running; bail out if it cannot be updated.
            try:
                scheduled_task = Scheduler.query.get(scheduler_id)
                scheduled_task.last_ran_at = curr_time
                scheduled_task.status = "running"
                scheduled_task.save()
            except Exception as ex:
                app.logger.error(ex)
                return

        def save_scheduled_task_status(status):
            # Persist a status string on the Scheduler row (no-op when unscheduled).
            if scheduler_id:
                try:
                    scheduled_task = Scheduler.query.get(scheduler_id)
                    scheduled_task.status = status
                    scheduled_task.save()
                except Exception as ex:
                    app.logger.error(ex)

        save_scheduled_task_status("In Progress")
        node_type = node_action_details["node_type"]
        df_id_to_scope_id_map = {}
        topology_data_df_format = {}
        registry_credential = None
        if node_type == constants.NODE_TYPE_REGISTRY_IMAGE:
            try:
                registry_credential = RegistryCredential.query.get(
                    node_action_details["registry_images"]["registry_id"])
            except Exception as ex:
                save_scheduled_task_status("Error: " + str(ex))
                app.logger.error(ex)
                return
        else:
            if not node_action_details.get("node_id_list"):
                node_action_details["node_id_list"] = []
            # Topology data is filled asynchronously; retry up to 3 times.
            for i in range(3):
                try:
                    redis_pipe = redis.pipeline()
                    redis_pipe.hgetall(constants.DF_ID_TO_SCOPE_ID_REDIS_KEY_PREFIX + node_type.upper())
                    redis_pipe.get(websocketio_channel_name_format(node_type + "?format=deepfence")[1])
                    redis_resp = redis_pipe.execute()
                    df_id_to_scope_id_map = redis_resp[0]
                    if redis_resp[1]:
                        topology_data_df_format = json.loads(redis_resp[1])
                    if topology_data_df_format and df_id_to_scope_id_map:
                        break
                    else:
                        app.logger.error("topology data is empty, retrying")
                        time.sleep(10)
                except Exception as ex:
                    app.logger.error(ex)
                    time.sleep(10)
        if action in [constants.NODE_ACTION_CVE_SCAN_START, constants.NODE_ACTION_SCHEDULE_CVE_SCAN]:
            if node_type == constants.NODE_TYPE_REGISTRY_IMAGE:
                from config.app import celery_app
                redis_lock_keys = []
                redis_pipe = redis.pipeline()
                image_list_details_str = redis.get("{0}:{1}".format(
                    constants.REGISTRY_IMAGES_CACHE_KEY_PREFIX,
                    node_action_details["registry_images"]["registry_id"]))
                if image_list_details_str:
                    if node_action_details["registry_images"].get("all_registry_images", False):
                        # Scan every unique tag; unique tags are taken from the
                        # most-recently-pushed ordering, then joined back to images.
                        image_dict = json.loads(image_list_details_str)
                        image_df = pd.DataFrame(image_dict['image_list'])
                        image_df['timestamp'] = pd.to_datetime(image_df.pushed_at)
                        sorted_df = image_df.sort_values(by=['timestamp'], ascending=False)
                        df_unique_list = sorted_df["image_tag"].unique()
                        df_unique = pd.DataFrame(data=df_unique_list, columns=["image_tag"])
                        sorted_df_by_image_tag = image_df.sort_values("image_tag")
                        images_by_tags = df_unique.merge(sorted_df_by_image_tag, on=["image_tag"],
                                                         how="outer")["image_name_with_tag"]
                        node_action_details["registry_images"]["image_name_with_tag_list"] = images_by_tags
                    elif node_action_details["registry_images"].get("only_new_images", False):
                        # Scan only images not scanned in the last N days; N comes
                        # from the "*/N" day field of the cron expression.
                        image_dict = json.loads(image_list_details_str)
                        all_registry_images = set(
                            [image["image_name_with_tag"] for image in image_dict['image_list']])
                        # fix: default first — days_interval was undefined (NameError)
                        # whenever cron_expr was None or did not match.
                        days_interval = 1
                        if cron_expr:
                            match = re.search(r'^0.*?\*/(\d).*?$', cron_expr)
                            if match:
                                days_interval = int(match.group(1))
                        images_need_to_be_scanned = all_registry_images - get_all_scanned_images(days_interval)
                        node_action_details["registry_images"]["image_name_with_tag_list"] = list(
                            images_need_to_be_scanned)
                    elif node_action_details["registry_images"].get("registry_scan_type", None) == "latest_timestamp":
                        # Scan only the most recently pushed tag of each image name.
                        image_dict = json.loads(image_list_details_str)
                        image_df = pd.DataFrame(image_dict['image_list'])
                        image_df['timestamp'] = pd.to_datetime(image_df.pushed_at)
                        grouped = image_df.groupby(['image_name']).agg({"timestamp": max}).reset_index()
                        latest_images_by_tags = image_df.merge(grouped, on=["image_name", "timestamp"],
                                                               how="inner")['image_name_with_tag']
                        node_action_details["registry_images"]["image_name_with_tag_list"] = latest_images_by_tags
                    elif node_action_details["registry_images"].get("registry_scan_type", None) == "image_tags":
                        # Scan only the explicitly requested tags.
                        if node_action_details["registry_images"].get("image_tags", []):
                            image_tags = node_action_details["registry_images"].get("image_tags", [])
                            image_dict = json.loads(image_list_details_str)
                            image_df = pd.DataFrame(image_dict['image_list'])
                            images_by_tags = image_df[image_df["image_tag"].isin(image_tags)]["image_name_with_tag"]
                            node_action_details["registry_images"]["image_name_with_tag_list"] = images_by_tags
                else:
                    node_action_details["registry_images"]["image_name_with_tag_list"] = []
                # Per-image redis locks dedupe concurrent triggers: only the caller
                # whose INCR returns 1 may enqueue the scan for that image.
                for image_name_with_tag in node_action_details["registry_images"]["image_name_with_tag_list"]:
                    lock_key = "{0}:{1}".format(constants.NODE_ACTION_CVE_SCAN_START, image_name_with_tag)
                    redis_pipe.incr(lock_key)
                    redis_lock_keys.append(lock_key)
                redis_resp = redis_pipe.execute()
                time.sleep(1)
                image_cve_status = get_image_cve_status()
                for i, image_name_with_tag in enumerate(
                        node_action_details["registry_images"]["image_name_with_tag_list"]):
                    try:
                        if redis_resp[i] != 1:
                            continue  # another worker holds the lock for this image
                        cve_status = image_cve_status.get(image_name_with_tag, {}).get("action", "")
                        if cve_status:
                            if cve_status == constants.CVE_SCAN_STATUS_QUEUED or cve_status in constants.CVE_SCAN_RUNNING_STATUS:
                                continue  # a scan is already queued / running
                        datetime_now = datetime.now()
                        scan_id = image_name_with_tag + "_" + datetime_now.strftime("%Y-%m-%dT%H:%M:%S") + ".000"
                        body = {
                            "masked": "false", "type": constants.CVE_SCAN_LOGS_INDEX, "scan_id": scan_id, "host": "",
                            "@timestamp": datetime_now.strftime("%Y-%m-%dT%H:%M:%S.%fZ"), "cve_scan_message": "",
                            "action": constants.CVE_SCAN_STATUS_QUEUED, "host_name": "", "node_id": image_name_with_tag,
                            "time_stamp": int(time.time() * 1000.0), "node_type": constants.NODE_TYPE_CONTAINER_IMAGE
                        }
                        ESConn.create_doc(constants.CVE_SCAN_LOGS_INDEX, body)
                        scan_details = {
                            "cve_node_id": image_name_with_tag, "scan_types": node_action_details["scan_type"],
                            "registry_type": registry_credential.registry_type, "scan_id": scan_id,
                            "credential_id": registry_credential.id}
                        celery_task_id = "cve_scan:" + scan_id
                        if node_action_details["registry_images"].get("priority", False):
                            celery_app.send_task('tasks.vulnerability_scan_worker.vulnerability_scan', args=(),
                                                 task_id=celery_task_id, kwargs={"scan_details": scan_details},
                                                 queue=constants.VULNERABILITY_SCAN_PRIORITY_QUEUE)
                        else:
                            celery_app.send_task('tasks.vulnerability_scan_worker.vulnerability_scan', args=(),
                                                 task_id=celery_task_id, kwargs={"scan_details": scan_details},
                                                 queue=constants.VULNERABILITY_SCAN_QUEUE)
                    except Exception as ex:
                        save_scheduled_task_status("Error: " + str(ex))
                        app.logger.error(ex)
                time.sleep(2)
                # Release the locks in one round trip.  fix: deletes were issued
                # directly on the client while an empty pipeline was executed.
                redis_pipe = redis.pipeline()
                for lock_key in redis_lock_keys:
                    redis_pipe.delete(lock_key)
                redis_pipe.execute()
            else:
                node_list = []
                redis_lock_keys = []
                redis_pipe = redis.pipeline()
                # One lock per host / image so a shared target is scanned only once.
                for node_id in node_action_details["node_id_list"]:
                    try:
                        node = Node(node_id, df_id_to_scope_id_map=df_id_to_scope_id_map,
                                    topology_data_df_format=topology_data_df_format)
                        if node.type == constants.NODE_TYPE_HOST:
                            lock_key = "{0}:{1}".format(constants.NODE_ACTION_CVE_SCAN_START, node.host_name)
                        else:
                            if not node.image_name_tag:
                                continue
                            lock_key = "{0}:{1}".format(constants.NODE_ACTION_CVE_SCAN_START, node.image_name_tag)
                        if lock_key in redis_lock_keys:
                            continue  # containers sharing an image: scan once
                        redis_lock_keys.append(lock_key)
                        redis_pipe.incr(lock_key)
                        node_list.append(node)
                    except Exception as ex:
                        save_scheduled_task_status("Error: " + str(ex))
                        app.logger.error(ex)
                if not node_list:
                    error_message = "No node available for scan"
                    save_scheduled_task_status("Error: " + error_message)
                    app.logger.error(error_message)
                    return
                redis_resp = redis_pipe.execute()
                for i, node in enumerate(node_list):
                    if redis_resp[i] != 1:
                        continue  # another worker holds the lock for this node
                    try:
                        node.cve_scan_start(node_action_details["scan_type"],
                                            priority=node_action_details.get("priority", False))
                    except Exception as ex:
                        save_scheduled_task_status("Error: " + str(ex))
                        app.logger.error(ex)
                time.sleep(1)
                # Release the locks in one round trip (same fix as above).
                redis_pipe = redis.pipeline()
                for lock_key in redis_lock_keys:
                    redis_pipe.delete(lock_key)
                redis_pipe.execute()
        elif action == constants.NODE_ACTION_CVE_SCAN_STOP:
            if node_type == constants.NODE_TYPE_REGISTRY_IMAGE:
                from config.app import celery_app
                if node_action_details["registry_images"].get("all_registry_images", False):
                    image_list_details_str = redis.get("{0}:{1}".format(
                        constants.REGISTRY_IMAGES_CACHE_KEY_PREFIX,
                        node_action_details["registry_images"]["registry_id"]))
                    image_dict = json.loads(image_list_details_str)
                    node_action_details["registry_images"]["image_name_with_tag_list"] = [
                        image["image_name_with_tag"] for image in image_dict['image_list']]
                for image_name_with_tag in node_action_details["registry_images"]["image_name_with_tag_list"]:
                    try:
                        es_response = ESConn.search_by_and_clause(constants.CVE_SCAN_LOGS_INDEX,
                                                                  {"node_id": image_name_with_tag}, 0, size=1)
                        latest_cve_scan_doc = {}
                        cve_scan_list = es_response.get("hits", [])
                        if cve_scan_list:
                            cve_scan_doc = cve_scan_list[0]
                            latest_cve_scan_doc = cve_scan_doc.get('_source', {})
                            latest_cve_scan_doc.update({'_id': cve_scan_doc.get('_id', "")})
                        if latest_cve_scan_doc:
                            status = latest_cve_scan_doc.get("action", "")
                            scan_id = latest_cve_scan_doc.get("scan_id", "")
                            # Only scans still queued can be revoked here; anything
                            # not running, lacking a scan id, or already picked up
                            # by a worker is skipped.
                            if (status in constants.CVE_SCAN_NOT_RUNNING_STATUS) or (not scan_id):
                                continue
                            elif status != constants.CVE_SCAN_STATUS_QUEUED:
                                continue
                            celery_task_id = "cve_scan:" + scan_id
                            celery_app.control.revoke(celery_task_id, terminate=False)
                            body = {
                                "masked": "false", "type": constants.CVE_SCAN_LOGS_INDEX, "scan_id": scan_id,
                                "cve_scan_message": "Scan stopped by user", "time_stamp": int(time.time() * 1000.0),
                                "@timestamp": datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ"), "host": "",
                                "action": constants.CVE_SCAN_STATUS_STOPPED, "host_name": "",
                                "node_id": latest_cve_scan_doc.get("node_id", ""),
                                "node_type": constants.NODE_TYPE_CONTAINER_IMAGE
                            }
                            ESConn.create_doc(constants.CVE_SCAN_LOGS_INDEX, body)
                    except Exception as ex:
                        save_scheduled_task_status("Error: " + str(ex))
                        app.logger.error(ex)
            else:
                for node_id in node_action_details["node_id_list"]:
                    try:
                        node = Node(node_id, df_id_to_scope_id_map=df_id_to_scope_id_map,
                                    topology_data_df_format=topology_data_df_format)
                        node.cve_scan_stop()
                    except Exception as ex:
                        save_scheduled_task_status("Error: " + str(ex))
                        app.logger.error(ex)
        elif action == constants.NODE_ACTION_SCHEDULE_SEND_REPORT:
            domain_name = ""
            console_url_setting = Setting.query.filter_by(key="console_url").one_or_none()
            if console_url_setting and console_url_setting.value:
                domain_name = console_url_setting.value.get("value")
            report_id = uuid.uuid4()
            body = {
                "type": constants.REPORT_INDEX,
                "report_id": report_id,
                "status": "started",
                "masked": "false",
                "@timestamp": datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
            }
            ESConn.create_doc(constants.REPORT_INDEX, body, refresh="wait_for")
            if node_action_details.get('include_dead_nodes') is True:
                if node_type == 'host':
                    if len(node_action_details['filters'].get('host_name', [])) == 0:
                        node_action_details['filters']['host_name'] = get_all_scanned_node()
            from config.app import celery_app
            celery_app.send_task(
                'tasks.common_worker.generate_report', args=(),
                kwargs={"report_id": report_id, "filters": node_action_details.get("filters", {}),
                        "lucene_query_string": "",
                        "number": node_action_details.get("duration", {}).get("number", 0),
                        "time_unit": node_action_details.get("duration", {}).get("time_unit", "day"),
                        "domain_name": domain_name, "resources": node_action_details.get("resources", {}),
                        "file_type": node_action_details.get("file_type", "xlsx"), "node_type": node_type,
                        "include_dead_nodes": node_action_details.get("include_dead_nodes", False),
                        "report_email": node_action_details["report_email"]})
            # Scheduled report: the worker emails the result; status stays "running"
            # until the schedule entry is updated elsewhere.
            return set_response(data="Started")
        elif action == constants.NODE_ACTION_DOWNLOAD_REPORT:
            domain_name = ""
            console_url_setting = Setting.query.filter_by(key="console_url").one_or_none()
            if console_url_setting and console_url_setting.value:
                domain_name = console_url_setting.value.get("value")
            report_id = uuid.uuid4()
            body = {
                "type": constants.REPORT_INDEX,
                "report_id": report_id,
                "status": "started",
                "masked": "false",
                "duration": "",
                "@timestamp": datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
            }
            ESConn.create_doc(constants.REPORT_INDEX, body, refresh="wait_for")
            if node_action_details.get('include_dead_nodes') is True:
                if node_type == 'host':
                    if len(node_action_details['filters'].get('host_name', [])) == 0:
                        node_action_details['filters']['host_name'] = get_all_scanned_node()
            from config.app import celery_app
            celery_app.send_task(
                'tasks.common_worker.generate_report', args=(),
                kwargs={"report_id": report_id, "filters": node_action_details.get("filters", {}),
                        "lucene_query_string": "",
                        "number": node_action_details.get("duration", {}).get("number", 0),
                        "time_unit": node_action_details.get("duration", {}).get("time_unit", "d"),
                        "domain_name": domain_name, "resources": node_action_details.get("resources", {}),
                        "file_type": node_action_details.get("file_type", "xlsx"), "node_type": node_type,
                        "include_dead_nodes": node_action_details.get("include_dead_nodes", False),
                        "report_email": ""})
            return set_response(data="Started")
        save_scheduled_task_status("Success")
| true | true |
f71ccea17261d0989d135fd953f856682a8dd848 | 1,284 | py | Python | python-2-apps/fn_bluecoat_recategorization/setup.py | JayDi11a/Geralds-IBM-SOAR-Integrations | 0e0eb18adbaf3a266e1dc5a316df7cd5a93f88d0 | [
"MIT"
] | null | null | null | python-2-apps/fn_bluecoat_recategorization/setup.py | JayDi11a/Geralds-IBM-SOAR-Integrations | 0e0eb18adbaf3a266e1dc5a316df7cd5a93f88d0 | [
"MIT"
] | 1 | 2022-03-06T00:10:13.000Z | 2022-03-06T00:10:13.000Z | python-2-apps/fn_bluecoat_recategorization/setup.py | JayDi11a/Geralds-IBM-SOAR-Integrations | 0e0eb18adbaf3a266e1dc5a316df7cd5a93f88d0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name='fn_bluecoat_recategorization',
version='1.0.0',
license='<<insert here>>',
author='<<your name here>>',
author_email='you@example.com',
url='<<your company url>>',
description="Resilient Circuits Components for 'fn_bluecoat_recategorization'",
long_description="Resilient Circuits Components for 'fn_bluecoat_recategorization'",
install_requires=[
'resilient_circuits>=30.0.0'
],
packages=find_packages(),
include_package_data=True,
platforms='any',
classifiers=[
'Programming Language :: Python',
],
entry_points={
"resilient.circuits.components": [
"BluecoatSiteReviewRecategorizationFunctionComponent = fn_bluecoat_recategorization.components.bluecoat_site_review_recategorization:FunctionComponent"
],
"resilient.circuits.configsection": ["gen_config = fn_bluecoat_recategorization.util.config:config_section_data"],
"resilient.circuits.customize": ["customize = fn_bluecoat_recategorization.util.customize:customization_data"],
"resilient.circuits.selftest": ["selftest = fn_bluecoat_recategorization.util.selftest:selftest_function"]
}
) | 40.125 | 163 | 0.719626 |
from setuptools import setup, find_packages
setup(
name='fn_bluecoat_recategorization',
version='1.0.0',
license='<<insert here>>',
author='<<your name here>>',
author_email='you@example.com',
url='<<your company url>>',
description="Resilient Circuits Components for 'fn_bluecoat_recategorization'",
long_description="Resilient Circuits Components for 'fn_bluecoat_recategorization'",
install_requires=[
'resilient_circuits>=30.0.0'
],
packages=find_packages(),
include_package_data=True,
platforms='any',
classifiers=[
'Programming Language :: Python',
],
entry_points={
"resilient.circuits.components": [
"BluecoatSiteReviewRecategorizationFunctionComponent = fn_bluecoat_recategorization.components.bluecoat_site_review_recategorization:FunctionComponent"
],
"resilient.circuits.configsection": ["gen_config = fn_bluecoat_recategorization.util.config:config_section_data"],
"resilient.circuits.customize": ["customize = fn_bluecoat_recategorization.util.customize:customization_data"],
"resilient.circuits.selftest": ["selftest = fn_bluecoat_recategorization.util.selftest:selftest_function"]
}
) | true | true |
f71ccec79ef122b387b104a8ce307bf488b1bd1c | 15,996 | py | Python | src/auxil/eeMad_run.py | mortcanty/EESARDocker | 855b41a3da19f3b07b42438784309ab48fc7fe98 | [
"MIT"
] | 23 | 2018-11-11T02:43:42.000Z | 2021-08-21T21:53:19.000Z | src/auxil/eeMad_run.py | mortcanty/EESARDocker | 855b41a3da19f3b07b42438784309ab48fc7fe98 | [
"MIT"
] | 8 | 2020-03-24T16:25:42.000Z | 2021-08-23T20:35:38.000Z | src/auxil/eeMad_run.py | mortcanty/EESARDocker | 855b41a3da19f3b07b42438784309ab48fc7fe98 | [
"MIT"
] | 8 | 2019-12-20T13:33:47.000Z | 2021-10-24T02:18:37.000Z | '''
Created on 08.04.2019
@author: mort
ipywidget interface to the GEE for IR-MAD
'''
import ee, time, warnings, math
import ipywidgets as widgets
from IPython.display import display
from ipyleaflet import (Map,DrawControl,TileLayer,
basemaps,basemap_to_tiles,
LayersControl,
MeasureControl,
FullScreenControl)
from auxil.eeMad import imad,radcal
from geopy.geocoders import photon
ee.Initialize()
geolocator = photon.Photon(timeout=10)
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
poly = ee.Geometry.MultiPolygon([])
# poly = ee.Geometry.Polygon([[6.30154, 50.948329], [6.293307, 50.877329],
# [6.427091, 50.875595], [6.417486, 50.947464],
# [6.30154, 50.948329]])
def chi2cdf(chi2, df):
    """Chi-square CDF with *df* degrees of freedom, as the regularized
    lower incomplete gamma function P(df/2, chi2/2)."""
    half_chi2 = ee.Image(chi2.divide(2))
    half_df = ee.Number(df).divide(2)
    return half_chi2.gammainc(half_df)
def makefeature(data):
    """Wrap *data* in a geometry-less ee.Feature (for exporting as CSV to Drive)."""
    props = {'data': data}
    return ee.Feature(None, props)
def handle_draw(self, action, geo_json):
    """DrawControl callback: keep the module-level ROI *poly* in sync with the map."""
    global poly
    drawn = geo_json['geometry']['coordinates']
    if action == 'created':
        # Append the new polygon to the ROI; a fresh Collect is now required
        # before Preview/Export may run again.
        poly = ee.Geometry.MultiPolygon(poly.coordinates().add(drawn))
        w_preview.disabled = True
        w_export.disabled = True
        w_collect.disabled = False
    elif action == 'deleted':
        # Remove the erased polygon; disable Collect once nothing remains.
        erased = ee.Geometry.MultiPolygon(drawn)
        poly = poly.difference(erased)
        if not poly.coordinates().getInfo():
            w_collect.disabled = True
dc = DrawControl(polyline={},circle={})
dc.on_draw(handle_draw)
# def GetTileLayerUrl(ee_image_object):
# map_id = ee.Image(ee_image_object).getMapId()
# tile_url_template = "https://earthengine.googleapis.com/map/{mapid}/{{z}}/{{x}}/{{y}}?token={token}"
# return tile_url_template.format(**map_id)
def GetTileLayerUrl(ee_image_object):
    """Return the XYZ tile URL template for an Earth Engine image."""
    mapid = ee.Image(ee_image_object).getMapId()
    fetcher = mapid["tile_fetcher"]
    return fetcher.url_format
# ---- UI widgets -------------------------------------------------------------
# Output / status area shown at the top of the interface.
w_text = widgets.Textarea(
    layout = widgets.Layout(width='75%'),
    value = 'Algorithm output',
    rows = 4,
    disabled = False
)
# Sensor / band-subset selection.
w_platform = widgets.RadioButtons(
    options=['SENTINEL/S2(VNIR/SWIR)','SENTINEL/S2(NIR/SWIR)','LANDSAT LC08','LANDSAT LE07','LANDSAT LT05'],
    value='SENTINEL/S2(VNIR/SWIR)',
    description='Platform:',
    disabled=False
)
# Date windows for the two acquisition epochs (ISO yyyy-mm-dd).
w_startdate1 = widgets.Text(
    value='2020-05-01',
    placeholder=' ',
    description='Start T1:',
    disabled=False
)
w_enddate1 = widgets.Text(
    value='2020-07-01',
    placeholder=' ',
    description='End T1:',
    disabled=False
)
w_startdate2 = widgets.Text(
    value='2020-08-01',
    placeholder=' ',
    description='Start T2:',
    disabled=False
)
w_enddate2 = widgets.Text(
    value='2020-10-01',
    placeholder=' ',
    description='End T2:',
    disabled=False
)
# iMAD iteration cap and processing scale (metres).
w_iterations = widgets.IntText(
    value=30,
    placeholder=' ',
    description='Max Iter:',
    disabled=False
)
w_scale = widgets.IntText(
    value=30,
    placeholder=' ',
    description='Scale:',
    disabled=False
)
# Destination asset id for the export task.
w_exportname = widgets.Text(
    value='users/<username>/<path>',
    placeholder=' ',
    disabled=False
)
# Free-text place name for the GoTo geocoder.
w_location = widgets.Text(
    value='Jülich',
    placeholder=' ',
    description='',
    disabled=False
)
# Action buttons; Collect/Preview/Export are enabled progressively.
w_goto = widgets.Button(description="GoTo",disabled=False)
w_collect = widgets.Button(description="Collect",disabled=True)
w_preview = widgets.Button(description="Preview",disabled=True)
w_export = widgets.Button(description='Export to assets',disabled=True)
# Layout containers.
w_dates1 = widgets.VBox([w_startdate1,w_enddate1,w_iterations])
w_dates2 = widgets.VBox([w_startdate2,w_enddate2,w_scale])
w_dates = widgets.HBox([w_platform,w_dates1,w_dates2])
w_exp = widgets.HBox([w_export,w_exportname])
w_go = widgets.HBox([w_collect,w_preview,w_exp])
w_txt = widgets.HBox([w_text,w_goto,w_location])
box = widgets.VBox([w_txt,w_dates,w_go])
def on_widget_change(b):
    """Any platform/date edit invalidates the current collection: force a re-Collect."""
    for w in (w_preview, w_export):
        w.disabled = True
w_platform.observe(on_widget_change,names='value')
w_startdate1.observe(on_widget_change,names='value')
w_enddate1.observe(on_widget_change,names='value')
w_startdate2.observe(on_widget_change,names='value')
w_enddate2.observe(on_widget_change,names='value')
def on_goto_button_clicked(b):
    """Geocode the text in w_location and recenter/zoom the map on the result."""
    try:
        hit = geolocator.geocode(w_location.value)
        m.center = (hit.latitude, hit.longitude)
        m.zoom = 11
    except Exception as e:
        print('Error: %s' % e)
w_goto.on_click(on_goto_button_clicked)
def on_collect_button_clicked(b):
    """Fetch the least-cloudy scene of each epoch and display the first one.

    Reads the platform/date widgets, filters the chosen collection to scenes
    covering all four corners of the ROI bounds, keeps the least-cloudy scene
    per interval, co-registers image2 to image1 and shows image1 as a
    2-98 percentile stretched RGB composite.  Results are published through
    module globals for the Preview/Export callbacks.
    """
    # fix: the global list previously named 'w_platfform' (typo).
    global result, m, collection, count, \
           w_startdate1, w_enddate1, w_startdate2, \
           w_platform, w_enddate2, w_changemap, \
           scale, nbands, \
           image1, image2, \
           madnames, coords, timestamp1, timestamp2
    try:
        coords = ee.List(poly.bounds().coordinates().get(0))
        w_text.value = 'collecting, please wait ...'
        # Defaults (Landsat); overridden per platform below.
        cloudcover = 'CLOUD_COVER'
        scale = 30.0
        rgb = ['B4', 'B5', 'B7']
        if w_platform.value == 'SENTINEL/S2(VNIR/SWIR)':
            collectionid = 'COPERNICUS/S2'
            scale = 10.0
            bands = ['B2', 'B3', 'B4', 'B8']
            rgb = ['B8', 'B4', 'B3']
            cloudcover = 'CLOUDY_PIXEL_PERCENTAGE'
        elif w_platform.value == 'SENTINEL/S2(NIR/SWIR)':
            collectionid = 'COPERNICUS/S2'
            scale = 20.0
            bands = ['B5', 'B6', 'B7', 'B8A', 'B11', 'B12']
            rgb = ['B5', 'B7', 'B11']
            cloudcover = 'CLOUDY_PIXEL_PERCENTAGE'
        elif w_platform.value == 'LANDSAT LC08':
            collectionid = 'LANDSAT/LC08/C01/T1_RT_TOA'
            bands = ['B2', 'B3', 'B4', 'B5', 'B6', 'B7']
            rgb = ['B5', 'B6', 'B7']
        elif w_platform.value == 'LANDSAT LE07':
            collectionid = 'LANDSAT/LE07/C01/T1_RT_TOA'
            bands = ['B1', 'B2', 'B3', 'B4', 'B5', 'B7']
        else:
            collectionid = 'LANDSAT/LT05/C01/T1_TOA'
            bands = ['B1', 'B2', 'B3', 'B4', 'B5', 'B7']

        def filtered_collection(start, end):
            # Scenes must cover every corner of the ROI bounds and lie within
            # [start, end); sorted ascending by cloud cover so .first() is best.
            return (ee.ImageCollection(collectionid)
                    .filterBounds(ee.Geometry.Point(coords.get(0)))
                    .filterBounds(ee.Geometry.Point(coords.get(1)))
                    .filterBounds(ee.Geometry.Point(coords.get(2)))
                    .filterBounds(ee.Geometry.Point(coords.get(3)))
                    .filterDate(ee.Date(start), ee.Date(end))
                    .sort(cloudcover, True))

        collection1 = filtered_collection(w_startdate1.value, w_enddate1.value)
        count = collection1.size().getInfo()
        if count == 0:
            raise ValueError('No images found for first time interval: ' + collectionid)
        collection2 = filtered_collection(w_startdate2.value, w_enddate2.value)
        count = collection2.size().getInfo()
        if count == 0:
            raise ValueError('No images found for second time interval: ' + collectionid)
        image1 = ee.Image(collection1.first()).select(bands)
        timestamp1 = ee.Date(image1.get('system:time_start')).getInfo()
        timestamp1 = time.gmtime(int(timestamp1['value']) / 1000)
        timestamp1 = time.strftime('%c', timestamp1)
        systemid1 = image1.get('system:id').getInfo()
        cloudcover1 = image1.get(cloudcover).getInfo()
        image2 = ee.Image(collection2.first()).select(bands)
        timestamp2 = ee.Date(image2.get('system:time_start')).getInfo()
        timestamp2 = time.gmtime(int(timestamp2['value']) / 1000)
        timestamp2 = time.strftime('%c', timestamp2)
        systemid2 = image2.get('system:id').getInfo()
        cloudcover2 = image2.get(cloudcover).getInfo()
        txt = 'Image1: %s \n' % systemid1
        txt += 'Acquisition date: %s, Cloud cover: %f \n' % (timestamp1, cloudcover1)
        txt += 'Image2: %s \n' % systemid2
        txt += 'Acquisition date: %s, Cloud cover: %f \n' % (timestamp2, cloudcover2)
        w_text.value = txt
        nbands = image1.bandNames().length()
        madnames = ['MAD' + str(i + 1) for i in range(nbands.getInfo())]
        # Co-register image2 to image1 (max offset 60) so MAD differences
        # reflect change rather than misregistration.
        image2 = image2.register(image1, 60)
        w_preview.disabled = False
        w_export.disabled = False
        # Display image1 as a percentile-stretched RGB composite; the first
        # three map layers are the basemaps, so replace any previous overlay.
        if len(m.layers) > 3:
            m.remove_layer(m.layers[3])
        img = image1.clip(poly).select(rgb).rename('r', 'g', 'b')
        ps = img.reduceRegion(ee.Reducer.percentile([2, 98]), maxPixels=1e10).getInfo()
        mn = [ps['r_p2'], ps['g_p2'], ps['b_p2']]
        mx = [ps['r_p98'], ps['g_p98'], ps['b_p98']]
        m.add_layer(TileLayer(url=GetTileLayerUrl(img.visualize(min=mn, max=mx))))
    except Exception as e:
        w_text.value = 'Error: %s' % e
w_collect.on_click(on_collect_button_clicked)
def on_preview_button_clicked(b):
    """Run the iMAD iteration on the collected image pair and preview MAD2.

    Iterates the imad algorithm up to w_iterations times over the stacked,
    ROI-clipped image pair, zeroes MAD pixels whose no-change p-value exceeds
    0.0001 and displays the second MAD variate with a 1-99 percentile
    stretch.  Updates the module-global nbands from the MAD result.
    """
    global nbands
    try:
        w_text.value = 'iteration started, please wait ...\n'
        # iMAD: server-side iteration driven by a client-side sequence.
        inputlist = ee.List.sequence(1,w_iterations.value)
        first = ee.Dictionary({'done':ee.Number(0),
                               'scale':ee.Number(w_scale.value),
                               'niter':ee.Number(0),
                               'image':image1.addBands(image2).clip(poly),
                               'allrhos': [ee.List.sequence(1,nbands)],
                               'chi2':ee.Image.constant(0),
                               'MAD':ee.Image.constant(0)})
        result = ee.Dictionary(inputlist.iterate(imad,first))
        MAD = ee.Image(result.get('MAD')).rename(madnames)
        niter = ee.Number(result.get('niter')).getInfo()
        # threshold: zero out MAD where no-change p-value > 0.0001
        nbands = MAD.bandNames().length()
        chi2 = ee.Image(result.get('chi2')).rename(['chi2'])
        pval = chi2cdf(chi2,nbands).subtract(1).multiply(-1)
        tst = pval.gt(ee.Image.constant(0.0001))
        MAD = MAD.where(tst,ee.Image.constant(0))
        allrhos = ee.Array(result.get('allrhos')).toList()
        txt = 'Canonical correlations: %s \nIterations: %i\n'%(str(allrhos.get(-1).getInfo()),niter)
        w_text.value += txt
        # Replace any previous overlay (first three layers are basemaps).
        if len(m.layers)>3:
            m.remove_layer(m.layers[3])
        MAD2 = MAD.select(1).rename('b')
        ps = MAD2.reduceRegion(ee.Reducer.percentile([1,99])).getInfo()
        mn = ps['b_p1']
        mx = ps['b_p99']
        m.add_layer(TileLayer(url=GetTileLayerUrl( MAD2.visualize(min=mn,max=mx))))
    except Exception as e:
        w_text.value = 'Error: %s\n Retry collect/preview or export to assets'%e
w_preview.on_click(on_preview_button_clicked)
def on_export_button_clicked(b):
    """Run iMAD + radiometric normalization over the ROI and export results.

    Exports the MAD variates, chi-square image, invariant-pixel mask, the two
    clipped input images and the normalized image2 to the Earth Engine asset
    named in w_exportname, and exports run metadata (canonical correlations,
    invariant-pixel count, regression coefficients) as a CSV to Drive.
    """
    global w_exportname, nbands
    try:
        # iMAD iteration (same driver as Preview).
        inputlist = ee.List.sequence(1, w_iterations.value)
        first = ee.Dictionary({'done': ee.Number(0),
                               'scale': ee.Number(w_scale.value),
                               'niter': ee.Number(0),
                               'image': image1.addBands(image2).clip(poly),
                               'allrhos': [ee.List.sequence(1, nbands)],
                               'chi2': ee.Image.constant(0),
                               'MAD': ee.Image.constant(0)})
        result = ee.Dictionary(inputlist.iterate(imad, first))
        MAD = ee.Image(result.get('MAD')).rename(madnames)
        # Zero out MAD where the no-change p-value exceeds 0.0001.
        chi2 = ee.Image(result.get('chi2')).rename(['chi2'])
        pval = chi2cdf(chi2, nbands).subtract(1).multiply(-1)
        tst = pval.gt(ee.Image.constant(0.0001))
        MAD = MAD.where(tst, ee.Image.constant(0))
        allrhos = ee.Array(result.get('allrhos')).toList().slice(1, -1)
        # Radiometric normalization on the invariant (no-change) pixels.
        ncmask = chi2cdf(chi2, nbands).lt(ee.Image.constant(0.05)).rename(['invarpix'])
        inputlist1 = ee.List.sequence(0, nbands.subtract(1))
        first = ee.Dictionary({'image': image1.addBands(image2),
                               'ncmask': ncmask,
                               'nbands': nbands,
                               'scale': ee.Number(w_scale.value),
                               'rect': poly,
                               'coeffs': ee.List([]),
                               'normalized': ee.Image()})
        result1 = ee.Dictionary(inputlist1.iterate(radcal, first))
        coeffs = ee.List(result1.get('coeffs'))
        sel = ee.List.sequence(1, nbands)
        normalized = ee.Image(result1.get('normalized')).select(sel)
        MADs = ee.Image.cat(MAD, chi2, ncmask, image1.clip(poly), image2.clip(poly), normalized)
        assexport = ee.batch.Export.image.toAsset(MADs,
                                                  description='assetExportTask',
                                                  assetId=w_exportname.value, scale=scale, maxPixels=1e9)
        assexport.start()
        assexportid = str(assexport.id)
        w_text.value = 'Exporting change map, chisqr, original images and normalized image to %s\n task id: %s'%(w_exportname.value,assexportid)
        # Export metadata to Drive.  fix: this block used to sit after the
        # try/except yet referenced ncmask/allrhos/coeffs defined inside the
        # try, raising an uncaught NameError whenever the export above failed;
        # it is now only reached on success.
        ninvar = ee.String(ncmask.reduceRegion(ee.Reducer.sum().unweighted(),
                                               scale=scale, maxPixels=1e9).toArray().project([0]))
        metadata = (ee.List(['IR-MAD: ' + time.asctime(),
                             'Platform: ' + w_platform.value,
                             'Asset export name: ' + w_exportname.value,
                             'Timestamps: %s %s' % (timestamp1, timestamp2)])
                    .cat(['Canonical Correlations:'])
                    .cat(allrhos)
                    .cat(['Radiometric Normalization, Invariant Pixels:'])
                    .cat([ninvar])
                    .cat(['Slope, Intercept, R:'])
                    .cat(coeffs))
        fileNamePrefix = w_exportname.value.replace('/', '-')
        gdexport = ee.batch.Export.table.toDrive(ee.FeatureCollection(metadata.map(makefeature)).merge(ee.Feature(poly)),
                                                 description='driveExportTask_meta',
                                                 folder='gee',
                                                 fileNamePrefix=fileNamePrefix)
        gdexport.start()
        w_text.value += '\n Exporting metadata to Drive/EarthEngineImages/%s\n task id: %s'%(fileNamePrefix,str(gdexport.id))
    except Exception as e:
        w_text.value = 'Error: %s'%e
w_export.on_click(on_export_button_clicked)
def run():
    """Build the ipyleaflet map with draw/measure controls and return the widget box."""
    global m, center
    center = [51.0, 6.4]
    # Base layers; imagery is listed first and is the initially visible layer.
    base_osm = basemap_to_tiles(basemaps.OpenStreetMap.Mapnik)
    base_streets = basemap_to_tiles(basemaps.Esri.WorldStreetMap)
    base_imagery = basemap_to_tiles(basemaps.Esri.WorldImagery)
    # Draw control: rectangles and polygons only, styled translucent blue.
    blue = {"fillColor": "#0000ff", "color": "#0000ff", "fillOpacity": 0.05}
    draw_ctl = DrawControl(polyline={}, circlemarker={})
    draw_ctl.rectangle = {"shapeOptions": dict(blue)}
    draw_ctl.polygon = {"shapeOptions": dict(blue)}
    draw_ctl.on_draw(handle_draw)
    layers_ctl = LayersControl(position='topright')
    fullscreen_ctl = FullScreenControl(position='topleft')
    measure_ctl = MeasureControl(position='topright', primary_length_unit='kilometers')
    m = Map(center=center, zoom=11, layout={'height': '500px'},
            layers=(base_imagery, base_streets, base_osm),
            controls=(measure_ctl, draw_ctl, layers_ctl, fullscreen_ctl))
    display(m)
    return box
| 42.429708 | 157 | 0.582146 | import ee, time, warnings, math
import ipywidgets as widgets
from IPython.display import display
from ipyleaflet import (Map,DrawControl,TileLayer,
basemaps,basemap_to_tiles,
LayersControl,
MeasureControl,
FullScreenControl)
from auxil.eeMad import imad,radcal
from geopy.geocoders import photon
ee.Initialize()
geolocator = photon.Photon(timeout=10)
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
poly = ee.Geometry.MultiPolygon([])
def chi2cdf(chi2,df):
return ee.Image(chi2.divide(2)).gammainc(ee.Number(df).divide(2))
def makefeature(data):
return ee.Feature(None, {'data': data})
def handle_draw(self, action, geo_json):
global poly
coords = geo_json['geometry']['coordinates']
if action == 'created':
poly = ee.Geometry.MultiPolygon(poly.coordinates().add(coords))
w_preview.disabled = True
w_export.disabled = True
w_collect.disabled = False
elif action == 'deleted':
poly1 = ee.Geometry.MultiPolygon(coords)
poly = poly.difference(poly1)
if len(poly.coordinates().getInfo()) == 0:
w_collect.disabled = True
dc = DrawControl(polyline={},circle={})
dc.on_draw(handle_draw)
def GetTileLayerUrl(ee_image_object):
map_id = ee.Image(ee_image_object).getMapId()
return map_id["tile_fetcher"].url_format
w_text = widgets.Textarea(
layout = widgets.Layout(width='75%'),
value = 'Algorithm output',
rows = 4,
disabled = False
)
w_platform = widgets.RadioButtons(
options=['SENTINEL/S2(VNIR/SWIR)','SENTINEL/S2(NIR/SWIR)','LANDSAT LC08','LANDSAT LE07','LANDSAT LT05'],
value='SENTINEL/S2(VNIR/SWIR)',
description='Platform:',
disabled=False
)
w_startdate1 = widgets.Text(
value='2020-05-01',
placeholder=' ',
description='Start T1:',
disabled=False
)
w_enddate1 = widgets.Text(
value='2020-07-01',
placeholder=' ',
description='End T1:',
disabled=False
)
w_startdate2 = widgets.Text(
value='2020-08-01',
placeholder=' ',
description='Start T2:',
disabled=False
)
w_enddate2 = widgets.Text(
value='2020-10-01',
placeholder=' ',
description='End T2:',
disabled=False
)
w_iterations = widgets.IntText(
value=30,
placeholder=' ',
description='Max Iter:',
disabled=False
)
w_scale = widgets.IntText(
value=30,
placeholder=' ',
description='Scale:',
disabled=False
)
w_exportname = widgets.Text(
value='users/<username>/<path>',
placeholder=' ',
disabled=False
)
w_location = widgets.Text(
value='Jülich',
placeholder=' ',
description='',
disabled=False
)
w_goto = widgets.Button(description="GoTo",disabled=False)
w_collect = widgets.Button(description="Collect",disabled=True)
w_preview = widgets.Button(description="Preview",disabled=True)
w_export = widgets.Button(description='Export to assets',disabled=True)
w_dates1 = widgets.VBox([w_startdate1,w_enddate1,w_iterations])
w_dates2 = widgets.VBox([w_startdate2,w_enddate2,w_scale])
w_dates = widgets.HBox([w_platform,w_dates1,w_dates2])
w_exp = widgets.HBox([w_export,w_exportname])
w_go = widgets.HBox([w_collect,w_preview,w_exp])
w_txt = widgets.HBox([w_text,w_goto,w_location])
box = widgets.VBox([w_txt,w_dates,w_go])
def on_widget_change(b):
w_preview.disabled = True
w_export.disabled = True
w_platform.observe(on_widget_change,names='value')
w_startdate1.observe(on_widget_change,names='value')
w_enddate1.observe(on_widget_change,names='value')
w_startdate2.observe(on_widget_change,names='value')
w_enddate2.observe(on_widget_change,names='value')
def on_goto_button_clicked(b):
try:
location = geolocator.geocode(w_location.value)
m.center = (location.latitude,location.longitude)
m.zoom = 11
except Exception as e:
print('Error: %s'%e)
w_goto.on_click(on_goto_button_clicked)
def on_collect_button_clicked(b):
    # Fetch the least-cloudy scene for each of the two date intervals over the
    # drawn polygon, report their metadata, co-register them, and show an RGB
    # preview of the first image on the map.
    # NOTE(review): 'w_platfform' in the global list below looks like a typo
    # for 'w_platform'; it is harmless since the name is never assigned here.
    global result,m,collection,count, \
           w_startdate1,w_enddate1,w_startdate2, \
           w_platfform,w_enddate2,w_changemap, \
           scale,nbands, \
           image1,image2, \
           madnames,coords,timestamp1,timestamp2
    try:
        # Corner coordinates of the bounding box of the drawn polygon.
        coords = ee.List(poly.bounds().coordinates().get(0))
        w_text.value = 'collecting, please wait ...'
        # Landsat defaults; overridden below for the Sentinel-2 platforms.
        cloudcover = 'CLOUD_COVER'
        scale = 30.0
        rgb = ['B4','B5','B7']
        if w_platform.value=='SENTINEL/S2(VNIR/SWIR)':
            collectionid = 'COPERNICUS/S2'
            scale = 10.0
            bands = ['B2','B3','B4','B8']
            rgb = ['B8','B4','B3']
            cloudcover = 'CLOUDY_PIXEL_PERCENTAGE'
        elif w_platform.value=='SENTINEL/S2(NIR/SWIR)':
            collectionid = 'COPERNICUS/S2'
            scale = 20.0
            bands = ['B5','B6','B7','B8A','B11','B12']
            rgb = ['B5','B7','B11']
            cloudcover = 'CLOUDY_PIXEL_PERCENTAGE'
        elif w_platform.value=='LANDSAT LC08':
            collectionid = 'LANDSAT/LC08/C01/T1_RT_TOA'
            bands = ['B2','B3','B4','B5','B6','B7']
            rgb = ['B5','B6','B7']
        elif w_platform.value=='LANDSAT LE07':
            collectionid = 'LANDSAT/LE07/C01/T1_RT_TOA'
            bands = ['B1','B2','B3','B4','B5','B7']
        else:
            collectionid = 'LANDSAT/LT05/C01/T1_TOA'
            bands = ['B1','B2','B3','B4','B5','B7']
        # First interval: scenes covering all four bbox corners, sorted so the
        # least cloudy scene comes first.
        collection1 = ee.ImageCollection(collectionid) \
                  .filterBounds(ee.Geometry.Point(coords.get(0))) \
                  .filterBounds(ee.Geometry.Point(coords.get(1))) \
                  .filterBounds(ee.Geometry.Point(coords.get(2))) \
                  .filterBounds(ee.Geometry.Point(coords.get(3))) \
                  .filterDate(ee.Date(w_startdate1.value), ee.Date(w_enddate1.value)) \
                  .sort(cloudcover, True)
        count = collection1.size().getInfo()
        if count==0:
            raise ValueError('No images found for first time interval: '+collectionid)
        # Second interval, same spatial constraints.
        collection2 = ee.ImageCollection(collectionid) \
                  .filterBounds(ee.Geometry.Point(coords.get(0))) \
                  .filterBounds(ee.Geometry.Point(coords.get(1))) \
                  .filterBounds(ee.Geometry.Point(coords.get(2))) \
                  .filterBounds(ee.Geometry.Point(coords.get(3))) \
                  .filterDate(ee.Date(w_startdate2.value), ee.Date(w_enddate2.value)) \
                  .sort(cloudcover, True)
        count = collection2.size().getInfo()
        if count==0:
            raise ValueError('No images found for second time interval: '+collectionid)
        # Least-cloudy scene and its acquisition metadata for each interval.
        image1 = ee.Image(collection1.first()).select(bands)
        timestamp1 = ee.Date(image1.get('system:time_start')).getInfo()
        timestamp1 = time.gmtime(int(timestamp1['value'])/1000)
        timestamp1 = time.strftime('%c', timestamp1)
        systemid1 = image1.get('system:id').getInfo()
        cloudcover1 = image1.get(cloudcover).getInfo()
        image2 = ee.Image(collection2.first()).select(bands)
        timestamp2 = ee.Date(image2.get('system:time_start')).getInfo()
        timestamp2 = time.gmtime(int(timestamp2['value'])/1000)
        timestamp2 = time.strftime('%c', timestamp2)
        systemid2 = image2.get('system:id').getInfo()
        cloudcover2 = image2.get(cloudcover).getInfo()
        txt = 'Image1: %s \n'%systemid1
        txt += 'Acquisition date: %s, Cloud cover: %f \n'%(timestamp1,cloudcover1)
        txt += 'Image2: %s \n'%systemid2
        txt += 'Acquisition date: %s, Cloud cover: %f \n'%(timestamp2,cloudcover2)
        w_text.value = txt
        nbands = image1.bandNames().length()
        madnames = ['MAD'+str(i+1) for i in range(nbands.getInfo())]
        # Co-register image2 to image1 (60 — presumably the max offset in
        # metres; confirm against ee.Image.register docs).
        image2 = image2.register(image1,60)
        w_preview.disabled = False
        w_export.disabled = False
        # Replace any previous overlay with a 2-98 percentile stretched RGB
        # rendering of image1.
        if len(m.layers)>3:
            m.remove_layer(m.layers[3])
        img = image1.clip(poly).select(rgb).rename('r','g','b')
        ps = img.reduceRegion(ee.Reducer.percentile([2,98]),maxPixels=1e10).getInfo()
        mn = [ps['r_p2'],ps['g_p2'],ps['b_p2']]
        mx = [ps['r_p98'],ps['g_p98'],ps['b_p98']]
        m.add_layer(TileLayer(url=GetTileLayerUrl(img.visualize(min=mn,max=mx))))
    except Exception as e:
        w_text.value = 'Error: %s'%e
w_collect.on_click(on_collect_button_clicked)
def on_preview_button_clicked(b):
    # Run the iterative MAD (iMAD) algorithm on the collected image pair and
    # display the second MAD variate, stretched to its 1-99 percentile range.
    global nbands
    try:
        w_text.value = 'iteration started, please wait ...\n'
        inputlist = ee.List.sequence(1,w_iterations.value)
        # Initial state for the ee.List.iterate() loop over iMAD iterations.
        first = ee.Dictionary({'done':ee.Number(0),
                               'scale':ee.Number(w_scale.value),
                               'niter':ee.Number(0),
                               'image':image1.addBands(image2).clip(poly),
                               'allrhos': [ee.List.sequence(1,nbands)],
                               'chi2':ee.Image.constant(0),
                               'MAD':ee.Image.constant(0)})
        result = ee.Dictionary(inputlist.iterate(imad,first))
        MAD = ee.Image(result.get('MAD')).rename(madnames)
        niter = ee.Number(result.get('niter')).getInfo()
        nbands = MAD.bandNames().length()
        chi2 = ee.Image(result.get('chi2')).rename(['chi2'])
        # p-value of the no-change hypothesis; zero out MAD where p > 0.0001.
        pval = chi2cdf(chi2,nbands).subtract(1).multiply(-1)
        tst = pval.gt(ee.Image.constant(0.0001))
        MAD = MAD.where(tst,ee.Image.constant(0))
        allrhos = ee.Array(result.get('allrhos')).toList()
        txt = 'Canonical correlations: %s \nIterations: %i\n'%(str(allrhos.get(-1).getInfo()),niter)
        w_text.value += txt
        # Replace any previous overlay with the stretched MAD variate.
        if len(m.layers)>3:
            m.remove_layer(m.layers[3])
        MAD2 = MAD.select(1).rename('b')
        ps = MAD2.reduceRegion(ee.Reducer.percentile([1,99])).getInfo()
        mn = ps['b_p1']
        mx = ps['b_p99']
        m.add_layer(TileLayer(url=GetTileLayerUrl( MAD2.visualize(min=mn,max=mx))))
    except Exception as e:
        w_text.value = 'Error: %s\n Retry collect/preview or export to assets'%e
w_preview.on_click(on_preview_button_clicked)
def on_export_button_clicked(b):
    """Re-run iMAD at full resolution, export results to a GEE asset and the
    run metadata to Drive.

    Bug fix: the metadata/Drive-export section used to sit *after* the
    try/except block, so when the asset export failed it still executed and
    raised an uncaught NameError on names bound only inside the try
    (ncmask, allrhos, coeffs).  It now lives inside the try, so a failure
    only reports the error message in w_text.
    """
    global w_exportname, nbands
    try:
        # Iterate the iMAD algorithm over the clipped image pair.
        inputlist = ee.List.sequence(1,w_iterations.value)
        first = ee.Dictionary({'done':ee.Number(0),
                               'scale':ee.Number(w_scale.value),
                               'niter':ee.Number(0),
                               'image':image1.addBands(image2).clip(poly),
                               'allrhos': [ee.List.sequence(1,nbands)],
                               'chi2':ee.Image.constant(0),
                               'MAD':ee.Image.constant(0)})
        result = ee.Dictionary(inputlist.iterate(imad,first))
        MAD = ee.Image(result.get('MAD')).rename(madnames)
        chi2 = ee.Image(result.get('chi2')).rename(['chi2'])
        # Zero out MAD where the no-change p-value exceeds 0.0001.
        pval = chi2cdf(chi2,nbands).subtract(1).multiply(-1)
        tst = pval.gt(ee.Image.constant(0.0001))
        MAD = MAD.where(tst,ee.Image.constant(0))
        allrhos = ee.Array(result.get('allrhos')).toList().slice(1,-1)
        # Invariant-pixel mask used for radiometric normalization.
        ncmask = chi2cdf(chi2,nbands).lt(ee.Image.constant(0.05)).rename(['invarpix'])
        inputlist1 = ee.List.sequence(0,nbands.subtract(1))
        first = ee.Dictionary({'image':image1.addBands(image2),
                               'ncmask':ncmask,
                               'nbands':nbands,
                               'scale':ee.Number(w_scale.value),
                               'rect':poly,
                               'coeffs': ee.List([]),
                               'normalized':ee.Image()})
        result1 = ee.Dictionary(inputlist1.iterate(radcal,first))
        coeffs = ee.List(result1.get('coeffs'))
        sel = ee.List.sequence(1,nbands)
        normalized = ee.Image(result1.get('normalized')).select(sel)
        # Stack everything into one multiband image and export it as an asset.
        MADs = ee.Image.cat(MAD,chi2,ncmask,image1.clip(poly),image2.clip(poly),normalized)
        assexport = ee.batch.Export.image.toAsset(MADs,
                                      description='assetExportTask',
                                      assetId=w_exportname.value,scale=scale,maxPixels=1e9)
        assexport.start()
        assexportid = str(assexport.id)
        w_text.value= 'Exporting change map, chisqr, original images and normalized image to %s\n task id: %s'%(w_exportname.value,assexportid)
        # Export the run metadata (correlations, normalization fit) to Drive.
        ninvar = ee.String(ncmask.reduceRegion(ee.Reducer.sum().unweighted(),
                                               scale=scale,maxPixels= 1e9).toArray().project([0]))
        metadata = ee.List(['IR-MAD: '+time.asctime(),
                            'Platform: '+w_platform.value,
                            'Asset export name: '+w_exportname.value,
                            'Timestamps: %s %s'%(timestamp1,timestamp2)]) \
                            .cat(['Canonical Correlations:']) \
                            .cat(allrhos) \
                            .cat(['Radiometric Normalization, Invariant Pixels:']) \
                            .cat([ninvar]) \
                            .cat(['Slope, Intercept, R:']) \
                            .cat(coeffs)
        fileNamePrefix=w_exportname.value.replace('/','-')
        gdexport = ee.batch.Export.table.toDrive(ee.FeatureCollection(metadata.map(makefeature)).merge(ee.Feature(poly)),
                            description='driveExportTask_meta',
                            folder = 'gee',
                            fileNamePrefix=fileNamePrefix )
        gdexport.start()
        w_text.value += '\n Exporting metadata to Drive/EarthEngineImages/%s\n task id: %s'%(fileNamePrefix,str(gdexport.id))
    except Exception as e:
        w_text.value = 'Error: %s'%e
w_export.on_click(on_export_button_clicked)
def run():
    """Create the interactive map, wire up the draw handler, and return the
    control-panel widget box for display."""
    global m, center
    center = [51.0, 6.4]
    # Base layers: imagery is listed first so it is the bottom/default layer.
    base_osm = basemap_to_tiles(basemaps.OpenStreetMap.Mapnik)
    base_streets = basemap_to_tiles(basemaps.Esri.WorldStreetMap)
    base_imagery = basemap_to_tiles(basemaps.Esri.WorldImagery)
    # Drawing control restricted to rectangles and polygons.
    draw_ctrl = DrawControl(polyline={}, circlemarker={})
    draw_ctrl.rectangle = {"shapeOptions": {"fillColor": "#0000ff", "color": "#0000ff", "fillOpacity": 0.05}}
    draw_ctrl.polygon = {"shapeOptions": {"fillColor": "#0000ff", "color": "#0000ff", "fillOpacity": 0.05}}
    draw_ctrl.on_draw(handle_draw)
    layers_ctrl = LayersControl(position='topright')
    fullscreen_ctrl = FullScreenControl(position='topleft')
    measure_ctrl = MeasureControl(position='topright', primary_length_unit='kilometers')
    m = Map(center=center, zoom=11, layout={'height': '500px'},
            layers=(base_imagery, base_streets, base_osm),
            controls=(measure_ctrl, draw_ctrl, layers_ctrl, fullscreen_ctrl))
    display(m)
    return box
| true | true |
f71ccf566a61303c7989a45da65942c1b7aef635 | 10,678 | py | Python | examples/scales/step1.py | KNPSystem/server | 85aa991cf86b10330054bd8ea4a12543851cb9fc | [
"MIT"
] | null | null | null | examples/scales/step1.py | KNPSystem/server | 85aa991cf86b10330054bd8ea4a12543851cb9fc | [
"MIT"
] | null | null | null | examples/scales/step1.py | KNPSystem/server | 85aa991cf86b10330054bd8ea4a12543851cb9fc | [
"MIT"
] | null | null | null | """
Get the data from covidtracking.com.
Store it in a knpsValue.
Assign to a knpsVariable.
Every time we run this, the knpsVariable is not changed.
But the knpsValue it points to should be updated.
i.e. a new knpsValue is created.
"""
import requests
import json
from collections import defaultdict
from datetime import datetime, timedelta
from urllib.request import urlopen
from lib import get_user_id, create_data_object, update_data_object
USER_NAME = "Mike Anderson"
USER_EMAIL = "mrander@umich.edu"
sample_data_file = "data/Unemployment_data_2019.csv"
sample_data_file2 = "data/all_court_records.csv"
sample_data_file3 = "data/judicial_districts.csv"
sample_data_file4 = "data/fips_counties.csv"
if __name__ == "__main__":
    # Resolve (or create) the ids of the users that will own the demo objects.
    user_id = get_user_id(USER_EMAIL, USER_NAME)
    user_id2 = get_user_id("andrewpaley2022@u.northwestern.edu", "Andrew Paley")
    user_id3 = get_user_id("alicezou@umich.edu", "Jiayun Zou")
    user_id4 = get_user_id("michjc@csail.mit.edu", "Michael Cafarella")
    user_id5 = get_user_id("ctm310@yahoo.com", "Carol McLaughlin")
    # Fetch the US county GeoJSON once and register it as a JSON data object.
    with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response:
        counties = json.load(response)
    json_obj_data = create_data_object(
        name = 'GeoJSON US County FIPS data',
        ownerid = user_id,
        description = 'Geo FIPS data for US Counties',
        data = counties,
        comment = 'Downloaded from Plotly',
        datatype = '/datatypes/json',
        mimetype = 'application/json'
    )
    # Register the four bundled CSV files as data objects.
    csv_obj_data = create_data_object(
        name = '2019 U.S. Unemployment and Income Data',
        ownerid = user_id,
        description = 'Unemployment and income data by county',
        datafile = sample_data_file,
        comment = 'Downloaded from USDA',
        datatype = '/datatypes/csv',
        mimetype = 'text/csv'
    )
    csv_obj_data = create_data_object(
        name = '2016 Court Cases - All Districts',
        ownerid = user_id2,
        description = 'Court cases by district',
        datafile = sample_data_file2,
        comment = 'Downloaded from Scales',
        datatype = '/datatypes/csv',
        mimetype = 'text/csv'
    )
    csv_obj_data = create_data_object(
        name = 'U.S. Judicial Districts by County',
        ownerid = user_id2,
        description = 'US counts annotated by Judicial District',
        datafile = sample_data_file3,
        comment = 'From the web',
        datatype = '/datatypes/csv',
        mimetype = 'text/csv'
    )
    csv_obj_data = create_data_object(
        name = 'FIPS Codes for US Counties',
        ownerid = user_id4,
        description = 'FIPS Codes for US Counties',
        datafile = sample_data_file4,
        comment = 'Downloaded from bls.gov',
        datatype = '/datatypes/csv',
        mimetype = 'text/csv'
    )
map_func = """def cloropleth_county_map(dobj_id, columns=[]):
from urllib.request import urlopen
import json
import plotly.graph_objects as go
import pandas as pd
from io import BytesIO, StringIO
GEO_DATA_ID = 25
counties = get_dobj_contents(GEO_DATA_ID)
input_data = get_dobj_contents(dobj_id)
df = pd.read_csv(StringIO(input_data.decode('utf-8')), dtype={columns[0]: str})
fig = go.Figure(go.Choroplethmapbox(geojson=counties, locations=df[columns[0]], z=df[columns[1]],
colorscale="Viridis", zmin=min(df[columns[1]]), zmax=max(df[columns[1]]),
marker_opacity=0.5, marker_line_width=0))
fig.update_layout(mapbox_style="carto-positron",
mapbox_zoom=5.6, mapbox_center = {"lat": 43.15, "lon": -76.14})
fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0})
#{"lat": 37.0902, "lon": -95.7129}
output_buffer = BytesIO()
fig.write_image(output_buffer, format='png')
output = output_buffer.getvalue()
return {'contents': output, 'datatype': '/datatypes/img', 'mimetype': 'image/png', 'predecessors': [GEO_DATA_ID]}"""
code_obj_data = create_data_object(
name = 'US County Chloropleth Map Function',
ownerid = user_id,
description = 'Function to create Chloropleth Maps from US County Data',
code = map_func,
comment = 'Inputs: (dobj_id, [fips_col_name, data_col_name])'
)
fips_func = """def add_fips_codes_counties(dobj_id, params=[]):
# params = (county column, state column)
from io import StringIO
import csv
FIPS_DATA = 29
fips_csv = StringIO(get_dobj_contents(FIPS_DATA).decode())
fips = {}
fips_header = {}
reader = csv.reader(fips_csv, delimiter=',', quotechar='"')
for row in reader:
if len(fips_header) == 0:
fips_header = {x: i for i, x in enumerate(row)}
else:
fips[row[fips_header['area_name']]] = row[fips_header['fips_txt']]
input_data = get_dobj_contents(dobj_id)
csv_file = StringIO(input_data.decode())
reader = csv.reader(csv_file, delimiter=',', quotechar='"')
output = []
header = {}
out_str = StringIO()
writer = csv.writer(out_str, delimiter=',', quotechar='"')
for row in reader:
if len(header) == 0:
writer.writerow(row + ['fips_code'])
header = {x: i for i, x in enumerate(row)}
else:
county = row[header[params[0]]]
state = row[header[params[1]]]
if state.lower() in ABBREV_US_STATE:
state = ABBREV_US_STATE[state.lower()]
fips_code = fips["{}, {}".format(county, state)]
writer.writerow(row + [fips_code])
return {'contents': out_str.getvalue().encode(), 'datatype': '/datatypes/csv', 'mimetype': 'text/csv', 'predecessors': [FIPS_DATA]}"""
code_obj_data = create_data_object(
name = 'Add FIPS',
ownerid = user_id3,
description = 'Adds additional FIPS column to CSV containing US county column',
code = fips_func,
comment = 'Inputs: (dobj_id, [county_col_name, state_col_name])'
)
filter_func = """def filter_csv_by_text(dobj_id, params=[]):
# params = (column to filter, string to match)
from io import StringIO
import csv
input_data = get_dobj_contents(dobj_id)
csv_file = StringIO(input_data.decode())
reader = csv.reader(csv_file, delimiter=',', quotechar='"')
output = []
header = {}
out_str = StringIO()
writer = csv.writer(out_str, delimiter=',', quotechar='"')
for row in reader:
if len(header) == 0:
writer.writerow(row)
header = {x: i for i, x in enumerate(row)}
else:
if params[1] in row[header[params[0]]]:
writer.writerow(row)
return {'contents': out_str.getvalue().encode(), 'datatype': '/datatypes/csv', 'mimetype': 'text/csv', 'predecessors': []}"""
code_obj_data = create_data_object(
name = 'Filter CSV by text value',
ownerid = user_id4,
description = 'Function to filter CSV by text value in one column',
code = filter_func,
comment = 'Inputs: (dobj_id, [col_name, filter_text])'
)
filter_func = """def aggregate_csv_mean(dobj_id, params=[]):
# params = (group by column, aggegrate column)
from io import StringIO
import csv
input_data = get_dobj_contents(dobj_id)
csv_file = StringIO(input_data.decode())
reader = csv.reader(csv_file, delimiter=',', quotechar='"')
header = {}
vals = {}
for row in reader:
if len(header) == 0:
header = {x: i for i, x in enumerate(row)}
else:
if row[header[params[0]]] not in vals:
vals[row[header[params[0]]]] = []
try:
vals[row[header[params[0]]]].append(float(row[header[params[1]]]))
except:
pass
out_str = StringIO()
writer = csv.writer(out_str, delimiter=',', quotechar='"')
writer.writerow([params[0], params[1]])
for k, v in vals.items():
writer.writerow([k, sum(v)/len(v)])
return {'contents': out_str.getvalue().encode(), 'datatype': '/datatypes/csv', 'mimetype': 'text/csv', 'predecessors': []}"""
code_obj_data = create_data_object(
name = 'Mean of CSV column, group by',
ownerid = user_id,
description = 'Function to find mean of CSV column, grouped by another column',
code = filter_func,
comment = 'Inputs: (dobj_id, [group by column, aggregate column])'
)
filter_func = """def join_csvs(dobj_id, params=[]):
# params = (join csv, join column1, join column2, filter_col, filter_val)
from io import StringIO
import csv
import json
input_data = get_dobj_contents(dobj_id)
csv_file = StringIO(input_data.decode())
join_data = get_dobj_contents(params[0])
join_file = StringIO(join_data.decode())
reader = csv.reader(csv_file, delimiter=',', quotechar='"')
header = {}
table1 = {}
output_header = []
for row in reader:
if len(header) == 0:
header = {x: i for i, x in enumerate(row)}
output_header += row
else:
join_idx = header[params[1]]
if row[join_idx] not in table1:
table1[row[join_idx]] = []
table1[row[join_idx]].append(row)
reader = csv.reader(join_file, delimiter=',', quotechar='"')
out_str = StringIO()
writer = csv.writer(out_str, delimiter=',', quotechar='"')
join_header = {}
for row in reader:
if len(join_header) == 0:
join_header = {x: i for i, x in enumerate(row)}
output_header += row[:join_header[params[2]]]
output_header += row[join_header[params[2]]+1:]
writer.writerow(output_header)
else:
if params[3] in join_header and row[join_header[params[3]]] != params[4]:
continue
join_idx = join_header[params[2]]
if row[join_idx] in table1:
for t1 in table1[row[join_idx]]:
out_data = []
out_data += t1
out_data += row[:join_idx]
out_data += row[join_idx+1:]
writer.writerow(out_data)
return {'contents': out_str.getvalue().encode(), 'datatype': '/datatypes/csv', 'mimetype': 'text/csv', 'predecessors': [params[0]]}"""
code_obj_data = create_data_object(
name = 'Join CSV',
ownerid = user_id5,
description = 'Function to join CSVs',
code = filter_func,
comment = 'Inputs: (dobj_id, [join_table, join_column1, join_column2])'
)
| 34.445161 | 138 | 0.613879 | import requests
import json
from collections import defaultdict
from datetime import datetime, timedelta
from urllib.request import urlopen
from lib import get_user_id, create_data_object, update_data_object
USER_NAME = "Mike Anderson"
USER_EMAIL = "mrander@umich.edu"
sample_data_file = "data/Unemployment_data_2019.csv"
sample_data_file2 = "data/all_court_records.csv"
sample_data_file3 = "data/judicial_districts.csv"
sample_data_file4 = "data/fips_counties.csv"
if __name__ == "__main__":
user_id = get_user_id(USER_EMAIL, USER_NAME)
user_id2 = get_user_id("andrewpaley2022@u.northwestern.edu", "Andrew Paley")
user_id3 = get_user_id("alicezou@umich.edu", "Jiayun Zou")
user_id4 = get_user_id("michjc@csail.mit.edu", "Michael Cafarella")
user_id5 = get_user_id("ctm310@yahoo.com", "Carol McLaughlin")
with urlopen('https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json') as response:
counties = json.load(response)
json_obj_data = create_data_object(
name = 'GeoJSON US County FIPS data',
ownerid = user_id,
description = 'Geo FIPS data for US Counties',
data = counties,
comment = 'Downloaded from Plotly',
datatype = '/datatypes/json',
mimetype = 'application/json'
)
csv_obj_data = create_data_object(
name = '2019 U.S. Unemployment and Income Data',
ownerid = user_id,
description = 'Unemployment and income data by county',
datafile = sample_data_file,
comment = 'Downloaded from USDA',
datatype = '/datatypes/csv',
mimetype = 'text/csv'
)
csv_obj_data = create_data_object(
name = '2016 Court Cases - All Districts',
ownerid = user_id2,
description = 'Court cases by district',
datafile = sample_data_file2,
comment = 'Downloaded from Scales',
datatype = '/datatypes/csv',
mimetype = 'text/csv'
)
csv_obj_data = create_data_object(
name = 'U.S. Judicial Districts by County',
ownerid = user_id2,
description = 'US counts annotated by Judicial District',
datafile = sample_data_file3,
comment = 'From the web',
datatype = '/datatypes/csv',
mimetype = 'text/csv'
)
csv_obj_data = create_data_object(
name = 'FIPS Codes for US Counties',
ownerid = user_id4,
description = 'FIPS Codes for US Counties',
datafile = sample_data_file4,
comment = 'Downloaded from bls.gov',
datatype = '/datatypes/csv',
mimetype = 'text/csv'
)
map_func = """def cloropleth_county_map(dobj_id, columns=[]):
from urllib.request import urlopen
import json
import plotly.graph_objects as go
import pandas as pd
from io import BytesIO, StringIO
GEO_DATA_ID = 25
counties = get_dobj_contents(GEO_DATA_ID)
input_data = get_dobj_contents(dobj_id)
df = pd.read_csv(StringIO(input_data.decode('utf-8')), dtype={columns[0]: str})
fig = go.Figure(go.Choroplethmapbox(geojson=counties, locations=df[columns[0]], z=df[columns[1]],
colorscale="Viridis", zmin=min(df[columns[1]]), zmax=max(df[columns[1]]),
marker_opacity=0.5, marker_line_width=0))
fig.update_layout(mapbox_style="carto-positron",
mapbox_zoom=5.6, mapbox_center = {"lat": 43.15, "lon": -76.14})
fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0})
#{"lat": 37.0902, "lon": -95.7129}
output_buffer = BytesIO()
fig.write_image(output_buffer, format='png')
output = output_buffer.getvalue()
return {'contents': output, 'datatype': '/datatypes/img', 'mimetype': 'image/png', 'predecessors': [GEO_DATA_ID]}"""
code_obj_data = create_data_object(
name = 'US County Chloropleth Map Function',
ownerid = user_id,
description = 'Function to create Chloropleth Maps from US County Data',
code = map_func,
comment = 'Inputs: (dobj_id, [fips_col_name, data_col_name])'
)
fips_func = """def add_fips_codes_counties(dobj_id, params=[]):
# params = (county column, state column)
from io import StringIO
import csv
FIPS_DATA = 29
fips_csv = StringIO(get_dobj_contents(FIPS_DATA).decode())
fips = {}
fips_header = {}
reader = csv.reader(fips_csv, delimiter=',', quotechar='"')
for row in reader:
if len(fips_header) == 0:
fips_header = {x: i for i, x in enumerate(row)}
else:
fips[row[fips_header['area_name']]] = row[fips_header['fips_txt']]
input_data = get_dobj_contents(dobj_id)
csv_file = StringIO(input_data.decode())
reader = csv.reader(csv_file, delimiter=',', quotechar='"')
output = []
header = {}
out_str = StringIO()
writer = csv.writer(out_str, delimiter=',', quotechar='"')
for row in reader:
if len(header) == 0:
writer.writerow(row + ['fips_code'])
header = {x: i for i, x in enumerate(row)}
else:
county = row[header[params[0]]]
state = row[header[params[1]]]
if state.lower() in ABBREV_US_STATE:
state = ABBREV_US_STATE[state.lower()]
fips_code = fips["{}, {}".format(county, state)]
writer.writerow(row + [fips_code])
return {'contents': out_str.getvalue().encode(), 'datatype': '/datatypes/csv', 'mimetype': 'text/csv', 'predecessors': [FIPS_DATA]}"""
code_obj_data = create_data_object(
name = 'Add FIPS',
ownerid = user_id3,
description = 'Adds additional FIPS column to CSV containing US county column',
code = fips_func,
comment = 'Inputs: (dobj_id, [county_col_name, state_col_name])'
)
filter_func = """def filter_csv_by_text(dobj_id, params=[]):
# params = (column to filter, string to match)
from io import StringIO
import csv
input_data = get_dobj_contents(dobj_id)
csv_file = StringIO(input_data.decode())
reader = csv.reader(csv_file, delimiter=',', quotechar='"')
output = []
header = {}
out_str = StringIO()
writer = csv.writer(out_str, delimiter=',', quotechar='"')
for row in reader:
if len(header) == 0:
writer.writerow(row)
header = {x: i for i, x in enumerate(row)}
else:
if params[1] in row[header[params[0]]]:
writer.writerow(row)
return {'contents': out_str.getvalue().encode(), 'datatype': '/datatypes/csv', 'mimetype': 'text/csv', 'predecessors': []}"""
code_obj_data = create_data_object(
name = 'Filter CSV by text value',
ownerid = user_id4,
description = 'Function to filter CSV by text value in one column',
code = filter_func,
comment = 'Inputs: (dobj_id, [col_name, filter_text])'
)
filter_func = """def aggregate_csv_mean(dobj_id, params=[]):
# params = (group by column, aggegrate column)
from io import StringIO
import csv
input_data = get_dobj_contents(dobj_id)
csv_file = StringIO(input_data.decode())
reader = csv.reader(csv_file, delimiter=',', quotechar='"')
header = {}
vals = {}
for row in reader:
if len(header) == 0:
header = {x: i for i, x in enumerate(row)}
else:
if row[header[params[0]]] not in vals:
vals[row[header[params[0]]]] = []
try:
vals[row[header[params[0]]]].append(float(row[header[params[1]]]))
except:
pass
out_str = StringIO()
writer = csv.writer(out_str, delimiter=',', quotechar='"')
writer.writerow([params[0], params[1]])
for k, v in vals.items():
writer.writerow([k, sum(v)/len(v)])
return {'contents': out_str.getvalue().encode(), 'datatype': '/datatypes/csv', 'mimetype': 'text/csv', 'predecessors': []}"""
code_obj_data = create_data_object(
name = 'Mean of CSV column, group by',
ownerid = user_id,
description = 'Function to find mean of CSV column, grouped by another column',
code = filter_func,
comment = 'Inputs: (dobj_id, [group by column, aggregate column])'
)
filter_func = """def join_csvs(dobj_id, params=[]):
# params = (join csv, join column1, join column2, filter_col, filter_val)
from io import StringIO
import csv
import json
input_data = get_dobj_contents(dobj_id)
csv_file = StringIO(input_data.decode())
join_data = get_dobj_contents(params[0])
join_file = StringIO(join_data.decode())
reader = csv.reader(csv_file, delimiter=',', quotechar='"')
header = {}
table1 = {}
output_header = []
for row in reader:
if len(header) == 0:
header = {x: i for i, x in enumerate(row)}
output_header += row
else:
join_idx = header[params[1]]
if row[join_idx] not in table1:
table1[row[join_idx]] = []
table1[row[join_idx]].append(row)
reader = csv.reader(join_file, delimiter=',', quotechar='"')
out_str = StringIO()
writer = csv.writer(out_str, delimiter=',', quotechar='"')
join_header = {}
for row in reader:
if len(join_header) == 0:
join_header = {x: i for i, x in enumerate(row)}
output_header += row[:join_header[params[2]]]
output_header += row[join_header[params[2]]+1:]
writer.writerow(output_header)
else:
if params[3] in join_header and row[join_header[params[3]]] != params[4]:
continue
join_idx = join_header[params[2]]
if row[join_idx] in table1:
for t1 in table1[row[join_idx]]:
out_data = []
out_data += t1
out_data += row[:join_idx]
out_data += row[join_idx+1:]
writer.writerow(out_data)
return {'contents': out_str.getvalue().encode(), 'datatype': '/datatypes/csv', 'mimetype': 'text/csv', 'predecessors': [params[0]]}"""
code_obj_data = create_data_object(
name = 'Join CSV',
ownerid = user_id5,
description = 'Function to join CSVs',
code = filter_func,
comment = 'Inputs: (dobj_id, [join_table, join_column1, join_column2])'
)
| true | true |
f71ccf5e40a5d9802ed0c6e2043dda4342fb9258 | 391 | py | Python | serveup/wsgi.py | ASquirrelsTail/serve-up | 9533ba82f5b4989434b3b20352d17a8131bb9619 | [
"MIT"
] | null | null | null | serveup/wsgi.py | ASquirrelsTail/serve-up | 9533ba82f5b4989434b3b20352d17a8131bb9619 | [
"MIT"
] | 10 | 2021-03-30T14:05:21.000Z | 2022-03-12T00:41:15.000Z | serveup/wsgi.py | ASquirrelsTail/serve-up | 9533ba82f5b4989434b3b20352d17a8131bb9619 | [
"MIT"
] | null | null | null | """
WSGI config for serveup project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the application.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'serveup.settings')
# Module-level WSGI callable that servers (gunicorn, uwsgi, ...) import.
application = get_wsgi_application()
| 23 | 78 | 0.785166 |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'serveup.settings')
application = get_wsgi_application()
| true | true |
f71ccfaa61b4b5f0ebf12cbfe2ca50b9ddc66c78 | 417 | py | Python | pypy/rlib/test/test_rgc.py | camillobruni/pygirl | ddbd442d53061d6ff4af831c1eab153bcc771b5a | [
"MIT"
] | 12 | 2016-01-06T07:10:28.000Z | 2021-05-13T23:02:02.000Z | pypy/rlib/test/test_rgc.py | camillobruni/pygirl | ddbd442d53061d6ff4af831c1eab153bcc771b5a | [
"MIT"
] | null | null | null | pypy/rlib/test/test_rgc.py | camillobruni/pygirl | ddbd442d53061d6ff4af831c1eab153bcc771b5a | [
"MIT"
] | 2 | 2016-07-29T07:09:50.000Z | 2016-10-16T08:50:26.000Z | from pypy.rpython.test.test_llinterp import gengraph, interpret
from pypy.rlib import rgc # Force registration of gc.collect
import gc
def test_collect():
    """gc.collect() must lower to a single 'gc__collect' op and, when
    interpreted, return None."""
    def f():
        return gc.collect()
    t, typer, graph = gengraph(f, [])
    block_ops = list(graph.iterblockops())
    assert len(block_ops) == 1
    collect_op = block_ops[0][1]
    assert collect_op.opname == 'gc__collect'
    assert interpret(f, []) is None
| 20.85 | 63 | 0.630695 | from pypy.rpython.test.test_llinterp import gengraph, interpret
from pypy.rlib import rgc
import gc
def test_collect():
def f():
return gc.collect()
t, typer, graph = gengraph(f, [])
ops = list(graph.iterblockops())
assert len(ops) == 1
op = ops[0][1]
assert op.opname == 'gc__collect'
res = interpret(f, [])
assert res is None
| true | true |
f71cd18cea2ada57b893164f00c100b5a386de43 | 3,792 | py | Python | test/core_arguments.py | sdarwin/build | 2c4217ebb6bdeb5001b33a5d0d6718420aef988c | [
"BSL-1.0"
] | 106 | 2015-08-07T04:23:50.000Z | 2020-12-27T18:25:15.000Z | test/core_arguments.py | sdarwin/build | 2c4217ebb6bdeb5001b33a5d0d6718420aef988c | [
"BSL-1.0"
] | 130 | 2016-06-22T22:11:25.000Z | 2020-11-29T20:24:09.000Z | test/core_arguments.py | sdarwin/build | 2c4217ebb6bdeb5001b33a5d0d6718420aef988c | [
"BSL-1.0"
] | 41 | 2015-07-08T19:18:35.000Z | 2021-01-14T16:39:56.000Z | #!/usr/bin/python
# Copyright 2001 Dave Abrahams
# Copyright 2011 Steven Watanabe
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE.txt or copy at
# https://www.bfgroup.xyz/b2/LICENSE.txt)
import BoostBuild
def simple_args(start, finish):
    """Return the integers start..finish (inclusive) joined by ' : '."""
    return " : ".join(str(value) for value in range(start, finish + 1))
def test(t, type, input, output, status=0):
    # Compose a one-line jamfile that includes the echo rules and invokes
    # echo_<type> with the given argument text, then check b2's output.
    invocation = "include echo_args.jam ; echo_%s" % type
    if input:
        invocation = invocation + " " + input
    invocation = invocation + " ;"
    t.write("file.jam", invocation)
    t.run_build_system(["-ffile.jam"], status=status)
    t.expect_output_lines(output)
def test_args(t, *args, **kwargs):
    # Exercise the fixed-arity 'echo_args' rule.
    test(t, "args", *args, **kwargs)
def test_varargs(t, *args, **kwargs):
    # Exercise the open-arity 'echo_varargs' rule.
    test(t, "varargs", *args, **kwargs)
t = BoostBuild.Tester(pass_toolset=0)
# Jamfile defining two rules that echo their parsed argument lists; every
# case below drives one of them via test_args()/test_varargs().
t.write("echo_args.jam", """\
NOCARE all ;
rule echo_args ( a b ? c ? : d + : e * )
{
    ECHO a= $(a) b= $(b) c= $(c) ":" d= $(d) ":" e= $(e) ;
}
rule echo_varargs ( a b ? c ? : d + : e * : * )
{
    ECHO a= $(a) b= $(b) c= $(c) ":" d= $(d) ":" e= $(e)
      ": rest= "$(4[1]) $(4[2-])
      ": "$(5[1]) $(5[2-]) ": "$(6[1]) $(6[2-]) ": "$(7[1]) $(7[2-])
      ": "$(8[1]) $(8[2-]) ": "$(9[1]) $(9[2-]) ": "$(10[1]) $(10[2-])
      ": "$(11[1]) $(11[2-]) ": "$(12[1]) $(12[2-]) ": "$(13[1]) $(13[2-])
      ": "$(14[1]) $(14[2-]) ": "$(15[1]) $(15[2-]) ": "$(16[1]) $(16[2-])
      ": "$(17[1]) $(17[2-]) ": "$(18[1]) $(18[2-]) ": "$(19[1]) $(19[2-])
      ": "$(20[1]) $(20[2-]) ": "$(21[1]) $(21[2-]) ": "$(22[1]) $(22[2-])
      ": "$(23[1]) $(23[2-]) ": "$(24[1]) $(24[2-]) ": "$(25[1]) $(25[2-]) ;
}
""")
# Missing/extra argument diagnostics.
test_args(t, "", "* missing argument a", status=1)
test_args(t, "1 2 : 3 : 4 : 5", "* extra argument 5", status=1)
test_args(t, "a b c1 c2 : d", "* extra argument c2", status=1)
# Check modifier '?'
test_args(t, "1 2 3 : 4", "a= 1 b= 2 c= 3 : d= 4 : e=")
test_args(t, "1 2 : 3", "a= 1 b= 2 c= : d= 3 : e=")
test_args(t, "1 2 : 3", "a= 1 b= 2 c= : d= 3 : e=")
test_args(t, "1 : 2", "a= 1 b= c= : d= 2 : e=")
# Check modifier '+'
test_args(t, "1", "* missing argument d", status=1)
test_args(t, "1 : 2 3", "a= 1 b= c= : d= 2 3 : e=")
test_args(t, "1 : 2 3 4", "a= 1 b= c= : d= 2 3 4 : e=")
# Check modifier '*'
test_args(t, "1 : 2 : 3", "a= 1 b= c= : d= 2 : e= 3")
test_args(t, "1 : 2 : 3 4", "a= 1 b= c= : d= 2 : e= 3 4")
test_args(t, "1 : 2 : 3 4 5", "a= 1 b= c= : d= 2 : e= 3 4 5")
# Check varargs
test_varargs(t, "1 : 2 : 3 4 5", "a= 1 b= c= : d= 2 : e= 3 4 5")
test_varargs(t, "1 : 2 : 3 4 5 : 6", "a= 1 b= c= : d= 2 : e= 3 4 5 : rest= 6")
test_varargs(t, "1 : 2 : 3 4 5 : 6 7",
    "a= 1 b= c= : d= 2 : e= 3 4 5 : rest= 6 7")
test_varargs(t, "1 : 2 : 3 4 5 : 6 7 : 8",
    "a= 1 b= c= : d= 2 : e= 3 4 5 : rest= 6 7 : 8")
test_varargs(t, "1 : 2 : 3 4 5 : 6 7 : 8 : 9",
    "a= 1 b= c= : d= 2 : e= 3 4 5 : rest= 6 7 : 8 : 9")
test_varargs(t, "1 : 2 : 3 4 5 : 6 7 : 8 : 9 : 10 : 11 : 12 : 13 : 14 : 15 : "
    "16 : 17 : 18 : 19a 19b", "a= 1 b= c= : d= 2 : e= 3 4 5 : rest= 6 7 : 8 : "
    "9 : 10 : 11 : 12 : 13 : 14 : 15 : 16 : 17 : 18 : 19a 19b")
test_varargs(t, "1 : 2 : 3 4 5 : 6 7 : 8 : 9 : 10 : 11 : 12 : 13 : 14 : 15 : "
    "16 : 17 : 18 : 19a 19b 19c : 20", "a= 1 b= c= : d= 2 : e= 3 4 5 : rest= "
    "6 7 : 8 : 9 : 10 : 11 : 12 : 13 : 14 : 15 : 16 : 17 : 18 : 19a 19b 19c : "
    "20")
# Check varargs upper limit: arguments beyond group 19 fold into group 19.
expected = "a= 1 b= c= : d= 2 : e= 3 : rest= " + simple_args(4, 19)
test_varargs(t, simple_args(1, 19), expected)
test_varargs(t, simple_args(1, 19) + " 19b 19c 19d", expected + " 19b 19c 19d")
test_varargs(t, simple_args(1, 19) + " 19b 19c 19d : 20", expected + " 19b "
    "19c 19d")
test_varargs(t, simple_args(1, 20), expected)
test_varargs(t, simple_args(1, 50), expected)
t.cleanup()
import BoostBuild
def simple_args(start, finish):
    """Render the integers start..finish (inclusive) as a bjam list-argument
    string, e.g. simple_args(1, 3) == "1 : 2 : 3"."""
    return " : ".join(map(str, range(start, finish + 1)))
def test(t, type, input, output, status=0):
    """Run the echo_<type> rule with *input* through Boost.Build and check
    that the run exits with *status* and prints the *output* lines."""
    fragments = ["include echo_args.jam ; echo_%s" % type]
    if input:
        fragments.append(input)
    fragments.append(";")
    t.write("file.jam", " ".join(fragments))
    t.run_build_system(["-ffile.jam"], status=status)
    t.expect_output_lines(output)
def test_args(t, *args, **kwargs):
    """Shorthand for exercising the fixed-signature echo_args rule."""
    return test(t, "args", *args, **kwargs)
def test_varargs(t, *args, **kwargs):
    """Shorthand for exercising the open-ended echo_varargs rule."""
    return test(t, "varargs", *args, **kwargs)
t = BoostBuild.Tester(pass_toolset=0)
t.write("echo_args.jam", """\
NOCARE all ;
rule echo_args ( a b ? c ? : d + : e * )
{
ECHO a= $(a) b= $(b) c= $(c) ":" d= $(d) ":" e= $(e) ;
}
rule echo_varargs ( a b ? c ? : d + : e * : * )
{
ECHO a= $(a) b= $(b) c= $(c) ":" d= $(d) ":" e= $(e)
": rest= "$(4[1]) $(4[2-])
": "$(5[1]) $(5[2-]) ": "$(6[1]) $(6[2-]) ": "$(7[1]) $(7[2-])
": "$(8[1]) $(8[2-]) ": "$(9[1]) $(9[2-]) ": "$(10[1]) $(10[2-])
": "$(11[1]) $(11[2-]) ": "$(12[1]) $(12[2-]) ": "$(13[1]) $(13[2-])
": "$(14[1]) $(14[2-]) ": "$(15[1]) $(15[2-]) ": "$(16[1]) $(16[2-])
": "$(17[1]) $(17[2-]) ": "$(18[1]) $(18[2-]) ": "$(19[1]) $(19[2-])
": "$(20[1]) $(20[2-]) ": "$(21[1]) $(21[2-]) ": "$(22[1]) $(22[2-])
": "$(23[1]) $(23[2-]) ": "$(24[1]) $(24[2-]) ": "$(25[1]) $(25[2-]) ;
}
""")
test_args(t, "", "* missing argument a", status=1)
test_args(t, "1 2 : 3 : 4 : 5", "* extra argument 5", status=1)
test_args(t, "a b c1 c2 : d", "* extra argument c2", status=1)
test_args(t, "1 2 3 : 4", "a= 1 b= 2 c= 3 : d= 4 : e=")
test_args(t, "1 2 : 3", "a= 1 b= 2 c= : d= 3 : e=")
test_args(t, "1 2 : 3", "a= 1 b= 2 c= : d= 3 : e=")
test_args(t, "1 : 2", "a= 1 b= c= : d= 2 : e=")
test_args(t, "1", "* missing argument d", status=1)
test_args(t, "1 : 2 3", "a= 1 b= c= : d= 2 3 : e=")
test_args(t, "1 : 2 3 4", "a= 1 b= c= : d= 2 3 4 : e=")
test_args(t, "1 : 2 : 3", "a= 1 b= c= : d= 2 : e= 3")
test_args(t, "1 : 2 : 3 4", "a= 1 b= c= : d= 2 : e= 3 4")
test_args(t, "1 : 2 : 3 4 5", "a= 1 b= c= : d= 2 : e= 3 4 5")
test_varargs(t, "1 : 2 : 3 4 5", "a= 1 b= c= : d= 2 : e= 3 4 5")
test_varargs(t, "1 : 2 : 3 4 5 : 6", "a= 1 b= c= : d= 2 : e= 3 4 5 : rest= 6")
test_varargs(t, "1 : 2 : 3 4 5 : 6 7",
"a= 1 b= c= : d= 2 : e= 3 4 5 : rest= 6 7")
test_varargs(t, "1 : 2 : 3 4 5 : 6 7 : 8",
"a= 1 b= c= : d= 2 : e= 3 4 5 : rest= 6 7 : 8")
test_varargs(t, "1 : 2 : 3 4 5 : 6 7 : 8 : 9",
"a= 1 b= c= : d= 2 : e= 3 4 5 : rest= 6 7 : 8 : 9")
test_varargs(t, "1 : 2 : 3 4 5 : 6 7 : 8 : 9 : 10 : 11 : 12 : 13 : 14 : 15 : "
"16 : 17 : 18 : 19a 19b", "a= 1 b= c= : d= 2 : e= 3 4 5 : rest= 6 7 : 8 : "
"9 : 10 : 11 : 12 : 13 : 14 : 15 : 16 : 17 : 18 : 19a 19b")
test_varargs(t, "1 : 2 : 3 4 5 : 6 7 : 8 : 9 : 10 : 11 : 12 : 13 : 14 : 15 : "
"16 : 17 : 18 : 19a 19b 19c : 20", "a= 1 b= c= : d= 2 : e= 3 4 5 : rest= "
"6 7 : 8 : 9 : 10 : 11 : 12 : 13 : 14 : 15 : 16 : 17 : 18 : 19a 19b 19c : "
"20")
expected = "a= 1 b= c= : d= 2 : e= 3 : rest= " + simple_args(4, 19)
test_varargs(t, simple_args(1, 19), expected)
test_varargs(t, simple_args(1, 19) + " 19b 19c 19d", expected + " 19b 19c 19d")
test_varargs(t, simple_args(1, 19) + " 19b 19c 19d : 20", expected + " 19b "
"19c 19d")
test_varargs(t, simple_args(1, 20), expected)
test_varargs(t, simple_args(1, 50), expected)
t.cleanup()
| true | true |
f71cd1ac7f84132149ddc09f332427ff7a78ff75 | 2,799 | py | Python | python/group_merger.py | CSCI5470/testing-alexjohnny1207 | 36ab3fa0cd7d32e27b167a985f9d47991d690fce | [
"MIT"
] | 414 | 2016-06-28T16:24:47.000Z | 2022-03-25T13:30:27.000Z | python/group_merger.py | CSCI5470/testing-alexjohnny1207 | 36ab3fa0cd7d32e27b167a985f9d47991d690fce | [
"MIT"
] | 35 | 2017-03-07T16:36:20.000Z | 2021-07-18T04:53:41.000Z | python/group_merger.py | CSCI5470/testing-alexjohnny1207 | 36ab3fa0cd7d32e27b167a985f9d47991d690fce | [
"MIT"
] | 162 | 2016-02-08T21:58:19.000Z | 2022-02-16T09:40:11.000Z | import caffe
import re
from pittnuts import *
import os
import matplotlib.pyplot as plt
import argparse
import caffeparser
import caffe_apps
import numpy as np
if __name__ == "__main__":
    # Merge a "split" AlexNet (grouped convolutions unrolled into explicit
    # <layer>_group0 / <layer>_group1 layers) back into the original grouped
    # prototxt layout and save the merged weights as an HDF5 caffemodel.
    # NOTE(review): Python 2 script (print statements, dict.iteritems).
    parser = argparse.ArgumentParser()
    parser.add_argument('--original_alexnet', type=str, required=True,help="The original alexnet with group.")
    parser.add_argument('--split_alexnet', type=str, required=True, help="The split alexnet without group.")
    parser.add_argument('--caffemodel', type=str, required=True,help="The caffemodel of split alexnet.")
    args = parser.parse_args()
    original_alexnet = args.original_alexnet
    caffemodel = args.caffemodel
    split_alexnet = args.split_alexnet
    # Parse both prototxt definitions; only the original's layer list is walked.
    net_parser = caffeparser.CaffeProtoParser(original_alexnet)
    orig_net_msg = net_parser.readProtoNetFile()
    net_parser = caffeparser.CaffeProtoParser(split_alexnet)
    split_net_msg = net_parser.readProtoNetFile()
    caffe.set_mode_cpu()
    # GPU mode
    #caffe.set_device(0)
    #caffe.set_mode_gpu()
    # Source network: the split (group-free) model carrying the trained weights.
    src_net = caffe.Net(split_alexnet,caffemodel, caffe.TEST)
    print("blobs {}\nparams {}".format(src_net.blobs.keys(), src_net.params.keys()))
    loop_layers = orig_net_msg.layer[:] # adding : implicitly makes a copy to avoid being modified in the loop
    layer_idx = -1
    new_parameters = {}
    # For every Convolution layer, concatenate (axis 0) the two groups'
    # weight (and bias) blobs; all other parameterized layers are copied
    # through unchanged. Assumes exactly two groups per conv layer.
    for cur_layer in loop_layers:
        layer_idx += 1
        layer_name = cur_layer.name
        if 'Convolution' == cur_layer.type:
            if cur_layer.convolution_param.bias_term:
                new_parameters[layer_name] = {0: np.concatenate(( src_net.params[layer_name+"_group0"][0].data, src_net.params[layer_name+"_group1"][0].data)),
                                              1: np.concatenate(( src_net.params[layer_name+"_group0"][1].data, src_net.params[layer_name+"_group1"][1].data)) }
            else:
                new_parameters[layer_name] = {0: np.concatenate(
                    (src_net.params[layer_name + "_group0"][0].data, src_net.params[layer_name + "_group1"][0].data))}
        else:
            if layer_name in src_net.params:
                cur_param = {}
                for idx in range(0,len(src_net.params[layer_name])):
                    cur_param[idx]=src_net.params[layer_name][idx].data[:]
                new_parameters[layer_name] = cur_param
    # open and generate the caffemodel: instantiate the grouped original and
    # copy the merged blobs into it in place.
    dst_net = caffe.Net(original_alexnet, caffe.TRAIN)
    for key,val in new_parameters.iteritems():
        for keykey,valval in val.iteritems():
            dst_net.params[key][keykey].data[:] = valval[:]
    #file_split = os.path.splitext(caffemodel)
    filepath_caffemodel = caffemodel + '.merge.caffemodel.h5'
    dst_net.save_hdf5(filepath_caffemodel)
    print "Saved as {}".format(filepath_caffemodel)
    print "Done!"
| 41.776119 | 161 | 0.673455 | import caffe
import re
from pittnuts import *
import os
import matplotlib.pyplot as plt
import argparse
import caffeparser
import caffe_apps
import numpy as np
if __name__ == "__main__":
    # Merge a "split" AlexNet (grouped convolutions unrolled into explicit
    # <layer>_group0 / <layer>_group1 layers) back into the original grouped
    # prototxt layout and save the merged weights as an HDF5 caffemodel.
    # NOTE(review): Python 2 script (print statements, dict.iteritems).
    parser = argparse.ArgumentParser()
    parser.add_argument('--original_alexnet', type=str, required=True,help="The original alexnet with group.")
    parser.add_argument('--split_alexnet', type=str, required=True, help="The split alexnet without group.")
    parser.add_argument('--caffemodel', type=str, required=True,help="The caffemodel of split alexnet.")
    args = parser.parse_args()
    original_alexnet = args.original_alexnet
    caffemodel = args.caffemodel
    split_alexnet = args.split_alexnet
    # Parse both prototxt definitions; only the original's layer list is walked.
    net_parser = caffeparser.CaffeProtoParser(original_alexnet)
    orig_net_msg = net_parser.readProtoNetFile()
    net_parser = caffeparser.CaffeProtoParser(split_alexnet)
    split_net_msg = net_parser.readProtoNetFile()
    caffe.set_mode_cpu()
    # Source network: the split (group-free) model carrying the trained weights.
    src_net = caffe.Net(split_alexnet,caffemodel, caffe.TEST)
    print("blobs {}\nparams {}".format(src_net.blobs.keys(), src_net.params.keys()))
    # [:] takes a copy so the message is not mutated while iterating.
    loop_layers = orig_net_msg.layer[:]
    layer_idx = -1
    new_parameters = {}
    # For every Convolution layer, concatenate (axis 0) the two groups'
    # weight (and bias) blobs; all other parameterized layers are copied
    # through unchanged. Assumes exactly two groups per conv layer.
    for cur_layer in loop_layers:
        layer_idx += 1
        layer_name = cur_layer.name
        if 'Convolution' == cur_layer.type:
            if cur_layer.convolution_param.bias_term:
                new_parameters[layer_name] = {0: np.concatenate(( src_net.params[layer_name+"_group0"][0].data, src_net.params[layer_name+"_group1"][0].data)),
                                              1: np.concatenate(( src_net.params[layer_name+"_group0"][1].data, src_net.params[layer_name+"_group1"][1].data)) }
            else:
                new_parameters[layer_name] = {0: np.concatenate(
                    (src_net.params[layer_name + "_group0"][0].data, src_net.params[layer_name + "_group1"][0].data))}
        else:
            if layer_name in src_net.params:
                cur_param = {}
                for idx in range(0,len(src_net.params[layer_name])):
                    cur_param[idx]=src_net.params[layer_name][idx].data[:]
                new_parameters[layer_name] = cur_param
    # Instantiate the grouped original and copy the merged blobs in place.
    dst_net = caffe.Net(original_alexnet, caffe.TRAIN)
    for key,val in new_parameters.iteritems():
        for keykey,valval in val.iteritems():
            dst_net.params[key][keykey].data[:] = valval[:]
    filepath_caffemodel = caffemodel + '.merge.caffemodel.h5'
    dst_net.save_hdf5(filepath_caffemodel)
    print "Saved as {}".format(filepath_caffemodel)
    print "Done!"
| false | true |
f71cd2549c8bf4f50eefa40b961f15351236ec5b | 350 | py | Python | openctrl/display.py | openctrl-python/openctrl | 5adda5d79262950eceab91b8412ead3d3a13e712 | [
"MIT"
] | null | null | null | openctrl/display.py | openctrl-python/openctrl | 5adda5d79262950eceab91b8412ead3d3a13e712 | [
"MIT"
] | 1 | 2021-06-02T18:59:19.000Z | 2021-06-02T18:59:19.000Z | openctrl/display.py | pyopencontrol/openctrl | 24f08970052301cb0c4a13fc855b80353a3cb975 | [
"MIT"
] | null | null | null | import threading
# The grab backend is optional: if .grab cannot be imported (e.g. no display
# backend), the module still loads; functions that use Image then fail with
# NameError at call time.
try:
    from .grab import Image
except Exception:  # narrowed from bare `except:` so KeyboardInterrupt/SystemExit propagate
    pass
def grab_bytes():
    """Capture one image via .grab.Image and return its byte serialization.

    NOTE(review): the exact encoding of ``asbytes`` is defined in .grab —
    presumably a serialized screenshot; confirm there.
    """
    return Image().asbytes
def send(s,a):
    """Post one framed capture to address *a*: payload is b's' + image bytes.

    *s* is presumably a socket-like wrapper exposing ``post(data, addr)`` —
    confirm against the transport implementation.
    """
    s.post(b's'+grab_bytes(),a)
def show_bytes(r):
    """Display a received frame; the payload after the leading 's' marker is
    the serialized image."""
    # NOTE(review): send() frames messages as *bytes* (b's' + ...), but
    # bytes.startswith('s') raises TypeError on Python 3 — confirm whether
    # the transport delivers str here; otherwise this needs b's' / r[1:] care.
    if not r.startswith('s'):return
    Image(r[1:]).show()
def conf(s,a):
    """Start a background thread that streams captures to *a* forever."""
    def _stream_forever():
        while True:
            send(s,a)
    threading.Thread(target=_stream_forever).start()
| 20.588235 | 42 | 0.617143 | import threading
# The grab backend is optional: if .grab cannot be imported (e.g. no display
# backend), the module still loads; functions that use Image then fail with
# NameError at call time.
try:
    from .grab import Image
except Exception:  # narrowed from bare `except:` so KeyboardInterrupt/SystemExit propagate
    pass
def grab_bytes():
    """Capture one image via .grab.Image and return its byte serialization.

    NOTE(review): the exact encoding of ``asbytes`` is defined in .grab —
    presumably a serialized screenshot; confirm there.
    """
    return Image().asbytes
def send(s,a):
    """Post one framed capture to address *a*: payload is b's' + image bytes.

    *s* is presumably a socket-like wrapper exposing ``post(data, addr)`` —
    confirm against the transport implementation.
    """
    s.post(b's'+grab_bytes(),a)
def show_bytes(r):
    """Display a received frame; the payload after the leading 's' marker is
    the serialized image."""
    # NOTE(review): send() frames messages as *bytes* (b's' + ...), but
    # bytes.startswith('s') raises TypeError on Python 3 — confirm whether
    # the transport delivers str here; otherwise this needs b's' / r[1:] care.
    if not r.startswith('s'):return
    Image(r[1:]).show()
def conf(s,a):
    """Start a background thread that streams captures to *a* forever."""
    # NOTE(review): the thread is non-daemon and loops unconditionally, so a
    # process that calls conf() cannot exit cleanly; consider daemon=True.
    def _conf():
        while True:
            send(s,a)
    threading.Thread(target=_conf).start()
| true | true |
f71cd2c0ab36287199c78e7dfc110494800caf19 | 8,955 | py | Python | test_pretrain.py | anonymous-cv/cvpr-sub | 6307520c73716de73ef63f5239bdac8dda20da41 | [
"BSD-3-Clause"
] | null | null | null | test_pretrain.py | anonymous-cv/cvpr-sub | 6307520c73716de73ef63f5239bdac8dda20da41 | [
"BSD-3-Clause"
] | null | null | null | test_pretrain.py | anonymous-cv/cvpr-sub | 6307520c73716de73ef63f5239bdac8dda20da41 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import time
import argparse
import sys
import numpy as np
import torch
import torch.optim as optim
from tqdm import tqdm
from network.BEV_Unet import BEV_Unet
from network.ptBEV import ptBEVnet
from dataloader.dataset import collate_fn_BEV,collate_fn_BEV_test,SemKITTI,SemKITTI_label_name,spherical_dataset,voxel_dataset
#ignore weird np warning
import warnings
warnings.filterwarnings("ignore")
def fast_hist(pred, label, n):
    """Accumulate an n x n confusion matrix: entry [i, j] counts points with
    ground truth i predicted as j. Entries of *label* outside [0, n) are
    dropped before counting."""
    valid = (label >= 0) & (label < n)
    flat_counts = np.bincount(
        n * label[valid].astype(int) + pred[valid], minlength=n ** 2)
    return flat_counts[:n ** 2].reshape(n, n)
def per_class_iu(hist):
    """Per-class intersection-over-union from a confusion matrix:
    diag / (row_sum + col_sum - diag)."""
    intersection = np.diag(hist)
    union = hist.sum(1) + hist.sum(0) - intersection
    return intersection / union
def fast_hist_crop(output, target, unique_label):
    """Confusion matrix of *output* vs *target* restricted to the rows and
    columns listed in *unique_label*."""
    n_classes = np.max(unique_label) + 1
    full = fast_hist(output.flatten(), target.flatten(), n_classes)
    return full[unique_label, :][:, unique_label]
def SemKITTI2train(label):
    """Convert raw SemKITTI labels to train ids; accepts either a single
    array or a list of arrays (each converted in place)."""
    if isinstance(label, list):
        return [SemKITTI2train_single(item) for item in label]
    return SemKITTI2train_single(label)
def SemKITTI2train_single(label):
    """Shift labels down by one so classes become 0..N-1 and the original
    class 0 becomes the ignore index 255. Mutates *label* in place and
    returns it."""
    unlabeled = label == 0
    label -= 1  # in-place on purpose, matching the list-wrapper's contract
    label[unlabeled] = 255
    return label
def train2SemKITTI(input_label):
    """Map train ids back to raw SemKITTI ids: 0..18 -> 1..19 and the ignore
    index 255 -> 0 ("unlabeled").

    Returns a new array; *input_label* is not modified.
    """
    new_labels = np.copy(input_label)
    # the ignore index maps back to class 0
    new_labels[input_label == 255] = 0
    # single vectorized shift instead of the original 19 boolean-mask passes;
    # only values the original loop touched (0..18) are shifted
    shift = (input_label >= 0) & (input_label < 19)
    new_labels[shift] = input_label[shift] + 1
    return new_labels
def main(args):
    """Evaluate a trained PolarSeg model on SemanticKITTI.

    First reports per-class IoU / mIoU and forward-pass timing on the
    validation split, then writes one .label prediction file per test scan
    under args.test_output_path, mirroring the input directory layout.
    Requires a CUDA device and a <data_dir>/sequences/ dataset layout.
    """
    data_path = args.data_dir
    test_batch_size = args.test_batch_size
    model_save_path = args.model_save_path
    output_path = args.test_output_path
    compression_model = args.grid_size[2]
    grid_size = args.grid_size
    pytorch_device = torch.device('cuda:0')
    model = args.model
    # 'polar' BEV uses 9 point features and circular padding; the
    # 'traditional' Cartesian BEV uses 7 features without it.
    if model == 'polar':
        fea_dim = 9
        circular_padding = True
    elif model == 'traditional':
        fea_dim = 7
        circular_padding = False
    # prepare miou fun: drop class id 0 (the ignore label) and shift to train ids
    unique_label=np.asarray(sorted(list(SemKITTI_label_name.keys())))[1:] - 1
    unique_label_str=[SemKITTI_label_name[x] for x in unique_label+1]
    # prepare model
    my_BEV_model=BEV_Unet(n_class=len(unique_label), n_height = compression_model, input_batch_norm = True, dropout = 0.5, circular_padding = circular_padding)
    my_model = ptBEVnet(my_BEV_model, pt_model = 'pointnet', grid_size = grid_size, fea_dim = fea_dim, max_pt_per_encode = 256,
                        out_pt_fea_dim = 512, kernal_size = 1, pt_selection = 'random', fea_compre = compression_model)
    if os.path.exists(model_save_path):
        my_model.load_state_dict(torch.load(model_save_path))
    my_model.to(pytorch_device)
    # prepare dataset
    test_pt_dataset = SemKITTI(data_path + '/sequences/', imageset = 'test', return_ref = True)
    val_pt_dataset = SemKITTI(data_path + '/sequences/', imageset = 'val', return_ref = True)
    if model == 'polar':
        test_dataset=spherical_dataset(test_pt_dataset, grid_size = grid_size, ignore_label = 0, fixed_volume_space = True, return_test= True)
        val_dataset=spherical_dataset(val_pt_dataset, grid_size = grid_size, ignore_label = 0, fixed_volume_space = True)
    elif model == 'traditional':
        test_dataset=voxel_dataset(test_pt_dataset, grid_size = grid_size, ignore_label = 0, fixed_volume_space = True, return_test= True)
        val_dataset=voxel_dataset(val_pt_dataset, grid_size = grid_size, ignore_label = 0, fixed_volume_space = True)
    test_dataset_loader = torch.utils.data.DataLoader(dataset = test_dataset,
                                                      batch_size = test_batch_size,
                                                      collate_fn = collate_fn_BEV_test,
                                                      shuffle = False,
                                                      num_workers = 4)
    val_dataset_loader = torch.utils.data.DataLoader(dataset = val_dataset,
                                                     batch_size = test_batch_size,
                                                     collate_fn = collate_fn_BEV,
                                                     shuffle = False,
                                                     num_workers = 4)
    # validation: accumulate per-batch confusion matrices and time the forward pass
    print('*'*80)
    print('Test network performance on validation split')
    print('*'*80)
    pbar = tqdm(total=len(val_dataset_loader))
    my_model.eval()
    hist_list = []
    time_list = []
    with torch.no_grad():
        for i_iter_val,(_,val_vox_label,val_grid,val_pt_labs,val_pt_fea) in enumerate(val_dataset_loader):
            val_vox_label = SemKITTI2train(val_vox_label)
            val_pt_labs = SemKITTI2train(val_pt_labs)
            val_pt_fea_ten = [torch.from_numpy(i).type(torch.FloatTensor).to(pytorch_device) for i in val_pt_fea]
            val_grid_ten = [torch.from_numpy(i[:,:2]).to(pytorch_device) for i in val_grid]
            val_label_tensor=val_vox_label.type(torch.LongTensor).to(pytorch_device)
            # synchronize around the forward pass so the timing excludes
            # asynchronous CUDA queueing
            torch.cuda.synchronize()
            start_time = time.time()
            predict_labels = my_model(val_pt_fea_ten, val_grid_ten)
            torch.cuda.synchronize()
            time_list.append(time.time()-start_time)
            predict_labels = torch.argmax(predict_labels,dim=1)
            predict_labels = predict_labels.cpu().detach().numpy()
            for count,i_val_grid in enumerate(val_grid):
                # evaluate the dense prediction at each point's voxel index
                hist_list.append(fast_hist_crop(predict_labels[count,val_grid[count][:,0],val_grid[count][:,1],val_grid[count][:,2]],val_pt_labs[count],unique_label))
            pbar.update(1)
    iou = per_class_iu(sum(hist_list))
    print('Validation per class iou: ')
    for class_name, class_iou in zip(unique_label_str,iou):
        print('%s : %.2f%%' % (class_name, class_iou*100))
    val_miou = np.nanmean(iou) * 100
    del val_vox_label,val_grid,val_pt_fea,val_grid_ten
    pbar.close()
    print('Current val miou is %.3f ' % val_miou)
    print('Inference time per %d is %.4f seconds\n' %
          (test_batch_size,np.mean(time_list)))
    # test: no labels available, just write one .label file per scan
    # (note: unlike validation, this loop is not wrapped in torch.no_grad())
    print('*'*80)
    print('Generate predictions for test split')
    print('*'*80)
    pbar = tqdm(total=len(test_dataset_loader))
    for i_iter_test,(_,_,test_grid,_,test_pt_fea,test_index) in enumerate(test_dataset_loader):
        # predict
        test_pt_fea_ten = [torch.from_numpy(i).type(torch.FloatTensor).to(pytorch_device) for i in test_pt_fea]
        test_grid_ten = [torch.from_numpy(i[:,:2]).to(pytorch_device) for i in test_grid]
        predict_labels = my_model(test_pt_fea_ten,test_grid_ten)
        predict_labels = torch.argmax(predict_labels,1)
        predict_labels = predict_labels.cpu().detach().numpy()
        # write to label file, mirroring velodyne/ -> predictions/
        for count,i_test_grid in enumerate(test_grid):
            test_pred_label = predict_labels[count,test_grid[count][:,0],test_grid[count][:,1],test_grid[count][:,2]]
            test_pred_label = train2SemKITTI(test_pred_label)
            test_pred_label = np.expand_dims(test_pred_label,axis=1)
            save_dir = test_pt_dataset.im_idx[test_index[count]]
            _,dir2 = save_dir.split('/sequences/',1)
            new_save_dir = output_path + '/sequences/' +dir2.replace('velodyne','predictions')[:-3]+'label'
            # BUGFIX: the original guarded makedirs with
            # `exc.errno != errno.EEXIST`, but `errno` is never imported in
            # this file, so any OSError became a NameError. exist_ok=True
            # expresses the same intent race-free.
            os.makedirs(os.path.dirname(new_save_dir), exist_ok=True)
            test_pred_label = test_pred_label.astype(np.uint32)
            test_pred_label.tofile(new_save_dir)
        pbar.update(1)
        del test_grid,test_pt_fea,test_index
    pbar.close()
    print('Predicted test labels are saved in %s. Need to be shifted to original label format before submitting to the Competition website.' % output_path)
    print('Remap script can be found in semantic-kitti-api.')
if __name__ == '__main__':
    # Testing settings (grid_size must have exactly 3 dims: radial/angular/height)
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('-d', '--data_dir', default='data')
    parser.add_argument('-p', '--model_save_path', default='pretained_weight/SemKITTI_PolarSeg.pt')
    parser.add_argument('-o', '--test_output_path', default='out/SemKITTI_test')
    parser.add_argument('-m', '--model', choices=['polar','traditional'], default='polar', help='training model: polar or traditional (default: polar)')
    parser.add_argument('-s', '--grid_size', nargs='+', type=int, default = [480,360,32], help='grid size of BEV representation (default: [480,360,32])')
    parser.add_argument('--test_batch_size', type=int, default=1, help='batch size for training (default: 1)')
    args = parser.parse_args()
    if not len(args.grid_size) == 3:
        raise Exception('Invalid grid size! Grid size should have 3 dimensions.')
    # Echo the exact invocation and parsed settings for reproducible logs.
    print(' '.join(sys.argv))
    print(args)
main(args) | 46.884817 | 166 | 0.656505 |
import os
import time
import argparse
import sys
import numpy as np
import torch
import torch.optim as optim
from tqdm import tqdm
from network.BEV_Unet import BEV_Unet
from network.ptBEV import ptBEVnet
from dataloader.dataset import collate_fn_BEV,collate_fn_BEV_test,SemKITTI,SemKITTI_label_name,spherical_dataset,voxel_dataset
import warnings
warnings.filterwarnings("ignore")
def fast_hist(pred, label, n):
    """Return the n x n confusion matrix of *pred* vs *label*: entry [i, j]
    counts points with ground truth i predicted as j; entries of *label*
    outside [0, n) are ignored."""
    k = (label >= 0) & (label < n)
    bin_count=np.bincount(
        n * label[k].astype(int) + pred[k], minlength=n ** 2)
    return bin_count[:n ** 2].reshape(n, n)
def per_class_iu(hist):
    """Per-class IoU from a confusion matrix: diag / (row + col - diag)."""
    return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
def fast_hist_crop(output, target, unique_label):
    """Confusion matrix restricted to the classes listed in *unique_label*."""
    hist = fast_hist(output.flatten(), target.flatten(), np.max(unique_label)+1)
    hist=hist[unique_label,:]
    hist=hist[:,unique_label]
    return hist
def SemKITTI2train(label):
    """Convert raw SemKITTI labels to train ids; accepts either a single
    array or a list of arrays (each converted in place)."""
    if isinstance(label, list):
        return [SemKITTI2train_single(a) for a in label]
    else:
        return SemKITTI2train_single(label)
def SemKITTI2train_single(label):
    """Shift labels down by one so the original class 0 becomes the ignore
    index 255. Mutates *label* in place and returns it."""
    remove_ind = label == 0
    label -= 1
    label[remove_ind] = 255
    return label
def train2SemKITTI(input_label):
    """Map train ids back to raw SemKITTI ids: 0..18 -> 1..19 and the ignore
    index 255 -> 0 ("unlabeled").

    Returns a new array; *input_label* is not modified.
    """
    new_labels = np.copy(input_label)
    # the ignore index maps back to class 0
    new_labels[input_label == 255] = 0
    # single vectorized shift instead of the original 19 boolean-mask passes;
    # only values the original loop touched (0..18) are shifted
    shift = (input_label >= 0) & (input_label < 19)
    new_labels[shift] = input_label[shift] + 1
    return new_labels
def main(args):
    """Evaluate a trained PolarSeg model on SemanticKITTI: report per-class
    IoU/mIoU and timing on the validation split, then write one .label
    prediction file per test scan under args.test_output_path. Requires a
    CUDA device and a <data_dir>/sequences/ dataset layout."""
    data_path = args.data_dir
    test_batch_size = args.test_batch_size
    model_save_path = args.model_save_path
    output_path = args.test_output_path
    compression_model = args.grid_size[2]
    grid_size = args.grid_size
    pytorch_device = torch.device('cuda:0')
    model = args.model
    # 'polar' BEV uses 9 point features and circular padding; the
    # 'traditional' Cartesian BEV uses 7 features without it.
    if model == 'polar':
        fea_dim = 9
        circular_padding = True
    elif model == 'traditional':
        fea_dim = 7
        circular_padding = False
    # drop class id 0 (the ignore label) and shift to train ids
    unique_label=np.asarray(sorted(list(SemKITTI_label_name.keys())))[1:] - 1
    unique_label_str=[SemKITTI_label_name[x] for x in unique_label+1]
    # build the model and load pretrained weights if present
    my_BEV_model=BEV_Unet(n_class=len(unique_label), n_height = compression_model, input_batch_norm = True, dropout = 0.5, circular_padding = circular_padding)
    my_model = ptBEVnet(my_BEV_model, pt_model = 'pointnet', grid_size = grid_size, fea_dim = fea_dim, max_pt_per_encode = 256,
                        out_pt_fea_dim = 512, kernal_size = 1, pt_selection = 'random', fea_compre = compression_model)
    if os.path.exists(model_save_path):
        my_model.load_state_dict(torch.load(model_save_path))
    my_model.to(pytorch_device)
    # datasets and loaders for the val and test splits
    test_pt_dataset = SemKITTI(data_path + '/sequences/', imageset = 'test', return_ref = True)
    val_pt_dataset = SemKITTI(data_path + '/sequences/', imageset = 'val', return_ref = True)
    if model == 'polar':
        test_dataset=spherical_dataset(test_pt_dataset, grid_size = grid_size, ignore_label = 0, fixed_volume_space = True, return_test= True)
        val_dataset=spherical_dataset(val_pt_dataset, grid_size = grid_size, ignore_label = 0, fixed_volume_space = True)
    elif model == 'traditional':
        test_dataset=voxel_dataset(test_pt_dataset, grid_size = grid_size, ignore_label = 0, fixed_volume_space = True, return_test= True)
        val_dataset=voxel_dataset(val_pt_dataset, grid_size = grid_size, ignore_label = 0, fixed_volume_space = True)
    test_dataset_loader = torch.utils.data.DataLoader(dataset = test_dataset,
                                                      batch_size = test_batch_size,
                                                      collate_fn = collate_fn_BEV_test,
                                                      shuffle = False,
                                                      num_workers = 4)
    val_dataset_loader = torch.utils.data.DataLoader(dataset = val_dataset,
                                                     batch_size = test_batch_size,
                                                     collate_fn = collate_fn_BEV,
                                                     shuffle = False,
                                                     num_workers = 4)
    # validation: accumulate per-batch confusion matrices, time the forward pass
    print('*'*80)
    print('Test network performance on validation split')
    print('*'*80)
    pbar = tqdm(total=len(val_dataset_loader))
    my_model.eval()
    hist_list = []
    time_list = []
    with torch.no_grad():
        for i_iter_val,(_,val_vox_label,val_grid,val_pt_labs,val_pt_fea) in enumerate(val_dataset_loader):
            val_vox_label = SemKITTI2train(val_vox_label)
            val_pt_labs = SemKITTI2train(val_pt_labs)
            val_pt_fea_ten = [torch.from_numpy(i).type(torch.FloatTensor).to(pytorch_device) for i in val_pt_fea]
            val_grid_ten = [torch.from_numpy(i[:,:2]).to(pytorch_device) for i in val_grid]
            val_label_tensor=val_vox_label.type(torch.LongTensor).to(pytorch_device)
            # synchronize around the forward pass so timing excludes CUDA queueing
            torch.cuda.synchronize()
            start_time = time.time()
            predict_labels = my_model(val_pt_fea_ten, val_grid_ten)
            torch.cuda.synchronize()
            time_list.append(time.time()-start_time)
            predict_labels = torch.argmax(predict_labels,dim=1)
            predict_labels = predict_labels.cpu().detach().numpy()
            for count,i_val_grid in enumerate(val_grid):
                # evaluate the dense prediction at each point's voxel index
                hist_list.append(fast_hist_crop(predict_labels[count,val_grid[count][:,0],val_grid[count][:,1],val_grid[count][:,2]],val_pt_labs[count],unique_label))
            pbar.update(1)
    iou = per_class_iu(sum(hist_list))
    print('Validation per class iou: ')
    for class_name, class_iou in zip(unique_label_str,iou):
        print('%s : %.2f%%' % (class_name, class_iou*100))
    val_miou = np.nanmean(iou) * 100
    del val_vox_label,val_grid,val_pt_fea,val_grid_ten
    pbar.close()
    print('Current val miou is %.3f ' % val_miou)
    print('Inference time per %d is %.4f seconds\n' %
          (test_batch_size,np.mean(time_list)))
    # test split: no labels available, just write one .label file per scan
    print('*'*80)
    print('Generate predictions for test split')
    print('*'*80)
    pbar = tqdm(total=len(test_dataset_loader))
    for i_iter_test,(_,_,test_grid,_,test_pt_fea,test_index) in enumerate(test_dataset_loader):
        test_pt_fea_ten = [torch.from_numpy(i).type(torch.FloatTensor).to(pytorch_device) for i in test_pt_fea]
        test_grid_ten = [torch.from_numpy(i[:,:2]).to(pytorch_device) for i in test_grid]
        predict_labels = my_model(test_pt_fea_ten,test_grid_ten)
        predict_labels = torch.argmax(predict_labels,1)
        predict_labels = predict_labels.cpu().detach().numpy()
        # write predictions next to the inputs: velodyne/ -> predictions/
        for count,i_test_grid in enumerate(test_grid):
            test_pred_label = predict_labels[count,test_grid[count][:,0],test_grid[count][:,1],test_grid[count][:,2]]
            test_pred_label = train2SemKITTI(test_pred_label)
            test_pred_label = np.expand_dims(test_pred_label,axis=1)
            save_dir = test_pt_dataset.im_idx[test_index[count]]
            _,dir2 = save_dir.split('/sequences/',1)
            new_save_dir = output_path + '/sequences/' +dir2.replace('velodyne','predictions')[:-3]+'label'
            if not os.path.exists(os.path.dirname(new_save_dir)):
                try:
                    os.makedirs(os.path.dirname(new_save_dir))
                except OSError as exc:
                    # NOTE(review): `errno` is never imported in this file, so
                    # this handler raises NameError if the OSError path is hit;
                    # needs `import errno` or os.makedirs(..., exist_ok=True).
                    if exc.errno != errno.EEXIST:
                        raise
            test_pred_label = test_pred_label.astype(np.uint32)
            test_pred_label.tofile(new_save_dir)
        pbar.update(1)
        del test_grid,test_pt_fea,test_index
    pbar.close()
    print('Predicted test labels are saved in %s. Need to be shifted to original label format before submitting to the Competition website.' % output_path)
    print('Remap script can be found in semantic-kitti-api.')
if __name__ == '__main__':
    # CLI settings (grid_size must have exactly 3 dims: radial/angular/height)
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('-d', '--data_dir', default='data')
    parser.add_argument('-p', '--model_save_path', default='pretained_weight/SemKITTI_PolarSeg.pt')
    parser.add_argument('-o', '--test_output_path', default='out/SemKITTI_test')
    parser.add_argument('-m', '--model', choices=['polar','traditional'], default='polar', help='training model: polar or traditional (default: polar)')
    parser.add_argument('-s', '--grid_size', nargs='+', type=int, default = [480,360,32], help='grid size of BEV representation (default: [480,360,32])')
    parser.add_argument('--test_batch_size', type=int, default=1, help='batch size for training (default: 1)')
    args = parser.parse_args()
    if not len(args.grid_size) == 3:
        raise Exception('Invalid grid size! Grid size should have 3 dimensions.')
    # Echo the exact invocation and parsed settings for reproducible logs.
    print(' '.join(sys.argv))
    print(args)
    main(args)
f71cd34ea15347fc0a22c67ddefbd517bbd2ed66 | 101 | py | Python | src/ytdl/playlists/models/__init__.py | Asday/ytdl | 96a51ba3589e855b27f75095b0cd4a6f00f8eefa | [
"MIT"
] | null | null | null | src/ytdl/playlists/models/__init__.py | Asday/ytdl | 96a51ba3589e855b27f75095b0cd4a6f00f8eefa | [
"MIT"
] | 1 | 2019-04-15T02:09:37.000Z | 2019-04-15T02:09:37.000Z | src/ytdl/playlists/models/__init__.py | Asday/ytdl | 96a51ba3589e855b27f75095b0cd4a6f00f8eefa | [
"MIT"
] | null | null | null | from .playlist import Playlist
from .video import Video
# Explicit public API of the models package.
__all__ = [
    'Playlist',
    'Video',
]
| 11.222222 | 30 | 0.653465 | from .playlist import Playlist
from .video import Video
# Explicit public API of the models package.
__all__ = [
    'Playlist',
    'Video',
]
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.