hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1426ab298bc4e50c9d39d51582ef042c6fe4c915 | 3,800 | py | Python | eutils/_internal/client.py | jvansan/eutils | 22a58d314c062dae2da42a026116984748b6c30b | [
"Apache-2.0"
] | null | null | null | eutils/_internal/client.py | jvansan/eutils | 22a58d314c062dae2da42a026116984748b6c30b | [
"Apache-2.0"
] | null | null | null | eutils/_internal/client.py | jvansan/eutils | 22a58d314c062dae2da42a026116984748b6c30b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import os
import lxml.etree as le
from .exceptions import EutilsError
from .queryservice import QueryService
from .xmlfacades.dbsnp import ExchangeSet
from .xmlfacades.einforesult import EInfoResult
from .xmlfacades.entrezgeneset import EntrezgeneSet
from .xmlfacades.esearchresult import ESearchResult
from .xmlfacades.gbset import GBSet
from .xmlfacades.pubmedarticleset import PubmedArticleSet
from .xmlfacades.pubmedcentralarticleset import PubmedCentralArticleSet
logger = logging.getLogger(__name__)
class Client(object):
    """Class-based access to NCBI E-Utilities; responses are wrapped in
    Python facade classes with rich data accessors.
    """

    def __init__(self, cache=False, api_key=None):
        """
        :param str cache: passed to QueryService, which see for explanation
        :param str api_key: API key from NCBI
        :raises EutilsError: if cache file couldn't be created
        """
        self._qs = QueryService(cache=cache, api_key=api_key)

    @property
    def databases(self):
        """list of databases available from eutils (per einfo query)"""
        # Fetch once, then memoize on the instance.
        if not hasattr(self, "_databases"):
            self._databases = self.einfo().databases
        return self._databases

    def einfo(self, db=None):
        """query the einfo endpoint

        :param db: string (optional)
        :rtype: EInfo or EInfoDB object

        With db=None the reply is the list of databases (EInfo object, which
        has a databases() method).  With a db name the reply is information
        about that database (EInfoDB object); version 2.0 data is requested
        automatically.
        """
        if db is not None:
            return EInfoResult(self._qs.einfo({'db': db, 'version': '2.0'})).dbinfo
        return EInfoResult(self._qs.einfo()).dblist

    def esearch(self, db, term):
        """query the esearch endpoint
        """
        result = ESearchResult(self._qs.esearch({'db': db, 'term': term}))
        # NCBI truncates replies at retmax records; warn when that happened.
        if result.count > result.retmax:
            logger.warning("NCBI found {esr.count} results, but we truncated the reply at {esr.retmax}"
                           " results; see https://github.com/biocommons/eutils/issues/124/".format(esr=result))
        return result

    def efetch(self, db, id):
        """query the efetch endpoint
        """
        db = db.lower()
        xml = self._qs.efetch({'db': db, 'id': str(id)})
        doc = le.XML(xml)
        # Dispatch to the facade matching the database family.
        if db in ['gene']:
            return EntrezgeneSet(doc)
        elif db in ['nuccore', 'nucest', 'protein']:
            # TODO: GBSet is misnamed; it should be GBSeq and get the GBSeq XML node as root (see gbset.py)
            return GBSet(doc)
        elif db in ['pubmed']:
            return PubmedArticleSet(doc)
        elif db in ['snp']:
            # ExchangeSet parses the raw XML bytes itself rather than an lxml doc.
            return ExchangeSet(xml)
        elif db in ['pmc']:
            return PubmedCentralArticleSet(doc)
        raise EutilsError('database {db} is not currently supported by eutils'.format(db=db))
# <LICENSE>
# Copyright 2015 eutils Committers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
# </LICENSE>
| 33.333333 | 107 | 0.658684 |
39fdae30f41e5796d4ea296c491b2a4c813f83df | 18,155 | py | Python | make_master_db.py | candrsn/fl_voter_processing | a9241bc0f59d3edc091d2e9b3d9de9b81b493994 | [
"MIT"
] | null | null | null | make_master_db.py | candrsn/fl_voter_processing | a9241bc0f59d3edc091d2e9b3d9de9b81b493994 | [
"MIT"
] | null | null | null | make_master_db.py | candrsn/fl_voter_processing | a9241bc0f59d3edc091d2e9b3d9de9b81b493994 | [
"MIT"
] | null | null | null |
import sys
import os
import json
import sqlite3
import glob
import subprocess
import logging
import time
def master_tables_ddl(cur):
    """Create (if absent) the persistent master tables in the attached
    vp/va/vd/vl schemas: person, address, districts, action log, and stats.

    :param cur: sqlite3 cursor with the master databases already attached
        (see connect_dbs)
    :raises AssertionError: if the cursor cannot complete the DDL
    """
    # Reference only (unused): layout of the raw monthly `voter` extract
    # table that the master tables below are split out from.
    _voter_reference_ddl = """
    CREATE TABLE IF NOT EXISTS voter (county_code text(3), voter_id text(10), name_last text(30), name_suffix text(5), name_first text(30), name_middle text(30),
    is_suppressed text(1),
    res_address1 text(50), res_address2 text(40), res_city text(40), res_state text(2), res_zipcode text(10),
    mail_address1 text(40), mail_address2 text(40), mail_address3 text(40), mail_city text(40), mail_state text(2), mail_zipcode text(12), mail_country text(40),
    gender text(1), race text(1), birth_date text(10), registration_date text(10), party_affiliation text(3),
    precinct text(6), precinct_group text(3), precinct_split text(6), precinct_suffix text(3),
    voter_status text(3), cong_dist text(3), house_dist text(3), senate_dist text(3), county_comm_dist text(3), school_brd_dist text(2),
    day_phone_area_code text(3), day_phone_number text(7), day_phone_ext text(4), email text(100), extract_date text(10));
    """

    ddl_cmds = ["""
    CREATE TABLE IF NOT EXISTS vp.voter_person (voter_id text(10), name_last text(30), name_suffix text(5), name_first text(30), name_middle text(30),
    gender text(1), race text(1), birth_date text(10), registration_date text(10), party_affiliation text(3),
    day_phone_area_code text(3), day_phone_number text(7), day_phone_ext text(4),
    voter_status text(3),
    first_instance date, last_instance date );
    ""","""
    CREATE TABLE IF NOT EXISTS va.voter_address (voter_id text(10), address_type char(4),
    address1 text(50), address2 text(40), city text(40), state text(2), zipcode text(10),
    first_instance date, last_instance date );
    ""","""
    CREATE TABLE IF NOT EXISTS vd.voter_districts (voter_id text(10),
    precinct text(6), precinct_group text(3), precinct_split text(6), precinct_suffix text(3),
    cong_dist text(3), house_dist text(3), senate_dist text(3), county_comm_dist text(3), school_brd_dist text(2),
    first_instance date, last_instance date );
    ""","""
    CREATE TABLE IF NOT EXISTS vl.voter_action_log (action_date date,
    action TEXT, result TEXT, info TEXT );
    ""","""
    CREATE TABLE IF NOT EXISTS vl.voter_stats (action_date date, extract_file TEXT, voters INTEGER, voter_status TEXT);
    """]

    for cmd in ddl_cmds:
        cur.execute(cmd)
    # was: "failed to connect to monthly database" -- wrong message for a
    # function that creates the master tables
    assert cur.fetchall() is not None, "failed to create master tables"
def connect_dbs(cur, dbpath="data/db/"):
    """Attach the four persistent master databases under their fixed aliases
    (vp=person, va=address, vd=districts, vl=log).

    :param cur: sqlite3 cursor on the working connection
    :param dbpath: directory prefix (with trailing separator) for the db files
    """
    attach_script = (
        f"ATTACH DATABASE '{dbpath}main_person.db' as vp;"
        f"ATTACH DATABASE '{dbpath}main_address.db' as va;"
        f"ATTACH DATABASE '{dbpath}main_districts.db' as vd;"
        f"ATTACH DATABASE '{dbpath}main_log.db' as vl;"
    )
    cur.executescript(attach_script)
    assert cur.fetchall() is not None, "failed to connect to main databases"
def connect_month(cur, dbname):
    """Attach one monthly import database under the fixed alias ``fl``.

    :param cur: sqlite3 cursor on the working connection
    :param dbname: path to the monthly sqlite database file
    """
    cur.execute(f"ATTACH DATABASE '{dbname}' as fl;")
    assert cur.fetchall() is not None, "failed to connect to monthly database"
def disconnect_month(cur, retries=3, delay=60):
    """Detach the monthly database (alias ``fl``), retrying on transient
    SQLITE_BUSY-style failures.

    Fixes vs. the previous revision: the retry loop could exhaust all
    attempts without ever raising (the ``idx == cnt`` guard was unreachable),
    and the bare ``raise sqlite3.OperationalError`` discarded the original
    error details.

    :param cur: sqlite3 cursor with the monthly db attached as ``fl``
    :param retries: maximum number of DETACH attempts (new, defaulted)
    :param delay: seconds to sleep between attempts (new, defaulted)
    :raises sqlite3.OperationalError: if the DETACH still fails after retries
    """
    assert cur.fetchall() is not None, "problem waiting for cursor to complete a prior statement"
    cmd = "DETACH DATABASE 'fl';"
    for attempt in range(retries):
        try:
            cur.execute(cmd)
            assert cur.fetchall() is not None, "problem detaching the monthly database"
            # success: no further attempts, and no sleep
            break
        except sqlite3.OperationalError:
            logging.warning("failed to DETACH monthly database")
            if attempt == retries - 1:
                # exhausted retries: propagate the original error
                raise
            time.sleep(delay)
    assert cur.fetchall() is not None, "failed to disconnect from monthly database"
def build_unique_indexes(cur):
    """Ensure the uniqueness and voter_id lookup indexes exist on the three
    master tables; safe to call repeatedly (IF NOT EXISTS).

    :param cur: sqlite3 cursor with the master databases attached
    """
    statements = (
        "CREATE UNIQUE INDEX IF NOT EXISTS vp.voter_person_unq__ind ON voter_person(voter_id, name_first, name_last, birth_date);",
        "CREATE UNIQUE INDEX IF NOT EXISTS va.voter_address_unq__ind ON voter_address(voter_id, address1, address2, city, state, zipcode);",
        "CREATE UNIQUE INDEX IF NOT EXISTS vd.voter_districts_unq__ind ON voter_districts(voter_id, precinct, precinct_group, precinct_split);",
        "CREATE INDEX IF NOT EXISTS vp.voter_person__voter_id__ind ON voter_person(voter_id);",
        "CREATE INDEX IF NOT EXISTS va.voter_address__voter_id__ind ON voter_address(voter_id);",
        "CREATE INDEX IF NOT EXISTS vd.voter_districts__voter_id__ind ON voter_districts(voter_id);",
    )
    cur.executescript("\n".join(statements))
    assert cur.fetchall() is not None, "failed to build unique indexes for voters"
def build_newdata_tables(cur):
    """Ensure the three uniqueness indexes on the master tables exist.

    NOTE(review): despite the name, this creates indexes rather than tables;
    it is the unique-index subset of build_unique_indexes.

    :param cur: sqlite3 cursor with the master databases attached
    """
    index_ddl = """
    CREATE UNIQUE INDEX IF NOT EXISTS vp.voter_person_unq__ind ON voter_person(voter_id, name_first, name_last, birth_date);
    CREATE UNIQUE INDEX IF NOT EXISTS va.voter_address_unq__ind ON voter_address(voter_id, address1, address2, city, state, zipcode);
    CREATE UNIQUE INDEX IF NOT EXISTS vd.voter_districts_unq__ind ON voter_districts(voter_id, precinct, precinct_group, precinct_split);
    """
    cur.executescript(index_ddl)
    assert cur.fetchall() is not None, "failed to build newdata voter tables"
def populate_master(cur, srcfile):
    """Merge one monthly extract (attached as schema ``fl``) into the master
    person/address/district tables, maintaining for every distinct record the
    [first_instance, last_instance] window of extract dates it was seen in.

    The merge is idempotent per file: a srcfile already recorded with a
    'MERGE' row in vl.voter_action_log is skipped.

    Fixes vs. the previous revision:
      * the upsert CASE branches read ``THEN first_instance = excluded...``,
        which SQLite evaluates as a *comparison* (0/1) and stores in the date
        column; they now assign ``excluded.first_instance`` directly
      * the mailing-address upsert mislabelled its rows as 'RES'
      * an exact duplicate of the new-voter MAIL insert (a guaranteed no-op
        on its second run) was removed
      * voter_stats now groups by voter_status instead of collapsing the
        whole extract into one arbitrary-status row
      * errors are re-raised with their original details

    :param cur: sqlite3 cursor with vp/va/vd/vl and the monthly ``fl`` attached
    :param srcfile: basename of the monthly import db; recorded in the log
    :raises sqlite3.OperationalError: if any merge statement fails
    """
    logging.info(f"building knowledge from {srcfile}")

    # Check for reloads.  NOTE: srcfile is interpolated into SQL; it comes
    # from a local glob() rather than untrusted input, but bound parameters
    # would still be safer.
    cmd = f"SELECT action, info from vl.voter_action_log WHERE action = 'MERGE' and info = '{srcfile}';"
    cur.execute(cmd)
    res = cur.fetchall()
    if len(res) > 0 and len(res[0]) > 0:
        logging.warning(f"{srcfile} has already been loaded, skipping it")
        # skip this file as it is already loaded
        return

    cur.execute("PRAGMA SYNCHRONOUS=off;")

    build_unique_indexes(cur)

    # TODO(review): changed mailing addresses of existing voters and the
    # residential addresses of brand-new voters are only captured on the
    # *following* month's run -- preserved from the original logic; confirm
    # whether dedicated inserts should be added.
    main_dml = ["""
    CREATE TEMP TABLE new_voters AS SELECT *
    FROM fl.voter nv
    WHERE NOT EXISTS (SELECT 1 FROM vp.voter_person p WHERE nv.voter_id = p.voter_id);
    ""","""
    CREATE TEMP TABLE exist_voters AS SELECT *
    FROM fl.voter nv
    WHERE EXISTS (SELECT 1 FROM vp.voter_person p WHERE nv.voter_id = p.voter_id);
    ""","""
    -- existing voters whose identity matches: widen the observation window
    INSERT INTO vp.voter_person (voter_id, name_last, name_suffix, name_first, name_middle, gender, race, birth_date, registration_date,
        party_affiliation, day_phone_area_code, day_phone_number, day_phone_ext, voter_status, first_instance, last_instance)
    SELECT voter_id, name_last, name_suffix, name_first, name_middle, gender, race, birth_date, registration_date,
        party_affiliation, day_phone_area_code, day_phone_number, day_phone_ext, voter_status, extract_date, extract_date
    FROM temp.exist_voters nv
    WHERE EXISTS (SELECT 1 FROM vp.voter_person ev WHERE ev.voter_id = nv.voter_id and
        ev.name_first = nv.name_first and
        ev.name_last = nv.name_last and
        ev.birth_date = nv.birth_date and
        (nv.extract_date < ev.first_instance or
         nv.extract_date > ev.last_instance) )
    ON CONFLICT (voter_id, name_first, name_last, birth_date)
    DO UPDATE
    SET
        first_instance = CASE WHEN excluded.first_instance < first_instance
                              THEN excluded.first_instance
                              ELSE first_instance END,
        last_instance  = CASE WHEN excluded.last_instance > last_instance
                              THEN excluded.last_instance
                              ELSE last_instance END
    ;
    ""","""
    -- existing voters whose identity fields changed: add a new person row
    INSERT INTO vp.voter_person (voter_id, name_last, name_suffix, name_first, name_middle, gender, race, birth_date, registration_date,
        party_affiliation, day_phone_area_code, day_phone_number, day_phone_ext, voter_status, first_instance, last_instance)
    SELECT voter_id, name_last, name_suffix, name_first, name_middle, gender, race, birth_date, registration_date,
        party_affiliation, day_phone_area_code, day_phone_number, day_phone_ext, voter_status, extract_date, extract_date
    FROM temp.exist_voters nv
    WHERE NOT EXISTS (SELECT 1 FROM vp.voter_person ev WHERE ev.voter_id = nv.voter_id and
        ev.name_first = nv.name_first and
        ev.name_last = nv.name_last and
        ev.birth_date = nv.birth_date and
        nv.extract_date >= ev.first_instance and
        nv.extract_date <= ev.last_instance)
    ;
    ""","""
    -- add voters who are new
    INSERT INTO vp.voter_person (voter_id, name_last, name_suffix, name_first, name_middle, gender, race, birth_date, registration_date,
        party_affiliation, day_phone_area_code, day_phone_number, day_phone_ext, voter_status, first_instance, last_instance)
    SELECT voter_id, name_last, name_suffix, name_first, name_middle, gender, race, birth_date, registration_date,
        party_affiliation, day_phone_area_code, day_phone_number, day_phone_ext, voter_status, extract_date, extract_date
    FROM temp.new_voters nv
    ;
    ""","""
    -- known residential addresses seen at a new extract date: widen the window
    INSERT INTO va.voter_address (voter_id, address_type, address1, address2, city, state, zipcode, first_instance, last_instance)
    SELECT voter_id, 'RES', res_address1, res_address2, res_city, res_state, res_zipcode, extract_date, extract_date
    FROM temp.exist_voters nv
    WHERE EXISTS (SELECT 1 FROM va.voter_address ev WHERE ev.voter_id = nv.voter_id and
        ev.address1 = nv.res_address1 and
        ev.address2 = nv.res_address2 and
        ev.city = nv.res_city and
        ev.state = nv.res_state and
        ev.zipcode = nv.res_zipcode and
        (nv.extract_date < ev.first_instance or
         nv.extract_date > ev.last_instance) )
    ON CONFLICT (voter_id, address1, address2, city, state, zipcode)
    DO UPDATE
    SET
        first_instance = CASE WHEN excluded.first_instance < first_instance
                              THEN excluded.first_instance
                              ELSE first_instance END,
        last_instance  = CASE WHEN excluded.last_instance > last_instance
                              THEN excluded.last_instance
                              ELSE last_instance END
    ;
    ""","""
    -- existing voters with a new/changed residential address
    INSERT INTO va.voter_address (voter_id, address_type, address1, address2, city, state, zipcode, first_instance, last_instance)
    SELECT voter_id, 'RES', res_address1, res_address2, res_city, res_state, res_zipcode, extract_date, extract_date
    FROM temp.exist_voters nv
    WHERE NOT EXISTS (SELECT 1 FROM va.voter_address ev WHERE ev.voter_id = nv.voter_id and
        ev.address1 = nv.res_address1 and
        ev.address2 = nv.res_address2 and
        ev.city = nv.res_city and
        ev.state = nv.res_state and
        nv.extract_date >= ev.first_instance and
        nv.extract_date <= ev.last_instance)
    ;
    ""","""
    -- known mailing addresses seen at a new extract date: widen the window
    -- (rows were previously mislabelled 'RES')
    INSERT INTO va.voter_address (voter_id, address_type, address1, address2, city, state, zipcode, first_instance, last_instance)
    SELECT voter_id, 'MAIL', mail_address1, mail_address2, mail_city, mail_state, mail_zipcode, extract_date, extract_date
    FROM temp.exist_voters nv
    WHERE EXISTS (SELECT 1 FROM va.voter_address ev WHERE ev.voter_id = nv.voter_id and
        ev.address1 = nv.mail_address1 and
        ev.address2 = nv.mail_address2 and
        ev.city = nv.mail_city and
        ev.state = nv.mail_state and
        ev.zipcode = nv.mail_zipcode and
        (nv.extract_date < ev.first_instance or
         nv.extract_date > ev.last_instance) )
    ON CONFLICT (voter_id, address1, address2, city, state, zipcode)
    DO UPDATE
    SET
        first_instance = CASE WHEN excluded.first_instance < first_instance
                              THEN excluded.first_instance
                              ELSE first_instance END,
        last_instance  = CASE WHEN excluded.last_instance > last_instance
                              THEN excluded.last_instance
                              ELSE last_instance END
    ;
    ""","""
    -- add mailing addresses for voters who are new
    INSERT INTO va.voter_address (voter_id, address_type, address1, address2, city, state, zipcode, first_instance, last_instance)
    SELECT voter_id, 'MAIL', mail_address1, mail_address2, mail_city, mail_state, mail_zipcode, extract_date, extract_date
    FROM temp.new_voters nv
    WHERE NOT EXISTS (SELECT 1 FROM va.voter_address ev WHERE ev.voter_id = nv.voter_id)
    ;
    ""","""
    -- known district assignments seen at a new extract date: widen the window
    INSERT INTO vd.voter_districts (voter_id, precinct, precinct_group, precinct_split, first_instance, last_instance)
    SELECT voter_id, precinct, precinct_group, precinct_split, extract_date, extract_date
    FROM temp.exist_voters nv
    WHERE EXISTS (SELECT 1 FROM vd.voter_districts ev WHERE ev.voter_id = nv.voter_id and
        ev.precinct = nv.precinct and
        ev.precinct_group = nv.precinct_group and
        ev.precinct_split = nv.precinct_split and
        (nv.extract_date < ev.first_instance or
         nv.extract_date > ev.last_instance) )
    ON CONFLICT (voter_id, precinct, precinct_group, precinct_split)
    DO UPDATE
    SET
        first_instance = CASE WHEN excluded.first_instance < first_instance
                              THEN excluded.first_instance
                              ELSE first_instance END,
        last_instance  = CASE WHEN excluded.last_instance > last_instance
                              THEN excluded.last_instance
                              ELSE last_instance END
    ;
    ""","""
    -- add voters who have new info
    INSERT INTO vd.voter_districts (voter_id, precinct, precinct_group, precinct_split, first_instance, last_instance)
    SELECT voter_id, precinct, precinct_group, precinct_split, extract_date, extract_date
    FROM temp.exist_voters nv
    WHERE NOT EXISTS (SELECT 1 FROM vd.voter_districts ev WHERE ev.voter_id = nv.voter_id and
        ev.precinct = nv.precinct and
        ev.precinct_group = nv.precinct_group and
        ev.precinct_split = nv.precinct_split and
        nv.extract_date >= ev.first_instance and
        nv.extract_date <= ev.last_instance )
    ;
    ""","""
    -- add voters who are new
    INSERT INTO vd.voter_districts (voter_id, precinct, precinct_group, precinct_split, first_instance, last_instance)
    SELECT voter_id, precinct, precinct_group, precinct_split, extract_date, extract_date
    FROM temp.new_voters nv
    WHERE NOT EXISTS (SELECT 1 FROM vd.voter_districts ev WHERE ev.voter_id = nv.voter_id)
    ;
    """,f"""
    INSERT INTO vl.voter_action_log (action_date, action, info)
    VALUES (CURRENT_TIMESTAMP, 'MERGE', '{srcfile}');
    """,f"""
    INSERT INTO vl.voter_stats (action_date, extract_file, voters, voter_status)
    SELECT CURRENT_TIMESTAMP, '{srcfile}', count(*), voter_status
    FROM fl.voter
    GROUP BY voter_status;
    """]

    for itr, cmd in enumerate(main_dml):
        st = time.time()
        try:
            cur.execute(cmd)
        except sqlite3.OperationalError:
            logging.error(f"SQL error with {cmd}")
            # was `raise sqlite3.OperationalError`, which replaced the real
            # error (and its message) with a bare, argument-less one
            raise
        logging.info(f"statement {itr} took {(time.time() - st):.2f} seconds")

    logging.info(f"completed merge of {srcfile}")
    assert cur.fetchall() is not None, "failed to apply monthly file"
def mount_zipfile(zfilename, mntlocation):
    """Mount a zip archive read-only at mntlocation using the external
    ``fuse-zip`` tool (must be installed on the host)."""
    cmd = ["fuse-zip", "-r", zfilename, mntlocation]
    subprocess.call(cmd)
def umount_zipfile(mntlocation):
    """Unmount a FUSE mount previously created by mount_zipfile."""
    cmd = ["fusermount", "-u", mntlocation]
    subprocess.call(cmd)
def db_connect(dbfile=":memory:", dbpath="data/db/"):
    """Open the working database, attach the master databases, and ensure the
    master DDL exists.

    Fix: the previous revision ignored ``dbfile`` and always connected to the
    hard-coded 'tmpx/work.db'.

    :param dbfile: path of the working sqlite database (default in-memory)
    :param dbpath: directory prefix for the master database files
    :returns: (connection, cursor) tuple
    """
    db = sqlite3.connect(dbfile, timeout=30)
    cur = db.cursor()
    # Trade durability for speed; acceptable because the pipeline is rerunnable.
    cur.execute("PRAGMA SYNCHRONOUS=OFF;")
    connect_dbs(cur, dbpath)
    master_tables_ddl(cur)
    db.commit()
    return db, cur
def main(args=[]):
    """Merge every monthly import database found under ``dbpath`` into the
    persistent master databases, one connection per file.

    :param args: command-line argv; currently unused.
        NOTE(review): mutable default argument -- harmless here because args
        is never read or mutated, but worth fixing before extending.
    """
    dbpath="data/db/"
    # NOTE(review): the assignments below override the production paths with
    # the tmpx/ working area; presumably a development toggle -- confirm
    # before running against real data.
    dbpath="tmpx/"
    dbfile="tmpx/work.db"

    for monthlyfile in glob.glob(f"{dbpath}import_*.db"):
        # Re-open the master connection for each monthly file so every merge
        # is isolated; populate_master skips files already logged as merged.
        db,cur = db_connect(dbfile=dbfile, dbpath=dbpath)
        connect_month(cur, monthlyfile)
        populate_master(cur, os.path.basename(monthlyfile))
        db.commit()
        # Force dirty pages to disk before detaching the monthly database.
        os.sync()
        disconnect_month(cur)
        db.close()
        # Pause between files (presumably to let the filesystem settle --
        # TODO confirm whether this is still needed).
        time.sleep(30)

    logging.info("Completed processing")
if __name__ == "__main__":
    # Script entry point: verbose logging, then merge all monthly imports.
    logging.basicConfig(level=logging.DEBUG)
    main(sys.argv)
120021d01ea5d95b391d56e285880705838a3b5c | 266 | py | Python | operators/crossplane/python/pulumi_pulumi_kubernetes_crds_operators_crossplane/storage/v1alpha1/__init__.py | pulumi/pulumi-kubernetes-crds | 372c4c0182f6b899af82d6edaad521aa14f22150 | [
"Apache-2.0"
] | null | null | null | operators/crossplane/python/pulumi_pulumi_kubernetes_crds_operators_crossplane/storage/v1alpha1/__init__.py | pulumi/pulumi-kubernetes-crds | 372c4c0182f6b899af82d6edaad521aa14f22150 | [
"Apache-2.0"
] | 2 | 2020-09-18T17:12:23.000Z | 2020-12-30T19:40:56.000Z | operators/crossplane/python/pulumi_pulumi_kubernetes_crds_operators_crossplane/storage/v1alpha1/__init__.py | pulumi/pulumi-kubernetes-crds | 372c4c0182f6b899af82d6edaad521aa14f22150 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by crd2pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from .Bucket import *
from ._inputs import *
from . import outputs
| 29.555556 | 80 | 0.706767 |
77e7c0e093d9a8a0c1301a5a00b1ca8c2d3fbe9a | 7,886 | py | Python | tensorflow/contrib/bayesflow/python/ops/special_math.py | atfkaka/tensorflow | 5657d0dee8d87f4594b3e5902ed3e3ca8d6dfc0a | [
"Apache-2.0"
] | 101 | 2016-12-03T11:40:52.000Z | 2017-12-23T02:02:03.000Z | tensorflow/contrib/bayesflow/python/ops/special_math.py | atfkaka/tensorflow | 5657d0dee8d87f4594b3e5902ed3e3ca8d6dfc0a | [
"Apache-2.0"
] | 9 | 2016-12-14T03:27:46.000Z | 2017-09-13T02:29:07.000Z | tensorflow/contrib/bayesflow/python/ops/special_math.py | atfkaka/tensorflow | 5657d0dee8d87f4594b3e5902ed3e3ca8d6dfc0a | [
"Apache-2.0"
] | 47 | 2016-12-04T12:37:24.000Z | 2018-01-14T18:13:07.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Special Math Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
__all__ = [
"ndtr",
"log_ndtr",
]
# log_ndtr uses different functions over the ranges
# (-infty, lower](lower, upper](upper, infty)
# Lower bound values were chosen by examining where the support of ndtr
# appears to be zero, relative to scipy's (which is always 64bit). They were
# then made more conservative just to be safe. (Conservative means use the
# expansion more than we probably need to.) See `NdtrTest` in
# special_math_test.py.
LOGNDTR_FLOAT64_LOWER = -20
LOGNDTR_FLOAT32_LOWER = -10
# Upper bound values were chosen by examining for which values of 'x'
# Log[cdf(x)] is 0, after which point we need to use the approximation
# Log[cdf(x)] = Log[1 - cdf(-x)] approx -cdf(-x). We chose a value slightly
# conservative, meaning we use the approximation earlier than needed.
LOGNDTR_FLOAT64_UPPER = 8
LOGNDTR_FLOAT32_UPPER = 5
def ndtr(x, name="ndtr"):
  """Normal distribution function.

  Returns the area under the standard Gaussian density integrated from
  minus infinity to x:

  ```
                    1       / x
     ndtr(x)  = ----------  |   exp(-0.5 t^2) dt
                sqrt(2 pi)  /-inf

              = 0.5 (1 + erf(x / sqrt(2)))
              = 0.5 erfc(x / sqrt(2))
  ```

  Args:
    x: `Tensor` of type `float32`, `float64`.
    name: Python string. A name for the operation (default="ndtr").

  Returns:
    ndtr: `Tensor` with `dtype=x.dtype`.

  Raises:
    TypeError: if `x` is not floating-type.
  """
  with ops.name_scope(name, values=[x]):
    x = ops.convert_to_tensor(x, name="x")
    # Only single/double precision are supported by the erf/erfc kernels.
    supported_dtypes = (np.float32, np.float64)
    if x.dtype.as_numpy_dtype not in supported_dtypes:
      raise TypeError(
          "x.dtype=%s is not handled, see docstring for supported types."
          % x.dtype)
    return _ndtr(x)
def _ndtr(x):
  """Core ndtr computation: 0.5 * (1 + erf(x / sqrt(2))), branch-selected
  between erf and erfc for numerical accuracy in the tails."""
  half_sqrt_2 = constant_op.constant(
      0.5 * math.sqrt(2.), dtype=x.dtype, name="half_sqrt_2")
  scaled = x * half_sqrt_2
  abs_scaled = math_ops.abs(scaled)
  # Tail expression: use erfc directly for negative arguments, and the
  # complement 2 - erfc(|w|) for positive ones.
  tail = math_ops.select(math_ops.greater(scaled, 0.),
                         2. - math_ops.erfc(abs_scaled),
                         math_ops.erfc(abs_scaled))
  # Near zero, 1 + erf(w) is the better-conditioned form.
  combined = math_ops.select(math_ops.less(abs_scaled, half_sqrt_2),
                             1. + math_ops.erf(scaled),
                             tail)
  return 0.5 * combined
def log_ndtr(x, series_order=3, name="log_ndtr"):
  """Log Normal distribution function.

  For details of the Normal distribution function see `ndtr`.

  This function calculates `(log o ndtr)(x)` by either calling `log(ndtr(x))` or
  using an asymptotic series. Specifically:
  - For `x > upper_segment`, use the approximation `-ndtr(-x)` based on
    `log(1-x) ~= -x, x << 1`.
  - For `lower_segment < x <= upper_segment`, use the existing `ndtr` technique
    and take a log.
  - For `x <= lower_segment`, we use the series approximation of erf to compute
    the log CDF directly.

  The `lower_segment` is set based on the precision of the input:

  ```
  lower_segment = { -20,  x.dtype=float64
                  { -10,  x.dtype=float32
  upper_segment = {   8,  x.dtype=float64
                  {   5,  x.dtype=float32
  ```

  When `x < lower_segment`, the `ndtr` asymptotic series approximation is:

  ```
     ndtr(x) = scale * (1 + sum) + R_N
     scale   = exp(-0.5 x^2) / (-x sqrt(2 pi))
     sum     = Sum{(-1)^n (2n-1)!! / (x^2)^n, n=1:N}
     R_N     = O(exp(-0.5 x^2) (2N+1)!! / |x|^{2N+3})
  ```

  where `(2n-1)!! = (2n-1) (2n-3) (2n-5) ... (3) (1)` is a
  [double-factorial](https://en.wikipedia.org/wiki/Double_factorial).

  Args:
    x: `Tensor` of type `float32`, `float64`.
    series_order: Positive Python `integer`. Maximum depth to
      evaluate the asymptotic expansion. This is the `N` above.
    name: Python string. A name for the operation (default="log_ndtr").

  Returns:
    log_ndtr: `Tensor` with `dtype=x.dtype`.

  Raises:
    TypeError: if `x.dtype` is not handled.
    TypeError: if `series_order` is a not Python `integer.`
    ValueError:  if `series_order` is not in `[0, 30]`.
  """
  # Validate series_order eagerly, before building any graph ops.
  if not isinstance(series_order, int):
    raise TypeError("series_order must be a Python integer.")
  if series_order < 0:
    raise ValueError("series_order must be non-negative.")
  if series_order > 30:
    raise ValueError("series_order must be <= 30.")

  with ops.name_scope(name, values=[x]):
    x = ops.convert_to_tensor(x, name="x")

    # Segment boundaries depend on the floating-point precision of the input.
    if x.dtype.as_numpy_dtype == np.float64:
      lower_segment = LOGNDTR_FLOAT64_LOWER
      upper_segment = LOGNDTR_FLOAT64_UPPER
    elif x.dtype.as_numpy_dtype == np.float32:
      lower_segment = LOGNDTR_FLOAT32_LOWER
      upper_segment = LOGNDTR_FLOAT32_UPPER
    else:
      raise TypeError("x.dtype=%s is not supported." % x.dtype)

    # The basic idea here was ported from py/scipy/special/cephes/ndtr.c.
    # We copy the main idea, with a few changes
    # * For x >> 1, and X ~ Normal(0, 1),
    #     Log[P[X < x]] = Log[1 - P[X < -x]] approx -P[X < -x],
    #     which extends the range of validity of this function.
    # * We use one fixed series_order for all of 'x', rather than adaptive.
    # * Our docstring properly reflects that this is an asymptotic series, not a
    #   Taylor series. We also provided a correct bound on the remainder.
    # * We need to use the max/min in the _log_ndtr_lower arg to avoid nan when
    #   x=0. This happens even though the branch is unchosen because when x=0
    #   the gradient of a select involves the calculation 1*dy+0*(-inf)=nan
    #   regardless of whether dy is finite. Note that the minimum is a NOP if
    #   the branch is chosen.
    return math_ops.select(
        math_ops.greater(x, upper_segment),
        -_ndtr(-x),  # log(1-x) ~= -x, x << 1
        math_ops.select(math_ops.greater(x, lower_segment),
                        math_ops.log(_ndtr(math_ops.maximum(x, lower_segment))),
                        _log_ndtr_lower(math_ops.minimum(x, lower_segment),
                                        series_order)))
def _log_ndtr_lower(x, series_order):
  """Asymptotic expansion version of `Log[cdf(x)]`, appropriate for `x<<-1`."""
  series = _log_ndtr_asymptotic_series(x, series_order)
  # log of the leading factor exp(-x^2/2) / (-x * sqrt(2 pi))
  log_scale = (-0.5 * math_ops.square(x)
               - math_ops.log(-x)
               - 0.5 * math.log(2. * math.pi))
  return log_scale + math_ops.log(series)
def _log_ndtr_asymptotic_series(x, series_order):
  """Calculates the asymptotic series used in log_ndtr."""
  if series_order <= 0:
    return 1.
  x_sq = math_ops.square(x)
  # Terms alternate in sign: odd n subtracts, even n adds.  They are kept in
  # two separate accumulators and combined once at the end.
  positive_terms = 0.
  negative_terms = 0.
  power = x_sq  # tracks x^{2n}, starting at n=1
  for n in range(1, series_order + 1):
    term = _double_factorial(2 * n - 1) / power
    if n % 2:
      negative_terms += term
    else:
      positive_terms += term
    power *= x_sq
  return 1. + positive_terms - negative_terms
def _double_factorial(n):
"""The double factorial function for small Python integer `n`."""
return np.prod(np.arange(n, 1, -2))
| 35.845455 | 80 | 0.640249 |
30688e92f49fe2fc614e7fad3734b2d2ff8e4144 | 209 | py | Python | Robotsvsdino/weapon.py | techysoldier/OOP | 1579c603465158caba9f05b29771f02644ddb8dd | [
"MIT"
] | null | null | null | Robotsvsdino/weapon.py | techysoldier/OOP | 1579c603465158caba9f05b29771f02644ddb8dd | [
"MIT"
] | null | null | null | Robotsvsdino/weapon.py | techysoldier/OOP | 1579c603465158caba9f05b29771f02644ddb8dd | [
"MIT"
] | null | null | null | class Weapon:
def __init__(self,name,attack_power):
self.name = name
self.attack_power = attack_power
# devin_dino = weapon("devin", "6")
# print(devin_dino.name,devin_dino.attack_power) | 26.125 | 48 | 0.69378 |
72350de4d85ff53eaa8c088fa50bce07805b3ffd | 426,573 | py | Python | tests/python/unittest/test_operator.py | aws-taylor/incubator-mxnet | c91084d40879cb4b49e5fb1ff281b28feee4c7e3 | [
"Apache-2.0"
] | null | null | null | tests/python/unittest/test_operator.py | aws-taylor/incubator-mxnet | c91084d40879cb4b49e5fb1ff281b28feee4c7e3 | [
"Apache-2.0"
] | null | null | null | tests/python/unittest/test_operator.py | aws-taylor/incubator-mxnet | c91084d40879cb4b49e5fb1ff281b28feee4c7e3 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
from __future__ import division
import numpy as np
import mxnet as mx
import copy
import math
import random
import itertools
from distutils.version import LooseVersion
from numpy.testing import assert_allclose, assert_array_equal
from mxnet.test_utils import *
from mxnet.operator import *
from mxnet.base import py_str, MXNetError, _as_list
from common import setup_module, with_seed, teardown, assert_raises_cudnn_not_satisfied, assertRaises
from common import run_in_spawned_process
from nose.tools import assert_raises, ok_
import unittest
import os
def check_rnn_consistency(cell1, cell2, T, N, I, H, grad_req, rtol=1e-2, atol=1e-4):
    """Verify two RNN cell implementations agree on forward and backward passes.

    Both cells are unrolled over ``T`` time steps on identical random input of
    shape ``(N, T, I)``.  ``cell1``'s randomly initialized weights are converted
    into ``cell2``'s layout (unpack via cell1, pack via cell2) so both modules
    compute with the same parameters.

    Parameters
    ----------
    cell1, cell2 : RNN cells with convertible weight layouts (e.g. fused vs. stacked)
    T, N, I, H : int
        Sequence length, batch size, input size, hidden size (H is implied
        by how the cells were constructed; it is not used directly here).
    grad_req : str or dict
        Gradient request forwarded to ``bind`` ('write', 'add', 'null', or a
        per-input dict).
    rtol, atol : float
        Tolerances for the output and input-gradient comparisons.
    """
    dshape = (N, T, I)
    data = mx.sym.Variable('data')
    # Build one Module per cell over the same unrolled graph layout.
    Y1, _ = cell1.unroll(T, data, layout='NTC', merge_outputs=True)
    mod1 = mx.mod.Module(Y1, label_names=None, context=default_context())
    mod1.bind(data_shapes=[('data', dshape)], label_shapes=None, inputs_need_grad=True, grad_req=grad_req)
    Y2, _ = cell2.unroll(T, data, layout='NTC', merge_outputs=True)
    mod2 = mx.mod.Module(Y2, label_names=None, context=default_context())
    mod2.bind(data_shapes=[('data', dshape)], label_shapes=None, inputs_need_grad=True, grad_req=grad_req)
    # Initialize mod1, then translate its weights into cell2's layout so the
    # two modules hold numerically identical parameters.
    mod1.init_params()
    args, auxs = mod1.get_params()
    args = cell1.unpack_weights(args)
    args = cell2.pack_weights(args)
    mod2.set_params(args, auxs)
    x = mx.random.uniform(shape=dshape)
    batch=mx.io.DataBatch(data=[x])
    # check inference
    mod1.forward(batch, is_train=False)
    mod2.forward(batch, is_train=False)
    assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=rtol, atol=atol)
    # check training
    mod1.forward(batch, is_train=True)
    mod2.forward(batch, is_train=True)
    assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=rtol, atol=atol)
    # Backward with a shared random head gradient; input grads must also match.
    dy = mx.random.uniform(shape=mod1.get_outputs()[0].shape)
    mod1.backward(out_grads=[dy])
    mod2.backward(out_grads=[dy])
    # With grad_req 'null' for the data input no gradient buffer is allocated.
    if type(grad_req) is dict and grad_req['data'] == 'null' or grad_req == 'null':
        assert(mod1.get_input_grads()[0] == None)
        assert(mod2.get_input_grads()[0] == None)
    else:
        assert_allclose(mod1.get_input_grads()[0].asnumpy(), mod2.get_input_grads()[0].asnumpy(), rtol=rtol, atol=atol)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnn_with_new_param():
    """Fused RNN op must give deterministic output and honor copied-in parameters.

    For every mode (rnn_relu/rnn_tanh/gru/lstm) and both directionalities,
    bind with a random flat parameter vector, check train/inference forward
    agree, then swap in fresh parameters via ``copy_params_from`` and check
    the executor picks them up (train/inference still agree).
    """
    rnn_modes = ['rnn_relu', 'rnn_tanh', 'gru', 'lstm']
    ngates_ = [1, 1, 3, 4]  # gates per mode, aligned with rnn_modes
    num_layers, input_size, seq_len, batch_size, state_size = 3, 128, 5, 64, 8
    for bidirectional in [False, True]:
        directions = 2 if bidirectional else 1
        for mode, ngates in zip(rnn_modes, ngates_):
            # Size of the flat parameter blob: first layer consumes the raw
            # input, deeper layers consume the (possibly bidirectional)
            # hidden state; each layer has input weights, recurrent weights
            # and two bias vectors, all multiplied by the gate count.
            first_layer_size = (input_size * state_size + state_size * state_size + state_size * 2) * ngates
            rest_layer_size = (state_size * directions * state_size + state_size * state_size + state_size * 2) \
                              * ngates * (num_layers - 1)
            param_size = (first_layer_size + rest_layer_size) * directions
            sym = mx.sym.RNN(mode=mode, num_layers=num_layers, bidirectional=bidirectional,
                             state_outputs=False, state_size=state_size, name='rnn')
            bind_dict = {
                'rnn_data': mx.ndarray.random.uniform(low=-1, high=1, shape=(seq_len, batch_size, input_size)),
                'rnn_parameters': mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size)),
                'rnn_state': mx.ndarray.zeros(shape=(num_layers * directions, batch_size, state_size))
            }
            if mode == 'lstm':
                # LSTM additionally needs an initial cell state.
                bind_dict['rnn_state_cell'] = mx.ndarray.zeros(
                    shape=(num_layers * directions, batch_size, state_size))
            ex = sym.bind(default_context(), bind_dict)
            # Forward twice (train vs. inference) with the original params.
            ex.forward(is_train=True)
            ex01 = ex.output_dict['rnn_output'].asnumpy()
            ex.forward(is_train=False)
            ex02 = ex.output_dict['rnn_output'].asnumpy()
            assert_allclose(ex01, ex02, rtol=1e-2, atol=1e-4)
            # Replace parameters and repeat; executor must use the new values.
            bind_dict['rnn_parameters'] = mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size))
            ex.copy_params_from(bind_dict)
            ex.forward(is_train=True)
            ex03 = ex.output_dict['rnn_output'].asnumpy()
            ex.forward(is_train=False)
            ex04 = ex.output_dict['rnn_output'].asnumpy()
            assert_allclose(ex03, ex04, rtol=1e-2, atol=1e-4)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_lstm_sym():
    """Fused LSTM must match a 3-layer stack of LSTMCells for every grad_req mode."""
    seq_lens = [1, 5]
    batch_sizes = [1, 32]
    input_sizes = [32, 128, 512]
    hidden_sizes = [32, 128, 512]
    for T, N, I, H in itertools.product(seq_lens, batch_sizes, input_sizes, hidden_sizes):
        fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='lstm', get_next_state=True, prefix='')
        stack = mx.rnn.SequentialRNNCell()
        for prefix in ('l0_', 'l1_', 'l2_'):
            stack.add(mx.rnn.LSTMCell(H, prefix=prefix))
        for req in ('write', 'add', 'null'):
            check_rnn_consistency(fused, stack, T, N, I, H, req)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_lstm_bidirectional():
    """Fused bidirectional LSTM must match two stacked BidirectionalCell layers."""
    seq_lens = [1, 5]
    batch_sizes = [1, 32]
    input_sizes = [32, 128, 512]
    hidden_sizes = [32, 128, 512]
    for T, N, I, H in itertools.product(seq_lens, batch_sizes, input_sizes, hidden_sizes):
        fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='lstm',
                                    bidirectional=True, get_next_state=True, prefix='')
        stack = mx.rnn.SequentialRNNCell()
        for fwd, bwd, out_pre in (('l0_', 'r0_', 'bi_lstm_0_'),
                                  ('l1_', 'r1_', 'bi_lstm_1_')):
            stack.add(mx.rnn.BidirectionalCell(
                      mx.rnn.LSTMCell(H, prefix=fwd),
                      mx.rnn.LSTMCell(H, prefix=bwd),
                      output_prefix=out_pre))
        for req in ('write', 'add', 'null'):
            check_rnn_consistency(fused, stack, T, N, I, H, req)
        # Mixed per-input grad requests must also agree.
        check_rnn_consistency(fused, stack, T, N, I, H, {'data': 'add', 'parameters': 'null'})
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_gru_sym():
    """Fused GRU must match a 3-layer stack of GRUCells for every grad_req mode."""
    seq_lens = [1, 5]
    batch_sizes = [1, 32]
    input_sizes = [32, 128, 512]
    hidden_sizes = [32, 128, 512]
    for T, N, I, H in itertools.product(seq_lens, batch_sizes, input_sizes, hidden_sizes):
        fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='gru', get_next_state=True, prefix='')
        stack = mx.rnn.SequentialRNNCell()
        for prefix in ('l0_', 'l1_', 'l2_'):
            stack.add(mx.rnn.GRUCell(H, prefix=prefix))
        for req in ('write', 'add', 'null'):
            check_rnn_consistency(fused, stack, T, N, I, H, req)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_gru_bidirectional():
    """Fused bidirectional GRU must match two stacked BidirectionalCell layers."""
    seq_lens = [1, 5]
    batch_sizes = [1, 32]
    input_sizes = [32, 128, 512]
    hidden_sizes = [32, 128, 512]
    for T, N, I, H in itertools.product(seq_lens, batch_sizes, input_sizes, hidden_sizes):
        fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='gru',
                                    bidirectional=True, get_next_state=True, prefix='')
        stack = mx.rnn.SequentialRNNCell()
        for fwd, bwd, out_pre in (('l0_', 'r0_', 'bi_gru_0_'),
                                  ('l1_', 'r1_', 'bi_gru_1_')):
            stack.add(mx.rnn.BidirectionalCell(
                      mx.rnn.GRUCell(H, prefix=fwd),
                      mx.rnn.GRUCell(H, prefix=bwd),
                      output_prefix=out_pre))
        for req in ('write', 'add', 'null'):
            check_rnn_consistency(fused, stack, T, N, I, H, req)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnntanh_sym():
    """Fused rnn_tanh must match a 3-layer stack of tanh RNNCells for every grad_req."""
    seq_lens = [1, 5]
    batch_sizes = [1, 32]
    input_sizes = [32, 128, 512]
    hidden_sizes = [32, 128, 512]
    for T, N, I, H in itertools.product(seq_lens, batch_sizes, input_sizes, hidden_sizes):
        fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='rnn_tanh', get_next_state=True, prefix='')
        stack = mx.rnn.SequentialRNNCell()
        for prefix in ('l0_', 'l1_', 'l2_'):
            stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix=prefix))
        for req in ('write', 'add', 'null'):
            check_rnn_consistency(fused, stack, T, N, I, H, req)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnntanh_bidirectional():
    """Fused bidirectional rnn_tanh must match two stacked BidirectionalCell layers."""
    seq_lens = [1, 5]
    batch_sizes = [1, 32]
    input_sizes = [32, 128, 512]
    hidden_sizes = [32, 128, 512]
    for T, N, I, H in itertools.product(seq_lens, batch_sizes, input_sizes, hidden_sizes):
        fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='rnn_tanh',
                                    bidirectional=True, get_next_state=True, prefix='')
        stack = mx.rnn.SequentialRNNCell()
        for fwd, bwd, out_pre in (('l0_', 'r0_', 'bi_rnntanh_0_'),
                                  ('l1_', 'r1_', 'bi_rnntanh_1_')):
            stack.add(mx.rnn.BidirectionalCell(
                      mx.rnn.RNNCell(H, activation='tanh', prefix=fwd),
                      mx.rnn.RNNCell(H, activation='tanh', prefix=bwd),
                      output_prefix=out_pre))
        for req in ('write', 'add', 'null'):
            check_rnn_consistency(fused, stack, T, N, I, H, req)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnnrelu_sym():
    """Fused rnn_relu must match a 3-layer stack of relu RNNCells for every grad_req."""
    T, N, I, H = 5, 32, 200, 200
    fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='rnn_relu', get_next_state=True, prefix='')
    stack = mx.rnn.SequentialRNNCell()
    for prefix in ('l0_', 'l1_', 'l2_'):
        stack.add(mx.rnn.RNNCell(H, activation='relu', prefix=prefix))
    for req in ('write', 'add', 'null'):
        check_rnn_consistency(fused, stack, T, N, I, H, req)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnnrelu_bidirectional():
    """Fused bidirectional rnn_relu must match two stacked BidirectionalCell layers.

    ReLU amplifies small numeric differences, hence the looser tolerances here.
    """
    seq_lens = [1, 5]
    batch_sizes = [1, 32]
    input_sizes = [32, 128, 512]
    hidden_sizes = [32, 128, 512]
    for T, N, I, H in itertools.product(seq_lens, batch_sizes, input_sizes, hidden_sizes):
        fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='rnn_relu',
                                    bidirectional=True, get_next_state=True, prefix='')
        stack = mx.rnn.SequentialRNNCell()
        for fwd, bwd, out_pre in (('l0_', 'r0_', 'bi_rnnrelu_0_'),
                                  ('l1_', 'r1_', 'bi_rnnrelu_1_')):
            stack.add(mx.rnn.BidirectionalCell(
                      mx.rnn.RNNCell(H, activation='relu', prefix=fwd),
                      mx.rnn.RNNCell(H, activation='relu', prefix=bwd),
                      output_prefix=out_pre))
        for req in ('write', 'add', 'null'):
            check_rnn_consistency(fused, stack, T, N, I, H, req, rtol=1e-2, atol=1e-2)
@with_seed()
def test_lstm_dropout():
    """Smoke test: fused 5-layer LSTM with dropout p=0.5 runs forward in train mode."""
    seq_len, batch, input_dim, hidden = 300, 20, 800, 800
    data = mx.sym.Variable('x')
    params = mx.sym.Variable('params')
    init_h = mx.sym.Variable('state')
    init_c = mx.sym.Variable('state_cell')
    rnn = mx.sym.RNN(data=data, parameters=params, state=init_h, state_cell=init_c,
                     state_size=hidden, num_layers=5, mode='lstm', p=0.5,
                     state_outputs=True, name='LSTM')
    exe = rnn.simple_bind(ctx=mx.cpu(), x=(seq_len, batch, input_dim))
    outputs = exe.forward(is_train=True)
    outputs[0].wait_to_read()
@with_seed()
def test_gru_dropout():
    """Smoke test: fused 5-layer GRU with dropout p=0.5 runs forward in train mode."""
    seq_len, batch, input_dim, hidden = 300, 20, 800, 800
    data = mx.sym.Variable('x')
    params = mx.sym.Variable('params')
    init_h = mx.sym.Variable('state')
    rnn = mx.sym.RNN(data=data, parameters=params, state=init_h,
                     state_size=hidden, num_layers=5, mode='gru', p=0.5,
                     state_outputs=True, name='GRU')
    exe = rnn.simple_bind(ctx=mx.cpu(), x=(seq_len, batch, input_dim))
    outputs = exe.forward(is_train=True)
    outputs[0].wait_to_read()
@with_seed()
def test_rnntanh_dropout():
    """Smoke test: fused 5-layer rnn_tanh with dropout p=0.5 runs forward in train mode."""
    seq_len, batch, input_dim, hidden = 300, 20, 800, 800
    data = mx.sym.Variable('x')
    params = mx.sym.Variable('params')
    init_h = mx.sym.Variable('state')
    rnn = mx.sym.RNN(data=data, parameters=params, state=init_h,
                     state_size=hidden, num_layers=5, mode='rnn_tanh', p=0.5,
                     state_outputs=True, name='RNN_TANH')
    exe = rnn.simple_bind(ctx=mx.cpu(), x=(seq_len, batch, input_dim))
    outputs = exe.forward(is_train=True)
    outputs[0].wait_to_read()
@with_seed()
def test_rnnrelu_dropout():
    """Smoke test: fused 5-layer rnn_relu with dropout p=0.5 runs forward in train mode."""
    seq_len, batch, input_dim, hidden = 300, 20, 800, 800
    data = mx.sym.Variable('x')
    params = mx.sym.Variable('params')
    init_h = mx.sym.Variable('state')
    rnn = mx.sym.RNN(data=data, parameters=params, state=init_h,
                     state_size=hidden, num_layers=5, mode='rnn_relu', p=0.5,
                     state_outputs=True, name='RNN_RELU')
    exe = rnn.simple_bind(ctx=mx.cpu(), x=(seq_len, batch, input_dim))
    outputs = exe.forward(is_train=True)
    outputs[0].wait_to_read()
def test_RNN_float64():
    """rnn_tanh RNN forward must accept float64 inputs (exercised on CPU only)."""
    if default_context().device_type == 'gpu':
        return
    dtype = 'float64'
    sym = mx.sym.RNN(
        mx.sym.Variable('in'),
        mx.sym.Variable('par'),
        mx.sym.Variable('s'),
        state_size=(2),
        num_layers=1,
        mode='rnn_tanh'
    )

    def _float64_args():
        # One fresh set of all-ones float64 arguments (data, params, state).
        return {
            'in': mx.nd.ones([2, 1, 2], dtype=dtype),
            'par': mx.nd.ones([12], dtype=dtype),
            's': mx.nd.ones([1, 1, 2], dtype=dtype)
        }

    ex = sym.bind(default_context(),
                  _float64_args(),
                  args_grad=_float64_args(),
                  grad_req='write')
    ex.forward()
    ex.outputs[0].wait_to_read()
def np_softmax(x, axis=-1, temperature=1.0):
    """NumPy reference softmax along `axis` with optional temperature scaling.

    Subtracts the per-slice max first for numerical stability; does not
    modify the input array.
    """
    shifted = x - np.max(x, axis=axis, keepdims=True)
    exps = np.exp(shifted / temperature)
    return exps / np.sum(exps, axis=axis, keepdims=True)
def check_elementwise_sum_with_shape(shape, n):
    """Check ElementWiseSum of ``n`` random arrays of ``shape``.

    Forward must equal the NumPy sum of the inputs; backward must copy the
    head gradient unchanged into every input gradient.
    """
    # forward
    inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
    out = mx.symbol.ElementWiseSum(*inputs, name='esum')
    arr = [mx.nd.empty(shape) for i in range(n)]
    arr_grad = [mx.nd.empty(shape) for i in range(n)]
    for i in range(n):
        arr[i][:] = np.random.uniform(-10, 10, shape)
    exec1 = out.bind(default_context(),
                     args=arr,
                     args_grad=arr_grad)
    exec1.forward(is_train=True)
    out1 = exec1.outputs[0]
    # NumPy reference: elementwise sum of all inputs.
    out = sum(a.asnumpy() for a in arr)
    assert_almost_equal(out, out1, rtol=1e-5, atol=1e-5)
    out_grad = mx.nd.empty(shape)
    out_grad[:] = np.random.uniform(-10, 10, shape)
    # backward
    exec1.backward([out_grad])
    # d(sum)/d(input_i) == 1, so each input grad equals the head gradient.
    for a in arr_grad:
        assert_almost_equal(a, out_grad, rtol=1e-5, atol=1e-5)
@with_seed()
def test_elementwise_sum():
    """ElementWiseSum over random shapes of 1 to 3 dims, with 1-7 inputs each."""
    for _ in range(2):
        for ndim in range(1, 4):
            # Keep total element count roughly bounded regardless of rank.
            shape = tuple(np.random.randint(1, int(1000**(1.0/ndim)), size=ndim))
            check_elementwise_sum_with_shape(shape, np.random.randint(1, 8))
def check_concat_with_shape(shapes, dimension, skip_second):
    """Check Concat of arrays with the given ``shapes`` along ``dimension``.

    Forward must equal ``np.concatenate``; backward must route the head
    gradient slice-wise back to each input.  When ``skip_second`` is True the
    second argument gets no gradient buffer (regression test for #1130).
    """
    # if skip_second is True, second argument will not have gradient.
    # it is to test #1130
    n = len(shapes)
    # forward
    target_dim = 0
    for shape in shapes:
        target_dim += shape[dimension]
    inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
    out = mx.symbol.Concat(*inputs, name='conc',dim=dimension)
    arr = [mx.nd.empty(shape) for shape in shapes]
    # Fill each input with its own concat-dim size so slices are identifiable.
    for i in range(n):
        arr[i][:] = shapes[i][dimension]
    arr_np = [np.copy(narray.asnumpy()) for narray in arr]
    arr_grad = [mx.nd.empty(shape) for shape in shapes]
    dict_grad = {}
    arg_names = out.list_arguments()
    # Optionally omit arg1 from the gradient dict to exercise partial grads.
    for name, g in zip(arg_names, arr_grad):
        if not skip_second or name != 'arg1':
            dict_grad[name] = g
    args = out.list_arguments()
    arg_shapes, out_shapes, aux_shapes = out.infer_shape(**dict(zip(args, shapes)))
    out_grad = mx.nd.empty(out_shapes[0])
    exec1 = out.bind(default_context(),
                     args=arr,
                     args_grad=dict_grad)
    exec1.forward(is_train=True)
    out1 = exec1.outputs[0]
    ret = np.concatenate([narray.asnumpy() for narray in arr], axis=dimension)
    assert_almost_equal(out1, ret)
    # backward
    # Head gradient = forward output + 1, so each input grad must equal its
    # original value + 1.
    out1.copyto(out_grad)
    out_grad[:] += 1
    exec1.backward([out_grad])
    for i, name in enumerate(arg_names):
        if not skip_second or name != 'arg1':
            grad = dict_grad[name]
            np_grad = arr_np[i]
            assert_almost_equal(grad, np_grad + 1)
@with_seed()
def test_concat():
    """Concat over 2D/3D/4D inputs, every axis (positive and negative), with
    2 to 5 inputs whose sizes differ only along the concat axis."""
    for dimension in range(4):
        n = 2
        merge = [2, 3, 4, 5, 6]  # per-input sizes along the concat axis
        a = 2
        b = 3
        c = 4
        # test 2D
        if dimension<2:
            for dim in range(2, 6):
                shapes = []
                for i in range(dim):
                    if dimension == 0:
                        shapes.append((merge[i], a))
                    elif dimension == 1:
                        shapes.append((a, merge[i]))
                # Run both with and without a gradient buffer for arg1.
                check_concat_with_shape(shapes,dimension,True)
                check_concat_with_shape(shapes,dimension,False)
                # Test negative dim
                check_concat_with_shape(shapes, dimension - 2, True)
                check_concat_with_shape(shapes, dimension - 2, False)
        #test 3D
        if dimension<3:
            for dim in range(2, 6):
                shapes = []
                for i in range(dim):
                    if dimension == 0:
                        shapes.append((merge[i], a,b))
                    elif dimension ==1:
                        shapes.append((a,merge[i],b))
                    elif dimension ==2:
                        shapes.append((a,b,merge[i]))
                check_concat_with_shape(shapes,dimension,True)
                check_concat_with_shape(shapes,dimension,False)
                # Test negative dim
                check_concat_with_shape(shapes, dimension - 3, True)
                check_concat_with_shape(shapes, dimension - 3, False)
        # test 4D
        for dim in range(2, 6):
            shapes = []
            for i in range(dim):
                if dimension == 0:
                    shapes.append((merge[i],a,b,c))
                elif dimension == 1:
                    shapes.append((a,merge[i],b,c))
                elif dimension ==2:
                    shapes.append((a,b,merge[i],c))
                elif dimension ==3:
                    shapes.append((a,b,c,merge[i]))
            check_concat_with_shape(shapes,dimension,True)
            check_concat_with_shape(shapes,dimension,False)
            # Test negative dim
            check_concat_with_shape(shapes, dimension - 4, True)
            check_concat_with_shape(shapes, dimension - 4, False)
@with_seed()
def test_slice_channel():
    """SliceChannel (split) forward/backward against NumPy ``take``/``concatenate``.

    Bug fix: ``astype(np.int)`` used the deprecated ``np.int`` alias, which was
    removed in NumPy 1.24 and raises AttributeError there; use ``np.int64``.
    """
    def check_slice_channel(data_ndim, axis, num_outputs, squeeze_axis):
        # Build a shape whose size along `axis` is divisible by num_outputs;
        # with squeeze_axis the split axis must be exactly num_outputs wide.
        ins = []
        if squeeze_axis:
            shape = np.random.randint(2, 5, data_ndim).tolist()
            shape[axis] = num_outputs
            out_ele_shape = [ele for ele in shape]
            del out_ele_shape[axis]
        else:
            shape = np.random.randint(1, 5, data_ndim).tolist()
            shape[axis] *= num_outputs
            out_ele_shape = [ele for ele in shape]
            out_ele_shape[axis] //= num_outputs
        data_npy = np.random.normal(size=shape)
        out_grads_npy = [np.random.normal(size=out_ele_shape) for i in range(num_outputs)]
        data = mx.sym.Variable('data')
        sym = mx.sym.SliceChannel(data=data, num_outputs=num_outputs, axis=axis, squeeze_axis=squeeze_axis)
        exe = sym.simple_bind(ctx=default_context(), data=data_npy.shape)
        assert len(exe.outputs) == num_outputs
        outputs = exe.forward(is_train=True, data=data_npy)
        # Each output must equal the corresponding slice of the input.
        for i in range(num_outputs):
            gt = data_npy.take(np.arange(i * shape[axis]/num_outputs,
                                         (i+1) * shape[axis]/num_outputs).astype(np.int64), axis=axis)
            if squeeze_axis:
                assert_almost_equal(outputs[i], gt.reshape(outputs[i].shape))
            else:
                assert_almost_equal(outputs[i], gt)
        # test backward: input grad is the concatenation of the output grads.
        exe.backward(out_grads=[mx.nd.array(ele, ctx=default_context()) for ele in out_grads_npy])
        if squeeze_axis:
            assert_almost_equal(exe.grad_arrays[0],
                                np.concatenate([np.expand_dims(ele, axis=axis) for ele in out_grads_npy],
                                               axis=axis))
        else:
            assert_almost_equal(exe.grad_arrays[0],
                                np.concatenate(out_grads_npy, axis=axis))
    check_slice_channel(data_ndim=2, axis=1, num_outputs=3, squeeze_axis=True)
    check_slice_channel(data_ndim=4, axis=2, num_outputs=3, squeeze_axis=False)
    check_slice_channel(data_ndim=3, axis=-1, num_outputs=2, squeeze_axis=False)
    check_slice_channel(data_ndim=5, axis=-2, num_outputs=3, squeeze_axis=True)
@with_seed()
def test_regression():
    ''' test regression operator '''
    def check_regression(symbol, forward, backward, shape, stype='default', densities=[0, 0.5, 1]):
        """Check one regression output op against NumPy references.

        ``forward`` maps data to the op's output; ``backward`` maps
        (output, label) to the un-normalized data gradient.  ``stype`` /
        ``densities`` exercise sparse (csr) labels at various densities.
        """
        # init executor
        data = mx.symbol.Variable('data')
        label = mx.symbol.Variable('label', stype=stype)
        out = symbol(data, label)
        grad_req = {'data': 'write', 'label': 'null'}
        out_exec = out.simple_bind(default_context(), grad_req=grad_req,
            data=shape, label=shape)
        arg_map = dict(zip(out.list_arguments(), out_exec.arg_arrays))
        grad_map = dict(zip(out.list_arguments(), out_exec.grad_arrays))
        # init data
        arr_data = mx.random.uniform(-1, 1, shape)
        arg_map["data"][:] = arr_data
        # init label based on density
        arr_label = arg_map["label"]
        atol = 1e-5
        for density in densities:
            arr_label[:] = rand_ndarray(shape, stype, density=density)
            out_exec.forward(is_train=True)
            out_exec.backward()
            np_out = forward(arr_data.asnumpy())
            # The op divides the gradient by the batch's second dim (num classes).
            out_grad = backward(np_out, arr_label.asnumpy().reshape(np_out.shape)) / shape[1]
            assert_almost_equal(out_exec.outputs[0], np_out, atol=atol)
            assert_almost_equal(grad_map["data"], out_grad, atol=atol)
    shape = (50, 30)
    # Dense labels: logistic, linear (L2) and MAE (L1) regression outputs.
    check_regression(mx.symbol.LogisticRegressionOutput,
                     lambda x: 1.0 / (1.0 + np.exp(-x)),
                     lambda x, y : x - y,
                     shape)
    check_regression(mx.symbol.LinearRegressionOutput,
                     lambda x: x,
                     lambda x, y : x - y,
                     shape)
    check_regression(mx.symbol.MAERegressionOutput,
                     lambda x: x,
                     lambda x, y : np.where(x > y, np.ones(x.shape), -np.ones(x.shape)),
                     shape)
    # Sparse (csr) labels for the ops that support them.
    check_regression(mx.symbol.LogisticRegressionOutput,
                     lambda x: 1.0 / (1.0 + np.exp(-x)),
                     lambda x, y : x - y,
                     shape, stype='csr')
    check_regression(mx.symbol.LinearRegressionOutput,
                     lambda x: x,
                     lambda x, y : x - y,
                     shape, stype='csr')
def check_softmax_grad(xpu):
    """Check SoftmaxOutput forward values and gradient for a single 4-class row.

    With label k, the expected gradient is softmax(x) - one_hot(k).
    """
    x = mx.sym.Variable('x')
    label = mx.sym.Variable('label')
    x_nd = mx.nd.array([[1, 6, 4, 2]], ctx=xpu)
    grad_x = mx.nd.zeros((1,4), ctx=xpu)
    label_nd = mx.nd.array([1], ctx=xpu)
    sym = mx.sym.SoftmaxOutput(data=x, label=label, ignore_label=0, use_ignore=False)
    ex = sym.bind(ctx=xpu, args={'x': x_nd, 'label': label_nd}, args_grad={'x': grad_x})
    ex.forward(is_train=True)
    softmax_out = ex.outputs[0].asnumpy()
    # Precomputed softmax([1, 6, 4, 2]).
    expected_softmax_out = [[0.005806628, 0.861780069, 0.116629249, 0.015784052]]
    assert np.isclose(softmax_out, expected_softmax_out).all()
    ex.backward(is_train=True)
    grad_out = ex.grad_arrays[0].asnumpy()
    k = int(label_nd[0].asscalar())
    # grad = softmax - one_hot(label), so (grad - softmax) has -1 at k, 0 elsewhere.
    expected_grad_out = np.zeros((1,4))
    expected_grad_out[0, k] = -1
    assert np.isclose(grad_out - softmax_out, expected_grad_out).all()
def check_smoothed_softmax_grad(xpu):
    """Check SoftmaxOutput with label smoothing (smooth_alpha).

    Smoothing redistributes alpha of the target mass uniformly over the
    other classes: grad = softmax - smoothed_one_hot(label).
    """
    alpha = 0.2
    x = mx.sym.Variable('x')
    label = mx.sym.Variable('label')
    x_nd = mx.nd.array([[1, 6, 4, 2]], ctx=xpu)
    grad_x = mx.nd.zeros((1,4), ctx=xpu)
    label_nd = mx.nd.array([1], ctx=xpu)
    sym = mx.sym.SoftmaxOutput(data=x, label=label, ignore_label=0, use_ignore=False, smooth_alpha=alpha)
    ex = sym.bind(ctx=xpu, args={'x': x_nd, 'label': label_nd}, args_grad={'x': grad_x})
    ex.forward(is_train=True)
    softmax_out = ex.outputs[0].asnumpy()
    # Smoothing only changes the backward pass; forward is plain softmax.
    expected_softmax_out = [[0.005806628, 0.861780069, 0.116629249, 0.015784052]]
    assert np.isclose(softmax_out, expected_softmax_out).all()
    ex.backward(is_train=True)
    grad_out = ex.grad_arrays[0].asnumpy()
    k = int(label_nd[0].asscalar())
    # Non-target classes get -alpha/(num_classes-1), target gets -(1-alpha).
    expected_grad_out = np.full((1,4), fill_value=-alpha/float(4-1))
    expected_grad_out[0, k] = - (1 - alpha)
    assert np.isclose(grad_out - softmax_out, expected_grad_out).all()
def check_softmax_with_ignore_label(xpu):
    """Check SoftmaxOutput's use_ignore/ignore_label behavior.

    Rows whose label equals ignore_label (0) must receive zero gradient,
    while the remaining rows' gradients are unchanged.
    """
    X = mx.symbol.Variable('X')
    L = mx.symbol.Variable('L')
    Y = mx.symbol.SoftmaxOutput(data=X, label=L, ignore_label=0, use_ignore=True)
    shape = (20, 10)
    x = mx.nd.empty(shape, ctx = xpu)
    l = mx.nd.empty((shape[0],), ctx = xpu)
    x_np = np.random.rand(*shape)
    # Labels in [0, 9); some may already be 0 but the first half is forced to 0 below.
    l_np = np.random.randint(0, shape[1]-1, (shape[0],))
    x[:] = x_np
    l[:] = l_np
    grad = mx.nd.empty(shape, ctx = xpu)
    exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
    # First pass: baseline gradient with the original labels.
    exec1.forward(is_train=True)
    exec1.backward()
    grad0 = grad.asnumpy()
    # Second pass: set the first half of the labels to the ignored value.
    for i in range(int(shape[0]/2)):
        l_np[i] = 0
    l[:] = l_np
    exec1.forward(is_train=True)
    exec1.backward()
    grad1 = grad.asnumpy()
    # Ignored rows contribute (near) zero gradient ...
    assert abs(np.sum(grad1[:int(shape[0]/2)])) < 1e-5
    # ... and the non-ignored rows match the baseline.
    assert_almost_equal(grad0[int(shape[0]/2):], grad1[int(shape[0]/2):])
def check_softmax_with_shape(shape, xpu, preserve_shape=False):
    """Check SoftmaxOutput forward/backward against the NumPy reference.

    The label is itself a (soft) distribution over classes; the expected
    gradient is softmax(x) - label.
    """
    # bind with label
    X = mx.symbol.Variable('X')
    L = mx.symbol.Variable('L')
    Y = mx.symbol.SoftmaxOutput(data=X, label=L, preserve_shape=preserve_shape)
    x = mx.random.uniform(-1, 1, shape, ctx=xpu)
    l = mx.random.uniform(-1, 1, shape, ctx=xpu)
    # Normalize the label into a valid probability distribution.
    l[:] = np_softmax(l.asnumpy())
    grad = mx.nd.empty(shape, ctx = xpu)
    exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
    exec1.forward(is_train=True)
    out = exec1.outputs[0].asnumpy()
    # Non-zero atol required by test_softmax with seed 781663739
    rtol = 1e-4
    atol = 1e-6
    assert_almost_equal(out, np_softmax(x.asnumpy()), rtol=rtol, atol=atol)
    exec1.backward()
    assert_almost_equal(grad, np_softmax(x.asnumpy()) - l.asnumpy(), rtol=rtol, atol=atol)
def test_python_op():
    """NumpyOp identity op: forward copies the input, backward copies the head grad."""
    X = mx.symbol.Variable('X')
    op = mx.operator.NumpyOp()
    sym = op.get_symbol(X, name='numpy_op')
    x = mx.ndarray.ones((10)) * 10
    dx = mx.ndarray.zeros((10))
    dy = mx.ndarray.ones((10))
    executor = sym.bind(default_context(), args=[x], args_grad={'X': dx})
    executor.forward(is_train=True)
    assert_almost_equal(x, executor.outputs[0])
    executor.backward(dy)
    assert_almost_equal(dy, dx)
def test_swapaxes():
    """SwapAxis must match np.swapaxes, for chained swaps and negative dims."""
    var = mx.symbol.Variable('data')
    shape = (2, 3, 4)
    x_np = np.ones(shape)
    x_np[0] = 1
    x_np[1] = 2
    x_nd = mx.nd.array(x_np)
    # Chain two swaps: (0,2) followed by (1,2).
    first = mx.symbol.SwapAxis(data=var, dim1=0, dim2=2)
    chained = mx.symbol.SwapAxis(data=first, dim1=1, dim2=2)
    exe = chained.bind(default_context(), args=[x_nd])
    exe.forward(is_train=True)
    expected = np.swapaxes(np.swapaxes(x_np, 0, 2), 1, 2)
    assert_almost_equal(exe.outputs[0], expected)
    # Single swaps over assorted shapes and axis sign combinations.
    config = [((1, 1, 2), 0, 1),
              ((1, 1, 2), -1, -2),
              ((4, 5, 6, 7), 1, 1),
              ((4, 5, 6, 7), 2, 3),
              ((4, 5, 6, 7), -2, 2),
              ((4, 5, 6, 7), -2, -3)]
    for shp, ax1, ax2 in config:
        arr_np = np.random.uniform(size=shp)
        arr_nd = mx.nd.array(arr_np, dtype=arr_np.dtype)
        sym = mx.symbol.SwapAxis(var, dim1=ax1, dim2=ax2)
        exe = sym.bind(default_context(), args=[arr_nd])
        exe.forward(is_train=True)
        assert_almost_equal(exe.outputs[0], np.swapaxes(arr_np, axis1=ax1, axis2=ax2))
@with_seed()
def test_scalarop():
    """Mixed scalar/symbol arithmetic: forward and backward of a compound
    expression 2 / (4 - ((1+x+1)*2/5) - 0.8 - (x != 0)) against NumPy."""
    data = mx.symbol.Variable('data')
    shape = (3, 4)
    data_tmp = np.ones(shape)*5
    arr_data = mx.nd.array(data_tmp)
    arr_grad = mx.nd.empty(shape)
    arr_grad[:]=3
    test = 2 / (4-((1+data+1)*2/5)-0.8-(data!=0))
    # NumPy reference for the denominator and the full expression.
    npout_1 = (4-((1+data_tmp+1)*2/5)-0.8-(data_tmp!=0))
    npout = 2/npout_1
    check_symbolic_forward(test, [data_tmp], [npout])
    # d/dx [2 / f(x)] = -2 f'(x) / f(x)^2 with f'(x) = -2/5; the (x != 0)
    # indicator has zero gradient.  Head gradient is 2 everywhere.
    npout_grad = 2.*2/5
    npout_grad = 2*npout_grad /(npout_1 *npout_1 )
    check_symbolic_backward(test, [data_tmp], [np.ones(shape)*2], [npout_grad])
@with_seed()
def test_scalar_pow():
    """symbol ** scalar: forward x**2 and analytic gradient 2x."""
    var = mx.symbol.Variable('data')
    shape = (1, 1)
    x = np.ones(shape)
    sym = var ** 2
    check_numeric_gradient(sym, [x])
    check_symbolic_forward(sym, [x], [x ** 2])
    check_symbolic_backward(sym, [x], [np.ones(shape)], [2 * x])
@with_seed()
def test_symbol_pow():
    """symbol ** symbol: d/dx x**y = y*x**(y-1), d/dy x**y = x**y * ln(x)."""
    shape = (1, 1)
    base_sym = mx.symbol.Variable('data')
    exp_sym = mx.symbol.Variable('exp')
    base_val = np.full(shape, 2.0)
    exp_val = np.full(shape, 3.0)
    sym = base_sym ** exp_sym
    check_numeric_gradient(sym, [base_val, exp_val])
    check_symbolic_forward(sym, [base_val, exp_val], [base_val ** exp_val])
    grad_base = base_val ** (exp_val - 1) * exp_val
    grad_exp = base_val ** exp_val * np.log(base_val)
    check_symbolic_backward(sym, [base_val, exp_val], [np.ones(shape)], [grad_base, grad_exp])
@with_seed()
def test_fully_connected():
    """FullyConnected with 4D input (flattened to 2D) against np.dot + bias."""
    data = mx.sym.var("data")
    fc_weight = mx.sym.var("weight")
    fc_bias = mx.sym.var("bias")
    fc = mx.sym.FullyConnected(data=data, weight=fc_weight, bias=fc_bias, num_hidden=10, no_bias=False, name='fc')
    # (5, 5, 5, 13) is flattened to (5, 325) by the op.
    data = mx.nd.random.uniform(shape=(5, 5, 5, 13), dtype=np.float32)
    fc_weight = mx.nd.random.uniform(shape=(10, 325), dtype=np.float32)
    fc_bias = mx.nd.random.uniform(shape=(10), dtype=np.float32)
    fc_bias2 = mx.nd.random.uniform(shape=(10, 1), dtype=np.float32)
    data_np = data.asnumpy().reshape(5, 325)
    # The op computes data @ weight.T + bias.
    fc_weight_np = np.transpose(fc_weight.asnumpy())
    fc_bias_np = fc_bias.asnumpy()
    res = np.dot(data_np, fc_weight_np) + fc_bias.asnumpy()
    check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np}, {'fc_output': res})
    check_numeric_gradient(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np},
                           numeric_eps=1e-2, rtol=1e-4, atol=1e-2)
    # TODO: Fix Bug #15032 when bias has ndim > 1
    #check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias2.asnumpy()}, {'fc_output': res})
@with_seed()
def test_pow_fn():
    """mx.sym.pow and mx.sym.power (scalar base ** symbol): d/dx 2**x = ln(2)*2**x."""
    shape = (3, 4)
    exp_var = mx.symbol.Variable("exp")
    x = np.ones(shape) * 3
    for sym in (mx.sym.pow(2, exp_var), mx.sym.power(2, exp_var)):
        check_numeric_gradient(sym, [x], numeric_eps=1E-3)
        check_symbolic_forward(sym, [x], [2 ** x])
        check_symbolic_backward(sym, [x], [np.ones(shape)], [np.log(2) * 2 ** x])
@with_seed()
def test_relu():
    """relu forward/backward against max(x, 0) and its subgradient 1[x > 0]."""
    def np_relu(v):
        return np.maximum(v, 0.0)

    def np_relu_grad(v):
        return (v > 0.0) * 1.0

    shape = (3, 4)
    x = mx.symbol.Variable("x")
    y = mx.sym.relu(x)
    xa = np.random.uniform(low=-1.0, high=1.0, size=shape)
    eps = 1e-4
    # Finite differencing is inaccurate across the kink at 0, so push any
    # near-zero samples away from it (seen with seed 97264195).
    xa[abs(xa) < eps] = 1.0
    check_numeric_gradient(y, [xa], numeric_eps=eps)
    check_symbolic_forward(y, [xa], [np_relu(xa)])
    check_symbolic_backward(y, [xa], [np.ones(shape)], [np_relu_grad(xa)])
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues,
# the analytical checks are still performed on each and every data type to verify the correctness.
@with_seed()
def test_leaky_relu():
    """LeakyReLU 'elu' and 'leaky' modes against NumPy references over
    1-3D shapes and float16/32/64 dtypes."""
    def fleaky_relu(x, act_type, slope=0.25):
        # NumPy forward: elu -> slope*(exp(x)-1) for x<0; leaky -> slope*x for x<0.
        neg_indices = x < 0
        out = x.copy()
        if act_type == 'elu':
            out[neg_indices] = slope * np.expm1(out[neg_indices])
        elif act_type == 'leaky':
            out[neg_indices] = slope * out[neg_indices]
        return out
    def fleaky_relu_grad(grad, x, y, act_type, slope=0.25):
        # NumPy backward: elu uses y + slope (since d/dx slope*(e^x - 1) = y + slope
        # for x<0); leaky uses the constant slope.
        neg_indices = x < 0
        out = np.ones(x.shape)
        if act_type == 'elu':
            out[neg_indices] = y[neg_indices] + slope
        elif act_type == 'leaky':
            out[neg_indices] = slope
        return out * grad
    for ndim in range(1, 4):
        shape = rand_shape_nd(ndim)
        x = mx.symbol.Variable("x")
        slp = 0.25
        for dtype in [np.float16, np.float32, np.float64]:
            xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
            eps = 1e-4
            rtol = 1e-2
            atol = 1e-3
            # Keep finite differencing away from the kink at 0.
            xa[abs(xa) < eps] = 1.0
            for act_type in ['elu', 'leaky']:
                y = mx.symbol.LeakyReLU(data=x, slope=slp, act_type=act_type)
                ya = fleaky_relu(xa, slope=slp, act_type=act_type)
                ga = fleaky_relu_grad(np.ones(shape), xa, ya, slope=slp, act_type=act_type)
                # Skip numeric check for float16 type to get rid of flaky behavior
                if dtype is not np.float16:
                    check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
                check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
                check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
# NOTE(haojin2): Skipping the numeric check tests for float16 data type due to precision issues,
# the analytical checks are still performed on each and every data type to verify the correctness.
@with_seed()
def test_prelu():
    """LeakyReLU 'prelu' mode: forward and gradients w.r.t. both the data and
    the learned slope gamma, for 2D/4D inputs, per-channel and broadcast gamma,
    and float16/32/64 dtypes."""
    def fprelu(x, gamma):
        # NumPy forward: x for x>0, gamma*x otherwise.  For 4D input gamma is
        # per-channel (axis 1), so transpose channels last to broadcast, then
        # transpose back (the permutation (2,3,0,1) is self-inverse).
        pos_indices = x > 0
        out = x.copy()
        if len(x.shape) == 4:
            out = out.transpose(2,3,0,1)
            out = np.multiply(out, gamma)
            out = out.transpose(2,3,0,1)
        else:
            out = np.multiply(out, gamma)
        out[pos_indices] = x[pos_indices]
        return out
    def fprelu_grad(x, y, gamma):
        # NumPy backward: data grad is gamma on the negative side, 1 on the
        # positive side; gamma grad is x on the negative side, reduced over all
        # axes that gamma broadcasts across.
        pos_indices = x > 0
        if len(x.shape) == 4:
            grad_x = np.multiply(np.ones(x.shape).transpose(2,3,0,1), gamma)
            grad_x = grad_x.transpose(2,3,0,1)
        else:
            grad_x = np.multiply(np.ones(x.shape), gamma)
        grad_gam = np.zeros(gamma.shape)
        copy_x = x.copy()
        copy_x[pos_indices] = 0.0
        grad_x[pos_indices] = 1.0
        # Reduction pattern depends on gamma's shape relative to x.
        if len(gamma.shape) > 1 and len(x.shape) != 4:
            grad_gam = copy_x
        elif len(gamma.shape) > 1 and len(x.shape) == 4:
            grad_gam = np.sum(copy_x, axis=(2,3))
        elif gamma.shape[0] == 1:
            grad_gam = np.sum(np.sum(copy_x))
        elif gamma.shape[0] > 1 and len(x.shape) != 4:
            grad_gam = np.sum(copy_x, axis=0)
        elif gamma.shape[0] > 1 and len(x.shape) == 4:
            grad_gam = np.sum(copy_x, axis=(0,2,3))
        return (grad_x, grad_gam)
    x = mx.symbol.Variable("x")
    gamma = mx.symbol.Variable("gamma")
    for shape in [(3,4), (3,4,4,5)]:
        for dtype in [np.float16, np.float32, np.float64]:
            for gam in [np.array([0.1, 0.2, 0.3, 0.4], dtype=dtype)]:
                # gam_full broadcasts the same slopes to a 2D gamma.
                gam_full = np.array([gam, gam, gam])
                xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
                rtol = 1e-2
                atol = 1e-3
                eps = 1e-4
                # Keep finite differencing away from the kink at 0.
                xa[abs(xa) < eps] = 1.0
                y = mx.symbol.LeakyReLU(data=x, gamma=gamma, act_type='prelu')
                ya = fprelu(xa, gam)
                ya_full = fprelu(xa, gam_full)
                g_xa, g_gam = fprelu_grad(xa, ya, gamma=gam)
                g_xa_full, g_gam_full = fprelu_grad(xa, ya_full, gamma=gam_full)
                # Skip numeric check for float16 type to get rid of flaky behavior
                if dtype is not np.float16:
                    check_numeric_gradient(y, [xa, gam], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
                    check_numeric_gradient(y, [xa, gam_full], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
                check_symbolic_forward(y, [xa, gam], [ya], rtol=rtol, atol=atol, dtype=dtype)
                check_symbolic_backward(y, [xa, gam], [np.ones(shape), np.ones(gam.shape)], [g_xa, g_gam], rtol=rtol, atol=atol, dtype=dtype)
                check_symbolic_forward(y, [xa, gam_full], [ya_full], rtol=rtol, atol=atol, dtype=dtype)
                check_symbolic_backward(y, [xa, gam_full], [np.ones(shape), np.ones(gam_full.shape)],
                                        [g_xa_full, g_gam_full], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_selu():
    """LeakyReLU 'selu' mode against the scaled-ELU reference
    lambda * (x if x > 0 else alpha*(exp(x)-1)) for float16/32/64."""
    # Standard SELU constants (Klambauer et al., "Self-Normalizing Neural Networks").
    alpha = 1.6732632423543772848170429916717
    lamb = 1.0507009873554804934193349852946
    def fselu(x):
        neg_indices = x < 0
        out = x.copy()
        out[neg_indices] = alpha * np.expm1(out[neg_indices])
        return out * lamb
    def fselu_grad(grad, x, y):
        # For x<0: d/dx lamb*alpha*(e^x - 1) = y + lamb*alpha = (y/lamb + alpha)*lamb.
        neg_indices = x < 0
        out = np.ones(x.shape).astype(x.dtype)
        out[neg_indices] = y[neg_indices] + alpha
        return out * lamb
    shape = (3, 4)
    x = mx.sym.Variable("x")
    y = mx.sym.LeakyReLU(data=x, act_type="selu")
    for dtype in [np.float16, np.float32, np.float64]:
        xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
        # Looser tolerances for float16.
        eps, rtol, atol = (7.5e-4, 1e-1, 1e-2) if dtype is np.float16 else (1e-4, 1e-2, 1e-4)
        if dtype is np.float16:
            xa /= 10.0
        # Keep finite differencing away from the kink at 0.
        xa[abs(xa) < eps] = 0.01
        ya = fselu(xa)
        ga = fselu_grad(np.ones(shape).astype(dtype), xa, ya)
        check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
        check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
        check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_gelu():
    """GELU (tanh approximation): forward and backward vs. a numpy reference."""
    CUBE_CONSTANT = 0.044715
    ROOT_TWO_OVER_PI = 0.7978845608028654

    def inner(v):
        # g(x) = sqrt(2/pi) * (x + 0.044715 * x^3)
        return ROOT_TWO_OVER_PI * (v + CUBE_CONSTANT * np.power(v, 3))

    def inner_grad(v):
        return ROOT_TWO_OVER_PI * (1.0 + 3.0 * CUBE_CONSTANT * np.power(v, 2))

    def ref_forward(v):
        # gelu(x) = 0.5 * x * (1 + tanh(g(x)))
        return 0.5 * v * (1.0 + np.tanh(inner(v)))

    def ref_backward(grad, v, out):
        # Expressed in terms of the forward output `out`, as the op does.
        t = np.tanh(inner(v))
        return grad * (out / v + out * (1 - t) * inner_grad(v))

    shape = (3, 4)
    x = mx.sym.Variable("x")
    y = mx.sym.LeakyReLU(data=x, act_type="gelu")
    for dtype in [np.float16, np.float32, np.float64]:
        np_in = np.random.uniform(low=-0.1, high=0.1, size=shape).astype(dtype)
        if dtype is np.float16:
            eps, rtol, atol = 7.5e-4, 2e-2, 1e-3
        else:
            eps, rtol, atol = 1e-4, 1e-3, 1e-5
        if dtype is np.float16:
            # Shrink fp16 inputs and keep them away from the numeric-eps zone.
            np_in /= 10.0
            np_in[abs(np_in) < eps] = 0.01
        expected = ref_forward(np_in)
        expected_grad = ref_backward(np.ones(shape).astype(dtype), np_in, expected)
        check_numeric_gradient(y, [np_in], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
        check_symbolic_forward(y, [np_in], [expected], rtol=rtol, atol=atol, dtype=dtype)
        check_symbolic_backward(y, [np_in], [np.ones(shape)], [expected_grad], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_sigmoid():
    """sigmoid(): forward, numeric gradient, and analytic gradient y*(1-y)."""
    def ref_sigmoid(arr):
        return 1.0 / (1.0 + np.exp(-arr))

    shape = (3, 4)
    x = mx.symbol.Variable("x")
    y = mx.sym.sigmoid(x)
    np_in = np.random.uniform(low=-1.0, high=1.0, size=shape)
    expected = ref_sigmoid(np_in)
    check_numeric_gradient(y, [np_in], numeric_eps=1E-3)
    check_symbolic_forward(y, [np_in], [expected])
    # d/dx sigmoid(x) = sigmoid(x) * (1 - sigmoid(x))
    check_symbolic_backward(y, [np_in], [np.ones(shape)], [expected * (1 - expected)])
@with_seed()
def test_shape_array():
    """shape_array forward returns the input's shape; input gradient is zero.

    Checks 1-D through 5-D random shapes.
    """
    for ndim in range(1, 6):
        shape = rand_shape_nd(ndim)
        x = mx.sym.var('x')
        y = mx.sym.shape_array(x)
        xa = mx.nd.array(np.random.ranf(shape))
        xg = mx.nd.empty(xa.shape)
        ya = np.shape(xa)
        yg = mx.nd.ones(ya)
        exe = y.bind(ctx=default_context(), args={'x': xa},
                     args_grad={'x': xg})
        exe.forward(is_train=True)
        exe.backward([yg])
        yo = exe.outputs[0].asnumpy()
        # BUG FIX: `same()` returns a bool that the original discarded,
        # making the forward check a no-op.  Assert on it.
        assert same(yo, ya)
        # shape_array is non-differentiable w.r.t. the data values.
        assert_almost_equal(xg, np.zeros_like(xg.asnumpy()))
@with_seed()
def test_size_array():
    """size_array forward returns the input's element count; input gradient is zero.

    Checks 1-D through 5-D random shapes.
    """
    for ndim in range(1, 6):
        shape = rand_shape_nd(ndim)
        x = mx.sym.var('x')
        y = mx.sym.size_array(x)
        xa = mx.nd.array(np.random.ranf(shape))
        xg = mx.nd.empty(xa.shape)
        ya = np.size(xa)
        yg = mx.nd.ones(ya)
        exe = y.bind(ctx=default_context(), args={'x': xa},
                     args_grad={'x': xg})
        exe.forward(is_train=True)
        exe.backward([yg])
        yo = exe.outputs[0].asnumpy()
        # BUG FIX: `same()` returns a bool that the original discarded,
        # making the forward check a no-op.  Assert on it.
        assert same(yo, ya)
        # size_array is non-differentiable w.r.t. the data values.
        assert_almost_equal(xg, np.zeros_like(xg.asnumpy()))
@with_seed()
def test_hard_sigmoid():
    """hard_sigmoid: clip(alpha*x + beta, 0, 1), forward and gradient."""
    def ref_forward(arr, alpha=0.2, beta=0.5):
        zeros = np.zeros(arr.shape, dtype=arr.dtype)
        ones = np.ones(arr.shape, dtype=arr.dtype)
        return np.maximum(zeros, np.minimum(ones, alpha * arr + beta))

    def ref_backward(arr, out_grad, alpha=0.2, beta=0.5):
        # Gradient is alpha inside the linear region and zero where clipped.
        fwd = ref_forward(arr, alpha, beta)
        grad = out_grad * alpha
        grad[fwd <= 0.0] = 0.0
        grad[fwd >= 1.0] = 0.0
        return grad

    shape = (3, 4)
    x = mx.symbol.Variable("x")
    y = mx.sym.hard_sigmoid(x)
    for dtype in [np.float16, np.float32, np.float64]:
        rtol = 1e-2 if dtype is np.float16 else 1e-3
        atol = 1e-3
        eps = 1e-3
        np_in = np.random.uniform(low=-3.0, high=3.0, size=shape).astype(dtype)
        # The clip kinks sit where alpha*x + beta hits 0 or 1, i.e. x = -2.5
        # and x = 2.5; nudge samples away so numeric gradients are defined.
        np_in[abs(np_in - 2.5) < eps] -= 2 * eps
        np_in[abs(np_in + 2.5) < eps] += 2 * eps
        expected = ref_forward(np_in)
        expected_grad = ref_backward(np_in, np.ones(shape))
        # fp16 numeric gradients are too noisy to check reliably.
        if dtype is not np.float16:
            check_numeric_gradient(y, [np_in], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
        check_symbolic_forward(y, [np_in], [expected], rtol=rtol, atol=atol, dtype=dtype)
        check_symbolic_backward(y, [np_in], [np.ones(shape)], [expected_grad], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_softsign():
    """softsign(x) = x / (1 + |x|): forward, numeric and analytic gradients."""
    def ref_forward(arr):
        return arr / (1.0 + np.abs(arr))

    def ref_backward(arr):
        # d/dx [x / (1 + |x|)] = 1 / (1 + |x|)^2
        return 1.0 / np.square(1.0 + np.abs(arr))

    shape = (3, 4)
    x = mx.symbol.Variable("x")
    y = mx.sym.softsign(x)
    np_in = np.random.uniform(low=-1.0, high=1.0, size=shape)
    expected = ref_forward(np_in)
    expected_grad = ref_backward(np_in)
    check_numeric_gradient(y, [np_in], numeric_eps=1E-3)
    check_symbolic_forward(y, [np_in], [expected])
    check_symbolic_backward(y, [np_in], [np.ones(shape)], [expected_grad])
@with_seed()
def test_binary_logic():
    """Element-wise and broadcasting comparison operators, incl. scalar forms."""
    def _check(forward_gt, logic_sym, x_shape, y_shape, test_scalar=True):
        x = mx.symbol.Variable("x")
        y = mx.symbol.Variable("y")
        z = logic_sym(x, y)
        x_npy = np.random.randint(0, 4, size=x_shape).astype(np.float32)
        y_npy = np.random.randint(0, 4, size=y_shape).astype(np.float32)
        exe = z.simple_bind(ctx=default_context(), x=x_shape, y=y_shape)
        mx_out = exe.forward(is_train=True, x=x_npy, y=y_npy)[0]
        assert_almost_equal(mx_out, forward_gt(x_npy, y_npy))
        exe.backward()
        if not test_scalar:
            return
        # Scalar operand on either side: 1 <op> y and x <op> 1.
        scalar_cases = [(logic_sym(1, y), dict(y=y_npy), forward_gt(1, y_npy)),
                        (logic_sym(x, 1), dict(x=x_npy), forward_gt(x_npy, 1))]
        for scalar_sym, feed, expected in scalar_cases:
            shapes = {name: arr.shape for name, arr in feed.items()}
            scalar_exe = scalar_sym.simple_bind(ctx=default_context(), **shapes)
            scalar_out = scalar_exe.forward(is_train=True, **feed)[0]
            assert_almost_equal(scalar_out, expected)
            scalar_exe.backward()

    # Element-wise (no-broadcast) comparisons: the numpy and symbolic forms
    # are literally the same Python operator.
    for op in [lambda x, y: x == y,
               lambda x, y: x > y,
               lambda x, y: x >= y,
               lambda x, y: x < y,
               lambda x, y: x <= y,
               lambda x, y: x != y]:
        _check(forward_gt=op, logic_sym=op, x_shape=(10, 10), y_shape=(10, 10))

    # Broadcasting comparisons: pair each numpy comparison with its
    # mx.sym.broadcast_* counterpart.  Scalar forms don't apply here.
    broadcast_pairs = [
        (lambda x, y: x == y, mx.sym.broadcast_equal),
        (lambda x, y: x > y, mx.sym.broadcast_greater),
        (lambda x, y: x >= y, mx.sym.broadcast_greater_equal),
        (lambda x, y: x < y, mx.sym.broadcast_lesser),
        (lambda x, y: x <= y, mx.sym.broadcast_lesser_equal),
        (lambda x, y: x != y, mx.sym.broadcast_not_equal),
    ]
    for np_op, sym_op in broadcast_pairs:
        _check(forward_gt=np_op, logic_sym=sym_op,
               x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
@with_seed()
def test_unary_logic():
    """logical_not via both the imperative (ndarray) and symbolic APIs."""
    def ref_logical_not(arr, dtype):
        return np.logical_not(arr).astype(dtype)

    shape = (3, 4)
    np_in = np.random.randint(-2, 2, size=shape).astype(np.float32)
    nd_in = mx.nd.array(np_in)
    # imperative path
    nd_out = mx.nd.logical_not(nd_in)
    assert_almost_equal(nd_out, ref_logical_not(np_in, dtype=np_in.dtype))
    # symbolic path
    x = mx.sym.Variable('x')
    y = mx.sym.logical_not(data=x)
    exe = y.simple_bind(ctx=default_context(), x=shape)
    sym_out = exe.forward(is_train=True, x=nd_in)[0]
    assert_almost_equal(sym_out, ref_logical_not(np_in, dtype=np_in.dtype))
@with_seed()
def test_embedding():
    """Embedding lookup equals one_hot(data) . weight, forward and backward."""
    in_dim = 10
    out_dim = 4
    batch = 24
    data = mx.sym.Variable("data")
    embed = mx.sym.Embedding(data=data, input_dim=in_dim, output_dim=out_dim, name="embed")
    # Integer indices get no gradient; only the weight is written.
    exe_test = embed.simple_bind(default_context(), grad_req={'data': 'null', 'embed_weight': 'write'}, data=(batch,))
    arg_map = dict(zip(embed.list_arguments(), exe_test.arg_arrays))
    grad_map = dict(zip(embed.list_arguments(), exe_test.grad_arrays))
    np_data = np.random.randint(low=0, high=in_dim, size=batch)
    np_weight = np.random.uniform(-0.01, 0.01, arg_map["embed_weight"].shape)
    # Express the lookup as a one-hot matrix product for the numpy reference.
    np_onehot = np.zeros((batch, in_dim))
    np_onehot[np.arange(batch), np_data] = 1.0
    # forward
    arg_map["data"][:] = np_data
    arg_map["embed_weight"][:] = np_weight
    exe_test.forward(is_train=True)
    # Non-zero atol required, as exposed by seed 781663739
    rtol = 1e-5
    atol = 1e-5
    assert_almost_equal(exe_test.outputs[0], np.dot(np_onehot, np_weight), rtol=rtol, atol=atol)
    # backward: the weight gradient scatters the output gradient back onto
    # the rows that were looked up, i.e. onehot^T . grad.
    np_grad = np.random.uniform(-1, 1, exe_test.outputs[0].shape)
    grad = mx.nd.zeros(np_grad.shape)
    grad[:] = np_grad
    exe_test.backward([grad])
    assert_almost_equal(grad_map["embed_weight"], np.dot(np_onehot.T, np_grad), rtol=rtol, atol=atol)
# check ops handle duplicate input correctly.
@with_seed()
def test_binary_op_duplicate_input():
    """Binding the same symbol to both operands (data * data) must work."""
    data = mx.symbol.Variable('data')
    shape = (3, 4)
    np_in = np.full(shape, 5.0)
    nd_in = mx.nd.array(np_in)
    nd_grad = mx.nd.empty(shape)
    nd_grad[:] = 3
    out_grad = mx.nd.empty(shape)
    out_grad[:] = 1
    square = data * data
    exe = square.bind(default_context(), args=[nd_in], args_grad=[nd_grad])
    exe.forward(is_train=True)
    assert_almost_equal(exe.outputs[0], np_in * np_in)
    exe.backward(out_grad)
    # d/dx (x * x) = 2x -- the two operand gradients must accumulate.
    assert_almost_equal(nd_grad, 2.0 * np_in)
@with_seed()
def test_sign():
    """sign(): forward matches np.sign; the input gradient is identically zero."""
    data = mx.symbol.Variable('data')
    shape = (3, 4)
    data_tmp = np.ones(shape)
    data_tmp[:] = 5
    arr_data = mx.nd.array(data_tmp)
    arr_grad = mx.nd.empty(shape)
    arr_grad[:] = 3
    test = mx.sym.sign(data)
    exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
    exe_test.forward(is_train=True)
    assert_almost_equal(exe_test.outputs[0], np.sign(data_tmp))
    out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
    exe_test.backward(out_grad)
    # sign() is piecewise constant, so its gradient is zero everywhere.
    # (The original converted out_grad to numpy and immediately overwrote it
    # with scalar 0 -- a dead store; compare against an explicit zero array.)
    assert_almost_equal(arr_grad, np.zeros(shape))
@with_seed()
def test_round_ceil_floor():
    """round + ceil + floor forward matches the numpy composition.

    Forward-only check; these ops have zero gradient so no backward pass
    is exercised.  (The original allocated and filled an `arr_grad` array
    that was never bound -- dead code, removed.)
    """
    shape = (3, 4)
    np_in = np.full(shape, 5.543)
    data = mx.symbol.Variable('data')
    composite = mx.sym.round(data) + mx.sym.ceil(data) + mx.sym.floor(data)
    exe = composite.bind(default_context(), args=[mx.nd.array(np_in)])
    exe.forward(is_train=True)
    expected = np.round(np_in) + np.ceil(np_in) + np.floor(np_in)
    assert_almost_equal(exe.outputs[0], expected)
@with_seed()
def test_trunc():
    """trunc() forward matches np.trunc at float32 precision."""
    np_in = np.random.rand(3, 4) * 10 - 5
    data = mx.symbol.Variable('data')
    sym = mx.sym.trunc(data)
    exe = sym.bind(default_context(), args=[mx.nd.array(np_in)])
    exe.forward(is_train=True)
    # trunc is sensitive to rounding near integers; cast the reference to
    # float32 to match mxnet's compute precision (repro: seed 1660190454).
    expected = np.trunc(np.float32(np_in))
    assert_almost_equal(exe.outputs[0], expected)
@with_seed()
def test_rsqrt_cos_sin():
    """Forward and backward of rsqrt(x) + cos(x) + sin(x) against numpy."""
    data = mx.symbol.Variable('data')
    shape = (3, 4)
    data_tmp = np.ones(shape)
    data_tmp[:]=5
    arr_data = mx.nd.array(data_tmp)
    arr_grad = mx.nd.empty(shape)
    arr_grad[:]=3
    test = mx.sym.rsqrt(data) + mx.sym.cos(data) + mx.sym.sin(data)
    exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
    exe_test.forward(is_train=True)
    out = exe_test.outputs[0]
    npout = 1/ np.sqrt(data_tmp) + np.cos(data_tmp) + np.sin(data_tmp)
    assert_almost_equal(out, npout)
    out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
    npout_grad = out_grad.asnumpy()
    # d/dx x^(-1/2) = -1/(2 * x^(3/2)) = -(1/(2*x*sqrt(x)));
    # d/dx cos(x) = -sin(x);  d/dx sin(x) = cos(x).
    npout_grad = npout_grad * -(1.0 / (2.0 * data_tmp * np.sqrt(data_tmp))) + npout_grad * -1 * np.sin(data_tmp) + npout_grad * np.cos(data_tmp)
    exe_test.backward(out_grad)
    assert_almost_equal(arr_grad, npout_grad)
@with_seed()
def test_maximum_minimum():
    """maximum + minimum of two inputs: forward and gradient routing."""
    lhs_sym = mx.symbol.Variable('data1')
    rhs_sym = mx.symbol.Variable('data2')
    shape = (3, 4)
    np_lhs = np.random.rand(3, 4)
    np_rhs = np.random.rand(3, 4)
    np_lhs[:] = 2
    np_rhs[:] = 3
    nd_lhs = mx.nd.array(np_lhs)
    nd_rhs = mx.nd.array(np_rhs)
    grad_lhs = mx.nd.empty(shape)
    grad_rhs = mx.nd.empty(shape)
    net = mx.sym.maximum(lhs_sym, rhs_sym) + mx.sym.minimum(lhs_sym, rhs_sym)
    exe = net.bind(default_context(), args=[nd_lhs, nd_rhs], args_grad=[grad_lhs, grad_rhs])
    exe.forward(is_train=True)
    expected = np.maximum(np_lhs, np_rhs) + np.minimum(np_lhs, np_rhs)
    assert_almost_equal(exe.outputs[0], expected)
    out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
    exe.backward(out_grad)
    g = np.ones(shape)
    g[:] = 2
    # max routes the gradient to the larger operand, min to the smaller one.
    lhs_wins_max = (np_lhs > np_rhs).astype('float')
    lhs_wins_min = (np_lhs < np_rhs).astype('float')
    expected_grad_lhs = g * lhs_wins_max + g * lhs_wins_min
    expected_grad_rhs = (g - g * lhs_wins_max) + (g - g * lhs_wins_min)
    assert_almost_equal(grad_lhs, expected_grad_lhs)
    assert_almost_equal(grad_rhs, expected_grad_rhs)
@with_seed()
def test_maximum_minimum_scalar():
    """maximum/minimum with scalar operands, in both operand orders."""
    data = mx.symbol.Variable('data')
    shape = (3, 4)
    np_in = np.random.rand(3, 4)
    np_in[:] = 2
    nd_in = mx.nd.array(np_in)
    nd_grad = mx.nd.empty(shape)
    net = (mx.sym.maximum(data, 3) + mx.sym.maximum(9, data) +
           mx.sym.minimum(5, data) + mx.sym.minimum(data, 4))
    exe = net.bind(default_context(), args=[nd_in], args_grad=[nd_grad])
    exe.forward(is_train=True)
    expected = (np.maximum(np_in, 3) + np.maximum(9, np_in) +
                np.minimum(5, np_in) + np.minimum(np_in, 4))
    assert_almost_equal(exe.outputs[0], expected)
    out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
    exe.backward(out_grad)
    g = np.ones(shape)
    g[:] = 2
    # Gradient flows to `data` wherever data wins the max / loses the min.
    m_max_d3 = (np_in > 3).astype('float')   # max(data, 3): data wins
    m_max_9d = (9 > np_in).astype('float')   # max(9, data): 9 wins
    m_min_5d = (5 < np_in).astype('float')   # min(5, data): 5 wins
    m_min_d4 = (np_in < 4).astype('float')   # min(data, 4): data wins
    expected_grad = (g * m_max_d3 + (g - g * m_max_9d) +
                     (g - g * m_min_5d) + g * m_min_d4)
    assert_almost_equal(nd_grad, expected_grad)
@with_seed()
def test_abs():
    """abs(): forward matches |x|; backward multiplies by sign(x)."""
    data = mx.symbol.Variable('data')
    shape = (3, 4)
    np_in = np.full(shape, 5.0)
    nd_in = mx.nd.array(np_in)
    nd_grad = mx.nd.empty(shape)
    nd_grad[:] = 3
    sym = mx.sym.abs(data)
    exe = sym.bind(default_context(), args=[nd_in], args_grad=[nd_grad])
    exe.forward(is_train=True)
    assert_almost_equal(exe.outputs[0], abs(np_in))
    out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
    exe.backward(out_grad)
    # d/dx |x| = sign(x)  (away from zero)
    expected_grad = out_grad.asnumpy() * np.sign(np_in)
    assert_almost_equal(nd_grad, expected_grad)
def check_deconvolution_forward_backward(input_shape, num_filter, kernel, stride, pad):
    """Configuration A: input --> conv --> deconv --> output.

    The convolution and deconvolution share parameters -- and the same
    weight array -- chosen so the output shape equals the input shape.
    If the same value is fed to forward() and backward(), their outputs
    must then be equal, which is what this asserts.
    """
    assert input_shape[1] == num_filter
    data = mx.sym.Variable(name="data")
    conv = mx.sym.Convolution(
        data=data, kernel=kernel, stride=stride, pad=pad,
        num_filter=num_filter, no_bias = "true", name = "conv")
    deconv = mx.sym.Deconvolution(
        data=conv, kernel=kernel, stride=stride, pad=pad,
        num_filter=num_filter, no_bias = "true", name = "deconv")
    arg_names = deconv.list_arguments()
    arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
    input_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
    # Reuse the input as the head gradient so forward and backward see
    # the same value.
    out_grad = input_data
    args = {}
    args["data"] = input_data
    # Tie conv and deconv to the *same* weight array.
    args['conv_weight'] = args['deconv_weight'] = mx.random.normal(0, 1,
        (num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
    args_grad = [mx.nd.empty(s) for s in arg_shapes]
    exe = deconv.bind(default_context(), args=args, args_grad=args_grad)
    exe.forward(is_train=True)
    out = exe.outputs[0]
    exe.backward(out_grad)
    # Data gradient must equal the forward output (same value in, same out).
    assert_almost_equal(out, args_grad[0], rtol=1E-3, atol=1e-3)
    # Repeat with grad_req="add": gradients must accumulate onto the
    # pre-existing buffer contents instead of overwriting them.
    args_grad_addto_npy = [np.random.normal(size=s) for s in arg_shapes]
    args_grad_addto = [mx.nd.array(ele) for ele in args_grad_addto_npy]
    exe = deconv.bind(default_context(), args=args, args_grad=args_grad_addto, grad_req="add")
    exe.forward(is_train=True)
    out = exe.outputs[0].asnumpy()
    exe.backward(out_grad)
    assert_almost_equal(out + args_grad_addto_npy[0], args_grad_addto[0].asnumpy(), rtol=1e-3, atol=1e-3)
def check_deconvolution_gradient(input_shape, num_filter, pad):
    """Configuration A: input --> conv --> output.
    Configuration B: input --> deconv --> output.

    The convolution and deconvolution use matching parameters chosen so the
    output shape equals the input shape.  During backward(), if the input of
    A equals the output of B and the output of A equals the input of B, then
    the weight gradients of the two ops should be the same.
    """
    ndim = len(pad)
    stride = (1,) * ndim
    # "same"-style kernel: with stride 1, kernel = 2*pad + 1 keeps the shape.
    kernel = tuple(2 * np.array(pad) + 1)
    data_conv = mx.sym.Variable(name="data_conv")
    conv = mx.sym.Convolution(
        data=data_conv, kernel=kernel, stride=stride, pad=pad,
        num_filter=num_filter, no_bias = "true", name = "conv")
    data_deconv = mx.sym.Variable(name="data_deconv")
    deconv = mx.sym.Deconvolution(
        data=data_deconv, kernel=kernel, stride=stride, pad=pad,
        num_filter=num_filter, no_bias = "true", name = "deconv")
    conv_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
    conv_args = {}
    conv_args["data_conv"] = conv_data
    conv_args['conv_weight'] = \
        mx.random.normal(0, 1,(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
    conv_args_grad = [mx.nd.zeros(conv_data.shape),
                      mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
    exe_conv = conv.bind(default_context(), args=conv_args, args_grad=conv_args_grad)
    exe_conv.forward(is_train=True)
    conv_out_grad = mx.random.normal(0, 2, exe_conv.outputs[0].shape, ctx=mx.cpu()).copyto(default_context())
    exe_conv.backward(conv_out_grad)
    # Cross-wire B: deconv's input is conv's head gradient, deconv's head
    # gradient is conv's input, and both share the same weight array.
    deconv_data = conv_out_grad
    deconv_args = {}
    deconv_args['data_deconv'] = deconv_data
    deconv_args['deconv_weight'] = conv_args['conv_weight']
    deconv_args_grad = [mx.nd.zeros(deconv_data.shape),
                        mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
    deconv_addto_args_grad_npy = [np.random.normal(size=deconv_data.shape),
                                  np.random.normal(size=(num_filter, input_shape[1]) + kernel)]
    deconv_addto_args_grad = [mx.nd.array(deconv_addto_args_grad_npy[0]),
                              mx.nd.array(deconv_addto_args_grad_npy[1])]
    exe_deconv = deconv.bind(default_context(), args=deconv_args, args_grad=deconv_args_grad)
    exe_deconv.forward(is_train=True)
    deconv_out_grad = conv_data[:]
    exe_deconv.backward(deconv_out_grad)
    # Weight gradients of conv (A) and deconv (B) must agree.
    assert_almost_equal(conv_args_grad[1], deconv_args_grad[1], rtol=1e-3, atol=1e-2)
    # Test AddTo: with grad_req="add" the weight gradient must accumulate
    # onto the pre-filled buffer.
    exe_deconv_addto = deconv.bind(default_context(), args=deconv_args,
                                   args_grad=deconv_addto_args_grad,
                                   grad_req="add")
    exe_deconv_addto.forward(is_train=True)
    deconv_out_grad = conv_data[:]
    exe_deconv_addto.backward(deconv_out_grad)
    assert_almost_equal(conv_args_grad[1].asnumpy() + deconv_addto_args_grad_npy[1],
                        deconv_addto_args_grad[1].asnumpy(), rtol=1e-3, atol=1e-2)
def check_deconvolution_target_shape(input_shape, kernel, stride, pad, adj, target_shape=None):
    """Check Deconvolution output-shape inference.

    When `target_shape` is given, the operator must honor it (pad/adj are
    then ignored); otherwise the fixture parameters are chosen so every
    spatial dimension infers to 8.  (Removed unused locals `arg_names`
    and `arg_shapes` from the original.)
    """
    data = mx.sym.Variable(name="data")
    if target_shape:
        deconv = mx.sym.Deconvolution(
            data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5,
            target_shape = target_shape)
    else:
        deconv = mx.sym.Deconvolution(
            data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5)
    _, out_shapes, _ = deconv.infer_shape(data=input_shape)
    default_target_size = 8
    if target_shape is None:
        target_shape = (default_target_size,) * len(kernel)
    # Output layout: (batch, num_filter=5, *spatial)
    assert out_shapes[0] == (input_shape[0], 5) + target_shape
@with_seed()
def test_deconvolution():
    """Run the deconvolution checks over matching 2-D and 1-D scenarios.

    The two dimensionalities exercise identical per-dimension parameters,
    so the scenarios are generated from one list via `rep`.
    """
    for ndim in (2, 1):
        def rep(v, _n=ndim):
            # Repeat a per-dimension value `ndim` times.
            return (v,) * _n

        # Explicit target_shape: pad/adj are then ignored by the operator.
        check_deconvolution_target_shape(
            input_shape=(2, 3) + rep(4),
            kernel=rep(3),
            stride=rep(2),
            target_shape=rep(8),
            pad=rep(99),    # will be ignored
            adj=rep(101),   # will be ignored
        )
        # Inferred target shape from pad/adj.
        check_deconvolution_target_shape(
            input_shape=(2, 3) + rep(4),
            kernel=rep(3),
            stride=rep(2),
            pad=rep(1),
            adj=rep(1),
        )
        # Forward/backward identity checks at several sizes.
        check_deconvolution_forward_backward(
            input_shape=(1, 1) + rep(5),
            num_filter=1,
            kernel=rep(3),
            stride=rep(1),
            pad=rep(1)
        )
        check_deconvolution_forward_backward(
            input_shape=(32, 3) + rep(28),
            num_filter=3,
            kernel=rep(3),
            stride=rep(1),
            pad=rep(1)
        )
        check_deconvolution_forward_backward(
            input_shape=(10, 3) + rep(403),
            num_filter=3,
            kernel=rep(7),
            stride=rep(5),
            pad=rep(2)
        )
        # Weight-gradient equivalence between conv and deconv.
        check_deconvolution_gradient(
            input_shape=(1, 3) + rep(5),
            num_filter=3,
            pad=rep(1)
        )
        check_deconvolution_gradient(
            input_shape=(5, 3) + rep(100),
            num_filter=3,
            pad=rep(3)
        )
@with_seed()
def test_deconvolution_forward_with_bias():
    """Deconvolution forward must work with a bias term (no_bias=False)."""
    def _run_forward(shape=(1, 16, 5, 5), num_filter=32, num_group=1, kernel=(3, 3), pad=(1, 1)):
        x = mx.sym.Variable('x')
        w = mx.sym.Variable('w')
        input_data = mx.random.uniform(-5, 5, shape, ctx=mx.cpu())
        y = mx.sym.Deconvolution(data=x, weight=w, num_filter=num_filter,
                                 num_group=num_group, kernel=kernel, no_bias=False, pad=pad)
        exe = y.simple_bind(ctx=mx.cpu(), x=shape, grad_req='null')
        # Fill data and weight with random values; bias keeps its default.
        for arr in exe.arg_arrays[:2]:
            arr[:] = np.random.normal(size=arr.shape)
        exe.forward(is_train=False)
        # Pull the result to the host to force the computation to complete.
        exe.outputs[0].asnumpy()

    # 1-D and 2-D cases, single- and multi-sample batches.
    _run_forward((1, 16, 5), 32, 1, (3,), (1,))
    _run_forward((32, 16, 5), 32, 1, (3,), (1,))
    _run_forward((1, 16, 5, 5), 32, 1, (3, 3), (1, 1))
    _run_forward((32, 16, 5, 5), 32, 1, (3, 3), (1, 1))
def check_nearest_upsampling_with_shape(shapes, scale, root_scale):
    """Forward+backward nearest-neighbour UpSampling over multiple inputs.

    Verifies that backprop of the op's own output scales each input's
    gradient by root_scale^2 * scale^(2k) for the k-th input.
    """
    names = ['arg_%d' % i for i in range(len(shapes))]
    arr = {name: mx.random.uniform(-10.0, 10.0, shape, ctx=mx.cpu()).copyto(default_context())
           for name, shape in zip(names, shapes)}
    arr_grad = {name: mx.nd.zeros(shape) for name, shape in zip(names, shapes)}
    up = mx.sym.UpSampling(*[mx.sym.Variable(name) for name in names],
                           sample_type='nearest', scale=root_scale)
    exe = up.bind(default_context(), args=arr, args_grad=arr_grad)
    exe.forward(is_train=True)
    exe.backward(exe.outputs)
    for k, name in enumerate(names):
        assert_allclose(arr[name].asnumpy() * root_scale**2 * scale**(2 * k),
                        arr_grad[name].asnumpy(), rtol=1e-4)
def check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter):
    """Run bilinear UpSampling forward/backward and check the output shape.

    NOTE(review): `weight_shape` and `scale` are accepted but never read in
    the body -- the weight shape comes from infer_shape instead; confirm
    whether they were meant to be used.
    """
    def _init_bilinear(arr, f):
        # Fill `arr` with the standard bilinear-interpolation kernel for
        # upsampling factor f (same scheme as FCN-style deconv init).
        weight = np.zeros(np.prod(arr.shape), dtype='float32')
        shape = arr.shape
        c = (2 * f - 1 - f % 2) / (2. * f)
        for i in range(np.prod(shape)):
            x = i % shape[3]
            y = (i // shape[3]) % shape[2]
            weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
        arr[:] = weight.reshape(shape)
        return arr

    up = mx.sym.UpSampling(mx.sym.Variable("data"),
        mx.sym.Variable('weight'), sample_type='bilinear', scale=root_scale,
        num_filter=num_filter, num_args=2)
    arg_shapes, out_shapes, _ = up.infer_shape(data=data_shape)
    arr = {'data': mx.random.uniform(-5, 5, data_shape, ctx=mx.cpu()).copyto(default_context()),
           'weight':  mx.nd.array(_init_bilinear(mx.ndarray.empty(arg_shapes[1]).asnumpy(), root_scale))}
    arr_grad = [mx.nd.empty(s) for s in arg_shapes]
    exe = up.bind(default_context(), args=arr, args_grad=arr_grad)
    exe.forward(is_train=True)
    out = exe.outputs[0].asnumpy()
    exe.backward(exe.outputs)
    # Each spatial dimension must grow by root_scale.
    target_shape = (data_shape[2] * root_scale, data_shape[3] * root_scale)
    assert out.shape == data_shape[:2] + target_shape
@with_seed()
def test_nearest_upsampling():
    """Sweep nearest-neighbour UpSampling over scales, input counts and sizes."""
    # product(..., repeat=4) iterates in the same nesting order as the four
    # original nested loops: root_scale outermost, base innermost.
    for root_scale, scale, num_shape, base in itertools.product([1, 2, 3], repeat=4):
        shapes = [(1, 3,
                   base * root_scale * scale**(num_shape - 1 - i),
                   base * root_scale * scale**(num_shape - 1 - i))
                  for i in range(num_shape)]
        check_nearest_upsampling_with_shape(shapes, scale, root_scale)
@with_seed()
def test_bilinear_upsampling():
    """Sweep bilinear UpSampling configurations.

    Bilinear mode takes exactly one data and one weight input, so the
    multi-input mode exercised by the nearest test is not applicable here.
    """
    for root_scale, scale, num_filter, base in itertools.product(
            [2, 3], [1, 2, 3], [1, 2, 3], [1, 2, 3]):
        side = base * root_scale * scale
        kernel = 2 * root_scale - root_scale % 2
        data_shape = (1, num_filter, side, side)
        weight_shape = (1, num_filter, kernel, kernel)
        check_bilinear_upsampling_with_shape(data_shape, weight_shape,
                                             scale, root_scale, num_filter)
@with_seed()
def test_batchnorm_training():
    """Numeric-gradient checks for BatchNorm / BatchNorm_v1.

    Exercises fix_gamma on/off, use_global_stats on/off, and (for BatchNorm
    only) every channel axis including negative axes.
    """
    def check_batchnorm_training(stype):
        for shape in [(2, 3), (2, 3, 2, 2), (2, 8, 2, 2)]:
            data_tmp = np.random.normal(-0.1, 0.1, size=shape)
            # Channel axis is 1 by default; per-channel parameter shape.
            s = shape[1],
            gamma = np.ones(s)
            beta = np.ones(s)
            gamma[1] = 3
            beta[0] = 3
            rolling_mean = np.random.uniform(size=s)
            rolling_std = np.random.uniform(size=s)
            data = mx.symbol.Variable('data', stype=stype)
            in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
                           mx.nd.array(beta).tostype(stype)]
            # Auxiliary moving-average states passed alongside the inputs.
            mean_std = [mx.nd.array(rolling_mean).tostype(stype), mx.nd.array(rolling_std).tostype(stype)]
            test = mx.symbol.BatchNorm_v1(data, fix_gamma=True)
            check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
            test = mx.symbol.BatchNorm(data, fix_gamma=True)
            check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
            test = mx.symbol.BatchNorm_v1(data, fix_gamma=True, use_global_stats=True)
            check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
            test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True)
            check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
            test = mx.symbol.BatchNorm_v1(data, fix_gamma=False)
            check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
            test = mx.symbol.BatchNorm(data, fix_gamma=False)
            check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
            test = mx.symbol.BatchNorm_v1(data, fix_gamma=False, use_global_stats=True)
            check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
            test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True)
            check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
            # Test varying channel axis
            dim = len(shape)
            for chaxis in range(-dim, dim):
                # Normalize negative axis values for indexing into shape.
                chaxis_true = chaxis
                if chaxis < 0:
                    chaxis_true = dim + chaxis
                shapex = shape
                channel_count = shapex[chaxis_true]
                data_tmp = np.random.normal(-0.1, 0.1, size=shapex)
                gamma = np.ones(channel_count)
                beta = np.ones(channel_count)
                if channel_count > 1:
                    gamma[1] = 3
                beta[0] = 3
                in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
                               mx.nd.array(beta).tostype(stype)]
                xrolling_mean = np.random.uniform(size=channel_count)
                xrolling_std = np.random.uniform(size=channel_count)
                xmean_std = [mx.nd.array(xrolling_mean).tostype(stype),
                             mx.nd.array(xrolling_std).tostype(stype)]
                test = mx.symbol.BatchNorm(data, fix_gamma=True, axis=chaxis)
                check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
                test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True, axis=chaxis)
                check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
                test = mx.symbol.BatchNorm(data, fix_gamma=False, axis=chaxis)
                check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
                test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True, axis=chaxis)
                check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
    check_batchnorm_training('default')
@with_seed()
def test_batchnorm():
    """Compare BatchNorm / SyncBatchNorm against hand-derived forward,
    backward, and running-statistics updates, over shapes 2-D..5-D, every
    axis, and both cudnn settings.
    """
    momentum = 0.9
    epsilon = 1e-5
    def _test_batchnorm_impl(op, shape, axis, cudnn_off, output_mean_var):
        print(str((op, shape, axis, cudnn_off)))
        kwargs = dict(output_mean_var=output_mean_var)
        if op == mx.nd.contrib.SyncBatchNorm:
            # SyncBatchNorm only supports channel axis 1 and no cudnn toggle.
            if axis != 1:
                return
            key = str(op) + str(shape) + str(axis)
            kwargs.update(dict(key=key))
            if cudnn_off:
                return
        else:
            kwargs.update(dict(axis=axis, cudnn_off=cudnn_off))
        nch = shape[axis]
        bn_gamma = mx.nd.random.uniform(shape=(nch,))
        bn_gamma.attach_grad()
        bn_beta = mx.nd.random.uniform(shape=(nch,))
        bn_beta.attach_grad()
        bn_running_mean = mx.nd.zeros(nch)
        bn_running_var = mx.nd.ones(nch)
        # Reference running statistics maintained in parallel.
        running_mean = mx.nd.zeros(nch)
        running_var = mx.nd.ones(nch)
        num_iters = 10
        # Shape used to broadcast per-channel parameters over the data.
        expand_shape = [1] * len(shape)
        expand_shape[axis] = shape[axis]
        for _ in range(num_iters):
            data = mx.nd.random.uniform(shape=shape)
            data.attach_grad()
            ograd = mx.nd.random.uniform(shape=shape)
            with mx.autograd.record():
                output = op(data, bn_gamma, bn_beta,
                            bn_running_mean, bn_running_var,
                            momentum=momentum, eps=epsilon,
                            fix_gamma=False, **kwargs)
                if output_mean_var:
                    output, output_mean, output_std = output
                output.backward(ograd)
            mx.nd.waitall()
            # Reference forward: normalize over all axes except `axis`.
            data_mean = data.mean(
                axis=axis, exclude=True, keepdims=True)
            data_var = (data - data_mean).square().mean(axis=axis,
                                                        exclude=True,
                                                        keepdims=True)
            target_output = (data - data_mean) / \
                (data_var + epsilon).sqrt() * \
                bn_gamma.reshape(expand_shape) + \
                bn_beta.reshape(expand_shape)
            # squeeze data_mean and data_var
            data_mean_flat = data_mean.squeeze()
            data_var_flat = data_var.squeeze()
            # Exponential-moving-average update of the running statistics.
            running_mean = running_mean * momentum + \
                data_mean_flat * (1 - momentum)
            running_var = running_var * momentum + \
                data_var_flat * (1 - momentum)
            # Reference backward (standard batch-norm gradient derivation):
            # nd = 1/sqrt(var+eps), nx = normalized input, m = elements per
            # channel; dX combines the direct, variance and mean terms.
            W = bn_gamma.reshape(expand_shape)
            dnx = ograd * W
            xsm = data - data_mean
            nd = 1.0 / mx.nd.sqrt(data_var + epsilon)
            nx = xsm * nd
            m = np.prod(shape) / shape[axis]
            dvar = (dnx * xsm).sum(axis=axis, keepdims=True,
                                   exclude=True) * (-0.5) * mx.nd.power(nd, 3)
            dmean = -nd * dnx.sum(axis=axis, keepdims=True, exclude=True) - \
                dvar * xsm.mean(axis=axis, keepdims=True,
                                exclude=True) * 2.0
            dX = dnx * nd + dvar * xsm * (2.0 / m) + dmean * (1.0 / m)
            dW = (ograd * nx).sum(axis=axis, exclude=True)
            db = ograd.sum(axis=axis, exclude=True)
            atol = 1e-2
            rtol = 1e-2
            if output_mean_var:
                assert_almost_equal(output_mean.asnumpy(),
                                    data_mean_flat.asnumpy(),
                                    atol=atol, rtol=rtol)
                # BatchNorm reports inverse std; SyncBatchNorm reports var.
                if op != mx.nd.contrib.SyncBatchNorm:
                    assert_almost_equal(output_std.asnumpy(),
                                        (1.0 / (data_var_flat +
                                                epsilon).sqrt()).asnumpy(),
                                        atol=atol, rtol=rtol)
                else:
                    assert_almost_equal(output_std.asnumpy(),
                                        data_var_flat.asnumpy(),
                                        atol=atol, rtol=rtol)
            assert_almost_equal(output.asnumpy(), target_output.asnumpy(),
                                atol=atol, rtol=rtol)
            assert_almost_equal(bn_running_mean.asnumpy(
            ), running_mean.asnumpy(), atol=atol, rtol=rtol)
            assert_almost_equal(bn_running_var.asnumpy(
            ), running_var.asnumpy(), atol=atol, rtol=rtol)
            assert_almost_equal(data.grad.asnumpy(),
                                dX.asnumpy(), atol=atol, rtol=rtol)
            assert_almost_equal(
                bn_gamma.grad.asnumpy(), dW.asnumpy(), atol=atol, rtol=rtol)
            assert_almost_equal(
                bn_beta.grad.asnumpy(), db.asnumpy(), atol=atol, rtol=rtol)
    for op in [mx.nd.BatchNorm, mx.nd.contrib.SyncBatchNorm]:
        for shape in [(24, 2), (24, 3, 4), (24, 4, 4, 4), (24, 8, 4, 4), (24, 5, 6, 4, 4)]:
            for axis in range(len(shape)):
                for cudnn_off in [False, True]:
                    for output_mean_var in [False, True]:
                        _test_batchnorm_impl(op, shape, axis,
                                             cudnn_off, output_mean_var)
@with_seed()
def test_groupnorm():
    """Check mx.sym.GroupNorm forward and backward against a NumPy reference."""
    # Higher-precision accumulation dtype for the moment computations,
    # mirroring what the MXNet kernel does internally.
    acc_types = {'float16': 'float32', 'float32': 'float64', 'float64': 'float64'}
    def x_hat_helper(x, num_groups, eps):
        # Normalize per (sample, group). Returns the normalized tensor in the
        # grouped (N, G, C/G, H, W) layout plus the per-group mean and std.
        dtype = x.dtype
        dshape = x.shape
        assert len(dshape) == 4
        acc_type = acc_types[str(dtype)]
        new_shape = (dshape[0], num_groups, int(dshape[1] / num_groups), dshape[2], dshape[3])
        new_moments_shape = (dshape[0], num_groups, 1, 1, 1)
        data = x.reshape(new_shape)
        # Accumulate moments in higher precision, then cast back to the data dtype.
        mean = np.mean(data, axis=(2, 3, 4), keepdims=False, dtype=acc_type).astype(dtype)
        std = np.sqrt(np.var(data, axis=(2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype) + eps)
        x_hat = (data - mean.reshape(new_moments_shape)) / std.reshape(new_moments_shape)
        return x_hat, mean, std
    def np_groupnorm(data, gamma, beta, num_groups, eps):
        # Reference forward pass: returns (out, mean, std).
        # Take the output shape from the input itself instead of closing over
        # the enclosing scope's `dshape` (which is only bound later in the test
        # body and made this helper depend on definition/call order).
        dshape = data.shape
        new_param_shape = (1, num_groups, 1, 1, 1)
        x_hat, mean, std = x_hat_helper(data, num_groups, eps)
        out = x_hat * gamma.reshape(new_param_shape) + beta.reshape(new_param_shape)
        return out.reshape(dshape), mean, std
    def np_groupnorm_grad(ograd, data, gamma, beta, mean, std, num_groups, eps):
        # Reference backward pass: returns (d_data, d_gamma, d_beta).
        x_hat, mean, std = x_hat_helper(data, num_groups, eps)
        new_shape = x_hat.shape
        dshape = data.shape
        dtype = data.dtype
        new_moments_shape = (new_shape[0], num_groups, 1, 1, 1)
        new_param_shape = (1, num_groups, 1, 1, 1)
        acc_type = acc_types[str(dtype)]
        ograd = ograd.reshape(new_shape)
        data = data.reshape(new_shape)
        gamma = gamma.reshape(new_param_shape)
        beta = beta.reshape(new_param_shape)
        mean = mean.reshape(new_moments_shape)
        std = std.reshape(new_moments_shape)
        # Parameter gradients reduce over everything except the group axis.
        beta_grad = np.sum(ograd, axis=(0, 2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype)
        gamma_grad = np.sum(x_hat * ograd, axis=(0, 2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype)
        x_hat_grad = ograd * gamma
        ograd_mult = x_hat_grad / std
        red_out = np.mean(ograd_mult, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
        data_grad = ograd_mult - red_out
        red_out = np.mean(ograd_mult * x_hat, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
        data_grad = data_grad - x_hat * red_out
        return data_grad.reshape(dshape), gamma_grad, beta_grad
    # Random NCHW problem size; channel count is a multiple of the group count.
    batch_size = random.randint(1, 8)
    num_groups = random.randint(2, 3)
    num_channels = random.randint(2, 3) * num_groups
    height = random.randint(1, 5)
    width = random.randint(1, 5)
    dshape = (batch_size, num_channels, height, width)
    param_shape = (num_groups,)
    temp_shape = (batch_size, num_groups, int(num_channels / num_groups), height, width)
    np_data = np.random.uniform(0.2, 1.0, dshape)
    np_gamma = np.random.uniform(-1.0, 1.0, param_shape)
    np_beta = np.random.uniform(-1.0, 1.0, param_shape)
    data_sym = mx.sym.Variable("data")
    gamma_sym = mx.sym.Variable("gamma")
    beta_sym = mx.sym.Variable("beta")
    for dtype in [np.float16, np.float32, np.float64]:
        # Looser epsilon for half precision to keep the reference numerically stable.
        eps = 1e-2 if dtype == np.float16 else 1e-5
        mx_data = mx.nd.array(np_data, dtype=dtype)
        mx_gamma = mx.nd.array(np_gamma, dtype=dtype)
        mx_beta = mx.nd.array(np_beta, dtype=dtype)
        np_out, np_mean, np_std = np_groupnorm(np_data.astype(dtype),
                                               np_gamma.astype(dtype),
                                               np_beta.astype(dtype),
                                               num_groups=num_groups,
                                               eps=eps)
        mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
                                  num_groups=num_groups, eps=eps, output_mean_var=True)
        check_symbolic_forward(mx_sym, [mx_data, mx_gamma, mx_beta], [np_out, np_mean, np_std],
                               rtol=1e-2 if dtype == np.float16 else 1e-3,
                               atol=5e-3 if dtype == np.float16 else 1e-4, dtype=dtype)
        mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
                                  num_groups=num_groups, eps=eps, output_mean_var=False)
        np_ograd = np.random.uniform(-1.0, 1.0, dshape).astype(dtype)
        np_data_grad, np_gamma_grad, np_beta_grad = np_groupnorm_grad(np_ograd,
                                                                      np_data.astype(dtype),
                                                                      np_gamma.astype(dtype),
                                                                      np_beta.astype(dtype),
                                                                      np_mean, np_std,
                                                                      num_groups, eps)
        check_symbolic_backward(mx_sym, [mx_data, mx_gamma, mx_beta], [mx.nd.array(np_ograd)],
                                [np_data_grad, np_gamma_grad, np_beta_grad],
                                rtol=1e-2 if dtype == np.float16 else 1e-3,
                                atol=5e-2 if dtype == np.float16 else 1e-4, dtype=dtype)
@with_seed()
def test_convolution_grouping():
    """A grouped convolution must produce the same outputs and gradients as an
    explicit graph that slices input/weight/bias per group, convolves each
    slice independently, and concatenates the results."""
    num_filter = 4
    for dim in [1, 2, 3]:
        kernel = (3,) * dim
        shape = (1, 4) + (9,) * dim
        for num_group in [1, 2]:
            x = mx.sym.Variable('x')
            w = mx.sym.Variable('w')
            b = mx.sym.Variable('b')
            grouped = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter,
                                         num_group=num_group, kernel=kernel)
            x_parts = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
            w_parts = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
            b_parts = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
            per_group = [mx.sym.Convolution(data=x_parts[i], weight=w_parts[i], bias=b_parts[i],
                                            num_filter=num_filter // num_group, kernel=kernel)
                         for i in range(num_group)]
            reference = mx.sym.Concat(*per_group)
            exe_grouped = grouped.simple_bind(default_context(), x=shape)
            exe_reference = reference.simple_bind(default_context(), x=shape,
                                                  w=(num_filter, shape[1] // num_group) + kernel,
                                                  b=(num_filter,))
            # Share identical random parameters between both executors.
            for arg_g, arg_r in zip(exe_grouped.arg_arrays, exe_reference.arg_arrays):
                arg_g[:] = np.float32(np.random.normal(size=arg_g.shape))
                arg_r[:] = arg_g
            exe_grouped.forward(is_train=True)
            exe_grouped.backward(exe_grouped.outputs[0])
            exe_reference.forward(is_train=True)
            exe_reference.backward(exe_reference.outputs[0])
            # Outputs and all input gradients must match.
            for out_g, out_r in zip(exe_grouped.outputs + exe_grouped.grad_arrays,
                                    exe_reference.outputs + exe_reference.grad_arrays):
                np.testing.assert_allclose(out_g.asnumpy(), out_r.asnumpy(), rtol=1e-3, atol=1e-3)
@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/14052")
@with_seed()
def test_depthwise_convolution():
    """Depthwise convolution (num_group == num_channels == num_filter) must
    match an explicit per-channel slice/convolve/concat reference graph,
    for 1D and 2D inputs."""
    # Sweep spatial rank, channel count, kernel size, stride, padding and input size.
    for dim in [1,2]:
        for num_base in [1, 4, 16, 32, 64]:
            for kernel_x in [3, 5]:
                for stride_x in [1, 2]:
                    for pad_x in [0, 1]:
                        for in_size in [7, 32]:
                            kernel = (kernel_x,) * dim
                            stride = (stride_x,) * dim
                            pad = (pad_x,) * dim
                            # Depthwise: one group per input channel.
                            num_filter = num_base
                            num_group = num_base
                            shape = (2, num_base) + (in_size,) * dim
                            x = mx.sym.Variable('x')
                            w = mx.sym.Variable('w')
                            b = mx.sym.Variable('b')
                            y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group,
                                    kernel=kernel, stride=stride, pad=pad)
                            xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
                            wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
                            bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
                            # Reference graph: convolve each channel independently, then concat.
                            y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
                                                    num_filter=num_filter//num_group, kernel=kernel,
                                                    stride=stride, pad=pad)
                                                for i in range(num_group)])
                            dev = default_context()
                            exe1 = y1.simple_bind(dev, x=shape)
                            exe2 = y2.simple_bind(dev, x=shape, w=(num_filter, shape[1]//num_group)+kernel,
                                    b=(num_filter,))
                            # Use identical random parameters in both executors.
                            for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
                                arr1[:] = np.random.normal(size=arr1.shape)
                                arr2[:] = arr1
                            exe1.forward(is_train=True)
                            exe1.backward(exe1.outputs[0])
                            exe2.forward(is_train=True)
                            exe2.backward(exe2.outputs[0])
                            # Outputs and all input gradients must match.
                            for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
                                assert_allclose(arr1, arr2, rtol=1e-3, atol=1e-3)
@with_seed()
def test_convolution_independent_gradients():
    """Check that Convolution gradients for x/w/b can be requested with
    independent grad_req settings ("null"/"write"/"add") and still agree with
    a reference executor that uses one shared grad_req for all inputs."""
    # NOTE(zixuanweeei): Flaky test tracked by https://github.com/apache/incubator-mxnet/issues/15603.
    # GPU context will be enabled after figuring out the possible issue tracked at
    # https://github.com/apache/incubator-mxnet/issues/15638.
    ctx = mx.cpu()
    atol = 1.0e-3
    rtol = 1.0e-3
    reqs = ["null", "write", "add"]
    var_names = ["x", "w", "b"]
    dims = [1, 2]
    num_bases = [1, 8]
    kernel_xs = [3, 5]
    stride_xs = [1, 2]
    pad_xs = [0, 1]
    in_sizes = [7, 32]
    no_biases = [True, False]
    for dim, num_base, kernel_x, stride_x, pad_x , in_size, no_bias in \
            itertools.product(dims, num_bases, kernel_xs, stride_xs, pad_xs, in_sizes, no_biases):
        # Prepare params shape
        kernel = (kernel_x,) * dim
        stride = (stride_x,) * dim
        pad = (pad_x,) * dim
        num_filter = num_base
        x_shape = (2, num_base) + (in_size,) * dim
        w_shape = (num_filter, num_base) + kernel
        # Symbols definition
        x = mx.sym.Variable('x')
        w = mx.sym.Variable('w')
        b = mx.sym.Variable('b') if not no_bias else None
        conv = mx.sym.Convolution(x, w, b, num_filter=num_filter,
                                  kernel=kernel, stride=stride, pad=pad, no_bias=no_bias)
        for req_kind in reqs:
            # Binding args for conv with possible dependent gradients;
            # the reference executor uses the same grad_req for every input.
            base_args = {
                'x': mx.nd.random.normal(shape=x_shape, ctx=ctx),
                'w': mx.nd.random.normal(shape=w_shape, ctx=ctx),
                'b': mx.nd.random.normal(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
            args1 = copy.deepcopy(base_args)
            grad1 = {
                'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
                'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
                'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
            grad_req1 = [req_kind] * 3
            grad_req1 = dict(zip(var_names, grad_req1))
            exe1 = conv.bind(ctx, args1, args_grad=grad1, grad_req=grad_req1)
            exe1.forward(is_train=True)
            exe1.backward(exe1.outputs[0])
            for x_req, w_req, b_req in itertools.product(reqs, repeat=3):
                # Binding args for conv with independent gradients
                args2 = copy.deepcopy(base_args)  # Deepcopy the same params of `exe1`
                grad2 = {
                    'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
                    'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
                    'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
                grad_req2 = {"x": x_req, "w": w_req, "b": b_req}
                exe2 = conv.bind(ctx, args2, args_grad=grad2, grad_req=grad_req2)
                exe2.forward(is_train=True)
                # Forward results must agree regardless of grad_req settings.
                np.testing.assert_allclose(exe1.outputs[0].asnumpy(),
                                           exe2.outputs[0].asnumpy(), rtol=rtol, atol=atol)
                exe2.backward(exe2.outputs[0])
                for var_name in var_names:
                    if var_name == "b" and no_bias:
                        continue
                    if grad_req2[var_name] == "null":
                        # A "null" request must leave the gradient buffer untouched (zeros).
                        exe2_var_grad = grad2[var_name].asnumpy()
                        np.testing.assert_allclose(exe2_var_grad,
                                                   np.zeros_like(exe2_var_grad), rtol=rtol, atol=atol)
                    if grad_req2[var_name] != grad_req1[var_name]:
                        continue
                    # Where the requests match, args and grads must match the reference.
                    np.testing.assert_allclose(args1[var_name].asnumpy(),
                                               args2[var_name].asnumpy(), rtol=rtol, atol=atol)
                    np.testing.assert_allclose(grad1[var_name].asnumpy(),
                                               grad2[var_name].asnumpy(), rtol=rtol, atol=atol)
def gen_broadcast_data(idx):
    """Return a pair [lhs, rhs] of random arrays with broadcast-compatible shapes.

    The first 31 calls (idx below the table size) use a fixed table of shape
    pairs; larger idx values generate random shapes of rank 1-5 where each side
    independently keeps or collapses each dimension to 1.
    """
    fixed_shape_pairs = np.array(
        [[[2, 5, 1, 30, 7], [1, 5, 448, 30, 1]],
         [[10, 49, 1, 77, 17], [10, 1, 2, 1, 17]],
         [[13, 2, 65, 2, 1], [13, 1, 65, 1, 225]],
         [[9, 434, 4, 2, 37], [9, 1, 4, 1, 37]],
         [[2, 52, 1, 4, 1], [1, 52, 60, 1, 37]],
         [[1, 23, 7, 122, 50], [2, 1, 7, 1, 50]],
         [[1, 17, 1, 5, 1], [22, 1, 2, 1, 28]],
         [[29, 1, 2, 1, 8], [29, 22, 1, 130, 1]],
         [[2, 36, 1, 427, 3], [1, 36, 11, 427, 1]],
         [[1, 2, 1, 100, 7], [1, 2, 448, 100, 1]],
         [[1, 2, 495, 77, 7], [1, 2, 1, 1, 7]],
         [[1, 43, 65, 2, 1], [1, 43, 65, 1, 225]],
         [[1, 92, 434, 2, 2], [1, 92, 1, 2, 2]],
         [[1, 92, 1, 4, 1], [1, 92, 134, 1, 17]],
         [[1, 53, 2, 122, 143], [1, 1, 2, 1, 143]],
         [[1, 179, 1, 87, 17], [1, 179, 1, 1, 17]],
         [[1, 1, 17, 5, 1], [1, 22, 1, 1, 28]],
         [[1, 2, 1, 1, 8], [1, 2, 52, 430, 1]],
         [[1, 163, 1, 22, 3], [1, 163, 116, 22, 1]],
         [[1, 1, 44, 30, 7], [1, 1, 44, 30, 1]],
         [[1, 1, 1, 1, 28], [1, 127, 1, 5, 28]],
         [[1, 2, 394, 38, 1], [1, 2, 394, 38, 16]],
         [[1, 10, 49, 77, 17], [1, 1, 1, 1, 17]],
         [[1, 431, 6, 2, 225], [1, 1, 6, 2, 225]],
         [[1, 15, 1, 28, 1], [1, 15, 1, 28, 463]],
         [[1, 129, 2, 48, 96], [1, 129, 2, 1, 1]],
         [[1, 1, 403, 17, 2], [1, 44, 403, 17, 2]],
         [[1, 1, 65, 2, 22], [1, 1, 65, 1, 1]],
         [[1, 24, 103, 17, 18], [1, 24, 1, 1, 1]],
         [[1, 1, 1, 1, 2], [1, 24, 194, 50, 1]],
         [[1, 1, 107, 84, 9], [1, 1, 1, 1, 1]]])
    if idx < fixed_shape_pairs.shape[0]:
        lhs_shape = fixed_shape_pairs[idx][0]
        rhs_shape = fixed_shape_pairs[idx][1]
    else:
        # Random rank 1-5 with dimensions 1-5.
        rank = np.random.randint(1, 6)
        full_shape = np.random.randint(1, 6, size=(rank,))
        # With probability 1/5, force a side to keep every dimension of full_shape.
        lhs_keep_all = np.random.randint(0, 5)
        rhs_keep_all = np.random.randint(0, 5)
        lhs_flags = np.random.randint(0, 2, size=rank)
        rhs_flags = np.random.randint(0, 2, size=rank)
        if lhs_keep_all == 4:
            lhs_flags = np.ones(rank)
        if rhs_keep_all == 4:
            rhs_flags = np.ones(rank)
        # Collapse unflagged dimensions to 1 so the pair always broadcasts.
        lhs_shape = full_shape.copy()
        rhs_shape = full_shape.copy()
        lhs_shape[np.where(lhs_flags == 0)] = 1
        rhs_shape[np.where(rhs_flags == 0)] = 1
    return [np.random.random(lhs_shape), np.random.random(rhs_shape)]
def gen_broadcast_data_int(idx):
    """Integer variant of gen_broadcast_data: scale by 100 and round to int."""
    lhs, rhs = gen_broadcast_data(idx)
    return [np.round(lhs * 100).astype(int), np.round(rhs * 100).astype(int)]
def gen_binary_data(dummy):
    """Return two random arrays sharing one random shape (rank 1-5, dims 1-5).

    The `dummy` argument is ignored; it exists so this generator has the same
    call signature as the broadcast generators.
    """
    rank = np.random.randint(1, 6)
    dims = np.random.randint(1, 6, size=(rank,))
    return [np.random.random(dims), np.random.random(dims)]
def gen_binary_data_int(dummy):
    """Integer variant of gen_binary_data: scale by 100 and round to int."""
    lhs, rhs = gen_binary_data(dummy)
    return [np.round(lhs * 100).astype(int), np.round(rhs * 100).astype(int)]
def check_binary_op_forward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5, mx_nd_func=None):
    """Run `symbol` forward on 200 generated inputs and compare against the
    NumPy `baseline(lhs, rhs)`; optionally also check the imperative
    `mx_nd_func` against the same baseline. On mismatch, log the offending
    elements (including hex bit patterns) before asserting."""
    sample_num = 200
    for i in range(sample_num):
        d = gen_data(i)
        y = symbol.bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])})
        y.forward(is_train=True)
        y = y.outputs[0].asnumpy()
        x = baseline(d[0], d[1]).astype(y.dtype)
        #np.set_printoptions(precision=20)
        a = d[0]
        b = d[1]
        #print("a: {} {}".format(a.dtype, a))
        #print("a: {} {}".format(b.dtype, b))
        #print("x: {} {}".format(x.dtype, x))
        #print("y: {} {}".format(y.dtype, y))
        if mx_nd_func is not None:
            # Imperative path must agree with the baseline too.
            d0 = mx.nd.array(d[0], dtype=d[0].dtype)
            d1 = mx.nd.array(d[1], dtype=d[1].dtype)
            assert_almost_equal(y, mx_nd_func(d0, d1).asnumpy(), rtol=rtol, atol=atol)
        # Elementwise mask of entries outside the tolerance envelope.
        idx = np.abs(x-y) > atol+rtol*np.abs(x)
        if idx.any():
            import binascii
            np.set_printoptions(precision=20)
            logging.error('found precision problem:')
            d[0] = np.broadcast_to(d[0], x.shape)
            d[1] = np.broadcast_to(d[1], x.shape)
            logging.error('input a: {}'.format(d[0][idx]))
            logging.error('input b: {}'.format(d[1][idx]))
            logging.error("output x: {} {}".format(x.dtype, x))
            logging.error("output y: {} {}".format(y.dtype, y))
            def ftohex(xs):
                # Hex dump of the raw float64 bits, to diagnose ULP-level differences.
                import struct
                return list(map(lambda x: binascii.hexlify(struct.pack('d', x)), xs.flatten()))
            logging.error('output x in baseline(a, b): {}'.format(x[idx]))
            logging.error('output y in symbol(a, b): {}'.format(y[idx]))
            logging.error('output x in baseline(a,b) hex: {}'.format(ftohex(x[idx])))
            logging.error('output y in symbol(a,b) hex: {}'.format(ftohex(y[idx])))
            logging.error('input a hex: {}'.format(ftohex(d[0][idx])))
            logging.error('input a hex: {}'.format(ftohex(d[1][idx])))
            logging.error('diff: {}'.format(np.abs(x-y)[idx] - atol-rtol*np.abs(x)[idx]))
        assert_allclose(y, x, rtol=rtol, atol=atol)
def check_binary_op_backward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5):
    """Run backward on `symbol` for 200 generated inputs and compare the input
    gradients against `baseline(out_grad, lhs, rhs)`, summing the baseline
    gradients over broadcast dimensions where the input shapes differ."""
    sample_num = 200
    for i in range(sample_num):
        d = gen_data(i)
        out = np.random.random((d[0] + d[1]).shape)
        def reduce_op(shape, x):
            # Reduce the full-shape gradient x back down to `shape` by summing
            # every axis along which the input was broadcast.
            if shape == x.shape:
                return x
            keepdims_shape = list(x.shape)
            for i in range(len(shape)):
                if x.shape[i] != shape[i]:
                    keepdims_shape[i] = 1
                    x = np.sum(x, axis=i).reshape(keepdims_shape)
            return x
        baseline_grad1, baseline_grad2 = baseline(out, d[0], d[1])
        x_1 = reduce_op(d[0].shape, baseline_grad1)
        x_2 = reduce_op(d[1].shape, baseline_grad2)
        y_1 = mx.nd.empty(d[0].shape)
        y_2 = mx.nd.empty(d[1].shape)
        y = symbol.bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])},
                        args_grad=[y_1, y_2])
        y.forward(is_train=True)
        y.backward([mx.nd.array(out)])
        assert_allclose(y_1.asnumpy(), x_1, rtol=rtol, atol=atol)
        assert_allclose(y_2.asnumpy(), x_2, rtol=rtol, atol=atol)
@with_seed()
def test_binary_op():
    """Forward and backward checks for elementwise binary operators on
    same-shape random inputs: +, -, *, /, %, integer %, ** and !=."""
    a = mx.sym.Variable('a')
    b = mx.sym.Variable('b')
    def test_bplus(a, b):
        c = a + b
        check_binary_op_forward(c, lambda a, b: a + b, gen_binary_data)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_binary_data)
    def test_bminus(a, b):
        c = a - b
        check_binary_op_forward(c, lambda a, b: a - b, gen_binary_data)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_binary_data)
    def test_bmul(a, b):
        c = a * b
        check_binary_op_forward(c, lambda a, b: a * b, gen_binary_data)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_binary_data)
    def test_bdiv(a, b):
        c = a / b
        check_binary_op_forward(c, lambda a, b: a / b, gen_binary_data)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_binary_data)
    def test_bmod(a, b):
        # Python and numpy operate only in double so to avoid numerical errors we have to use
        # doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
        #c = a % b
        c = mx.sym.cast(a, dtype='float64') % mx.sym.cast(b, dtype='float64')
        # '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
        check_binary_op_forward(c, lambda a, b: np.float32(a) % np.float32(b), gen_binary_data, rtol=0, atol=0)
        check_binary_op_backward(c,
            lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
    def test_bmod_int(a, b):
        # Integer mod: the gradient is defined as zero for both inputs.
        c = mx.sym.cast(a, dtype='int32') % mx.sym.cast(b, dtype='int32')
        check_binary_op_forward(c, lambda a, b: a % b, gen_binary_data_int)
        check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data_int)
    def test_bpow(a, b):
        c = a ** b
        check_binary_op_forward(c, lambda a, b: a ** b, gen_binary_data)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
                                        g_out * a ** b * np.log(a)), gen_binary_data)
    def test_bneq(a, b):
        c = a != b
        # '!=' is sensitive to the precision of the comparison. Force numpy to match mxnet's float32.
        # Issue exposed with seed 1644387363
        check_binary_op_forward(c, lambda a, b: (np.float32(a) != np.float32(b)).astype(a.dtype), gen_binary_data)
        check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data)
    test_bplus(a, b)
    test_bminus(a, b)
    test_bmul(a, b)
    test_bdiv(a, b)
    test_bmod(a, b)
    test_bmod_int(a, b)
    test_bpow(a, b)
    test_bneq(a, b)
@with_seed()
def test_broadcast_binary_op():
    """Forward and backward checks for broadcasting binary operators,
    including arithmetic, comparison, min/max and logical variants."""
    def check_bmaxmin_gradient(test_sym, x, y, delta, rtol, atol):
        """This function ensures that checking the numerical gradient of
        broadcast_max/min is not crossing the boundary y=x where there
        is no gradient definition at those sigularities."""
        x_max = np.max(x)
        y = x_max + 2 * delta + np.random.random(y.shape)
        check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
        x_min = np.min(x)
        y = x_min - 2 * delta - np.random.random(y.shape)
        check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
    a = mx.sym.Variable('a')
    b = mx.sym.Variable('b')
    def test_bplus(a, b):
        c = mx.sym.broadcast_plus(a, b)
        check_binary_op_forward(c, lambda a, b: a + b, gen_broadcast_data, mx_nd_func=mx.nd.add)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_broadcast_data)
    def test_bminus(a, b):
        c = mx.sym.broadcast_minus(a, b)
        check_binary_op_forward(c, lambda a, b: a - b, gen_broadcast_data, mx_nd_func=mx.nd.subtract)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_broadcast_data)
    def test_bmul(a, b):
        c = mx.sym.broadcast_mul(a, b)
        check_binary_op_forward(c, lambda a, b: a * b, gen_broadcast_data, mx_nd_func=mx.nd.multiply)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_broadcast_data)
    def test_bdiv(a, b):
        c = mx.sym.broadcast_div(a, b)
        check_binary_op_forward(c, lambda a, b: a / b, gen_broadcast_data, mx_nd_func=mx.nd.divide)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_broadcast_data)
    def test_bmod(a_, b_):
        # Python and numpy operate only in double so to avoid numerical errors we have to use
        # doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
        a = mx.sym.cast(a_, dtype='float64')
        b = mx.sym.cast(b_, dtype='float64')
        # '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
        c = mx.sym.broadcast_mod(a, b)
        check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data, atol=1, mx_nd_func=mx.nd.modulo)
        # NOTE(review): the backward check uses gen_binary_data (equal shapes)
        # rather than gen_broadcast_data -- presumably to avoid reducing the mod
        # gradient over broadcast axes; confirm intent before changing.
        check_binary_op_backward(c,
            lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
    def test_bmod_int(a, b):
        # Integer mod: the gradient is defined as zero for both inputs.
        c = mx.sym.broadcast_mod(mx.sym.cast(a, dtype='int32'), mx.sym.cast(b, dtype='int32'))
        check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data_int, mx_nd_func=mx.nd.modulo)
        check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
    def test_bpow(a, b):
        c = mx.sym.broadcast_power(a, b)
        check_binary_op_forward(c, lambda a, b: a ** b, gen_broadcast_data, mx_nd_func=mx.nd.power)
        check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
                                        g_out * a ** b * np.log(a)), gen_broadcast_data)
    def test_bequal(a, b):
        c = mx.sym.broadcast_equal(a, b)
        # Comparison ops have zero gradient everywhere.
        check_binary_op_forward(c, lambda a, b: (a == b).astype(a.dtype), gen_broadcast_data_int,
                                mx_nd_func=mx.nd.equal)
        check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
    def test_bmax(a, b):
        c = mx.sym.broadcast_maximum(a, b)
        check_binary_op_forward(c, lambda x, y: np.maximum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.maximum)
        # pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
        data = gen_broadcast_data(idx=200)
        check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
    def test_bmin(a, b):
        c = mx.sym.broadcast_minimum(a, b)
        check_binary_op_forward(c, lambda x, y: np.minimum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.minimum)
        # pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
        data = gen_broadcast_data(idx=200)
        check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
    def test_band(a, b):
        c = mx.sym.broadcast_logical_and(a, b)
        check_binary_op_forward(c, lambda x, y: np.logical_and(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_and)
        # pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
        data = gen_broadcast_data(idx=200)
        check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
    def test_bor(a, b):
        c = mx.sym.broadcast_logical_or(a, b)
        check_binary_op_forward(c, lambda x, y: np.logical_or(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_or)
        # pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
        data = gen_broadcast_data(idx=200)
        check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
    def test_bxor(a, b):
        c = mx.sym.broadcast_logical_xor(a, b)
        check_binary_op_forward(c, lambda x, y: np.logical_xor(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_xor)
        # pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
        data = gen_broadcast_data(idx=200)
        check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
    test_bplus(a, b)
    test_bminus(a, b)
    test_bmul(a, b)
    test_bdiv(a, b)
    test_bmod(a, b)
    test_bmod_int(a, b)
    test_bpow(a, b)
    test_bequal(a, b)
    test_bmax(a, b)
    test_bmin(a, b)
    test_band(a, b)
    test_bor(a, b)
    test_bxor(a, b)
@with_seed()
def test_run_convolution_dilated_impulse_response(dil=(1,1), kernel_shape=(3,3), verbose=False):
    """Feed a unit impulse through a dilated convolution with all-ones weights
    and check the response sums, the input gradient, and a first-order update
    of a random kernel against its computed gradient."""
    dim = len(dil)
    assert(len(kernel_shape) == dim)
    # Input for spike response
    data_size = 33
    data_shape = (1, 1) + (data_size,) * dim
    center = (0,0) + (data_size // 2,) * dim
    spike_imgs = np.zeros(shape=data_shape, dtype=np.float32)
    spike_imgs[center] = 1.0
    spike_img = mx.nd.array(spike_imgs)
    spike_img2 = mx.nd.array(spike_imgs)
    kernel_weights = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
    kernel_weights2 = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
    kernel = mx.symbol.Variable('kernel')
    in_img = mx.symbol.Variable('input')
    net = mx.symbol.Convolution(in_img, num_filter=1,kernel=kernel_shape, dilate=dil, no_bias="true", name='test_convolution')
    net.list_arguments()
    be = net.bind(default_context(), args={ 'input' : spike_img, 'test_convolution_weight' : kernel_weights},
                args_grad={'input' : spike_img2, 'test_convolution_weight' : kernel_weights2 } )
    be.forward(True)
    out_o = be.outputs[0].asnumpy()
    ndo = be.outputs[0]
    # Backprop a unit gradient from the center of the output.
    out_grads = np.zeros(shape=be.outputs[0].shape, dtype=np.float32)
    out_grads[center] = 1.0
    out_grad = mx.nd.array(out_grads)
    be.backward([out_grad])
    vgrad = be.grad_arrays[0].asnumpy()
    out = out_o.reshape(out_o.shape[2:])
    nz_loc = np.nonzero(out)
    # With all-ones weights, a unit impulse produces a response summing to the
    # kernel element count; the input gradient sums to the same value.
    assert_allclose(np.sum(out),np.prod(kernel_shape),atol=1e-5)
    assert_allclose(np.sum(vgrad),np.prod(kernel_shape),atol=1e-5)
    # Now check whether the input gradient was computed correctly
    input_grad = mx.nd.array(vgrad)
    be = net.bind(default_context(), args={ 'input' : input_grad, 'test_convolution_weight' : kernel_weights})
    be.forward(True)
    out_o = be.outputs[0].asnumpy()
    assert_allclose(out_o[center],np.prod(kernel_shape),atol=1e-5)
    rnd_kernel_s = np.random.uniform(low=0.0, high=1.0, size=tuple([1,1]+list(kernel_shape))).astype(np.float32)
    impulse_error = mx.nd.array(out_o/np.sum(out_o)) # This should be 1.0 at [0,0,16,16]
    rnd_kernel = mx.nd.array(rnd_kernel_s)
    rnd_kernel2 = mx.nd.array(rnd_kernel_s)
    white_in = mx.nd.ones(shape=data_shape)
    white_in2 = mx.nd.ones(shape=data_shape)
    be = net.bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : rnd_kernel},
                args_grad={'input' : white_in2, 'test_convolution_weight' : rnd_kernel2 } )
    be.forward(True)
    be.backward([impulse_error])
    out_orig = be.outputs[0].asnumpy()
    kernel_gradient = be.grad_arrays[1].asnumpy()
    # Apply a first-order update: out should move by ~sum(kernel_gradient).
    dkernel = mx.nd.array(rnd_kernel_s + kernel_gradient)
    be = net.bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : dkernel})
    be.forward(True)
    out = be.outputs[0].asnumpy()
    # Now do a simple check of the kernel gradient
    assert(out[center] - np.sum(kernel_gradient) - out_orig[center] < 0.001)
@with_seed()
def test_convolution_dilated_impulse_response():
    """Run the dilated-convolution impulse response check over 1D and 2D
    dilation/kernel-shape combinations."""
    one_d = ([(1,), (2,), (3,)], [(1,), (2,), (3,), (4,)])
    two_d = ([(1, 1), (2, 2), (3, 3)], [(3, 3), (4, 4), (2, 3), (3, 2), (1, 1)])
    for dilations, kernels in (one_d, two_d):
        for dilation in dilations:
            for kshape in kernels:
                test_run_convolution_dilated_impulse_response(dil=dilation, kernel_shape=kshape)
@with_seed()
def test_reshape():
    """Check mx.sym.Reshape: the shape-argument API (with 0/-1/-2/-3/-4 codes
    and reverse=True), the legacy target_shape API, and Flatten."""
    def test_reshape_new(src_shape, shape_args, reverse, dst_shape):
        """Check one (src_shape, shape_args, reverse) case: inferred shape must
        equal dst_shape, and forward/backward must be pure reshapes. Also
        verifies shape inference with one source dim held out as 0."""
        net = mx.sym.Variable("data")
        net = mx.sym.Reshape(net, shape=shape_args, reverse=reverse)
        # Round-trip through JSON to confirm the attributes serialize.
        js = net.tojson()
        net = mx.sym.load_json(js)
        _, output_shape, __ = net.infer_shape(data=src_shape)
        assert output_shape[0] == dst_shape, \
            'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
            'Output Shape = %s' %(str(src_shape), str(shape_args), str(reverse),
                                  str(dst_shape), str(output_shape[0]))
        dat_npy = np.random.rand(*src_shape)
        grad_npy = np.random.rand(*dst_shape)
        exe = net.simple_bind(default_context(), data=src_shape)
        exe.arg_dict['data'][:] = dat_npy
        exe.forward(is_train=True)
        assert np.square(exe.outputs[0].asnumpy() - dat_npy.reshape(dst_shape)).mean() < 1E-7, \
            'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
            %(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
        exe.backward(out_grads=mx.nd.array(grad_npy))
        assert np.square(exe.grad_dict['data'].asnumpy() - grad_npy.reshape(src_shape)).mean() < 1E-7, \
            'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
            %(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
        # Hold each source dim out (as 0) and check it is inferred back.
        for i in range(len(src_shape)):
            holdout_src_shape = list(src_shape)
            holdout_src_shape[i] = 0
            holdout_src_shape = tuple(holdout_src_shape)
            net = mx.sym.Variable('data')
            net = mx.sym.elemwise_add(net.reshape(shape_args, reverse=reverse), mx.sym.ones(shape=dst_shape))
            input_shape, output_shape, __ = net.infer_shape(data=holdout_src_shape)
            assert output_shape[0] == dst_shape, \
                'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
                'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
                                      str(dst_shape), str(output_shape[0]))
            assert input_shape[0] == src_shape, \
                'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
                'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
                                      str(dst_shape), str(output_shape[0]))
    # Test new api (Using shape)
    # Each case: [src_shape, shape_args, reverse, expected dst_shape].
    test_cases = [
        [(2, 3, 5, 5),  (0, -1),          False, (2, 75)],
        [(2, 3, 5, 5),  (0, 0, -1),       False, (2, 3, 25)],
        [(5, 3, 4, 5),  (0, -1, 0),       False, (5, 15, 4)],
        [(2, 3, 5, 4),  (-1, 0, 0),       False, (8, 3, 5)],
        [(2, 3, 5, 5),  (0, 0, 0, 0),     False, (2, 3, 5, 5)],
        [(2, 4, 5, 3),  (-1, 2, 2, 1),    False, (30, 2, 2, 1)],
        [(2, 3, 5, 6),  (-2,),            False, (2, 3, 5, 6)],
        [(2, 3, 5, 6),  (6, 1, -2),       False, (6, 1, 5, 6)],
        [(2, 3, 5, 6),  (-3, -3),         False, (6, 30)],
        [(2, 3, 5, 6),  (-3, -1),         False, (6, 30)],
        [(64,),         (-4, 16, 4),      False, (16, 4)],
        [(64,),         (-4, 16, -1),     False, (16, 4)],
        [(64, 1, 2, 3), (-4, 16, -1, -2), False, (16, 4, 1, 2, 3)],
        [(2, 3, 5, 5),  (0, -1),          True,  (5, 30)],
        [(2, 3, 5, 5),  (0, 0, -1),       True,  (3, 5, 10)],
        [(5, 3, 4, 5),  (0, -1, 0),       True,  (3, 20, 5)],
        [(2, 3, 5, 4),  (-1, 0, 0),       True,  (6, 5, 4)],
        [(2, 3, 4, 5),  (3, -1, 0),       True,  (3, 8, 5)],
        [(2, 3, 5, 5),  (5, 3, 0, -1),    True,  (5, 3, 5, 2)],
        [(2, 3, 5, 5),  (0, 0, 0, 0),     True,  (2, 3, 5, 5)],
        [(2, 3, 5, 6),  (-2,),            True,  (2, 3, 5, 6)],
        [(2, 3, 5, 6),  (-2, 1, 30),      True,  (2, 3, 1, 30)],
        [(2, 3, 5, 6),  (-3, -3),         True,  (6, 30)],
        [(64,),         (16, 4, -4),      True,  (16, 4)],
        [(64,),         (16, -1, -4),     True,  (16, 4)],
        [(1, 2, 3, 64), (-2, -1, 16, -4), True,  (1, 2, 3, 4, 16)]]
    for test_case in test_cases:
        test_reshape_new(*test_case)
    # Test old api
    net = mx.sym.Variable("data")
    net = mx.sym.Reshape(net, target_shape=(2, 0))
    js = net.tojson()
    net = mx.sym.load_json(js)
    _, output_shape, __ = net.infer_shape(data=(2, 3, 5, 5))
    assert(output_shape[0] == (2, 75))
    # Test for Flatten
    data = mx.sym.Variable("data")
    net = mx.sym.Flatten(data)
    exe = net.simple_bind(ctx=default_context(), data=(5, 4, 3, 7))
    data_npy = np.random.normal(size=(5, 4, 3, 7))
    out_grad_npy = np.random.normal(size=(5, 4 * 3 * 7))
    outputs = exe.forward(is_train=True, data=data_npy)[0].asnumpy()
    assert_allclose(outputs, data_npy.reshape((5, 4 * 3 * 7)))
    exe.backward(out_grads=[mx.nd.array(out_grad_npy, ctx=default_context())])
    assert_allclose(exe.grad_arrays[0].asnumpy(), out_grad_npy.reshape((5, 4, 3, 7)))
@with_seed()
def test_reshape_like():
    """Check mx.sym.reshape_like: both the begin/end slicing API and the
    legacy whole-shape API."""
    def test_reshape_like_new(lhs_shape, rhs_shape, lbeg, lend, rbeg, rend, dst_shape):
        """Check one reshape_like case: inferred shape must equal dst_shape and
        forward/backward must be pure reshapes of lhs / the output gradient."""
        lhs = mx.sym.Variable("lhs")
        rhs = mx.sym.Variable("rhs")
        net = mx.sym.reshape_like(lhs, rhs, lhs_begin=lbeg, lhs_end=lend, rhs_begin=rbeg, rhs_end=rend)
        # Round-trip through JSON to confirm the attributes serialize.
        js = net.tojson()
        net = mx.sym.load_json(js)
        _, output_shape, __ = net.infer_shape(lhs=lhs_shape, rhs=rhs_shape)
        assert output_shape[0] == dst_shape, \
            'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
            %(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
        lhs_npy = np.random.rand(*lhs_shape)
        rhs_npy = np.random.rand(*rhs_shape)
        grad_npy = np.random.rand(*dst_shape)
        exe = net.simple_bind(default_context(), lhs=lhs_shape, rhs=rhs_shape)
        exe.arg_dict['lhs'][:] = lhs_npy
        exe.arg_dict['rhs'][:] = rhs_npy
        exe.forward(is_train=True)
        assert np.square(exe.outputs[0].asnumpy() - lhs_npy.reshape(dst_shape)).mean() < 1E-7, \
            'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
            %(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
        exe.backward(out_grads=mx.nd.array(grad_npy))
        assert np.square(exe.grad_dict['lhs'].asnumpy() - grad_npy.reshape(lhs_shape)).mean() < 1E-7, \
            'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
            %(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
    # Test new api (Using shape)
    # Each case: [lhs_shape, rhs_shape, lhs_begin, lhs_end, rhs_begin, rhs_end, dst_shape].
    test_cases = [
        [(30,), (15,2,4), 0, None, 0, 2, (15,2)],
        [(30,), (15,2,4), None, 1, None, 2, (15,2)],
        [(30,7), (15,2,4), 0, 1, 0, 2, (15,2,7)],
        [(3,5), (1,15,4), 0, 2, 1, 2, (15,)],
        [(3,5), (1,15,4), 0, None, 1, -1, (15,)],
        [(30,12), (4,2,2,3), -1, None, 1, None, (30,2,2,3)],
        [(1,1,7,3,1,1), (81,1,1,21), 1, -1, 1, None, (1,1,1,21,1)]
    ]
    for test_case in test_cases:
        test_reshape_like_new(*test_case)
    # Test old api
    lhs = mx.sym.Variable("lhs")
    rhs = mx.sym.Variable("rhs")
    net = mx.sym.reshape_like(lhs, rhs)
    js = net.tojson()
    net = mx.sym.load_json(js)
    _, output_shape, __ = net.infer_shape(lhs=(40, 30), rhs=(30,20,2))
    assert(output_shape[0] == (30,20,2))
@with_seed()
def test_reduce():
    """Randomized checks of mx reduction symbols (sum/mean/prod/nansum/nanprod/
    max/min/norm) against the np_reduce reference, covering random shapes, axis
    subsets, keepdims, exclude-mode and NaN handling, forward and backward."""
    sample_num = 500
    def test_reduce_inner(numpy_reduce_func, numpy_reduce_grad_func, mx_reduce_sym, nan_prob=0,
                          test_exclude=True, test_none_axis=False):
        # Compare one mx reduction symbol against its numpy reference on
        # `sample_num` random configurations; `numpy_reduce_grad_func` supplies
        # the reference gradient given the out-grad and forward result.
        for i in range(sample_num):
            # Generate random data that has ndim between 1-7 and all the shape dims between 1-5
            # Insert a NaN with probability equal to nan_prob
            ndim = np.random.randint(1, 6)
            shape = np.random.randint(1, 6, size=(ndim,))
            # NOTE(review): axis_num is drawn but never used below.
            axis_num = np.random.randint(0, ndim, size=1)
            axis_flags = np.random.randint(0, 2, size=ndim)
            if test_exclude:
                exclude = np.random.randint(0, 2)
            else:
                exclude = False
            axes = []
            for (axis, flag) in enumerate(axis_flags):
                if flag:
                    axes.append(axis)
            # Normalize axes to the three accepted forms: None (reduce all),
            # a single int, or a tuple of ints.
            if 0 == len(axes):
                axes = None
            elif 1 == len(axes):
                axes = axes[0]
            else:
                axes = tuple(axes)
            keepdims = np.random.randint(0, 2)
            a = mx.symbol.Variable('a')
            if axes is None:
                if test_none_axis:
                    b = mx_reduce_sym(a, keepdims=keepdims, axis=axes)
                else:
                    b = mx_reduce_sym(a, keepdims=keepdims)
            elif exclude and isinstance(axes, tuple) and len(axes) < ndim:
                # exclude=True reduces over the complement of `axes`.
                naxes = [i for i in range(ndim) if i not in axes]
                b = mx_reduce_sym(a, axis=naxes, keepdims=keepdims, exclude=True)
            else:
                b = mx_reduce_sym(a, axis=axes, keepdims=keepdims)
            dat_npy = np.random.rand(*shape)
            # Test with both negative and positive values (randomly). Avoid having both in the same
            # test, which can be problematic for error checking due to near-zero values.
            if np.random.rand() > 0.5:
                dat_npy = -dat_npy
            if nan_prob > 0:
                dat_npy[np.random.rand(*shape) < nan_prob] = np.nan
            sum_groundtruth = np.array(numpy_reduce_func(dat_npy, axis=axes, keepdims=keepdims))
            if sum_groundtruth.shape == ():
                sum_groundtruth = np.array([sum_groundtruth])
            grad_nd = mx.nd.empty(shape)
            outgrad_npy = np.array(np.random.rand(*sum_groundtruth.shape))
            # Shape with the reduced axes kept as size-1 dims; used by the
            # grad functions to broadcast the out-grad back to the input shape.
            keepdim_shape = np_reduce(dat_npy, axes, 1, np.sum).shape
            grad_groundtruth = numpy_reduce_grad_func(outgrad=outgrad_npy, data=dat_npy,
                                                      outdata=sum_groundtruth,
                                                      axis=axes, keepdims=keepdims,
                                                      keepdim_shape=keepdim_shape)
            net = b.bind(default_context(), args={'a': mx.nd.array(dat_npy)},
                         args_grad={'a': grad_nd})
            net.forward(is_train=True)
            # NaN-aware comparison: positions that are NaN in both count as equal.
            equal_forward = almost_equal_ignore_nan(net.outputs[0].asnumpy(), sum_groundtruth, 1E-4, 1E-4)
            assert equal_forward
            net.backward(out_grads=mx.nd.array(outgrad_npy))
            bc_grad_groundtruth = np.broadcast_to(grad_groundtruth, grad_nd.shape)
            equal_backward = almost_equal_ignore_nan(grad_nd.asnumpy(), bc_grad_groundtruth, 1E-4, 1E-4)
            assert equal_backward

    test_none_axis = [True, False]
    for test_none in test_none_axis:
        test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.sum),
                          lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
                            outgrad.reshape(keepdim_shape),
                          mx.symbol.sum, test_none_axis=test_none)
        test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.mean),
                          lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
                            outgrad.reshape(keepdim_shape)/(data.size/outdata.size),
                          mx.symbol.mean, test_none_axis=test_none)
        test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.prod),
                          lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
                            outgrad.reshape(keepdim_shape) * (outdata.reshape(keepdim_shape) / data),
                          mx.symbol.prod, test_none_axis=test_none)
        # nansum/nanprod: NaN entries contribute zero gradient.
        test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nansum),
                          lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
                            np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape)),
                          mx.symbol.nansum, 0.3, test_none_axis=test_none)
        test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nanprod),
                          lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
                            np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape) *
                                     (outdata.reshape(keepdim_shape) / data)),
                          mx.symbol.nanprod, 0.3, test_none_axis=test_none)
        # grad of max and min are sensitive to the precision of the calculation.
        # Force numpy to match mxnet's float32.
        # (max/min are run without the test_none_axis variation.)
        test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.max),
                          lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
                            outgrad.reshape(keepdim_shape) *
                            (np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
                          mx.symbol.max)
        test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.min),
                          lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
                            outgrad.reshape(keepdim_shape) *
                            (np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
                          mx.symbol.min)
        test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.linalg.norm),
                          lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
                            outgrad.reshape(keepdim_shape) * (data / outdata.reshape(keepdim_shape)),
                          mx.symbol.norm, test_exclude=False, test_none_axis=test_none)
@with_seed()
def test_broadcast():
    """Randomized checks of broadcast_axis / broadcast_to / broadcast_like:
    forward must replicate the (size-1) broadcast axes of the input, and
    backward must sum-reduce the out-grad back over those axes."""
    sample_num = 200
    for i in range(sample_num):
        # Generate random data that has ndim between 1-7 and all the shape dims between 1-5
        ndim = np.random.randint(1, 6)
        target_shape = np.random.randint(1, 6, size=(ndim,))
        axis = tuple(set(np.random.randint(0, ndim, np.random.randint(1, ndim + 1))))
        shape = target_shape.copy()
        size = tuple([shape[ele] for ele in axis])
        # Collapse the broadcast axes of the input to size 1.
        for ele in axis:
            shape[ele] = 1
        # Also exercise broadcast_to with a 0 entry on one non-broadcast dim;
        # the shape assertion below still expects the full target_shape, so 0
        # evidently means "keep the input's extent" for that dim.
        target_shape_with_zero = list(target_shape)
        for idx in range(len(target_shape_with_zero)):
            if idx not in axis:
                target_shape_with_zero[idx] = 0
                break
        a = mx.symbol.Variable('a')
        sym_bcast_axis = mx.symbol.broadcast_axis(a, axis=axis, size=size)
        sym_bcast_to = mx.symbol.broadcast_to(a, shape=tuple(target_shape))
        sym_bcast_to_with_zero = mx.symbol.broadcast_to(a, shape=tuple(target_shape_with_zero))
        sym_bcast_like = mx.symbol.broadcast_like(a, sym_bcast_to)
        def test_broadcasting_ele(sym_bcast):
            # Run forward/backward on one broadcasting symbol.  Forward output
            # must equal the (numpy-broadcast) input; backward gradient is the
            # out-grad summed over the broadcast axes (keepdims=True).
            dat_npy = np.random.rand(*shape)
            groundtruth = dat_npy
            grad_nd = mx.nd.empty(shape)
            outgrad_npy = np.random.rand(*target_shape)
            grad_groundtruth = np_reduce(outgrad_npy, axis=axis, keepdims=True,
                                         numpy_reduce_func=np.sum)
            net = sym_bcast.bind(default_context(), args={'a': mx.nd.array(dat_npy)},
                                 args_grad={'a': grad_nd})
            net.forward(is_train=True)
            assert (net.outputs[0].shape == target_shape).all()
            assert_almost_equal(net.outputs[0], groundtruth, rtol=1e-4)
            net.backward(out_grads=mx.nd.array(outgrad_npy))
            assert_almost_equal(grad_nd, grad_groundtruth, rtol=1e-4)
        test_broadcasting_ele(sym_bcast_axis)
        test_broadcasting_ele(sym_bcast_to)
        test_broadcasting_ele(sym_bcast_to_with_zero)
        test_broadcasting_ele(sym_bcast_like)
@with_seed()
def test_transpose():
    """Check mx.nd.transpose against np.transpose, both with an explicit random
    permutation and with the default (reverse-all-axes) behaviour."""
    for rank in range(1, 7):
        for _trial in range(5):
            shape = list(np.random.randint(1, 10, size=rank))
            perm = list(range(rank))
            random.shuffle(perm)
            perm = tuple(perm)
            data = mx.nd.array(np.random.normal(size=shape))
            # Explicit permutation.
            permuted = mx.nd.transpose(data, axes=perm)
            assert_allclose(np.transpose(data.asnumpy(), axes=perm), permuted.asnumpy())
            # Default: all axes reversed.
            reversed_all = mx.nd.transpose(data)
            assert_allclose(np.transpose(data.asnumpy()), reversed_all.asnumpy())
@with_seed()
def test_pseudo2dtranspose():
    """Exercise transpose permutations built from two contiguous axis groups
    (internally reducible to a 2-D transpose) across several dtypes."""
    def _two_distinct_ints(lo, hi):
        # Draw two distinct integers from [lo, hi) and return them sorted.
        first = np.random.randint(lo, hi)
        second = np.random.randint(lo, hi - 1)
        second = second if second < first else second + 1
        return tuple(np.sort([first, second]))

    def _pseudo2d_axes(rank):
        # Permutation that swaps the axis blocks [cut1:cut2] and [cut2:].
        order = list(range(rank))
        cut1, cut2 = _two_distinct_ints(0, rank)
        return tuple(order[:cut1] + order[cut2:] + order[cut1:cut2])

    for rank in range(2, 7):
        for dtype in ['int8', 'half', 'int32', 'int64']:
            for _ in range(5):
                shape = list(np.random.randint(5, 20, size=rank))
                perm = _pseudo2d_axes(rank)
                data = mx.nd.array(np.random.normal(size=shape), dtype=dtype)
                out = mx.nd.transpose(data, axes=perm)
                assert_allclose(np.transpose(data.asnumpy(), axes=perm), out.asnumpy())
@with_seed()
def test_big_transpose():
    """Transpose a large 5-D uint8 tensor NDHWC -> NCDHW and back again,
    expecting an exact round trip."""
    depth = list(np.random.randint(132, 160, size=1))
    spatial = list(np.random.randint(256, 320, size=2))
    dims = [1] + depth + spatial + [10]
    to_channel_first = (0, 4, 1, 2, 3)
    src = np.random.normal(size=dims).astype('uint8')
    arr = mx.nd.array(src, dtype='uint8')
    moved = mx.nd.transpose(arr, axes=to_channel_first)
    assert_allclose(np.transpose(src, axes=to_channel_first), moved.asnumpy().astype('uint8'))
    back = mx.nd.transpose(moved, axes=(0, 2, 3, 4, 1))
    assert_allclose(src, back.asnumpy().astype('uint8'))
@with_seed()
def test_larger_transpose():
    """Transpose a 50x51 matrix and compare against np.transpose."""
    mat = mx.nd.random.normal(shape=(50, 51))
    flipped = mx.nd.transpose(mat)
    assert_allclose(np.transpose(mat.asnumpy()), flipped.asnumpy())
@with_seed()
def test_expand_dims():
    """mx.nd.expand_dims must agree with np.expand_dims for every valid axis,
    both in values and in shape."""
    for rank in range(1, 6):
        for insert_axis in range(-rank + 1, rank):
            shape = list(np.random.randint(1, 10, size=rank))
            src = np.random.normal(size=shape)
            nd_src = mx.nd.array(src)
            expected = np.expand_dims(src, axis=insert_axis)
            produced = mx.nd.expand_dims(nd_src, axis=insert_axis)
            assert_allclose(expected, produced.asnumpy())
            assert_allclose(expected.shape, produced.shape)
@with_seed()
def test_crop():
    """Randomized checks of mx.nd.crop / mx.sym.crop against numpy slicing,
    including None and negative begin/end indices, plus a numeric gradient check."""
    for ndim in range(1, 6):
        for t in range(5):
            dims = []
            begin = []
            end = []
            idx = []
            for i in range(ndim):
                d = random.randint(1, 5)
                b = random.randint(0, d-1)
                e = random.randint(b+1, d)
                # Randomly re-express the same slice using None or negative
                # indices to exercise all index conventions.
                if b == 0 and random.randint(0, 1):
                    b = None
                elif b != 0 and random.randint(0, 1):
                    b -= d
                if e == d and random.randint(0, 1):
                    e = None
                elif e != d and random.randint(0, 1):
                    e -= d
                dims.append(d)
                begin.append(b)
                end.append(e)
                idx.append(slice(b, e))
            x = mx.nd.array(np.random.normal(size=dims))
            y = mx.nd.crop(x, begin=tuple(begin), end=tuple(end))
            assert_allclose(x.asnumpy()[idx], y.asnumpy())
            vx = mx.sym.Variable('x')
            vy = mx.sym.crop(vx, begin=tuple(begin), end=tuple(end))
            check_numeric_gradient(vy, [x.asnumpy()])
@with_seed()
def test_slice_axis():
    """Randomized checks of slice_axis: forward must match numpy slicing along
    one axis, backward must scatter the out-grad back into the sliced region,
    under both 'write' and 'add' gradient requests."""
    for ndim in range(1, 6):
        shape = np.random.randint(1, 11, size=(ndim,))
        for t in range(ndim):
            d = shape[t]
            b = random.randint(0, d-1)
            e = random.randint(b+1, d)
            # Randomly re-express the same slice with None / negative indices.
            if np.random.rand() > 0.6:
                e = None
            else:
                if e < d and np.random.rand() > 0.5:
                    e = e - d
                if np.random.rand() > 0.5:
                    b = b - d
            idx = []
            for i in range(ndim):
                idx.append(slice(0, shape[i]))
            idx[t] = slice(b, e)
            X = mx.symbol.Variable('X')
            x = mx.nd.array(np.random.normal(size=shape))
            Y = mx.symbol.slice_axis(data=X, axis=t, begin=b, end=e)
            xgrad = mx.nd.empty(x.shape)
            exec1 = Y.bind(default_context(), args = [x], args_grad = {'X': xgrad})
            exec1.forward(is_train=True)
            y = exec1.outputs[0]
            assert_allclose(x.asnumpy()[idx], y.asnumpy())
            exec1.backward([y])
            # grad_req='write': gradient equals the out-grad inside the slice
            # and zero everywhere else.
            xx = x.asnumpy()
            xx[:] = 0.0
            xx[idx] = x.asnumpy()[idx]
            assert_allclose(xx, xgrad.asnumpy())
            x_grad_npy = np.random.normal(size=x.shape)
            xgrad = mx.nd.array(x_grad_npy)
            exec2 = Y.bind(default_context(), args=[x], args_grad={'X': xgrad}, grad_req="add")
            exec2.forward(is_train=True)
            exec2.backward([exec2.outputs[0]])
            # grad_req='add': the scattered out-grad is accumulated onto the
            # pre-existing gradient buffer.
            xx = np.zeros(shape=x.shape, dtype=np.float32)
            xx[idx] = x.asnumpy()[idx]
            assert_allclose(xx + x_grad_npy, xgrad.asnumpy(), atol=1E-5)
@with_seed()
def test_slice_like():
    """Randomized checks of slice_like: the data input is cropped to the shape
    reference's extents along the selected (possibly negative) axes; the shape
    reference itself must receive a zero gradient."""
    for ndim in range(1, 6):
        from_shape = np.random.randint(1, 11, size=(ndim,))
        shape = [s + np.random.randint(0, 3) for s in from_shape]
        for t in range(ndim):
            if t > 0:
                axes = np.random.randint(0, ndim, size=t).tolist()
            else:
                axes = []
            idx = []
            for i in range(ndim):
                idx.append(slice(0, shape[i]))
                # An empty axes list means every axis is sliced.
                if i in axes or not axes:
                    idx[i] = slice(0, from_shape[i])
            if axes:
                # Exercise negative axis indices on one random entry.
                pos = np.random.randint(0, t)
                if axes[pos] > 0:
                    axes[pos] -= ndim # negative index
            X = mx.symbol.Variable('X')
            X_1 = mx.symbol.Variable('X1')
            x = mx.nd.array(np.random.normal(size=shape))
            x1 = mx.nd.array(np.random.normal(size=from_shape))
            Y = mx.symbol.slice_like(data=X, shape_like=X_1, axes=axes)
            xgrad = mx.nd.empty(x.shape)
            xgrad1 = mx.nd.empty(x1.shape)
            exec1 = Y.bind(default_context(), args = [x, x1],
                           args_grad = {'X': xgrad, 'X1': xgrad1})
            exec1.forward(is_train=True)
            y = exec1.outputs[0]
            assert_allclose(x.asnumpy()[idx], y.asnumpy())
            exec1.backward([y])
            # Gradient w.r.t. data: the out-grad inside the sliced region,
            # zero outside it.
            xx = x.asnumpy()
            xx[:] = 0.0
            xx[idx] = x.asnumpy()[idx]
            assert_allclose(xx, xgrad.asnumpy())
            # The shape reference contributes no gradient at all.
            assert_allclose(xgrad1.asnumpy(), mx.nd.zeros_like(xgrad1).asnumpy())
@with_seed()
def test_slice_like_different_types():
    """slice_like must accept a shape reference whose dtype differs from the data."""
    source = mx.nd.array([[1., 2., 3., 4.],
                          [5., 6., 7., 8.],
                          [9., 10., 11., 12.]])
    reference = mx.nd.array([[0., 0., 0.],
                             [0., 0., 0.]]).astype('int32')
    sliced = mx.nd.slice_like(source, reference)
    assert_allclose(sliced.asnumpy(), [[1, 2, 3], [5, 6, 7]])
@with_seed()
def test_reshape_like_different_types():
    """reshape_like must accept a shape reference whose dtype differs from the data."""
    lhs = mx.nd.zeros((2, 3))
    rhs = mx.nd.array([[1, 2], [3, 4], [5, 6]]).astype('int32')
    reshaped = mx.nd.reshape_like(lhs, rhs)
    assert_allclose(reshaped.asnumpy(), [[0, 0], [0, 0], [0, 0]])
@with_seed()
def test_flip():
    """mx.nd.flip along a random axis must match numpy reverse-slicing."""
    for rank in range(1, 6):
        for _trial in range(5):
            shape = [random.randint(1, 10) for _ in range(rank)]
            flip_axis = random.randint(0, rank - 1)
            # Reverse only the chosen axis via a [::-1] slice.
            selector = []
            for dim in range(rank):
                if dim == flip_axis:
                    selector.append(slice(None, None, -1))
                else:
                    selector.append(slice(None, None))
            data = mx.nd.array(np.random.normal(size=shape))
            flipped = mx.nd.flip(data, axis=flip_axis)
            assert_allclose(data.asnumpy()[selector], flipped.asnumpy())
@with_seed()
def test_stn():
    """SpatialTransformer smoke test.

    With the localization network's weights zeroed, the predicted transform is
    just the fc bias [0.5, 0, 0, 0, 0.5, 0] — a 0.5-scale affine map — so the
    output must equal the centered half-size crop of the input, and the input
    gradient must be the corresponding scatter of ones."""
    import sys
    np.set_printoptions(threshold=sys.maxsize)
    num_filter = 2  # conv of loc net
    kernel = (3, 3)  # conv of loc net
    num_hidden = 6  # fc of loc net
    for n in [1, 2, 3, 4]:
        for c in [1, 2, 3, 4]:
            for h in [5, 9, 13, 17]:  # for convenience test, this third and forth input dim should be 4x + 1
                for w in [5, 9, 13, 17]:
                    data_shape = (n, c, h, w)
                    # Output is half the spatial size (rounded up).
                    target_shape = (int((data_shape[2]+1)/2), int((data_shape[3]+1)/2))
                    data = mx.sym.Variable(name="data")
                    loc = mx.sym.Convolution(data=data, kernel=kernel, pad=(1, 1), num_filter=num_filter, name="loc_conv")
                    loc = mx.sym.Flatten(data=loc)
                    loc = mx.sym.FullyConnected(data=loc, num_hidden=num_hidden, name="loc_fc")
                    stn = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=target_shape,
                                                    transform_type="affine", sampler_type="bilinear")
                    arg_names = stn.list_arguments()
                    arg_shapes, out_shapes, _ = stn.infer_shape(data=data_shape)
                    # check shape
                    assert out_shapes[0] == (data_shape[0], data_shape[1], target_shape[0], target_shape[1])
                    dev = default_context()
                    #dev = mx.gpu(0)
                    args = {}
                    args['data'] = mx.random.normal(0, 1, data_shape, ctx=mx.cpu()).copyto(dev)
                    # Zero the loc-net weights so the transform is exactly the
                    # fixed fc bias set below.
                    args['loc_conv_weight'] = mx.nd.zeros((num_filter, data_shape[1], kernel[0], kernel[1]), ctx=dev)
                    args['loc_conv_bias'] = mx.nd.zeros((num_filter,), ctx=dev)
                    args['loc_fc_weight'] = mx.nd.zeros((6, num_filter*data_shape[2]*data_shape[3]), ctx=dev)
                    args['loc_fc_bias'] = mx.nd.array([0.5, 0, 0, 0, 0.5, 0], ctx=dev)
                    grad_grad = [mx.nd.zeros(shape, ctx=dev) for shape in arg_shapes]
                    exe = stn.bind(dev, args=args, args_grad=grad_grad)
                    exe.forward(is_train=True)
                    out = exe.outputs[0]
                    # check forward
                    assert_almost_equal(out, args['data'].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
                    out_grad = mx.nd.ones(out.shape, ctx=dev)
                    exe.backward([out_grad])
                    # check backward
                    assert_almost_equal(out_grad, grad_grad[0].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
def test_stn_valid_sampling():
    """Regression test: a near-identity affine transform whose bilinear sampling
    grid lands just outside the source image must still complete forward and
    backward without error.  Only successful execution is checked; there are no
    numeric assertions.

    NOTE(review): unlike its neighbours this test carries no @with_seed()
    decorator, but it is fully deterministic (zero data, fixed loc), so that
    appears benign."""
    target_shape = (
        28,
        28,
    )
    src_shape = (
        42,
        42,
    )
    data = mx.sym.Variable(name="data")
    loc = mx.sym.Variable(name="loc")
    data_array = np.zeros((
        1,
        1,
    ) + src_shape)
    # Have an ever so slight rotation.
    loc_array = np.array(
        [[9.03887e-05, 1.00015, 0.00174931, 1.0003, 0.000311901,
          -0.000919065]])
    stn = mx.sym.SpatialTransformer(
        data=data,
        loc=loc,
        target_shape=target_shape,
        transform_type="affine",
        sampler_type="bilinear")
    grad_req = {k: 'write' for k in stn.list_arguments()}
    grads = {
        'data': mx.nd.array(np.zeros_like(data_array)),
        'loc': mx.nd.array(np.zeros_like(loc_array))
    }
    executor = stn.bind(
        ctx=default_context(),
        args={'data': mx.nd.array(data_array),
              'loc': mx.nd.array(loc_array)},
        grad_req=grad_req,
        args_grad=grads)
    executor.forward(is_train=True)
    executor.backward(mx.nd.ones((
        1,
        1,
    ) + target_shape))
@with_seed()
def test_dot():
    """Check mx.sym.dot forward/backward against np.dot for small matrices, then
    verify all four transpose-flag combinations with the numeric gradient checker.

    float16 is only exercised on GPU contexts (CPU dot does not cover it here).
    """
    ctx = default_context()
    dtypes = ['float32', 'float64']
    ndims = [2]
    if ctx.device_type == 'gpu':
        dtypes += ['float16']
        ndims += [1]
    # Test normal dot.
    # NOTE(review): the original loop drew 1-D shapes for ndim == 1 behind a
    # dead `if ...: pass` guard, but the arrays were always created 2-D, so
    # the ndim == 1 pass merely repeated the 2-D cases.  The dead shape
    # computation has been removed; genuine 1-D dot coverage is still TODO.
    for ndim in ndims:
        for data_type in dtypes:
            tol = 1e-2 if data_type == 'float16' else 1e-3
            for m in range(1, 5):
                for k in range(1, 5):
                    for n in range(1, 5):
                        a_npy = np.random.normal(0, 1, (m, k))
                        a_npy = a_npy.astype(data_type)
                        b_npy = np.random.normal(0, 1, (k, n))
                        b_npy = b_npy.astype(data_type)
                        c_npy = np.empty((m, n), dtype=data_type)
                        ograd_npy = np.random.normal(0, 1, (m, n))
                        ograd_npy = ograd_npy.astype(data_type)
                        agrad_npy = np.empty((m, k), dtype=data_type)
                        bgrad_npy = np.empty((k, n), dtype=data_type)
                        # Reference: C = A.B, dA = dC.B^T, dB = A^T.dC
                        c_npy[:, :] = np.dot(a_npy[:, :], b_npy[:, :])
                        bgrad_npy[:, :] = np.dot(a_npy[:, :].T, ograd_npy[:, :])
                        agrad_npy[:, :] = np.dot(ograd_npy[:, :], b_npy[:, :].T)
                        a = mx.sym.Variable('a', dtype=data_type)
                        b = mx.sym.Variable('b', dtype=data_type)
                        c = mx.sym.dot(a, b)
                        exe = c.simple_bind(ctx=ctx, a=a_npy.shape, b=b_npy.shape)
                        outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
                        assert_almost_equal(outputs[0], c_npy, rtol=tol, atol=tol)
                        exe.backward(out_grads=[mx.nd.array(ograd_npy, mx.cpu()).astype(data_type)])
                        assert_almost_equal(exe.grad_dict['a'], agrad_npy, rtol=tol, atol=tol)
                        assert_almost_equal(exe.grad_dict['b'], bgrad_npy, rtol=tol, atol=tol)

    # Test dot with transpose flags using the numeric gradient checker.
    def dot_sym(data_type, transpose_a=False, transpose_b=False):
        # One factory replaces the four near-identical dot_sym_* helpers.
        x = mx.sym.Variable('x', dtype=data_type)
        y = mx.sym.Variable('y', dtype=data_type)
        return mx.sym.dot(x, y, transpose_a=transpose_a, transpose_b=transpose_b)

    for data_type in dtypes:
        for ashape, bshape in [((3, 4), (4, 5)), ((2, 3, 4), (4, 5, 6))]:
            m1_npy = np.random.uniform(-1, 1, ashape)
            m1_npy = m1_npy.astype(data_type)
            m2_npy = np.random.uniform(-1, 1, bshape)
            m2_npy = m2_npy.astype(data_type)
            check_numeric_gradient(dot_sym(data_type), [m1_npy, m2_npy],
                                   numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
            check_numeric_gradient(dot_sym(data_type, transpose_a=True), [m1_npy.T, m2_npy],
                                   numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
            check_numeric_gradient(dot_sym(data_type, transpose_b=True), [m1_npy, m2_npy.T],
                                   numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
            check_numeric_gradient(dot_sym(data_type, transpose_a=True, transpose_b=True),
                                   [m1_npy.T, m2_npy.T],
                                   numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
@with_seed()
def test_batch_dot():
    """Exhaustive small-size checks of batch_dot against per-batch np.dot, with
    random transpose_a/transpose_b flags, under both 'write' and 'add' gradient
    requests."""
    ctx = default_context()
    dtypes = ['float32', 'float64']
    if ctx.device_type == 'gpu':
        dtypes += ['float16']
    for data_type in dtypes:
        for batch_size in range(1, 5):
            for m in range(1, 5):
                for k in range(1, 5):
                    for n in range(1, 5):
                        transpose_a = (np.random.rand() > 0.5)
                        transpose_b = (np.random.rand() > 0.5)
                        a_npy = np.random.normal(0, 1, (batch_size, m, k))
                        a_npy = a_npy.astype(data_type)
                        b_npy = np.random.normal(0, 1, (batch_size, k, n))
                        b_npy = b_npy.astype(data_type)
                        c_npy = np.empty((batch_size, m, n), dtype=data_type)
                        ograd_npy = np.random.normal(0, 1, (batch_size, m, n))
                        ograd_npy = ograd_npy.astype(data_type)
                        agrad_npy = np.empty((batch_size, m, k), dtype=data_type)
                        bgrad_npy = np.empty((batch_size, k, n), dtype=data_type)
                        a_init_grad_npy = np.random.normal(size=(batch_size, m, k))
                        a_init_grad_npy = a_init_grad_npy.astype(data_type)
                        b_init_grad_npy = np.random.normal(size=(batch_size, k, n))
                        b_init_grad_npy = b_init_grad_npy.astype(data_type)
                        # Reference result and gradients, one batch slice at a time.
                        for i in range(batch_size):
                            c_npy[i, :, :] = np.dot(a_npy[i, :, :], b_npy[i, :, :])
                            bgrad_npy[i, :, :] = np.dot(a_npy[i, :, :].T, ograd_npy[i, :, :])
                            agrad_npy[i, :, :] = np.dot(ograd_npy[i, :, :], b_npy[i, :, :].T)
                        a = mx.sym.Variable('a', dtype=data_type)
                        b = mx.sym.Variable('b', dtype=data_type)
                        c = mx.sym.batch_dot(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
                        # The reference arrays were built untransposed; transpose
                        # the inputs/gradients to match the operator's flags.
                        if transpose_a:
                            a_npy = np.transpose(a_npy, axes=(0, 2, 1))
                            agrad_npy = np.transpose(agrad_npy, axes=(0, 2, 1))
                            a_init_grad_npy = np.transpose(a_init_grad_npy, axes=(0, 2, 1))
                        if transpose_b:
                            b_npy = np.transpose(b_npy, axes=(0, 2, 1))
                            bgrad_npy = np.transpose(bgrad_npy, axes=(0, 2, 1))
                            b_init_grad_npy = np.transpose(b_init_grad_npy, axes=(0, 2, 1))
                        exe = c.simple_bind(ctx=ctx,
                            a=a_npy.shape, b=b_npy.shape, grad_req='write')
                        exe_add = c.simple_bind(ctx=ctx,
                            a=a_npy.shape, b=b_npy.shape, grad_req='add')
                        exe_add.grad_dict['a'][:] = a_init_grad_npy
                        exe_add.grad_dict['b'][:] = b_init_grad_npy
                        outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
                        assert_almost_equal(outputs[0], c_npy,
                                            rtol=1e-2 if data_type == 'float16' else 1e-3,
                                            atol=1e-2 if data_type == 'float16' else 1e-4)
                        exe.backward(out_grads=[mx.nd.array(ograd_npy, ctx=exe._ctx)])
                        assert_almost_equal(exe.grad_dict['a'], agrad_npy,
                                            rtol=1e-2 if data_type == 'float16' else 1e-3,
                                            atol=1e-2 if data_type == 'float16' else 1e-4)
                        assert_almost_equal(exe.grad_dict['b'], bgrad_npy,
                                            rtol=1e-2 if data_type == 'float16' else 1e-3,
                                            atol=1e-2 if data_type == 'float16' else 1e-4)
                        # grad_req='add' must accumulate onto the pre-set gradients.
                        exe_add.forward(is_train=True, a=a_npy, b=b_npy)
                        exe_add.backward(out_grads=[mx.nd.array(ograd_npy, ctx=exe._ctx)])
                        assert_almost_equal(exe_add.grad_dict['a'],
                                            agrad_npy + a_init_grad_npy,
                                            rtol=1e-2 if data_type == 'float16' else 1e-3,
                                            atol=1e-2 if data_type == 'float16' else 1e-4)
                        assert_almost_equal(exe_add.grad_dict['b'],
                                            bgrad_npy + b_init_grad_npy,
                                            rtol=1e-2 if data_type == 'float16' else 1e-3,
                                            atol=1e-2 if data_type == 'float16' else 1e-4)
def get_correlation(data1, data2, kernel_size, max_displacement, stride1, stride2, pad_size, is_multiply):
    """Build a Correlation symbol over two fresh variables named 'img1'/'img2'.

    NOTE: the data1/data2 arguments are not used here; the symbol always binds
    to variables named 'img1' and 'img2' (callers fill them via arg_dict).
    """
    left = mx.sym.Variable('img1')
    right = mx.sym.Variable('img2')
    return mx.sym.Correlation(data1=left, data2=right, kernel_size=kernel_size,
                              max_displacement=max_displacement, stride1=stride1,
                              stride2=stride2, pad_size=pad_size, is_multiply=is_multiply)
def correlation_forward(data1, data2, pad_size, kernel_size, stride1, stride2, max_displacement, is_multiply):
    """Numpy reference forward pass of the Correlation operator.

    Returns (output, padded_data1, padded_data2); the zero-padded copies are
    reused by correlation_backward.  When ``is_multiply`` is true the patch
    similarity is a dot product, otherwise a sum of absolute differences; the
    result is normalized by kernel_size**2 * channels.
    """
    batch, channels = data1.shape[0], data1.shape[1]
    padded_h = data1.shape[2] + 2 * pad_size
    padded_w = data1.shape[3] + 2 * pad_size
    kernel_radius = (kernel_size - 1) // 2
    border = max_displacement + kernel_radius
    out_w = (padded_w - border * 2) // stride1
    out_h = (padded_h - border * 2) // stride1
    # Each output channel encodes one displacement in a square neighborhood.
    grid_radius = max_displacement // stride2
    grid_width = grid_radius * 2 + 1
    out_channels = grid_width * grid_width
    out = np.zeros((batch, out_channels, out_h, out_w))
    pad1 = np.zeros((batch, channels, padded_h, padded_w))
    pad2 = np.zeros((batch, channels, padded_h, padded_w))
    pad1[:, :, pad_size:pad_size + data1.shape[2], pad_size:pad_size + data1.shape[3]] = data1[:, :, :, :]
    pad2[:, :, pad_size:pad_size + data2.shape[2], pad_size:pad_size + data2.shape[3]] = data2[:, :, :, :]
    for row in range(out_h):
        for col in range(out_w):
            for b in range(batch):
                # (x1, y1): patch anchor in padded data1 for output (row, col).
                x1 = col * stride1 + max_displacement
                y1 = row * stride1 + max_displacement
                for oc in range(out_channels):
                    # Displacement decoded from the output channel index.
                    dx = (oc % grid_width - grid_radius) * stride2
                    dy = (oc // grid_width - grid_radius) * stride2
                    x2 = x1 + dx
                    y2 = y1 + dy
                    for kh in range(kernel_size):
                        for kw in range(kernel_size):
                            for ch in range(channels):
                                if is_multiply:
                                    out[b, oc, row, col] += pad1[b, ch, y1 + kh, x1 + kw] * pad2[b, ch, y2 + kh, x2 + kw]
                                else:
                                    out[b, oc, row, col] += abs(pad1[b, ch, y1 + kh, x1 + kw] - pad2[b, ch, y2 + kh, x2 + kw])
    out /= float(kernel_size ** 2 * channels)
    return out, pad1, pad2
def correlation_backward(out_grad,tmp1,tmp2,data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
    """Numpy reference backward pass of the Correlation operator.

    Parameters
    ----------
    out_grad : gradient w.r.t. the correlation output.
    tmp1, tmp2 : zero-padded copies of the two inputs, as produced by
        correlation_forward.
    data1, data2 : original (unpadded) inputs; only their shapes are read.
    Remaining arguments mirror the operator's hyper-parameters.

    Returns
    -------
    (grad1, grad2) : gradients w.r.t. data1 and data2, cropped from the padded
        buffers back to the unpadded input region.
    """
    # Recompute the output geometry from the input shape and hyper-parameters
    # (must match correlation_forward).
    paddedbottomheight = data1.shape[2] + 2 * pad_size
    paddedbottomwidth = data1.shape[3] + 2 * pad_size
    kernel_radius = (kernel_size - 1) // 2
    border_size = max_displacement + kernel_radius
    top_width = (paddedbottomwidth - border_size * 2) // stride1
    top_height = (paddedbottomheight - border_size * 2) // stride1
    neighborhood_grid_radius = max_displacement // stride2
    neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
    top_channels = neighborhood_grid_width * neighborhood_grid_width
    tmp1_grad = np.zeros(tmp1.shape)
    tmp2_grad = np.zeros(tmp2.shape)
    for i in range(top_height):
        for j in range(top_width):
            for nbatch in range(data1.shape[0]):
                # x1,y1 is the location in data1 , i,j is the location in output
                x1 = j * stride1 + max_displacement
                y1 = i * stride1 + max_displacement
                for top_channel in range(top_channels):
                    s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
                    s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
                    # location in data2
                    x2 = x1 + s2o
                    y2 = y1 + s2p
                    for h in range(kernel_size):
                        for w in range(kernel_size):
                            for channel in range(data1.shape[1]):
                                if is_multiply:
                                    # d(a*b)/da = b and d(a*b)/db = a
                                    tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*tmp2[nbatch, channel, y2 + h,x2 + w]
                                    tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*tmp1[nbatch, channel, y1 + h,x1 + w]
                                else:
                                    # d|a-b|/da = sign(a-b), ties broken toward +1
                                    sgn = 1 if (tmp1[nbatch, channel, y1 + h,x1 + w]>=tmp2[nbatch, channel, y2 + h,x2 + w]) else -1
                                    tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*sgn
                                    tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*(-sgn)
    # Apply the same normalization as the forward pass.
    tmp1_grad = tmp1_grad / float(kernel_size**2*data1.shape[1])
    tmp2_grad = tmp2_grad / float(kernel_size**2*data1.shape[1])
    # Crop the padded gradients back to the original (unpadded) input region.
    return (tmp1_grad[:, :, pad_size:pad_size + data1.shape[2], pad_size:pad_size + data1.shape[3]],
            tmp2_grad[:, :, pad_size:pad_size + data1.shape[2], pad_size:pad_size + data1.shape[3]])
def unittest_correlation(data_shape,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply,dtype):
    """Compare the Correlation operator's forward/backward against the numpy
    reference (correlation_forward / correlation_backward) for one configuration.

    (Fixed: the original built a second, identical Correlation symbol `net2`
    that was never used.)
    """
    img1 = np.random.random(data_shape)
    img1 = img1.astype(dtype)
    img2 = np.random.random(data_shape)
    img2 = img2.astype(dtype)
    net1 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply)
    exe1 = net1.simple_bind(default_context(),img1=img1.shape,img2=img1.shape)
    exe1.arg_dict['img1'][:] = img1
    exe1.arg_dict['img2'][:] = img2
    # operator forward
    exe1.forward(is_train=True)
    # python (reference) forward
    forward_result,tmp1,tmp2 = correlation_forward(img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
    # forward error
    assert_almost_equal(exe1.outputs[0], forward_result, rtol=1e-4, atol=1e-4)
    # all-ones out_grad
    a = np.ones(forward_result.shape)
    out_grad1 = mx.nd.array(a,default_context())
    # operator backward
    exe1.backward(out_grads=out_grad1)
    # python (reference) backward
    grad1,grad2 = correlation_backward(a,tmp1,tmp2,img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
    # backward error
    assert_almost_equal(exe1.grad_dict['img1'], grad1, rtol=1e-3, atol=1e-4)
    assert_almost_equal(exe1.grad_dict['img2'], grad2, rtol=1e-3, atol=1e-4)
@with_seed()
def test_correlation():
    """Type-inference and numeric checks for the Correlation operator.

    Fixes over the original: `npt.npt.build_err_msg` (an AttributeError at the
    failure site) is corrected to `npt.build_err_msg`; the mismatch conditions
    use `or` instead of `and` (the old `and` chain only fired when ALL three
    inferred types disagreed); the second error message now reports
    arg_type2/out_type2 instead of the first query's results.
    """
    def test_infer_type(dtype):
        # Correlation must propagate dtype from either input to both args and
        # the output.
        a = mx.sym.Variable('a')
        b = mx.sym.Variable('b')
        corr = mx.sym.Correlation(data1=a, data2=b)
        arg_type1, out_type1, _ = corr.infer_type(a=dtype)
        if arg_type1[0] != np.dtype(dtype) or arg_type1[1] != np.dtype(dtype) or out_type1[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
                                    err_msg="Inferred type from a is not as expected, "
                                            "Expected :%s %s %s, Got: %s %s %s"
                                            % (dtype, dtype, dtype, arg_type1[0], arg_type1[1], out_type1[0]),
                                    names=['a', 'b'])
            raise AssertionError(msg)
        arg_type2, out_type2, _ = corr.infer_type(b=dtype)
        if arg_type2[0] != np.dtype(dtype) or arg_type2[1] != np.dtype(dtype) or out_type2[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
                                    err_msg="Inferred type from b is not as expected, "
                                            "Expected :%s %s %s, Got: %s %s %s"
                                            % (dtype, dtype, dtype, arg_type2[0], arg_type2[1], out_type2[0]),
                                    names=['a', 'b'])
            raise AssertionError(msg)

    for dtype in ['float16', 'float32']:
        test_infer_type(dtype)
        unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 1,stride2 = 1,pad_size = 4,is_multiply = False, dtype = dtype)
        unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = False, dtype = dtype)
        unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = True, dtype = dtype)
        unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 10,stride1 = 1,stride2 = 2,pad_size = 10,is_multiply = True, dtype = dtype)
        unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
        unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
        unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
        unittest_correlation((5,1,6,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
        unittest_correlation((5,1,11,11), kernel_size = 5,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
@with_seed()
def test_support_vector_machine_l1_svm():
    """SVMOutput with use_linear=True: forward is the identity on the scores,
    backward is the L1 (hinge-loss) gradient."""
    ctx = default_context()
    shape = (20, 10)
    data_sym = mx.symbol.Variable('X')
    label_sym = mx.symbol.Variable('L')
    svm = mx.symbol.SVMOutput(data=data_sym, label=label_sym, use_linear=True)
    data_nd = mx.nd.empty(shape, ctx=ctx)
    label_nd = mx.nd.empty((shape[0],), ctx=ctx)
    data_np = np.random.rand(*shape)
    label_np = np.random.randint(0, shape[1], (shape[0],))
    data_nd[:] = data_np
    label_nd[:] = label_np
    grad_nd = mx.nd.empty(shape, ctx=ctx)
    executor = svm.bind(ctx, args=[data_nd, label_nd], args_grad={'X': grad_nd})
    executor.forward(is_train=True)
    # Forward pass leaves the scores unchanged.
    assert_almost_equal(data_np, executor.outputs[0])
    executor.backward()
    # One-hot label mask mapped onto {-1, +1}.
    label_mask = np.equal(label_np.reshape(shape[0], 1), range(shape[1]))
    label_mask = np.array(label_mask, dtype=np.float32) * 2 - 1
    # Hinge gradient: -y where the sample violates the margin (1 - y*x > 0).
    expected_grad = (-1) * label_mask * np.greater(1 - label_mask * data_np, 0)
    assert_almost_equal(expected_grad, grad_nd)
@with_seed()
def test_support_vector_machine_l2_svm():
    """SVMOutput (default squared hinge): forward is the identity on the scores,
    backward is the L2-SVM gradient."""
    ctx = default_context()
    shape = (20, 10)
    data_sym = mx.symbol.Variable('X')
    label_sym = mx.symbol.Variable('L')
    svm = mx.symbol.SVMOutput(data=data_sym, label=label_sym)
    data_nd = mx.nd.empty(shape, ctx=ctx)
    label_nd = mx.nd.empty((shape[0],), ctx=ctx)
    data_np = np.random.rand(*shape)
    data_np = data_np.astype(np.float32)
    label_np = np.random.randint(0, shape[1], (shape[0],))
    data_nd[:] = data_np
    label_nd[:] = label_np
    grad_nd = mx.nd.empty(shape, ctx=ctx)
    executor = svm.bind(ctx, args=[data_nd, label_nd], args_grad={'X': grad_nd})
    executor.forward(is_train=True)
    # Forward pass leaves the scores unchanged.
    assert_almost_equal(data_np, executor.outputs[0])
    executor.backward()
    # One-hot label mask mapped onto {-1, +1}.
    label_mask = np.equal(label_np.reshape(shape[0], 1), range(shape[1]))
    label_mask = np.array(label_mask, dtype=np.float32) * 2 - 1
    # Squared-hinge gradient: -2*y*max(1 - y*x, 0).
    expected_grad = (-2) * label_mask * np.maximum(1 - label_mask * data_np, 0)
    expected_grad = expected_grad.astype(np.float32)
    assert_almost_equal(expected_grad, grad_nd)
# Seed set because the test is not robust enough to operate on random data
@with_seed(1234)
def test_roipooling():
    """Numeric-gradient check of ROIPooling w.r.t. the data input, under both
    'write' and 'add' gradient requests (ROI gradient is 'null')."""
    data_sym = mx.symbol.Variable(name='data')
    rois_sym = mx.symbol.Variable(name='rois')
    pooled = mx.symbol.ROIPooling(data=data_sym, rois=rois_sym, pooled_size=(4, 4), spatial_scale=1)
    feature_map = np.random.rand(4, 3, 12, 8).astype('float32')
    # Each ROI row is (batch_index, x1, y1, x2, y2).
    rois = np.array([[0, 1.1, 1.1, 6.2, 6.2],
                     [2, 6.1, 2.1, 8.2, 11.2],
                     [1, 3.1, 1.1, 5.2, 10.2],
                     [0, 3, 3, 3, 3]], dtype='float32')
    for data_grad_req in ('write', 'add'):
        check_numeric_gradient(sym=pooled, location=[feature_map, rois],
                               grad_nodes={'data': data_grad_req, 'rois': 'null'},
                               numeric_eps=1e-4, rtol=1e-1, atol=1e-4)
def check_pad_with_shape(shape, xpu, pad_width, mode, dtype="float64"):
    """Compare mx.symbol.Pad against np.pad for one shape/mode/dtype, then run
    a numeric gradient check."""
    data_sym = mx.symbol.Variable('X', dtype=dtype)
    padded_sym = mx.symbol.Pad(data=data_sym, mode=mode, pad_width=pad_width)
    data = mx.random.uniform(-1, 1, shape, ctx=mx.cpu(), dtype=dtype).copyto(xpu)
    # Group the flat pad_width sequence into (before, after) pairs per axis,
    # the format np.pad expects.
    pairs = list(zip(*[iter(list(pad_width))] * 2))
    expected = np.pad(data.asnumpy(), pairs, mode)
    grad = mx.nd.empty(shape, ctx=xpu, dtype=dtype)
    executor = padded_sym.bind(xpu, args=[data], args_grad={'X': grad})
    executor.forward(is_train=True)
    # Forward must match numpy exactly (up to tolerance).
    assert_almost_equal(executor.outputs[0], expected)
    # Gradient check.
    check_numeric_gradient(padded_sym, [data.asnumpy()], numeric_eps=1e-2, rtol=1e-2)
@with_seed()
def test_pad():
    """Run check_pad_with_shape over a 4-D and a 5-D shape for every float
    dtype and each pad mode (constant, edge, reflect)."""
    ctx = default_context()
    # note: this op doesn't support ints yet. Add tests when supported
    cases = [
        ((2, 3, 3, 5), (0, 0, 0, 0, 1, 2, 3, 4)),
        ((2, 3, 3, 5, 4), (0, 0, 0, 0, 1, 2, 3, 4, 3, 1)),
    ]
    for dtype in ["float16", "float32", "float64"]:
        for shape, pad in cases:
            check_pad_with_shape(shape, ctx, pad, 'constant', dtype)
            check_pad_with_shape(shape, ctx, pad, 'edge', dtype)
        for shape, pad in cases:
            check_pad_with_shape(shape, ctx, pad, 'reflect', dtype)
def np_instance_norm(data, weight, bias, eps):
    """NumPy reference for InstanceNorm.

    Normalizes each (batch, channel) slice of `data` over its spatial axes
    (dims 2 and up), then applies the per-channel affine transform given by
    `weight` (gamma) and `bias` (beta).
    """
    spatial = data.shape[2::]
    n_vals = np.prod(np.array(spatial))
    inv_count = 1 / float(n_vals)
    axes = tuple(range(2, data.ndim))
    # per-(batch, channel) mean, broadcast back to the full data shape
    mu = inv_count * np.sum(data, axis=axes)
    mu = np.reshape(np.repeat(mu, n_vals), data.shape)
    # biased variance, broadcast the same way
    sigma2 = inv_count * np.sum((data - mu) ** 2, axis=axes)
    sigma2 = np.reshape(np.repeat(sigma2, n_vals), data.shape)
    # expand gamma/beta to the full data shape, one copy per batch entry
    gamma = np.reshape(np.repeat(np.tile(weight, (data.shape[0], 1)), n_vals), data.shape)
    beta = np.reshape(np.repeat(np.tile(bias, (data.shape[0], 1)), n_vals), data.shape)
    return gamma * (data - mu) / np.sqrt(sigma2 + eps) + beta
def check_instance_norm_with_shape(shape, xpu):
    """Compare mx InstanceNorm forward with the numpy reference, then check grads."""
    eps = 0.001
    data_sym = mx.symbol.Variable('X')
    gamma_sym = mx.symbol.Variable('G')
    beta_sym = mx.symbol.Variable('B')
    out_sym = mx.symbol.InstanceNorm(data=data_sym, beta=beta_sym, gamma=gamma_sym, eps=eps)
    x = mx.random.normal(0, 1, shape, ctx=mx.cpu()).copyto(xpu)
    gamma = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
    beta = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
    expected = np_instance_norm(x.asnumpy(), gamma.asnumpy(), beta.asnumpy(), eps)
    executor = out_sym.bind(xpu, args={'X': x, 'G': gamma, 'B': beta})
    executor.forward(is_train=False)
    assert_almost_equal(executor.outputs[0], expected, rtol=1e-4, atol=1e-4)
    # finite-difference check of the backward pass
    check_numeric_gradient(out_sym, {'X': x.asnumpy(), 'G': gamma.asnumpy(), 'B': beta.asnumpy()},
                           numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
@with_seed()
def test_instance_normalization():
    """Run InstanceNorm checks on shapes from 3-D up to 7-D."""
    for shape in ((1, 1, 1), (2, 1, 2), (2, 4, 5, 6), (3, 3, 2, 3, 2, 1, 1)):
        check_instance_norm_with_shape(shape, default_context())
def check_l2_normalization(in_shape, mode, dtype, norm_eps=1e-10):
    """Compare mx.sym.L2Normalization against a numpy reference and check its gradient.

    Parameters
    ----------
    in_shape : tuple of int
        Input shape; must have >2 dims for 'channel'/'spatial' and >1 for 'instance'.
    mode : str
        One of 'channel', 'spatial', 'instance'; anything else raises RuntimeError.
    dtype : str
        Numpy dtype name for the input data (e.g. 'float16').
    norm_eps : float
        Epsilon added to the norm to avoid division by zero.
    """
    ctx = default_context()
    data = mx.symbol.Variable('data')
    out = mx.symbol.L2Normalization(data=data, mode=mode, eps=norm_eps)
    in_data = np.random.uniform(-1, 1, in_shape).astype(dtype)
    # calculate numpy results
    if mode == 'channel':
        assert in_data.ndim > 2
        np_norm = np.linalg.norm(in_data, axis=1) + norm_eps
        np_norm = np.repeat(1. / np.expand_dims(np_norm, axis=1), in_data.shape[1], axis=1)
        np_out = np.multiply(in_data, np_norm)
    elif mode == 'spatial':
        assert in_data.ndim > 2
        s = in_data.shape
        np_norm = np.linalg.norm(in_data.reshape((s[0], s[1], -1)), axis=2) + norm_eps
        # integer division: np.repeat requires an integer repeat count
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // (s[0] * s[1]), axis=2)
        np_out = np.multiply(in_data, np_norm.reshape(s))
    elif mode == 'instance':
        assert in_data.ndim > 1
        s = in_data.shape
        np_norm = np.linalg.norm(in_data.reshape((s[0], -1)), axis=1) + norm_eps
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // s[0], axis=1)
        np_out = np.multiply(in_data, np_norm.reshape(s))
    else:
        raise RuntimeError('Unknown l2 normalization mode')
    exe = out.simple_bind(ctx=ctx, data=in_data.shape)
    exe.forward(is_train=True, data=in_data)
    # compare numpy + mxnet; use '==' (not 'is') for the string comparison —
    # identity on a str literal is implementation-dependent
    assert_almost_equal(exe.outputs[0], np_out, rtol=1e-2 if dtype == 'float16' else 1e-5, atol=1e-5)
    # check gradient
    check_numeric_gradient(out, [in_data], numeric_eps=1e-3, rtol=1e-2, atol=5e-3)
@with_seed()
def test_l2_normalization():
    """Run L2Normalization checks over dtypes, modes and random 3-D/4-D shapes."""
    for dtype in ('float16', 'float32', 'float64'):
        for mode in ('channel', 'spatial', 'instance'):
            # the randint call order is kept so the seeded RNG stream matches
            b = random.randint(1, 4)
            c = random.randint(3, 5)
            h = random.randint(4, 6)
            check_l2_normalization((b, c, h), mode, dtype)
            w = random.randint(5, 7)
            check_l2_normalization((b, c, h, w), mode, dtype)
def check_layer_normalization(in_shape, axis, eps, dtype=np.float32,
                              forward_check_eps=1E-3, backward_check_eps=1E-3,
                              npy_grad_check=True, finite_grad_check=True):
    """Check mx.symbol.LayerNorm forward/backward against numpy references.

    Parameters
    ----------
    in_shape : tuple of int
        Shape of the input data.
    axis : int
        Axis to normalize over (may be negative).
    eps : float
        Epsilon added to the variance for numerical stability.
    dtype : numpy dtype
        Data type of the input and the gamma/beta parameters.
    forward_check_eps, backward_check_eps : float
        Tolerances for the forward and backward comparisons.
    npy_grad_check : bool
        If True, compare analytic gradients against the closed-form numpy
        gradient for both grad_req='write' and grad_req='add'.
    finite_grad_check : bool
        If True, also run a finite-difference gradient check.
    """
    def npy_layer_norm(data, gamma, beta, axis=1, eps=1E-5):
        # Reference forward pass: normalize over `axis`, then scale/shift.
        if axis < 0:
            axis += data.ndim
        broadcast_shape = [1 for _ in range(data.ndim)]
        broadcast_shape[axis] = data.shape[axis]
        mean = data.mean(axis=axis, keepdims=True).astype(dtype)
        var = data.var(axis=axis, keepdims=True).astype(dtype)
        std = np.sqrt(var + dtype(eps)).astype(dtype)
        out = np.reshape(gamma, broadcast_shape) * (data - mean) / std + \
              np.reshape(beta, broadcast_shape)
        return out

    def npy_layer_norm_grad(data, gamma, out_grad, axis, eps):
        # Closed-form gradients of layer norm w.r.t. data, gamma and beta.
        if axis < 0:
            axis += data.ndim
        exclude_axis = tuple([ele for ele in range(data.ndim) if ele != axis])
        data_mean = data.mean(axis=axis, keepdims=True)
        data_var = data.var(axis=axis, keepdims=True)
        data_std = np.sqrt(data_var + eps)
        centered_data = (data - data_mean) / data_std
        gamma_grad = (centered_data * out_grad).sum(axis=exclude_axis, keepdims=True)
        beta_grad = out_grad.sum(axis=exclude_axis, keepdims=True)
        w = out_grad * gamma.reshape([1 if i != axis else data.shape[axis] for i in range(data.ndim)])\
            / data_std
        data_grad = w - w.mean(axis=axis, keepdims=True)\
            - centered_data * (w * centered_data).mean(axis=axis, keepdims=True)
        gamma_grad = gamma_grad.reshape((-1,))
        beta_grad = beta_grad.reshape((-1,))
        return data_grad, gamma_grad, beta_grad

    ctx = default_context()
    data = np.random.normal(0, 1, in_shape).astype(dtype)
    gamma = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
    beta = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
    data_s = mx.symbol.Variable('data')
    gamma_s = mx.symbol.Variable('gamma')
    beta_s = mx.symbol.Variable('beta')
    out_s = mx.symbol.LayerNorm(data=data_s, gamma=gamma_s, beta=beta_s, axis=axis, eps=eps)
    exe = out_s.simple_bind(ctx, data=in_shape)
    exe.arg_dict['data'][:] = data
    exe.arg_dict['gamma'][:] = gamma
    exe.arg_dict['beta'][:] = beta
    out_nd = exe.forward()[0]
    out = npy_layer_norm(data, gamma, beta, axis, eps)
    assert_almost_equal(out, out_nd, forward_check_eps, forward_check_eps)
    if finite_grad_check:
        # Finite-difference check for both gradient accumulation modes.
        for req in ['write', 'add']:
            check_numeric_gradient(out_s, {'data': data, 'gamma': gamma, 'beta': beta},
                                   grad_nodes={'data': req, 'gamma': req, 'beta': req},
                                   numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
    if npy_grad_check:
        # Test for grad_req = write
        out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
        exe = out_s.simple_bind(ctx, data=in_shape, grad_req='write')
        exe.arg_dict['data'][:] = data
        exe.arg_dict['gamma'][:] = gamma
        exe.arg_dict['beta'][:] = beta
        exe.forward()
        exe.backward([mx.nd.array(out_grad, ctx=ctx)])
        gt_data_grad, gt_gamma_grad, gt_beta_grad =\
            npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
        assert_almost_equal(exe.grad_dict['data'].asnumpy(), gt_data_grad, backward_check_eps, backward_check_eps)
        assert_almost_equal(exe.grad_dict['gamma'].asnumpy(), gt_gamma_grad, backward_check_eps, backward_check_eps)
        assert_almost_equal(exe.grad_dict['beta'].asnumpy(), gt_beta_grad, backward_check_eps, backward_check_eps)
        # Test for grad_req = add: gradients must accumulate onto the
        # pre-filled init_* buffers instead of overwriting them.
        out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
        init_data_grad = np.random.normal(0, 1, in_shape).astype(dtype)
        init_gamma_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
        init_beta_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
        exe = out_s.simple_bind(ctx, data=in_shape, grad_req='add')
        exe.arg_dict['data'][:] = data
        exe.arg_dict['gamma'][:] = gamma
        exe.arg_dict['beta'][:] = beta
        exe.grad_dict['data'][:] = init_data_grad
        exe.grad_dict['gamma'][:] = init_gamma_grad
        exe.grad_dict['beta'][:] = init_beta_grad
        exe.forward()
        exe.backward([mx.nd.array(out_grad, ctx=ctx)])
        gt_data_grad, gt_gamma_grad, gt_beta_grad = \
            npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
        assert_almost_equal(exe.grad_dict['data'].asnumpy(),
                            gt_data_grad + init_data_grad, backward_check_eps, backward_check_eps)
        assert_almost_equal(exe.grad_dict['gamma'].asnumpy(),
                            gt_gamma_grad + init_gamma_grad, backward_check_eps, backward_check_eps)
        assert_almost_equal(exe.grad_dict['beta'].asnumpy(),
                            gt_beta_grad + init_beta_grad, backward_check_eps, backward_check_eps)
@with_seed()
def test_norm():
    """Check mx.symbol.norm (ord=1 and ord=2) forward/backward against numpy/scipy.

    Covers single-axis and two-axis reductions, float16/32/64, and both
    settings of MXNET_SAFE_ACCUMULATION. On Windows only one pass runs,
    using whatever MXNET_SAFE_ACCUMULATION is already set in the environment.
    """
    try:
        import scipy
        assert LooseVersion(scipy.__version__) >= LooseVersion('0.1')
        from scipy.linalg import norm as sp_norm
    except (AssertionError, ImportError):
        print("Could not import scipy.linalg.norm or scipy is too old. "
              "Falling back to numpy.linalg.norm which is not numerically stable.")
        from numpy.linalg import norm as sp_norm

    def l1norm(input_data, axis=0, keepdims=True):
        # L1 norm reference: sum of absolute values along `axis`
        return np.sum(abs(input_data), axis=axis, keepdims=keepdims)

    def l2norm(input_data, axis=0, keepdims=True):
        # L2 norm reference: scipy (stable) or numpy fallback
        return sp_norm(input_data, axis=axis, keepdims=keepdims)

    ctx = default_context()
    data = mx.symbol.Variable('data')
    in_data_dim = random_sample([2,3,4], 1)[0]
    in_shape = rand_shape_nd(in_data_dim, dim=5)
    epsilon = 1e-3
    acc_type = {np.float16: np.float32, np.float32: np.float32, np.float64: np.float64,
                np.int32: np.int32, np.int64: np.int64}
    dtype_to_str = {np.float16: 'float16', np.float32: 'float32', np.float64: 'float64',
                    np.int32: 'int32', np.int64: 'int64'}
    is_windows = sys.platform.startswith('win')
    for enforce_safe_acc in ["1", "0"]:
        if is_windows:
            if enforce_safe_acc == "0":
                break
            enforce_safe_acc = "0" if "MXNET_SAFE_ACCUMULATION" not in os.environ else os.environ["MXNET_SAFE_ACCUMULATION"]
        else:
            os.environ["MXNET_SAFE_ACCUMULATION"] = enforce_safe_acc
        for order in [1, 2]:
            for dtype in [np.float16, np.float32, np.float64]:
                for i in range(in_data_dim):
                    for out_dtype in ['float32', 'float64']:
                        backward_dtype = np.float32 if out_dtype == 'float32' else np.float64
                        accumulation_type = acc_type[dtype]
                        if enforce_safe_acc == "0":
                            # without safe accumulation everything stays in the input dtype
                            backward_dtype = dtype
                            out_dtype = dtype_to_str[dtype]
                            accumulation_type = dtype
                        skip_backward = 'int' in out_dtype
                        in_data = np.random.uniform(-1, 1, in_shape).astype(accumulation_type)
                        # keep inputs away from zero so the L1 subgradient is well-defined
                        in_data[abs(in_data) < epsilon] = 2 * epsilon
                        norm_sym = mx.symbol.norm(data=data, ord=order, axis=i, out_dtype=out_dtype, keepdims=True)
                        # '==' (not 'is') to compare against the int literal
                        npy_out = l1norm(in_data, i) if order == 1 else l2norm(in_data, i)
                        npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
                        check_symbolic_forward(norm_sym, [in_data.astype(dtype)], [npy_out.astype(out_dtype)],
                                               rtol=1e-2 if dtype == np.float16 else 1e-3,
                                               atol=1e-4 if dtype == np.float16 else 1e-5, ctx=ctx, dtype=dtype)
                        if dtype is not np.float16 and not skip_backward:
                            check_symbolic_backward(norm_sym, [in_data.astype(dtype)],
                                                    [np.ones(npy_out.shape).astype(out_dtype)],
                                                    [npy_out_backward], rtol=1e-3, atol=1e-5, ctx=ctx,
                                                    dtype=backward_dtype)
                        # Disable numeric gradient https://github.com/apache/incubator-mxnet/issues/11509
                        # check gradient
                        if dtype is not np.float16 and not skip_backward:
                            check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
                                                   rtol=1e-1, atol=1e-3, dtype=backward_dtype)
                        if i < in_data_dim-1:
                            # also reduce over a pair of adjacent axes
                            norm_sym = mx.symbol.norm(data=data, ord=order, axis=(i, i+1), keepdims=True)
                            npy_out = l1norm(in_data, (i, i+1)) if order == 1 else l2norm(in_data, (i, i+1))
                            npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
                            check_symbolic_forward(norm_sym, [in_data], [npy_out.astype(dtype)],
                                                   rtol=1e-2 if dtype == np.float16 else 1e-3,
                                                   atol=1e-4 if dtype == np.float16 else 1e-5, ctx=ctx)
                            if dtype is not np.float16 and not skip_backward:
                                check_symbolic_backward(norm_sym, [in_data],
                                                        [np.ones(npy_out.shape).astype(out_dtype)],
                                                        [npy_out_backward.astype(out_dtype)],
                                                        rtol=1e-3, atol=1e-5, ctx=ctx, dtype=backward_dtype)
                            # check gradient
                            if dtype is not np.float16 and not skip_backward:
                                check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
                                                       rtol=1e-1, atol=1e-3, dtype=backward_dtype)
def test_layer_norm():
    """Run LayerNorm checks over accumulation modes, dtypes, shapes, axes and eps."""
    for enforce_safe_acc in ("1", "0"):
        os.environ["MXNET_SAFE_ACCUMULATION"] = enforce_safe_acc
        for dtype, fwd_eps, bwd_eps in zip((np.float16, np.float32, np.float64),
                                           (1E-2, 1E-3, 1E-4),
                                           (1E-2, 1E-3, 1E-4)):
            if dtype == np.float16:
                # large input + fp16 does not pass the forward check
                shapes = [(10, 6, 5), (10, 10)]
                finite_checks = [True, True]
            else:
                shapes = [(10, 6, 5), (10, 10), (128 * 32, 512)]
                finite_checks = [True, True, False]
            for shape, finite_grad_check in zip(shapes, finite_checks):
                for axis in range(-len(shape), len(shape)):
                    for eps in (1E-2, 1E-3):
                        check_layer_normalization(
                            shape, axis, eps, dtype=dtype,
                            forward_check_eps=fwd_eps,
                            backward_check_eps=bwd_eps,
                            npy_grad_check=(dtype != np.float16),
                            finite_grad_check=finite_grad_check)
# Numpy Implementation of Sequence Ops
def sequence_last_numpy(array, lengths, axis):
    """NumPy reference for SequenceLast.

    `axis` is the sequence axis; `lengths` holds per-batch sequence lengths,
    or None to take the final time step for every batch entry.
    """
    # view as [batch, seqlen, ...]
    seq_first = np.moveaxis(array, axis, 1)
    if lengths is None:
        return seq_first[:, -1]
    lens = list(lengths)
    batch = seq_first.shape[0]
    return np.array([seq_first[i, int(lens[i]) - 1] for i in range(batch)])
def sequence_mask_numpy(array, lengths, axis, value):
    """NumPy reference for SequenceMask.

    Overwrites positions past each sequence's length with `value` along the
    sequence axis `axis`; returns the input unchanged when lengths is None.
    """
    if lengths is None:
        return array
    # work on a [batch, seqlen, ...] view of a copy so the input is untouched
    masked = np.moveaxis(array.copy(), axis, 1)
    lens = list(lengths)
    for b in range(masked.shape[0]):
        masked[b, int(lens[b]):] = value
    return np.moveaxis(masked, 1, axis)
def sequence_reverse_numpy(array, lengths, axis):
    """NumPy reference for SequenceReverse.

    Reverses the first lengths[i] steps of each sequence along `axis`;
    when lengths is None, every full sequence is reversed.
    """
    # work on a [batch, seqlen, ...] view of a copy
    rev = np.moveaxis(array.copy(), axis, 1)
    n_batch, seqlen = rev.shape[0], rev.shape[1]
    lens = [seqlen] * n_batch if lengths is None else list(lengths)
    for b in range(n_batch):
        n = int(lens[b])
        rev[b, :n] = rev[b, :n][::-1]
    return np.moveaxis(rev, 1, axis)
def check_sequence_func(ftype, mask_value=0, axis=0):
    """Check a Sequence* op ('last', 'mask' or 'reverse') against its numpy reference.

    Runs over several shapes, with and without an explicit sequence_length
    input, and with int32/float32 length dtypes; `axis` is the sequence axis.
    """
    # bind with label
    xpu = default_context()
    X = mx.symbol.Variable('X')
    L = mx.symbol.Variable('L') # lengths
    shapes = [(3, 4), (1, 1), (3, 4, 3, 1, 1)]
    for seqlenQ in [True, False]:
        for ary_dtype in [np.float32]:
            for idx_dtype in [np.int32, np.float32]:
                for s in shapes:
                    x = mx.random.uniform(-1, 1, s, ctx=mx.cpu()).astype(ary_dtype).copyto(xpu)
                    # the batch axis is whichever of the first two dims is not the sequence axis
                    batch = s[1] if (axis == 0) else s[0]
                    seqlen = s[axis]
                    l_np = np.random.randint(1, seqlen + 1, batch)
                    l = mx.nd.array(l_np, ctx=mx.cpu(), dtype=idx_dtype).copyto(xpu)
                    if not seqlenQ:
                        # lengths unused in this pass; None selects the no-lengths numpy path
                        l_np = None
                    args = {'data':X, 'use_sequence_length':seqlenQ, "axis":axis}
                    if seqlenQ:
                        args['sequence_length'] = L
                    if ftype == "last":
                        Y = mx.symbol.SequenceLast(**args)
                        np_out = sequence_last_numpy(x.asnumpy(), l_np, axis)
                    elif ftype == "mask":
                        args['value'] = mask_value
                        Y = mx.symbol.SequenceMask(**args)
                        np_out = sequence_mask_numpy(x.asnumpy(), l_np, axis, mask_value)
                    elif ftype == "reverse":
                        Y = mx.symbol.SequenceReverse(**args)
                        np_out = sequence_reverse_numpy(x.asnumpy(), l_np, axis)
                    fargs = [x, l] if seqlenQ else [x]
                    gargs = [x.asnumpy(), l_np] if seqlenQ else [x.asnumpy()]
                    check_symbolic_forward(Y, fargs, [np_out], dtype="asnumpy")
                    # gradient flows only through the data input X, for every grad_req mode
                    check_numeric_gradient(Y, gargs, grad_nodes={'X':'write'},
                                           numeric_eps=1e-2, rtol=1e-2)
                    check_numeric_gradient(Y, gargs, grad_nodes={'X':'add'},
                                           numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
                    check_numeric_gradient(Y, gargs, grad_nodes={'X':'null'},
                                           numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
@with_seed()
@unittest.skip("Flaky test: https://github.com/apache/incubator-mxnet/issues/11395")
def test_sequence_last():
    """SequenceLast over both possible sequence axes."""
    for axis in (0, 1):
        check_sequence_func("last", axis=axis)
@with_seed()
def test_sequence_mask():
    """SequenceMask over both sequence axes with distinct fill values."""
    for axis, fill in ((0, -2.3), (1, 0.3)):
        check_sequence_func("mask", axis=axis, mask_value=fill)
def check_sequence_reverse(xpu):
    """Check SequenceReverse forward output against hand-computed expectations."""
    # sample data: 3 time steps x 2 batch entries x 3 features
    arr = np.array(
        [[[ 1., 2., 3.],
          [ 4., 5., 6.]],
         [[ 7., 8., 9.],
          [ 10., 11., 12.]],
         [[ 13., 14., 15.],
          [ 16., 17., 18.]]])
    # expected result for full reversal (no lengths, or lengths [3, 3])
    arr1 = np.array(
        [[[ 13., 14., 15.],
          [ 16., 17., 18.]],
         [[ 7., 8., 9.],
          [ 10., 11., 12.]],
         [[ 1., 2., 3.],
          [ 4., 5., 6.]]])
    # expected result for lengths [2, 2]: only the first two steps reversed
    arr2 = np.array(
        [[[ 7., 8., 9.],
          [ 10., 11., 12.]],
         [[ 1., 2., 3.],
          [ 4., 5., 6.]],
         [[ 13., 14., 15.],
          [ 16., 17., 18.]]])
    # expected result for lengths [2, 3]: per-batch reversal depths differ
    arr3 = np.array(
        [[[ 7., 8., 9.],
          [ 16., 17., 18.]],
         [[ 1., 2., 3.],
          [ 10., 11., 12.]],
         [[ 13., 14., 15.],
          [ 4., 5., 6.]]])
    # test for matrix case
    seq_len_1 = [1, 2, 2]
    arr_4 = np.array([[7., 8., 9.], [16., 17., 5.4]], dtype=np.float32)
    arr_5 = np.array([[7., 17., 5.4], [16., 8., 9.]], dtype=np.float32)
    def test_wrapper(arr, xpu, sequence_length=None, use_sequence_length=False):
        # Build a SequenceReverse symbol, run it once, and return numpy output.
        seq = mx.sym.Variable('seq')
        if sequence_length and use_sequence_length:
            seq_len = mx.sym.Variable('seq_len')
        else:
            # ensure that both are disabled, not just one
            seq_len=None
            use_sequence_length=False
        rev = mx.sym.SequenceReverse(data=seq, sequence_length=seq_len, use_sequence_length=use_sequence_length)
        # MxNet symbol execution
        if sequence_length:
            bound = rev.bind(xpu, {'seq': mx.nd.array(arr), 'seq_len': mx.nd.array(sequence_length)})
        else:
            bound = rev.bind(xpu, {'seq': mx.nd.array(arr)})
        fwd = bound.forward()
        return fwd[0].asnumpy()
    # test cases
    assert_array_equal(test_wrapper(arr, xpu, use_sequence_length=False), arr1)
    assert_array_equal(test_wrapper(arr, xpu, sequence_length=[3, 3], use_sequence_length=True), arr1)
    assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 2], use_sequence_length=True), arr2)
    assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 3], use_sequence_length=True), arr3)
    assert_array_equal(test_wrapper(arr_4, xpu, sequence_length=seq_len_1, use_sequence_length=True), arr_5)
@with_seed()
def test_sequence_reverse():
    """SequenceReverse: generic axis-0 check plus hand-crafted CPU cases."""
    check_sequence_func("reverse", axis=0)
    check_sequence_reverse(mx.cpu())
def mathematical_core_binary(name,
                             forward_mxnet_call,
                             forward_numpy_call,
                             backward_numpy_call1,
                             backward_numpy_call2,
                             data1_init=2.,
                             data2_init=3.,
                             grad_init=2.):
    """Check a binary mxnet op against numpy forward/backward references.

    The *_call arguments build the mxnet symbol and compute the numpy forward
    result plus the per-input gradients. Inputs are (3, 4) arrays filled with
    data1_init/data2_init; backward uses a head gradient filled with grad_init.
    `name` is informational only.
    """
    shape = (3, 4)
    sym1 = mx.symbol.Variable('data1')
    sym2 = mx.symbol.Variable('data2')
    # rand() calls kept (then overwritten) so the seeded RNG stream matches
    np1 = np.random.rand(3, 4)
    np2 = np.random.rand(3, 4)
    np1[:] = data1_init
    np2[:] = data2_init
    grad1 = mx.nd.empty(shape)
    grad2 = mx.nd.empty(shape)
    out_sym = forward_mxnet_call(sym1, sym2)
    executor = out_sym.bind(default_context(),
                            args=[mx.nd.array(np1), mx.nd.array(np2)],
                            args_grad=[grad1, grad2])
    executor.forward(is_train=True)
    assert_almost_equal(executor.outputs[0], forward_numpy_call(np1, np2))
    head_grad = mx.nd.empty(shape)
    head_grad[:] = grad_init
    executor.backward(head_grad)
    np_head = np.ones(shape)
    np_head[:] = grad_init
    assert_almost_equal(grad1, np_head * backward_numpy_call1(np1, np2))
    assert_almost_equal(grad2, np_head * backward_numpy_call2(np1, np2))
def mathematical_core(name, forward_mxnet_call, forward_numpy_call, backward_numpy_call, data_init=5., grad_init=2.):
    """Check a unary mxnet op against numpy forward/backward references.

    The input is a (3, 4) array filled with data_init; backward is checked
    with a head gradient filled with grad_init. `name` is informational only.
    """
    shape = (3, 4)
    data_sym = mx.symbol.Variable('data')
    data_np = np.ones(shape)
    data_np[:] = data_init
    data_nd = mx.nd.array(data_np)
    grad_nd = mx.nd.empty(shape)
    grad_nd[:] = 3  # sentinel value; a correct backward overwrites it
    out_sym = forward_mxnet_call(data_sym)
    executor = out_sym.bind(default_context(), args=[data_nd], args_grad=[grad_nd])
    executor.forward(is_train=True)
    assert_almost_equal(executor.outputs[0], forward_numpy_call(data_np))
    head_grad = mx.nd.empty(shape)
    head_grad[:] = grad_init
    expected_grad = head_grad.asnumpy() * backward_numpy_call(data_np)
    executor.backward(head_grad)
    assert_almost_equal(grad_nd, expected_grad)
@with_seed()
def test_special_functions_using_scipy():
    """Check gamma/gammaln/erf/erfinv against scipy.special references.

    NOTE(review): a second function with this exact name is defined later in
    this file and shadows this one at import time, so the erf/erfinv cases
    here never run under test discovery -- consider renaming one of them.
    """
    try:
        from scipy import special as scipy_special
    except ImportError:
        # scipy is optional; only skip on a missing import, never on
        # unrelated errors (the previous bare `except:` hid everything)
        print("Could not import scipy. Skipping unit tests for special functions")
        return

    # gamma: d/dx gamma(x) = gamma(x) * psi(x)
    mathematical_core("gamma", lambda x: mx.sym.gamma(x), lambda x: scipy_special.gamma(x),
                      lambda x: scipy_special.gamma(x) * scipy_special.psi(x), 0.5, 0.5)
    # gammaln: d/dx ln(gamma(x)) = psi(x)
    mathematical_core("gammaln", lambda x: mx.sym.gammaln(x), lambda x: scipy_special.gammaln(x),
                      lambda x: scipy_special.psi(x), 0.5, 0.5)
    # erf: d/dx erf(x) = 2/sqrt(pi) * exp(-x^2)
    mathematical_core("erf", lambda x: mx.sym.erf(x), lambda x: scipy_special.erf(x),
                      lambda x: 2.0 / math.sqrt(math.pi) * np.exp(-(x ** 2)), 0.5, 0.5)
    # erfinv: d/dx erfinv(x) = sqrt(pi)/2 * exp(erfinv(x)^2)
    mathematical_core("erfinv", lambda x: mx.sym.erfinv(x), lambda x: scipy_special.erfinv(x),
                      lambda x: 0.5 * math.sqrt(math.pi) * np.exp(scipy_special.erfinv(x) ** 2), 0.5, 0.5)
def rounding(name, forward_mxnet_call, forward_numpy_call, data_init=5., grad_init=2.):
    """Forward-only comparison of an mxnet rounding op with its numpy reference."""
    shape = (3, 4)
    data_sym = mx.symbol.Variable('data')
    data_np = np.ones(shape)
    data_np[:] = data_init
    out_sym = forward_mxnet_call(data_sym)
    executor = out_sym.bind(default_context(), args=[mx.nd.array(data_np)])
    executor.forward(is_train=True)
    assert_almost_equal(executor.outputs[0], forward_numpy_call(data_np))
@with_seed()
def test_mathematical():
    """Check elementwise math ops against numpy forward results and analytic
    derivatives (via mathematical_core / mathematical_core_binary / rounding).

    Each call passes: name, mxnet symbol builder, numpy forward reference,
    numpy derivative(s), and optionally data_init / grad_init overrides
    chosen to keep inputs inside the op's domain (e.g. |x| < 1 for arcsin).
    """
    # rsqrt: d/dx x^-1/2 = -1/(2 x^(3/2))
    mathematical_core("rsqrt",
                      lambda x: mx.sym.rsqrt(x),
                      lambda x: 1 / np.sqrt(x),
                      lambda x: -(1.0 / (2.0 * x * np.sqrt(x))))
    # tan
    mathematical_core("tan", lambda x: mx.sym.tan(x), lambda x: np.tan(x), lambda x: np.tan(x) ** 2 + 1)
    # arcsin
    mathematical_core("arcsin", lambda x: mx.sym.arcsin(x), lambda x: np.arcsin(x),
                      lambda x: 1. / (1. - x ** 2) ** (1. / 2.), 0.5, 0.5)
    # arccos
    mathematical_core("arccos", lambda x: mx.sym.arccos(x), lambda x: np.arccos(x),
                      lambda x: -1. / (1. - x ** 2.) ** (1. / 2.), 0.5, 0.5)
    # arctan
    mathematical_core("arctan", lambda x: mx.sym.arctan(x), lambda x: np.arctan(x),
                      lambda x: 1. / (x ** 2. + 1.), 0.5, 0.5)
    # hypot
    mathematical_core_binary("hypot",
                             lambda x, y: mx.sym.hypot(x, y),
                             lambda x, y: np.hypot(x, y),
                             lambda x, y: x / np.hypot(x, y),
                             lambda x, y: y / np.hypot(x, y),
                             0.5, 0.5, 0.5)
    # hypot scalar
    mathematical_core("hypot scalar",
                      lambda x: mx.sym.hypot(x, 3),
                      lambda x: np.hypot(x, 3),
                      lambda x: x / np.hypot(x, 3),
                      0.5, 0.5)
    # degrees: derivative is the constant 180/pi
    mathematical_core("degrees",
                      lambda x: mx.sym.degrees(x),
                      lambda x: np.degrees(x),
                      lambda x: 180./np.pi,
                      0.5, 0.5)
    # radians: derivative is the constant pi/180
    mathematical_core("radians",
                      lambda x: mx.sym.radians(x),
                      lambda x: np.radians(x),
                      lambda x: np.pi / 180.,
                      0.6, 1)
    # sinh
    mathematical_core("sinh", lambda x: mx.sym.sinh(x), lambda x: np.sinh(x), lambda x: np.cosh(x))
    # cosh
    mathematical_core("cosh", lambda x: mx.sym.cosh(x), lambda x: np.cosh(x), lambda x: np.sinh(x), 5, 5)
    # tanh
    mathematical_core("tanh", lambda x: mx.sym.tanh(x), lambda x: np.tanh(x), lambda x: 1. - np.tanh(x) ** 2, 0.5, 1)
    # arcsinh
    mathematical_core("arcsinh", lambda x: mx.sym.arcsinh(x), lambda x: np.arcsinh(x),
                      lambda x: 1./(x**2 + 1.)**(1./2.))
    # arccosh (requires x > 1; default data_init=5 satisfies that)
    mathematical_core("arccosh", lambda x: mx.sym.arccosh(x), lambda x: np.arccosh(x),
                      lambda x: 1./(x**2 - 1.)**(1./2.))
    # arctanh (requires |x| < 1, hence data_init=0.5)
    mathematical_core("arctanh", lambda x: mx.sym.arctanh(x), lambda x: np.arctanh(x),
                      lambda x: -1./(x**2 - 1.), 0.5)
    # log1p
    mathematical_core("log1p", lambda x: mx.sym.log1p(x), lambda x: np.log1p(x),
                      lambda x: 1. / (1.0 + x), 0.5, 0.5)
    # expm1
    mathematical_core("expm1", lambda x: mx.sym.expm1(x), lambda x: np.expm1(x),
                      lambda x: np.exp(x), 0.5, 0.5)
    # log10
    mathematical_core("log10", lambda x: mx.sym.log10(x), lambda x: np.log10(x),
                      lambda x: 1. / (x * np.log(10.)))
    # log2
    mathematical_core("log2", lambda x: mx.sym.log2(x), lambda x: np.log2(x),
                      lambda x: 1. / (x * np.log(2.)))
    # rint (forward-only: rounding ops are checked without a gradient)
    rounding("rint", lambda x: mx.sym.rint(x), lambda x: np.rint(x))
    # fix
    rounding("fix", lambda x: mx.sym.fix(x), lambda x: np.fix(x))
@with_seed()
def test_special_functions_using_scipy():
    """Check gamma/gammaln against scipy.special references.

    NOTE(review): this duplicates (and, being defined later, shadows) an
    earlier function of the same name that also covered erf/erfinv --
    consider merging them or renaming one.
    """
    try:
        from scipy import special as scipy_special
    except ImportError:
        # scipy is optional; only skip on a missing import, never on
        # unrelated errors (the previous bare `except:` hid everything)
        print("Could not import scipy. Skipping unit tests for special functions")
        return

    # gamma: d/dx gamma(x) = gamma(x) * psi(x)
    mathematical_core("gamma", lambda x: mx.sym.gamma(x), lambda x: scipy_special.gamma(x),
                      lambda x: scipy_special.gamma(x) * scipy_special.psi(x), 0.5, 0.5)
    # gammaln: d/dx ln(gamma(x)) = psi(x)
    mathematical_core("gammaln", lambda x: mx.sym.gammaln(x), lambda x: scipy_special.gammaln(x),
                      lambda x: scipy_special.psi(x), 0.5, 0.5)
@with_seed()
def test_clip():
    """Forward/backward checks for clip, plus a monitor-callback smoke test."""
    data = mx.symbol.Variable('data')
    shape = (30, 30)
    data_tmp = np.random.uniform(-1, 1, shape).astype('float32')
    clipped = mx.sym.clip(data, a_max=0.6, a_min=-0.6)
    check_symbolic_forward(clipped, [data_tmp], [np.clip(data_tmp, -0.6, 0.6)])
    # gradient is 1 where the input lies within [a_min, a_max], else 0
    in_range = np.where(data_tmp <= 0.6, [1], [0]) * np.where(data_tmp >= -0.6, [1], [0])
    check_symbolic_backward(clipped, [data_tmp], [np.ones(shape)], [in_range])
    # Test monitor on symbol using clip
    def noop_callback(name, arr):
        pass
    exe = clipped.simple_bind(ctx=mx.current_context(), data=shape)
    exe.set_monitor_callback(noop_callback, monitor_all=True)
    exe.forward(is_train=True)
    exe.backward(out_grads=mx.nd.ones(shape))
    mx.nd.waitall()
@with_seed()
def test_init():
    """Test value-initializing ops: zeros/ones, arange and contrib.arange_like."""
    def test_basic_val_init(sym_func, np_func, shape, dtype):
        # Bind a constant-producing symbol with no inputs and compare with numpy.
        x = sym_func(shape=shape, dtype=dtype)
        exe = x.bind(default_context(), args=[], args_grad=[])
        exe.forward(is_train=True)
        assert_almost_equal(exe.outputs[0], np_func(shape=shape, dtype=dtype))
        assert exe.outputs[0].asnumpy().dtype == dtype
    def test_arange():
        # General Random Tests
        dtype_list = [np.float32, np.float64, np.int32, np.uint8]
        config_list = [(10,),
                       (0, 10),
                       (5, 100, 4),
                       (50, -50, -2),
                       (-100, 100, 1),
                       (1.3, 456.6, 1.3)]
        for dtype in dtype_list:
            for config in config_list:
                repeats = random.choice([1, 3])
                np_out = np.repeat(np.arange(*config, dtype=dtype), repeats)
                nd_out = mx.nd.arange(*config, repeat=repeats, dtype=dtype)
                assert_almost_equal(np_out, nd_out)
    def test_arange_inferstop():
        # stop=None with infer_range: the length is inferred from the
        # shape of the other elemwise_add operand (5 elements).
        s = mx.sym.arange(start=0, stop=None, infer_range=True)
        s = mx.sym.elemwise_add(s, mx.sym.zeros(shape=[5]))
        exe = s.bind(ctx=mx.cpu(), args={})
        exe.forward()
        assert_almost_equal(exe.outputs[0], np.array([0,1,2,3,4]))
    def test_arange_like():
        # arange_like with an axis: output length equals that axis's extent
        shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
        axis_list = [0, -1]
        for sh in shape_list:
            for axis in axis_list:
                val = np.random.rand(*sh)
                data = mx.nd.array(val)
                nd_out = mx.nd.contrib.arange_like(data, start=0, axis=axis)
                np_out = np.arange(start=0, stop=sh[axis])
                assert_almost_equal(nd_out.asnumpy(), np_out)
    def test_arange_like_without_axis():
        # without an axis the output covers every element, keeping the input shape
        shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
        for sh in shape_list:
            val = np.random.rand(*sh)
            data = mx.nd.array(val)
            nd_out = mx.nd.contrib.arange_like(data, start=0)
            np_out = np.arange(start=0, stop=val.size)
            assert_almost_equal(nd_out.asnumpy(), np_out.reshape(sh))
    test_basic_val_init(mx.sym.zeros, np.zeros, (3, 4), np.float32)
    test_basic_val_init(mx.sym.ones, np.ones, 3, np.int32)
    test_basic_val_init(mx.sym.ones, np.ones, (2, 2, 3), np.float16)
    test_arange()
    test_arange_inferstop()
    test_arange_like()
    test_arange_like_without_axis()
@with_seed()
def test_order():
    """Test sort/topk/argsort/argmax/argmin against the numpy reference gt_topk."""
    ctx = default_context()

    def gt_topk(dat, axis, ret_typ, k, is_ascend):
        # Numpy reference for topk/sort. ret_typ selects "indices", "value",
        # or a 0/1 "mask" of the selected positions.
        if ret_typ == "indices":
            if is_ascend:
                indices = np.arange(k)
            else:
                indices = np.arange(-1, -k-1, -1)
            ret = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
        elif ret_typ == "value":
            if is_ascend:
                indices = np.arange(k)
            else:
                indices = np.arange(-1, -k-1, -1)
            ret = np.take(np.sort(dat, axis=axis), axis=axis, indices=indices, mode='wrap')
        else:
            # "mask" branch is hard-coded for the (5, 5, 5, 5) input used below
            assert dat.shape == (5, 5, 5, 5)
            assert axis is None or axis == 1
            ret = np.zeros(dat.shape)
            if is_ascend:
                indices = np.arange(k)
            else:
                indices = np.arange(-1, -k-1, -1)
            gt_argsort = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
            if axis is None:
                ret.ravel()[gt_argsort] = 1
            else:
                # NOTE(review): the inner loop variable `k` shadows the
                # parameter `k`; harmless because `indices` was computed above.
                for i in range(5):
                    for j in range(5):
                        for k in range(5):
                            ret[i, gt_argsort[i, :, j, k], j, k] = 1
        return ret

    # shuffled permutation of 0..624 so every value is distinct (no sort ties)
    dshape = (5, 5, 5, 5)
    a_npy = np.arange(np.prod(dshape)).astype(np.float32)
    np.random.shuffle(a_npy)
    a_npy = a_npy.reshape(dshape)
    a = mx.sym.Variable('a')

    def get_large_matrix():
        # 100 rows, each an independently shuffled permutation of range(300096)
        data = np.array([np.arange(300096).astype(np.float32)])
        data = np.repeat(data, 100, axis=0)
        np.apply_along_axis(np.random.shuffle, 1, data)
        return data

    large_matrix_npy = get_large_matrix()

    # sort over each axis (and flattened), both directions
    for axis in [1, 3, None]:
        for is_ascend in [True, False]:
            b = mx.sym.sort(a, axis=axis, is_ascend=is_ascend)
            if axis is None:
                out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=a_npy.size, is_ascend=is_ascend)
            else:
                out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=5, is_ascend=is_ascend)
            check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, ctx=ctx)
            check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])

    # topk indices on a large matrix; index outputs carry zero gradient
    b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
    check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
                            out_grads=[np.random.normal(size=(100, 5))],
                            expected=[np.zeros((100, 300096))])
    check_symbolic_forward(b, location={'a': large_matrix_npy},
                           expected=[gt_topk(dat=large_matrix_npy, axis=1,
                                             ret_typ="indices", k=5,
                                             is_ascend=is_ascend)])

    # argsort == full-k descending topk indices
    b = mx.sym.argsort(a, axis=1, is_ascend=False)
    check_symbolic_backward(sym=b, location={'a': a_npy},
                            out_grads=[np.random.normal(size=(5, 5, 5, 5))],
                            expected=[np.zeros((5, 5, 5, 5))])
    check_symbolic_forward(b, location={'a': a_npy},
                           expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=5,
                                             is_ascend=False)])

    # argmax == k=1 descending topk indices
    b = mx.sym.argmax(a, axis=1, keepdims=True)
    check_symbolic_backward(sym=b, location={'a': a_npy},
                            out_grads=[np.random.normal(size=(5, 5, 5, 5))],
                            expected=[np.zeros((5, 5, 5, 5))])
    check_symbolic_forward(b, location={'a': a_npy},
                           expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
                                             is_ascend=False)])

    # argmin == k=1 ascending topk indices
    b = mx.sym.argmin(a, axis=1, keepdims=True)
    check_symbolic_backward(sym=b, location={'a': a_npy},
                            out_grads=[np.random.normal(size=(5, 5, 5, 5))],
                            expected=[np.zeros((5, 5, 5, 5))])
    check_symbolic_forward(b, location={'a': a_npy},
                           expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
                                             is_ascend=True)])

    # repeat the topk value checks for several dtypes
    for dtype in [np.float16, np.float32, np.float64]:
        dshape = (5, 5, 5, 5)
        a_npy = np.arange(np.prod(dshape)).astype(dtype)
        np.random.shuffle(a_npy)
        a_npy = a_npy.reshape(dshape)
        a = mx.sym.Variable('a')
        for axis in [1, 3, None]:
            K = [1, 3, 5, 7] if axis is None else [1, 3, 5]
            for k in K:
                for is_ascend in [True, False]:
                    b = mx.sym.topk(a, axis=axis, is_ascend=is_ascend, ret_typ="value", k=k)
                    out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=k, is_ascend=is_ascend)
                    check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, ctx=ctx)
                    check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])

        b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
        check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
                                out_grads=[np.random.normal(size=(100, 5))],
                                expected=[np.zeros((100, 300096))])
        check_symbolic_forward(b, location={'a': large_matrix_npy},
                               expected=[gt_topk(dat=large_matrix_npy, axis=1,
                                                 ret_typ="indices", k=5, is_ascend=is_ascend)])

        # NOTE(review): the symbol below uses the loop-leftover `is_ascend`
        # but the expected value is computed with is_ascend=False -- looks
        # inconsistent when is_ascend ends the loop as True; confirm intent.
        b = mx.sym.topk(a, axis=3, is_ascend=is_ascend, ret_typ="indices", k=3)
        check_symbolic_backward(sym=b, location={'a': a_npy},
                                out_grads=[np.random.normal(size=(5, 5, 5, 3))],
                                expected=[np.zeros((5, 5, 5, 5))])
        check_symbolic_forward(b, location={'a': a_npy},
                               expected=[gt_topk(dat=a_npy, axis=3, ret_typ="indices", k=3,
                                                 is_ascend=False)])

        b = mx.sym.topk(a, axis=1, is_ascend=True, ret_typ="mask", k=3)
        check_symbolic_backward(sym=b, location={'a': a_npy},
                                out_grads=[np.random.normal(size=(5, 5, 5, 5))],
                                expected=[np.zeros((5, 5, 5, 5))])
        check_symbolic_forward(b, location={'a': a_npy},
                               expected=[gt_topk(dat=a_npy, axis=1, ret_typ="mask", k=3,
                                                 is_ascend=True)])
@with_seed()
def test_blockgrad():
    """BlockGrad must pass data through unchanged and allow a no-op backward."""
    inp = mx.sym.Variable('a')
    blocked = mx.sym.BlockGrad(inp)
    exe = blocked.simple_bind(ctx=default_context(), a=(10, 10))
    a_npy = np.random.rand(10, 10)
    exe.forward(is_train=True, a=a_npy)
    assert_almost_equal(exe.outputs[0], a_npy)
    exe.backward()  # No error if BlockGrad works
@with_seed()
def test_take():
    """Test mx.sym.take against np.take: forward values under 'clip', 'wrap'
    and 'raise' out-of-range modes, the gradient w.r.t. the data input, and
    gradient accumulation through repeated takes under autograd."""
    def grad_helper(grad_in, axis, idx):
        # Accumulate 1.0 into the slice of grad_in selected by idx along
        # `axis`; branches cover every (ndim, axis) pair generated below
        # (data_ndim <= 4, so axis <= 3 after normalization; axis 4 is spare).
        if axis == 0:
            if axis == len(grad_in.shape) - 1:
                grad_in[idx] += 1.0
            else:
                grad_in[idx, :] += 1.0
        elif axis == 1:
            if axis == len(grad_in.shape) - 1:
                grad_in[:, idx] += 1.0
            else:
                grad_in[:, idx, :] += 1.0
        elif axis == 2:
            if axis == len(grad_in.shape) - 1:
                grad_in[:, :, idx] += 1.0
            else:
                grad_in[:, :, idx, :] += 1.0
        elif axis == 3:
            if axis == len(grad_in.shape) - 1:
                grad_in[:, :, :, idx] += 1.0
            else:
                grad_in[:, :, :, idx, :] += 1.0
        elif axis == 4:
            grad_in[:, :, :, :, idx] += 1.0
        else:
            raise ValueError("axis %d is not supported..." % axis)
    def check_output_n_grad(data_shape, idx_shape, axis, mode, out_of_range=True):
        # Build take(a, indices) and compare forward against np.take and
        # backward (with an all-ones head gradient) against grad_helper.
        data = mx.sym.Variable('a')
        idx = mx.sym.Variable('indices')
        idx = mx.sym.BlockGrad(idx)
        result = mx.sym.take(a=data, indices=idx, axis=axis, mode=mode)
        exe = result.simple_bind(default_context(), a=data_shape,
                                 indices=idx_shape, axis=axis, mode=mode)
        data_real = np.random.normal(size=data_shape).astype('float32')
        if out_of_range:
            # Indices span [-n, n): negative values exercise wrap/clip handling.
            idx_real = np.random.randint(low=-data_shape[axis], high=data_shape[axis], size=idx_shape)
            if mode == 'raise':
                # Scale every index by n (after mapping 0 -> 1) so each one is
                # guaranteed to be out of range and must trigger the error.
                idx_real[idx_real == 0] = 1
                idx_real *= data_shape[axis]
        else:
            idx_real = np.random.randint(low=0, high=data_shape[axis], size=idx_shape)
        if axis < 0:
            axis += len(data_shape)
        # Output shape of take: data dims before axis + index shape + data dims after axis.
        grad_out = np.ones((data_shape[0:axis] if axis > 0 else ()) + idx_shape + (data_shape[axis+1:] if axis < len(data_shape) - 1 else ()), dtype='float32')
        grad_in = np.zeros(data_shape, dtype='float32')
        exe.arg_dict['a'][:] = mx.nd.array(data_real)
        exe.arg_dict['indices'][:] = mx.nd.array(idx_real)
        exe.forward(is_train=True)
        if out_of_range and mode == 'raise':
            # In 'raise' mode with out-of-range indices the error must surface
            # when the output is synchronized via asnumpy().
            try:
                mx_out = exe.outputs[0].asnumpy()
            except MXNetError as e:
                return
            else:
                # Did not raise exception
                assert False, "did not raise %s" % MXNetError.__name__
        assert_almost_equal(exe.outputs[0], np.take(data_real, idx_real, axis=axis, mode=mode))
        for i in np.nditer(idx_real):
            if mode == 'clip':
                # NOTE(review): upper clip bound is n, not n-1; harmless here
                # because idx_real < n always, but it differs from take's own
                # clip semantics — TODO confirm intended.
                i = np.clip(i, 0, data_shape[axis])
            grad_helper(grad_in, axis, i)
        exe.backward([mx.nd.array(grad_out)])
        assert_almost_equal(exe.grad_dict['a'], grad_in)
    def check_autograd_req():
        # Take every element of a (2, 8) array one at a time via two chained
        # takes; summing and backprop must give a gradient of all ones,
        # verifying gradients accumulate correctly across repeated takes.
        row_len = 2
        col_len = 8
        shape = (row_len, col_len)
        sc = mx.nd.random.uniform(-1.0, 1.0, shape=shape, dtype="float32")
        sc.attach_grad()
        i = mx.nd.array([0], dtype="int64")
        j = mx.nd.array([0], dtype="int64")
        with mx.autograd.record(train_mode=True):
            xs = []
            for _ in range(row_len):
                x_i = []
                for _ in range(col_len):
                    x_ij = sc.take(i).squeeze(axis=0).take(j).squeeze(axis=0)
                    x_i.append(x_ij)
                    j = j + 1
                i = i + 1
                j = j - col_len # reset j
                xs.append(mx.nd.stack(*x_i))
            x = mx.nd.stack(*xs)
            x = x.sum()
        x.backward()
        assert_almost_equal(np.ones(sc.grad.shape), sc.grad)
    # Sweep all modes over random data/index shapes and every valid axis
    # (including negative axes); 'raise' additionally gets an in-range run.
    for mode in ['clip', 'wrap', 'raise']:
        for data_ndim in range(1, 5):
            for idx_ndim in range(1, 4):
                for axis in range(-data_ndim, data_ndim):
                    data_shape = ()
                    for _ in range(data_ndim):
                        data_shape += (np.random.randint(low=1, high=5), )
                    idx_shape = ()
                    for _ in range(idx_ndim):
                        idx_shape += (np.random.randint(low=1, high=5), )
                    if mode == 'raise':
                        check_output_n_grad(data_shape, idx_shape, axis, 'raise', False)
                    check_output_n_grad(data_shape, idx_shape, axis, mode)
    check_autograd_req()
@with_seed()
def test_grid_generator():
    """Test GridGenerator for both transform types:
    'affine' — an identity affine matrix must produce the regular pixel grid,
    and the affine gradient must match the analytic Jacobian;
    'warp' — an all-ones flow must shift the grid by one pixel, with the flow
    gradient being the head gradient rescaled by 2/(dim-1).
    Both paths also verify grad_req='add' accumulation."""
    # transform_type = affine
    test_case = [(20,21),(4,3),(6,12),(15,17)]
    for target_shape in test_case:
        affine_matrix = mx.sym.Variable('affine')
        grid = mx.sym.GridGenerator(data=affine_matrix,transform_type='affine', target_shape=target_shape)
        exe = grid.simple_bind(ctx=default_context(), affine=(1,6), grad_req='write')
        # check forward
        # Identity affine transform [[1,0,0],[0,1,0]].
        exe.arg_dict['affine'][:] = np.array([[1.0,0,0,0,1.0,0]])
        exe.forward(is_train=True)
        output = exe.outputs[0].asnumpy()
        # Map normalized coordinates [-1, 1] back to pixel coordinates.
        output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
        output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
        xv, yv = np.meshgrid(np.arange(target_shape[0]), np.arange(target_shape[1]))
        assert_almost_equal(output[0,0], yv.T)
        assert_almost_equal(output[0,1], xv.T)
        # check backward
        out_grad = np.random.normal(size=(1,2)+target_shape)
        exe.backward(mx.nd.array(out_grad))
        # The grid is (normalized x, normalized y, 1) per pixel; the affine
        # gradient is head_grad (2 x HW) times this 3 x HW basis, transposed.
        tmp = np.zeros((3,target_shape[0]*target_shape[1]))
        tmp[0] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) % target_shape[1]) * (2.0 / (target_shape[1]-1))
        tmp[1] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) // target_shape[1]) * (2.0 / (target_shape[0]-1))
        tmp[2] = 1
        grad_est = np.dot(out_grad[0].reshape(2,target_shape[0]*target_shape[1]),tmp.T).reshape(1,6)
        assert_almost_equal(exe.grad_dict['affine'], grad_est, rtol=1e-3, atol=1e-5)
        # check addto
        exe = grid.simple_bind(ctx=default_context(), affine=(1,6), grad_req='add')
        grid_grad_npy = np.random.normal(size=exe.grad_dict['affine'].shape)
        exe.grad_dict['affine'][:] = grid_grad_npy
        exe.arg_dict['affine'][:] = np.array([[1.0, 0, 0, 0, 1.0, 0]])
        exe.forward(is_train=True)
        exe.backward(mx.nd.array(out_grad))
        # With grad_req='add' the new gradient accumulates onto the old one.
        assert_almost_equal(exe.grad_dict['affine'], grad_est + grid_grad_npy, rtol=1e-2, atol=1e-5)
    # transform_type = warp
    test_case = [(12,21),(4,3),(6,12)]
    for target_shape in test_case:
        flow = mx.sym.Variable('flow')
        grid = mx.sym.GridGenerator(data=flow,transform_type='warp', target_shape=target_shape)
        exe = grid.simple_bind(ctx=default_context(), flow=(1,2)+target_shape, grad_req='write')
        # check forward
        # A constant flow of 1 shifts every grid point by one pixel.
        exe.arg_dict['flow'][:] = np.ones((1,2)+target_shape)
        exe.forward(is_train=True)
        output = exe.outputs[0].asnumpy()
        output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
        output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
        xv, yv = np.meshgrid(np.arange(target_shape[0])+1, np.arange(target_shape[1])+1)
        assert_almost_equal(output[0,0], yv.T)
        assert_almost_equal(output[0,1], xv.T)
        # check backward
        out_grad = np.random.normal(size=(1,2)+target_shape)
        exe.backward(mx.nd.array(out_grad))
        # Warp gradient: head gradient rescaled by the normalization factor.
        grad_est = np.zeros((1,2)+target_shape)
        grad_est[0,0] = out_grad[0,0] / ((target_shape[1]-1.0) / 2.0)
        grad_est[0,1] = out_grad[0,1] / ((target_shape[0]-1.0) / 2.0)
        assert_almost_equal(exe.grad_dict['flow'], grad_est, rtol=1e-3)
        # check addto
        exe_add = grid.simple_bind(ctx=default_context(), flow=(1, 2) + target_shape, grad_req='add')
        flow_grad_npy = np.random.normal(size=exe_add.grad_dict['flow'].shape)
        exe_add.arg_dict['flow'][:] = np.ones((1, 2) + target_shape)
        exe_add.grad_dict['flow'][:] = flow_grad_npy
        exe_add.forward(is_train=True)
        exe_add.backward(mx.nd.array(out_grad))
        assert_almost_equal(exe_add.grad_dict['flow'], grad_est + flow_grad_npy, rtol=1e-3, atol=1e-5)
@with_seed()
def test_index2d():
    """batch_take must return data[i, x[i]] for every row i."""
    for _ in range(30):
        rows = np.random.randint(1, 100)
        cols = np.random.randint(1, 500)
        mat = mx.random.uniform(-1, 1, shape=(rows, cols), ctx=default_context())
        idx = mx.nd.array(np.random.randint(0, cols, size=rows),
                          ctx=default_context(), dtype='int32')
        picked = mx.nd.batch_take(mat, idx)
        # Reference: one element per row, selected by the index vector.
        assert_almost_equal(picked, mat.asnumpy()[np.arange(rows), idx.asnumpy()])
@with_seed()
def test_cast():
    """Cast must convert src->dst on forward; on backward the head gradient
    (given in dst dtype) must come back cast to the src dtype."""
    for src_dtype in [np.int32, np.float32, np.float16]:
        for dst_dtype in [np.float32, np.int32, np.float16]:
            inp = mx.sym.Variable('x', dtype=src_dtype)
            cast = mx.sym.Cast(inp, dtype=dst_dtype)
            exe = cast.simple_bind(ctx=default_context(), x=(10, 10))
            # Binding must preserve the declared input/output dtypes.
            assert exe.arg_arrays[0].dtype == src_dtype
            assert exe.outputs[0].dtype == dst_dtype
            values = np.random.uniform(-10, 10, size=(10, 10))
            exe.arg_arrays[0][:] = values
            exe.forward(is_train=True)
            exe.backward(mx.nd.array(values, dtype=dst_dtype, ctx=default_context()))
            assert_almost_equal(exe.outputs[0],
                                values.astype(src_dtype).astype(dst_dtype),
                                rtol=1e-3, atol=1e-5)
            assert_almost_equal(exe.grad_arrays[0],
                                values.astype(dst_dtype).astype(src_dtype),
                                rtol=1e-3, atol=1e-5)
def get_cast_op_data():
    """Yield float32 test values for cast tests.

    Values are generated in the vicinity of every representable float16
    mantissa (and mid-way between adjacent ones), over the full float32
    exponent range including subnormals, for both signs, with a final NaN.
    """
    fp16_mantissa_bits = 10
    fp32_mantissa_bits = 23
    # Exponent sweep covers float32 subnormals (min normal exponent minus the
    # mantissa width, minus one extra) through one past the max exponent.
    exp_lo = -126 - fp32_mantissa_bits - 1
    exp_hi = 127 + 1
    steps = 2 ** (fp16_mantissa_bits + 1)
    for sign in (1.0, -1.0):
        for exponent in range(exp_lo, exp_hi + 1):
            scale = 2.0 ** exponent
            for step in range(0, steps):
                mantissa = 1.0 + step / float(steps)
                for nudge in (-1.0, 0.0, 1.0):
                    # nudge by one float32 ulp of the mantissa in each direction
                    yield sign * scale * (mantissa + nudge / 2 ** fp32_mantissa_bits)
    # Add np.nan as a final data value to process
    yield np.nan
# Test requires all platforms to round float32->float16 with same round-to-nearest-even policy.
@with_seed()
def test_cast_float32_to_float16():
    """Compare MXNet's fp32->fp16 cast (both Cast and amp_cast) elementwise
    against numpy's, over the full get_cast_op_data() sweep, with NaN == NaN."""
    input_np = np.array(list(get_cast_op_data())).astype(np.float32)
    # The intermediate cast to np.float64 below gets around a numpy rounding bug that is fixed
    # as of numpy 1.17 by PR https://github.com/numpy/numpy/pull/12722
    expected_output = input_np.astype(np.float64).astype(np.float16)
    def check_cast(op, input_np, expected_output):
        # Bind op(x, dtype=float16) and compare every output element exactly.
        x = mx.sym.Variable('x', dtype=np.float32)
        sym = op(x, dtype=np.float16)
        ctx = default_context()
        exe = sym.bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float32, ctx=ctx)})
        assert exe.arg_arrays[0].dtype == np.float32
        assert exe.outputs[0].dtype == np.float16
        exe.forward(is_train=True)
        sym_output = exe.outputs[0].asnumpy()
        for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output):
            assert (model_fp16_val == np_fp16_val) or \
                   (np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \
                   'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format(
                       fp32_val, model_fp16_val, np_fp16_val)
    check_cast(mx.sym.Cast, input_np, expected_output)
    check_cast(mx.sym.amp_cast, input_np, expected_output)
@with_seed()
def test_amp_multicast():
    """amp_multicast over mixed fp16/fp32 inputs must widen every output to
    the widest input type (float32) and preserve values (NaN included)."""
    x = mx.sym.Variable('x', dtype=np.float16)
    y = mx.sym.Variable('y', dtype=np.float32)
    z = mx.sym.Variable('z', dtype=np.float16)
    ctx = default_context()
    res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
    exe = res.bind(ctx, {'x': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx),
                         'y': mx.nd.random.uniform(shape=(3, 3), dtype=np.float32, ctx=ctx),
                         'z': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx)})
    exe.forward(is_train=True)
    out1, out2, out3 = exe.outputs
    # All three outputs are widened to float32, the widest input dtype.
    assert out1.asnumpy().dtype == np.float32
    assert out2.asnumpy().dtype == np.float32
    assert out3.asnumpy().dtype == np.float32
    def check_amp_multicast(input_np, expected_output):
        # Value check on the first output (the fp16 'x' input widened to
        # fp32): each element must match numpy's fp16->fp32 cast exactly,
        # treating NaN as equal to NaN.
        x = mx.sym.Variable('x', dtype=np.float16)
        y = mx.sym.Variable('y', dtype=np.float32)
        z = mx.sym.Variable('z', dtype=np.float16)
        ctx = default_context()
        res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
        exe = res.bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float16, ctx=ctx),
                             'y': mx.nd.array(input_np, dtype=np.float32, ctx=ctx),
                             'z': mx.nd.array(input_np, dtype=np.float16, ctx=ctx)})
        exe.forward(is_train=True)
        sym_output = exe.outputs[0].asnumpy()
        # BUGFIX: the failure message (and variable names) previously claimed
        # a fp32->fp16 cast — copied from test_cast_float32_to_float16 — but
        # this path widens fp16 inputs to fp32.
        for fp16_val, model_fp32_val, np_fp32_val in zip(input_np, sym_output, expected_output):
            assert (model_fp32_val == np_fp32_val) or \
                   (np.isnan(model_fp32_val) and np.isnan(np_fp32_val)), \
                'fp16->fp32 cast mismatch: with fp16 value {}, model_fp32 = {}, numpy_fp32 = {}'.format(
                    fp16_val, model_fp32_val, np_fp32_val)
    input_np = np.array(list(get_cast_op_data()), dtype=np.float16)
    expected_output = input_np.astype(np.float32)
    check_amp_multicast(input_np, expected_output)
@with_seed()
def test_all_finite():
    """all_finite / multi_all_finite must output 0 when any input array holds
    a non-finite value and 1 when every value is finite."""
    data = mx.sym.Variable("data", dtype=np.float32)
    data2 = mx.sym.Variable("data2", dtype=np.float32)
    finite_arr = mx.nd.array([[0, 0]])
    inf_arr = mx.nd.array([[np.inf, np.inf]])
    z = mx.sym.all_finite(data)
    ctx = default_context()
    def finite_flag(sym, bindings):
        # Bind, run inference-mode forward, and return the scalar flag.
        exe = sym.bind(ctx, bindings)
        exe.forward(is_train=False)
        return exe.outputs[0].asnumpy()[0]
    # Single-array op.
    assert finite_flag(z, {'data': inf_arr}) == 0
    assert finite_flag(z, {'data': finite_arr}) == 1
    # Multi-array op: one bad array poisons the result.
    multi = mx.sym.multi_all_finite(data, data2, num_arrays=2)
    assert finite_flag(multi, {'data': finite_arr, 'data2': inf_arr}) == 0
    multi = mx.sym.multi_all_finite(data, data2, num_arrays=2)
    assert finite_flag(multi, {'data': finite_arr, 'data2': finite_arr}) == 1
@with_seed()
def test_repeat():
    """Test mx.nd.repeat / mx.sym.repeat: forward against np.repeat (flattened
    and per-axis), analytic backward on a 2-D input for axis 0 and 1, and the
    numeric gradient."""
    def test_repeat_forward():
        # Random ndarrays of 1..6 dims; repeat flattened and along each axis
        # must match numpy exactly.
        ndim_max = 6 # max number of dims of the ndarray
        size_max = 10 # max number of elements in each dim
        repeats = 3
        for ndim in range(1, ndim_max+1):
            shape = ()
            for i in range(0, ndim):
                shape += (np.random.randint(1, size_max+1), )
            a = np.random.random_sample(size=shape)
            aa = np.repeat(a, repeats)
            b = mx.nd.array(a, ctx=default_context())
            bb = mx.nd.repeat(b, repeats)
            assert_almost_equal(aa, bb)
            for axis in range(0, ndim):
                aa = np.repeat(a, repeats, axis)
                bb = mx.nd.repeat(b, repeats, axis)
                assert_almost_equal(aa, bb)
    def test_repeat_backward(axis):
        # The gradient of repeat sums, for each input element, the gradients
        # of its `repeats` copies along the repeated axis.
        data = mx.sym.Variable('data')
        n1 = 3
        n2 = 4
        shape = (n1, n2)
        data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
        arr_data = mx.nd.array(data_tmp)
        arr_grad = mx.nd.empty(shape)
        repeats = 2
        test = mx.sym.repeat(data, repeats=repeats, axis=axis)
        exe = test.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
        npout_grad = np.random.randint(0, 10, n1 * n2 * repeats)
        if axis == 0:
            npout_grad = npout_grad.reshape(n1 * repeats, n2)
        elif axis == 1:
            npout_grad = npout_grad.reshape(n1, n2 * repeats)
        else:
            raise RuntimeError("Invalid axis value")
        out_grad = mx.nd.array(npout_grad)
        exe.backward(out_grad)
        expected_grad = np.zeros(shape)
        if axis == 0:
            for i in range(shape[0]):
                for j in range(shape[1]):
                    # Copies of row i live at rows [i*repeats, i*repeats+repeats).
                    k = i * repeats
                    expected_grad[i][j] = sum(npout_grad[k:k + repeats, j])
        elif axis == 1:
            for j in range(shape[1]):
                for i in range(shape[0]):
                    # Copies of column j live at columns [j*repeats, j*repeats+repeats).
                    k = j * repeats
                    expected_grad[i][j] = sum(npout_grad[i, k:k + repeats])
        else:
            raise RuntimeError("Invalid axis value")
        assert_almost_equal(expected_grad, arr_grad, rtol=1e-3)
    def test_repeat_numeric_gradient():
        # Finite-difference check of the repeat gradient on a small 2-D input.
        data = mx.sym.Variable('data')
        n1 = 3
        n2 = 4
        shape = (n1, n2)
        data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
        repeats = 2
        test = mx.sym.repeat(data, repeats=repeats, axis=0)
        check_numeric_gradient(test, [data_tmp], numeric_eps=1e-3, rtol=1e-2)
    test_repeat_forward()
    test_repeat_backward(axis=0)
    test_repeat_backward(axis=1)
    test_repeat_numeric_gradient()
@with_seed()
def test_reverse():
    """reverse along axes [1, 2] must equal numpy ::-1 slicing on those axes,
    for both the forward value and the back-propagated gradient."""
    inp = mx.symbol.Variable('data')
    shape = (5, 5, 5)
    inp_np = np.random.uniform(-1, 1, shape)
    sym = mx.sym.reverse(inp, axis=[1, 2])
    ograd = np.random.uniform(-1, 1, shape)
    check_numeric_gradient(sym, [inp_np], numeric_eps=2E-2)
    check_symbolic_forward(sym, [inp_np], [inp_np[:, ::-1, ::-1]])
    # The gradient of a reversal is the reversed head gradient.
    check_symbolic_backward(sym, [inp_np], [ograd], [ograd[:, ::-1, ::-1]])
@with_seed()
def test_tile():
    """Test mx.nd.tile / mx.sym.tile: forward against np.tile (including
    empty tensors and empty reps under np_shape semantics), the analytic
    backward on a 2x2 input, the numeric gradient, and rejection of invalid
    (zero/negative) reps."""
    def test_normal_case():
        ndim_min = 1
        ndim_max = 5  # max number of dims of the ndarray
        size_max = 10  # max number of elements in each dim
        length_max = 3  # max length of reps
        rep_max = 10  # max number of tiling in each dim
        for ndim in range(ndim_min, ndim_max+1):
            shape = []
            for i in range(1, ndim+1):
                shape.append(np.random.randint(1, size_max+1))
            shape = tuple(shape)
            a = np.random.randint(0, 100, shape)
            b = mx.nd.array(a, dtype=a.dtype)
            reps_len = np.random.randint(1, length_max+1)
            reps_tuple = ()
            # NOTE(review): range(1, reps_len) yields reps of length
            # reps_len-1, so reps can be empty here; looks like an off-by-one
            # (range(reps_len) would match the drawn length) — TODO confirm.
            for i in range(1, reps_len):
                reps_tuple += (np.random.randint(1, rep_max), )
            reps_array = np.asarray(reps_tuple)
            a_tiled = np.tile(a, reps_array)
            b_tiled = mx.nd.tile(b, reps_tuple).asnumpy()
            assert same(a_tiled, b_tiled)
    def test_empty_tensor():
        # Tiling a zero-size tensor must keep the zero dimension (np_shape mode).
        shape = (2, 3, 0, 4)
        with mx.np_shape():
            a = np.array([], dtype=np.int32).reshape(shape)
            b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
            reps = (2, 4, 6)
            a_tiled = np.tile(a, reps)
            b_tiled = mx.nd.tile(b, reps).asnumpy()
            assert same(a_tiled, b_tiled)
    def test_empty_reps():
        # Empty reps must behave like numpy: the input is returned unchanged.
        a = np.array([[2, 3, 4], [5, 6, 7]], dtype=np.int32)
        b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
        a_tiled = np.tile(a, ())
        b_tiled = mx.nd.tile(b, ()).asnumpy()
        assert same(a_tiled, b_tiled)
    def test_tile_backward():
        # Gradient of tile: each input element collects the gradients of all
        # its tiled copies, which sit at stride (reps1, reps2) positions.
        data = mx.sym.Variable('data')
        n1 = 2
        n2 = 2
        shape = (n1, n2)
        data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
        arr_data = mx.nd.array(data_tmp)
        arr_grad = mx.nd.empty(shape)
        reps1 = 2
        reps2 = 2
        reps = (reps1, reps2)
        test = mx.sym.tile(data, reps=reps)
        exe = test.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
        npout_grad = np.random.randint(0, 10, n1 * n2 * reps1 * reps2).reshape(n1 * reps1, n2 * reps2)
        out_grad = mx.nd.array(npout_grad)
        exe.backward(out_grad)
        expected_grad = np.zeros(shape)
        for i in range(shape[0]):
            for j in range(shape[1]):
                expected_grad[i][j] += sum(sum(npout_grad[i:(n1 * reps1):reps1, j:(n2 * reps2):reps2]))
        assert_almost_equal(expected_grad, arr_grad, rtol=1e-3)
    def test_tile_numeric_gradient():
        # Finite-difference check of the tile gradient.
        data = mx.sym.Variable('data')
        n1 = 2
        n2 = 2
        shape = (n1, n2)
        data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
        reps1 = 2
        reps2 = 2
        reps = (reps1, reps2)
        test = mx.sym.tile(data, reps=reps)
        check_numeric_gradient(test, [data_tmp], numeric_eps=1e-2, rtol=1e-2)
    def test_invalid_reps():
        # Zero and negative repetition counts must raise MXNetError.
        data = mx.nd.arange(16).reshape((4, 4))
        assert_exception(mx.nd.tile, MXNetError, data, (1, 2, -3))
        assert_exception(mx.nd.tile, MXNetError, data, (1, 0, 3))
    test_normal_case()
    with mx.np_shape():
        test_empty_tensor()
    test_empty_reps()
    test_tile_backward()
    test_tile_numeric_gradient()
    test_invalid_reps()
@with_seed()
def test_one_hot():
    """Test mx.nd.one_hot against a hand-built reference: random index
    tensors (several index dtypes, including out-of-range indices which must
    produce all-off rows), empty index tensors, and depth == 0."""
    def test_normal_case(index_type=np.int32):
        # Indices are drawn from [-dim_size_max, dim_size_max]; anything
        # outside [0, depth) must yield an all-off_value row.
        ndim_max = 6
        dim_size_max = 20
        depth = int(dim_size_max / 2)
        on_value = 1
        off_value = 0
        for ndim in range(1, ndim_max+1):
            shape = ()
            for i in range(1, ndim+1):
                shape += (np.random.randint(1, dim_size_max+1), )
            indices = np.random.randint(-dim_size_max, dim_size_max+1,
                                        size=np.prod(shape)).reshape(shape)
            mx_one_hot_array = mx.nd.one_hot(
                mx.nd.array(indices, ctx=default_context(), dtype=index_type),
                depth=depth, dtype=np.int32)
            # Build the reference row by row over the flattened indices.
            expected_array = np.zeros((np.prod(shape), depth), dtype=np.int32)
            expected_array[:] = off_value
            indices_1d = indices.flatten()
            row = 0
            for idx in indices_1d:
                if 0 <= idx < depth:
                    expected_array[row, idx] = on_value
                row += 1
            expected_array = expected_array.reshape(shape + (depth, ))
            one_hot_array = mx_one_hot_array.asnumpy()
            assert same(expected_array, one_hot_array)
    def test_empty_indices():
        # Zero-size index tensor (np_shape mode) must give a zero-size output
        # with the depth axis appended.
        shape = (2, 0, 9, 3)
        with mx.np_shape():
            indices = np.array([]).reshape(shape)
            depth = 10
            mx_one_hot_array = mx.nd.one_hot(
                mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
                depth=depth, dtype=np.int32
            ).asnumpy()
            expected_array = np.array([], dtype=np.int32).reshape(shape + (depth,))
            assert same(expected_array, mx_one_hot_array)
    def test_zero_depth():
        # depth == 0 must yield an output with a trailing zero-length axis.
        shape = (2, 4, 9, 3)
        indices = np.ones(shape)
        depth = 0
        mx_one_hot_array = mx.nd.one_hot(
            mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
            depth=depth, dtype=np.int32).asnumpy()
        expected_array = np.array([], dtype=np.int32).reshape(shape + (depth, ))
        assert same(expected_array, mx_one_hot_array)
    test_normal_case(index_type=np.int32)
    test_normal_case(index_type=np.float64)
    test_normal_case(index_type=np.float32)
    test_normal_case(index_type=np.float16)
    with mx.np_shape():
        test_empty_indices()
    test_zero_depth()
@with_seed()
def test_where():
    """Test mx.sym.where / mx.nd.where with a same-shape condition and with a
    per-row (1-D) condition vector: forward against a reference, analytic
    backward under grad_req 'write' and 'add', the numeric gradient, and
    rejection of incompatible condition shapes."""
    def get_forward_expected_output(condition, x, y):
        # Reference: elementwise select when condition matches x's shape;
        # whole-row select when condition is a vector of length x.shape[0].
        original_shape = x.shape
        out = np.zeros(original_shape)
        if condition.shape == x.shape:
            for index, c in np.ndenumerate(condition):
                if c != 0:
                    out[index] = x[index]
                else:
                    out[index] = y[index]
        elif condition.shape == (x.shape[0], ):
            s = x.shape
            m = s[0]
            n = int(np.prod(s)/s[0])
            x2d = x.reshape((m, n))
            y2d = y.reshape((m, n))
            out = out.reshape((m, n))
            for i in range(0, m):
                if condition[i] != 0:
                    for j in range(0, n):
                        out[i, j] = x2d[i, j]
                else:
                    for j in range(0, n):
                        out[i, j] = y2d[i, j]
        else:
            raise RuntimeError("Invalid condition shape for where op")
        out = out.reshape(original_shape)
        return out
    def get_forward_inputs_same_shape(shape):
        # x in [1, 6), y in [7, 11): disjoint ranges so outputs are attributable.
        condition_np = np.random.randint(0, 2, np.prod(shape)).reshape(shape)
        x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
        y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
        return condition_np, x_np, y_np
    def get_forward_inputs_condition_vector(shape):
        # Condition is one flag per leading-axis row.
        condition_np = np.random.randint(0, 2, shape[0])
        x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
        y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
        return condition_np, x_np, y_np
    def get_backward_input(shape):
        return np.random.randint(20, 30, np.prod(shape)).reshape(shape)
    def get_backward_expected_outputs(grad_in, condition):
        # where routes the head gradient to x where condition holds, else to
        # y; the condition itself receives a zero gradient.
        shape = grad_in.shape
        grad_cond = np.zeros(condition.shape)
        grad_x = np.empty(shape)
        grad_y = np.empty(shape)
        for index, c in np.ndenumerate(condition):
            if 0 != c:
                grad_x[index] = grad_in[index]
                grad_y[index] = 0
            else:
                grad_x[index] = 0
                grad_y[index] = grad_in[index]
        return grad_cond, grad_x, grad_y
    def test_where_helper(shape, same_shape):
        if same_shape:
            condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
        else:
            condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
        out_expected = get_forward_expected_output(condition_np, x_np, y_np)
        grad_in_np = get_backward_input(shape)
        grad_expected_cond, grad_expected_x, grad_expected_y\
            = get_backward_expected_outputs(grad_in_np, condition_np)
        condition = mx.sym.Variable('condition')
        x = mx.sym.Variable('x')
        y = mx.sym.Variable('y')
        grad_in_mx = mx.nd.array(grad_in_np, dtype=np.int32)
        where_sym = mx.sym.where(condition, x, y)
        # test req='write'
        where_exe_write = where_sym.simple_bind(ctx=default_context(),
                                                condition=condition_np.shape,
                                                x=x_np.shape, y=y_np.shape,
                                                grad_req='write')
        # test forward req='write'
        outputs = where_exe_write.forward(is_train=True, condition=condition_np,
                                          x=x_np, y=y_np)
        assert same(outputs[0].asnumpy(), out_expected)
        # test backward req='write'
        where_exe_write.backward(grad_in_mx)
        assert same(where_exe_write.grad_dict['x'].asnumpy(), grad_expected_x)
        assert same(where_exe_write.grad_dict['y'].asnumpy(), grad_expected_y)
        assert same(where_exe_write.grad_dict['condition'].asnumpy(), grad_expected_cond)
        # test req='add'
        # Pre-seed the gradient buffers; 'add' must accumulate onto them.
        x_grad_init = np.random.randint(30, 40, np.prod(shape)).reshape(shape)
        y_grad_init = np.random.randint(40, 50, np.prod(shape)).reshape(shape)
        where_exe_add = where_sym.simple_bind(ctx=default_context(),
                                              condition=condition_np.shape,
                                              x=x_np.shape, y=y_np.shape,
                                              grad_req='add')
        where_exe_add.grad_dict['x'][:] = x_grad_init
        where_exe_add.grad_dict['y'][:] = y_grad_init
        # test forward req='add'
        outputs = where_exe_add.forward(is_train=True, condition=condition_np, x=x_np, y=y_np)
        assert same(outputs[0].asnumpy(), out_expected)
        # test backward req='add'
        where_exe_add.backward(grad_in_mx)
        x_ograd = where_exe_add.grad_dict['x'].asnumpy()
        y_ograd = where_exe_add.grad_dict['y'].asnumpy()
        assert same(x_ograd, grad_expected_x+x_grad_init)
        assert same(y_ograd, grad_expected_y+y_grad_init)
    def test_where_numeric_gradient(shape, same_shape):
        # Finite-difference check of the x/y gradients only (condition is not
        # differentiable).
        condition = mx.sym.Variable('condition')
        x = mx.sym.Variable('x')
        y = mx.sym.Variable('y')
        where_sym = mx.sym.where(condition, x, y)
        if same_shape:
            condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
        else:
            condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
        check_numeric_gradient(where_sym, [condition_np, x_np, y_np], grad_nodes=['x', 'y'])
    def test_invalid_shape():
        # A condition vector whose length does not match the leading axis must
        # be rejected, in both the symbolic and ndarray APIs.
        condition = mx.sym.Variable('condition')
        x = mx.sym.Variable('x')
        y = mx.sym.Variable('y')
        where_sym = mx.sym.where(condition, x, y)
        assert_exception(lambda: where_sym.eval(x=mx.nd.array([[2,3],[4,5],[6,7]]),
                                                y=mx.nd.array([[8,9],[10,11],[12,13]]),
                                                condition=mx.nd.array([1,0])), MXNetError)
        assert_exception(lambda: mx.nd.where(x=mx.nd.array([[2,3],[4,5],[6,7]]),
                                             y=mx.nd.array([[8,9],[10,11],[12,13]]),
                                             condition=mx.nd.array([1,0])), MXNetError)
    def test_1d_cond():
        # A 1-D condition selects whole rows of x or y.
        cond = mx.nd.array([1, 0, 1])
        x = mx.nd.array([[2, 3], [4, 5], [6, 7]])
        y = mx.nd.array([[7, 8], [9, 10], [10, 11]])
        expect_out = np.array([[2, 3], [9, 10], [6, 7]])
        out = mx.nd.where(cond, x, y).asnumpy()
        assert(expect_out.all() == out.all())
    test_where_helper((5, 9), True)
    test_where_helper((5, 9), False)
    test_where_helper((5, 7, 9), True)
    test_where_helper((5, 7, 9), False)
    test_where_helper((10, 8, 15, 3), True)
    test_where_helper((10, 8, 15, 3), False)
    test_where_numeric_gradient((5, 9), True)
    test_where_numeric_gradient((5, 9), False)
    test_where_numeric_gradient((5, 7, 9), True)
    test_where_numeric_gradient((5, 7, 9), False)
    test_invalid_shape()
    test_1d_cond()
@with_seed()
def test_softmin():
    """softmin(x) must equal softmax(-x) for random shapes/axes across
    float16/32/64, with a zero input gradient for an all-ones head gradient
    under every grad_req; float16 skips the numeric-gradient check."""
    for ndim in range(1, 5):
        for dtype in [np.float16, np.float32, np.float64]:
            # Looser tolerances for the low-precision dtype.
            rtol, atol = (1e-2, 5e-3) if dtype is np.float16 else (1e-3, 1e-3)
            shape = np.random.randint(1, 5, size=ndim)
            axis = np.random.randint(-ndim, ndim)
            data = np.random.uniform(-2, 2, size=shape).astype(dtype)
            # Shrink fp16 inputs — presumably to limit rounding error in the
            # reference comparison; TODO confirm.
            data = data / 10 if dtype is np.float16 else data
            sym = mx.sym.softmin(axis=axis)
            # Reference via the softmax identity: softmin(x) == softmax(-x).
            expected_fwd = np_softmax(-data, axis=axis)
            expected_bwd = np.zeros(shape)
            check_symbolic_forward(sym, [data], [expected_fwd], atol=atol, dtype=dtype)
            for req in ['null', 'add', 'write']:
                check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
                                        rtol=rtol, atol=atol, grad_req=req, dtype=dtype)
            if dtype is not np.float16:
                check_numeric_gradient(sym, [data], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_new_softmax():
    """softmax over a random (possibly negative) axis must match np_softmax;
    with an all-ones head gradient the input gradient is identically zero."""
    for ndim in range(1, 5):
        shape = np.random.randint(1, 5, size=ndim)
        axis = np.random.randint(-ndim, ndim)
        data = np.random.uniform(-2, 2, size=shape)
        sym = mx.sym.softmax(axis=axis)
        fwd_expected = np_softmax(data, axis=axis)
        # d(sum softmax)/dx == 0, so a ones head gradient yields zeros.
        bwd_expected = np.zeros(shape)
        check_symbolic_forward(sym, [data], [fwd_expected])
        for grad_req in ['null', 'add', 'write']:
            check_symbolic_backward(sym, [data], [np.ones(fwd_expected.shape)],
                                    [bwd_expected], rtol=1e-2, atol=1e-3,
                                    grad_req=grad_req)
        check_numeric_gradient(sym, [data], rtol=1e-2, atol=1e-3)
@with_seed()
def test_softmax_with_temperature():
    """Temperature-scaled softmax along axis 0 must match np_softmax with the
    same temperature, for temperatures 1..10; the zero-gradient property of
    softmax holds at every temperature."""
    for ndim in range(1, 5):
        shape = np.random.randint(1, 5, size=ndim)
        data = np.random.uniform(-2, 2, size=shape)
        for temperature in range(1, 11):
            sym = mx.sym.softmax(axis=0, temperature=temperature)
            fwd_expected = np_softmax(data, axis=0, temperature=temperature)
            bwd_expected = np.zeros(shape)
            check_symbolic_forward(sym, [data], [fwd_expected], rtol=0.05, atol=1e-3)
            check_symbolic_backward(sym, [data], [np.ones(shape)], [bwd_expected],
                                    rtol=0.05, atol=1e-3)
            check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
@with_seed()
def test_log_softmax():
    """log_softmax addressed via a negative axis must equal
    log(np_softmax + 1e-20) along the equivalent positive axis."""
    for ndim in range(1, 5):
        for _ in range(5):
            shape = np.random.randint(1, 5, size=ndim)
            axis = np.random.randint(0, ndim)
            data = np.random.uniform(-2, 2, size=shape)
            # axis - ndim is the negative alias of the same axis.
            sym = mx.sym.log_softmax(axis=axis-ndim)
            # 1e-20 guards the reference log against exact zeros.
            check_symbolic_forward(sym, [data], [np.log(np_softmax(data, axis=axis)+1e-20)])
            check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
def test_softmax_with_large_inputs():
    """Softmax over a singleton axis must return exactly 1.0 even for inputs
    at the extremes of the float32 range (no overflow, underflow, or NaN)."""
    def run_softmax(input_data, true_output):
        data = mx.sym.Variable('data')
        out1 = data.softmax(axis=1)
        exec1 = out1.bind(default_context(), args={'data': input_data})
        exec1.forward()[0].wait_to_read()
        # Input shape is (1,1,1,2); axis 1 has size 1, so every probability is 1.
        ndarr = exec1.outputs[0][0][0][0]
        assert_almost_equal(ndarr, true_output, rtol=1e-5, atol=1e-5)
    run_softmax(mx.nd.array([[[[-1e30, -1e30]]]]), np.array([1.0, 1.0]))
    run_softmax(mx.nd.array([[[[1e30, 1e30]]]]), np.array([1.0, 1.0]))
    run_softmax(mx.nd.array([[[[-3.4e38, -3.4e38]]]]), np.array([1.0, 1.0]))
    run_softmax(mx.nd.array([[[[3.4e38, 3.4e38]]]]), np.array([1.0, 1.0]))
@with_seed()
def test_softmax_dtype():
    """softmax/softmin/log_softmax must produce (nearly) identical forward
    values and input gradients when run in a lower input precision versus a
    reference precision, optionally with an explicit output dtype, with
    MXNET_SAFE_ACCUMULATION enabled."""
    def check_dtypes_almost_equal(op_name,
                                  atol, rtol,
                                  grad_atol, grad_rtol,
                                  idtype, ref_dtype, odtype=None):
        # Run the same op on the same data in two input dtypes and compare
        # both the outputs and the input gradients.
        op = getattr(mx.nd, op_name)
        input_data = mx.random.uniform(shape=(100, 500))
        dtype_input = input_data.astype(idtype)
        ref_input = input_data.astype(ref_dtype)
        dtype_input.attach_grad()
        ref_input.attach_grad()
        with mx.autograd.record():
            dtype_softmax = op(dtype_input, axis=-1, dtype=odtype)
            ref_softmax = op(ref_input, axis=-1, dtype=odtype)
            assert_almost_equal(dtype_softmax, ref_softmax, rtol=rtol, atol=atol)
            dtype_softmax.backward()
            ref_softmax.backward()
            assert_almost_equal(dtype_input.grad, ref_input.grad, rtol=grad_rtol, atol=grad_atol)
    import sys
    is_windows = sys.platform.startswith('win')
    enforce_safe_acc = os.environ.get("MXNET_SAFE_ACCUMULATION", "0")
    if not is_windows or enforce_safe_acc == "1":
        # BUGFIX: the original set MXNET_SAFE_ACCUMULATION and never restored
        # it, leaking the setting into every test that runs afterwards in the
        # same process; save and restore it around the checks.
        prev_safe_acc = os.environ.get("MXNET_SAFE_ACCUMULATION")
        os.environ["MXNET_SAFE_ACCUMULATION"] = "1"
        try:
            check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
            check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
            check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
            check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
            check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
            check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
            check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
            check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
            check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
                                      'float16', 'float32')
            check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
                                      'float16', 'float32', 'float32')
            check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
                                      'float32', 'float64')
            check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
                                      'float32', 'float64', 'float64')
        finally:
            if prev_safe_acc is None:
                del os.environ["MXNET_SAFE_ACCUMULATION"]
            else:
                os.environ["MXNET_SAFE_ACCUMULATION"] = prev_safe_acc
@with_seed()
def test_softmax_with_length():
    """softmax(use_length=True, axis=1) must normalize only the first
    length[i, j] entries along axis 1 (the rest stay zero) and must route no
    gradient to the masked positions or to the length input."""
    def np_softmax_with_length(data, length):
        # Reference: per (i, j), softmax over the first `length[i, j]`
        # elements of axis 1; positions beyond the length remain zero.
        res = np.zeros(data.shape)
        for i in range(length.shape[0]):
            for j in range(length.shape[1]):
                leng = int(length[i, j])
                res[i, 0:leng, j] = np_softmax(data[i, 0:leng, j])
        return res
    ndim = 3
    shape = rand_shape_nd(ndim, dim=10)
    # Length tensor has the data shape with the softmax axis (1) removed.
    len_shape = list(shape)
    del len_shape[1]
    len_shape = tuple(len_shape)
    for dtype in [np.float16, np.float32, np.float64]:
        mx_data = rand_ndarray(shape, dtype=dtype)
        np_data = mx_data.asnumpy()
        np_length = np.random.randint(1, shape[1] + 1, len_shape)
        mx_length = mx.nd.array(np_length, dtype=np.int32)
        np_out = np_softmax_with_length(np_data, np_length)
        data = mx.sym.Variable("data")
        length = mx.sym.Variable("length")
        mx_sym = mx.sym.softmax(data=data, length=length, use_length=True, axis=1)
        location = {"data": mx_data, "length": mx_length}
        # Looser tolerances for float16.
        rtol = 1e-2 if dtype == np.float16 else 1e-3
        atol = 1e-4 if dtype == np.float16 else 1e-5
        check_symbolic_forward(mx_sym, location, [np_out], rtol=rtol, atol=atol, dtype="asnumpy")
        # With a ones head gradient the data gradient is zero (softmax
        # property) and the integer length input gets no gradient.
        check_symbolic_backward(mx_sym, location, [np.ones(shape, dtype=dtype)],
                                [np.zeros(shape), np.zeros(len_shape, dtype=np.int32)], rtol=1e-2, atol=1e-3, dtype="asnumpy")
@with_seed()
def test_pick():
    """pick must select data[..., index, ...] along `axis` with keepdims=True,
    honoring 'clip' and 'wrap' out-of-range index modes, for int and float
    index dtypes; the data gradient is verified numerically."""
    def test_pick_helper(index_type=np.int32):
        for mode in ['clip', 'wrap']:
            ndim = np.random.randint(1, 5)
            bshape = np.random.randint(1, 10, size=ndim)
            axis = np.random.randint(0, ndim)
            # Index shape is the data shape with the picked axis collapsed to 1.
            sshape = bshape.copy()
            sshape[axis] = 1
            data = np.random.uniform(-1, 1, size=bshape)
            if mode == 'wrap':
                # Exercise wrap-around with indices up to 2x out of range.
                index = np.random.randint(-2*bshape[axis], 2*bshape[axis], size=sshape)
            else:
                index = np.random.randint(0, bshape[axis], size=sshape)
            # Build a fancy-index selector: the (wrapped) index array on the
            # picked axis, broadcastable aranges everywhere else.
            exp = []
            for i in range(ndim):
                if i == axis:
                    if mode == 'wrap':
                        exp.append(index % bshape[axis])
                    else:
                        exp.append(index)
                else:
                    ishape = [1 for _ in range(ndim)]
                    ishape[i] = bshape[i]
                    exp.append(np.arange(bshape[i]).reshape(ishape))
            # BUGFIX: index with a *tuple* of arrays. Indexing a numpy array
            # with a list of arrays was deprecated in numpy 1.15 and raises in
            # modern numpy.
            expected = data[tuple(exp)]
            data = mx.nd.array(data, dtype='float32')
            index = mx.nd.array(index, dtype=index_type)
            out = mx.nd.pick(data, index, axis=axis, keepdims=True, mode=mode)
            assert_almost_equal(out.asnumpy(), expected)
            # Gradient check on the symbolic op (index gets no gradient).
            data_holder = data
            index_holder = index
            data = mx.sym.Variable('data')
            index = mx.sym.Variable('index')
            sym = mx.sym.pick(data, index, axis=axis, keepdims=True, mode=mode)
            check_numeric_gradient(sym, [data_holder, index_holder], grad_nodes=['data'])
    test_pick_helper(np.int32)
    test_pick_helper(np.float32)
def check_ctc_loss(acts, labels, loss_truth, contrib=False):
    """Run CTC loss in train and inference modes, require both to agree,
    compare against a ground-truth loss when one is given, and verify the
    numeric gradient w.r.t. the activations.

    `contrib` selects mx.sym.contrib.ctc_loss instead of mx.sym.ctc_loss.
    """
    in_var = mx.sym.Variable('input')
    labels_var = mx.sym.Variable('labels')
    ctc_op = mx.sym.contrib.ctc_loss if contrib else mx.sym.ctc_loss
    ctc = ctc_op(in_var, labels_var)
    acts_nd = mx.nd.array(acts, ctx=default_context())
    labels_nd = mx.nd.array(labels, ctx=default_context())
    exe = ctc.bind(ctx=default_context(), args=[acts_nd, labels_nd])
    # Loss computed with gradient bookkeeping enabled...
    exe.forward(is_train=True)
    loss_train = exe.outputs[0].copy()
    # ...must match the inference-mode loss exactly.
    exe.forward(is_train=False)
    loss_infer = exe.outputs[0]
    assert_almost_equal(loss_train, loss_infer)
    # Compare against the reference loss, if available.
    if loss_truth is not None:
        assert_almost_equal(loss_train, loss_truth)
    # Finite-difference gradient check on the activations.
    check_numeric_gradient(ctc, [acts, labels], grad_nodes=['input'], rtol=0.05, atol=1e-3)
@with_seed()
def test_ctc_loss():
    """CTC loss must be consistent within a batch and match reference losses
    from Torch's WarpCTC, for float and integer label dtypes, in both the
    main and contrib namespaces."""
    # Case 1: identical sequences in the batch -> identical losses (values
    # cross-checked against Torch WarpCTC).
    acts = np.array([
        [[1.2, 3.4, 1.2, -0.1, -2.34], [1.2, 3.4, 1.2, -0.1, -2.34]],
        [[0.1, 0.2, 0.3, 0.22, 0.123], [0.1, 0.2, 0.3, 0.22, 0.123]],
        [[-15, -14, -13, -12, -11], [-15, -14, -13, -12, -11]]],
        dtype=np.float32)
    labels = np.array([[2, 3, 0], [2, 3, 0]])
    batch_loss = np.array([4.04789, 4.04789], dtype=np.float32)  # from Torch
    for contrib in [False, True]:
        check_ctc_loss(acts, labels, batch_loss, contrib=contrib)
    # Case 2: distinct sequences with float labels.
    acts2 = np.array([
        [[-5, -4, -3, -2, -1], [1.2, 3.4, 1.2, -0.1, -2.34]],
        [[-10, -9, -8, -7, -6], [0.1, 0.2, 0.3, 0.22, 0.123]],
        [[-15, -14, -13, -12, -11], [-15, -14.2, -13.5, -12.2, -11.22]]], dtype=np.float32)
    labels2 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.float32)
    torch_loss = np.array([7.3557, 5.4091], dtype=np.float32)  # from Torch
    for contrib in [False, True]:
        check_ctc_loss(acts2, labels2, torch_loss, contrib=contrib)
    # Case 3: same activations, integer-typed labels must give the same loss.
    labels3 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.int32)
    for contrib in [False, True]:
        check_ctc_loss(acts2, labels3, torch_loss, contrib=contrib)
@with_seed()
def test_ctc_loss_with_large_classes():
    """CTC loss with a large alphabet (6000 classes) against precomputed values."""
    ctx = default_context()
    num_classes = 6000
    seq_len = 8
    batch_size = 2
    # Column i of the flattened (seq * batch) axis is arange(num_classes)
    # rotated by i, scaled down by 13.
    columns = [np.roll(np.arange(num_classes, dtype=np.float32), i).reshape(num_classes, 1) / 13
               for i in range(seq_len * batch_size)]
    data = np.concatenate(columns, axis=1).reshape(seq_len, batch_size, num_classes)
    label = np.array([
        [100, 200, 300, 400, 500, 0, 0, 0],
        [1000, 2000, 3000, 4000, 0, 5000, 0, 0]], dtype=np.int32)
    nd_data = mx.nd.array(data)
    nd_label = mx.nd.array(label)
    loss = mx.nd.ctc_loss(data=nd_data, label=nd_label)
    expected_loss = np.array([688.02826, 145.34462])
    assert_almost_equal(loss, expected_loss)
@with_seed()
def test_ctc_loss_grad():
    """Check CTC loss values and input gradients against reference numbers
    taken from TensorFlow's CTC tests (see `# from tf` below), for both
    blank-label conventions and both the main and contrib ops."""
    def check_ctc_loss_grad(blank_label, contrib=False): # from tf
        # vocab_size excludes the blank symbol; labels are padded to max_label_len.
        vocab_size = 5
        max_label_len = 5
        # Padding value: 0 when blank is 'first', -1 when blank is 'last'.
        padding_mask = -1+ (blank_label=='first')
        # Sample 0: reference probabilities, loss, and gradient.
        targets_0 = [0, 1, 2, 1, 0]
        loss_log_prob_0 = -3.34211
        input_prob_matrix_0 = np.asarray(
            [[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
             [0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
             [0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
             [0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
             [0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
            dtype=np.float32)
        gradient_log_prob_0 = np.asarray(
            [[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
             [0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
             [0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
             [0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
             [-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
            dtype=np.float32)
        # Sample 1: shorter target sequence (length 4, padded to 5).
        targets_1 = [0, 1, 1, 0]
        loss_log_prob_1 = -5.42262
        input_prob_matrix_1 = np.asarray(
            [[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
             [0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
             [0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
             [0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
             [0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
            dtype=np.float32)
        gradient_log_prob_1 = np.asarray(
            [[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
             [0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549],
             [0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544],
             [0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345],
             [-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
            dtype=np.float32)
        # Stack both samples into (seq_len=7, batch=2, vocab+1); the two
        # trailing time steps are padding (NaN inputs / zero gradients).
        inputs = [
            np.vstack(
                [input_prob_matrix_0[t, :], input_prob_matrix_1[t, :]])
            for t in range(5)
        ] + 2 * [np.nan * np.ones((2, vocab_size+1), np.float32)]
        inputs = np.log(np.asarray(inputs, dtype=np.float32))
        grad_truth = np.array([
            np.vstack(
                [gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]])
            for t in range(5)
        ] + 2 * [np.zeros((2, vocab_size+1), np.float32)])
        # The reference data uses blank-last; rotate the class axis for blank-first.
        if blank_label == 'first':
            inputs = np.roll(inputs, 1, axis=2)
            grad_truth = np.roll(grad_truth, 1, axis=2)
        # Pad labels and shift them by one when blank occupies index 0.
        labels = (np.asarray([x + [padding_mask]*(max_label_len-len(x))
                             for x in [targets_0, targets_1]])+(blank_label == 'first'))
        seq_lens = np.array([5, 5], dtype=np.int32)
        label_lens = np.array([5, 4], dtype=np.int32)
        loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)
        with default_context():
            data = mx.nd.array(inputs)
            label = mx.nd.array(labels)
            data.attach_grad()
            with mx.autograd.record():
                if contrib:
                    l = mx.contrib.ndarray.CTCLoss(data, label,
                                                   use_data_lengths=True,
                                                   use_label_lengths=True,
                                                   data_lengths=mx.nd.array(seq_lens),
                                                   label_lengths=mx.nd.array(label_lens),
                                                   blank_label=blank_label)
                else:
                    l = mx.ndarray.CTCLoss(data, label,
                                           use_data_lengths=True,
                                           use_label_lengths=True,
                                           data_lengths=mx.nd.array(seq_lens),
                                           label_lengths=mx.nd.array(label_lens),
                                           blank_label=blank_label)
            l.backward()
            # Compare both the loss values and the input gradients.
            assert_almost_equal(l, loss_truth, atol=1e-5, rtol=1e-5)
            assert_almost_equal(data.grad, grad_truth, atol=1e-5, rtol=1e-5)
    for contrib in [False, True]:
        for label in ['first', 'last']:
            check_ctc_loss_grad(label, contrib=contrib)
@with_seed()
def test_quantization_op():
    """Round-trip int8 quantize/dequantize against hand-computed references."""
    min0 = mx.nd.array([0.0])
    max0 = mx.nd.array([1.0])
    a = mx.nd.array([[0.1392, 0.5928], [0.6027, 0.8579]])
    qa, min1, max1 = mx.nd.contrib.quantize(a, min0, max0, out_type='int8')
    a_ = mx.nd.contrib.dequantize(qa, min1, max1, out_type='float32')
    # Expected int8 codes and dequantized values for the [0, 1] range.
    qa_real = mx.nd.array([[18, 75], [77, 109]])
    a_real = mx.nd.array([[0.14173228, 0.5905512], [0.6062992, 0.8582677]])
    # Quantized codes must match exactly; the round-tripped floats only
    # approximately, because of quantization error.
    # (Leftover debug print statements removed.)
    assert same(qa.asnumpy(), qa_real.asnumpy())
    assert_almost_equal(a_.asnumpy(), a_real.asnumpy(), rtol=1e-2)
@with_seed()
def test_index_copy():
    """contrib.index_copy forward and gradients, with and without a gradient
    attached to the base tensor."""
    base = mx.nd.zeros((5,3))
    updates = mx.nd.array([[1,2,3],[4,5,6],[7,8,9]])
    idx = mx.nd.array([0,4,2], dtype=np.int64)
    expected_out = mx.nd.array([[1,2,3],[0,0,0],[7,8,9],[0,0,0],[4,5,6]])
    expected_base_grad = mx.nd.array([[0,0,0],[1,1,1],[0,0,0],[1,1,1],[0,0,0]])
    expected_upd_grad = mx.nd.array([[1,1,1],[1,1,1],[1,1,1]])

    # First pass: only the update tensor tracks gradients.
    updates.attach_grad()
    with mx.autograd.record():
        result = mx.nd.contrib.index_copy(base, idx, updates)
    result.backward()
    assert same(result.asnumpy(), expected_out.asnumpy())
    assert same(updates.grad.asnumpy(), expected_upd_grad.asnumpy())

    # Second pass: both base and updates track gradients.
    base.attach_grad()
    updates.attach_grad()
    with mx.autograd.record():
        result = mx.nd.contrib.index_copy(base, idx, updates)
    result.backward()
    assert same(result.asnumpy(), expected_out.asnumpy())
    assert same(base.grad.asnumpy(), expected_base_grad.asnumpy())
    assert same(updates.grad.asnumpy(), expected_upd_grad.asnumpy())
@with_seed()
def test_boolean_mask():
    """contrib.boolean_mask forward/backward, including 0-size outputs and
    gradient accumulation with grad_req='add'."""
    data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
    index = mx.nd.array([0, 1, 0])
    data.attach_grad()
    with mx.autograd.record():
        out = mx.nd.contrib.boolean_mask(data, index)
    out.backward()
    data.grad.wait_to_read()
    expected = np.array([[4, 5, 6]])
    expected_grad = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]])
    assert same(out.asnumpy(), expected)
    assert same(data.grad.asnumpy(), expected_grad)
    # test 0-size output. NumPy-shape semantics are required for 0-dim
    # outputs; restore the global flag even when an assertion fails so the
    # failure does not leak into unrelated tests.
    mx.set_np_shape(True)
    try:
        data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
        index = mx.nd.array([0, 0, 0])
        data.attach_grad()
        with mx.autograd.record():
            out = mx.nd.contrib.boolean_mask(data, index)
        out.backward()
        data.grad.wait_to_read()
        expected = np.zeros((0, 3))
        expected_grad = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
        assert same(out.asnumpy(), expected)
        assert same(data.grad.asnumpy(), expected_grad)
    finally:
        mx.set_np_shape(False)
    # test gradient accumulation: grad_req='add' over T backward passes
    shape = (100, 30)
    a = mx.nd.random.randint(0, 100, shape=shape)
    a.attach_grad()
    bi = mx.nd.random.randint(0, 100, shape=shape[0:1]) > 50
    ci = mx.nd.random.randint(0, 100, shape=shape[0:1]) < 50
    mx_grad = mx.nd.zeros_like(a)
    mx.autograd.mark_variables([a], [mx_grad], grad_reqs='add')
    T = 3
    for _ in range(T):
        with mx.autograd.record():
            b = mx.nd.contrib.boolean_mask(a, bi)
            c = mx.nd.contrib.boolean_mask(a, ci)
            su = b.sum() + c.sum()
        su.backward()
    grad = (bi + ci).asnumpy().reshape((-1,) + (1,) * (len(shape)-1))
    grad = np.tile(grad, (1,) + shape[1:])
    # Each of the T backward passes adds one copy of the gradient.
    grad *= T
    assert_allclose(a.grad.asnumpy(), grad)
    a_np = a.asnumpy()
    assert same(b.asnumpy(), a_np[bi.asnumpy().astype('bool')])
    assert same(c.asnumpy(), a_np[ci.asnumpy().astype('bool')])
@with_seed()
def test_div_sqrt_dim():
    """contrib.div_sqrt_dim divides by the square root of the last-axis size."""
    arr = np.random.normal(0, 1, (5, 10, 8))
    var = mx.symbol.Variable('data')
    sym = mx.sym.contrib.div_sqrt_dim(var)
    check_numeric_gradient(sym, [arr], numeric_eps=1E-2)
    check_symbolic_forward(sym, [arr], [arr / np.sqrt(arr.shape[-1])])
@with_seed()
def test_reciprocal_op():
    """Forward and numeric-gradient check of mx.sym.reciprocal."""
    eps = 2 ** (-11)
    values = np.random.rand(3, 4) * 10 - 5
    # Inputs too close to zero make both the op and the finite-difference
    # gradient estimate unstable; replace them with 1.0.  The factor 6 was
    # chosen empirically for this eps (issue exposed by seed 879579887).
    values[np.abs(values) < 6 * eps] = 1.0
    var = mx.symbol.Variable('data')
    sym = mx.sym.reciprocal(var)
    check_numeric_gradient(sym, [values], numeric_eps=eps)
    check_symbolic_forward(sym, [values], [np.reciprocal(values)])
@with_seed()
def test_cbrt_op():
    """Forward and numeric-gradient check of mx.sym.cbrt."""
    eps = 2 ** (-11)
    values = np.random.rand(3, 4) * 10 - 5
    # cbrt has an infinite derivative at the origin, which ruins the
    # finite-difference gradient estimate; replace near-zero inputs with 1.0.
    # The factor 4 was chosen empirically for this eps (seed 553872106).
    values[np.abs(values) < 4 * eps] = 1.0
    var = mx.symbol.Variable('data')
    sym = mx.sym.cbrt(var)
    check_numeric_gradient(sym, [values], numeric_eps=eps)
    check_symbolic_forward(sym, [values], [np.cbrt(values)])
@with_seed()
def test_rcbrt_op():
    """Forward and numeric-gradient check of mx.sym.rcbrt (1 / cbrt)."""
    eps = 2 ** (-11)
    values = np.random.rand(3, 4) * 10 - 5
    # Near-zero inputs cause division-by-zero and unstable finite
    # differences; replace them with 1.0.  The factor 4 was chosen
    # empirically for this eps (issue exposed by seed 788174893).
    values[np.abs(values) < 4 * eps] = 1.0
    var = mx.symbol.Variable('data')
    sym = mx.sym.rcbrt(var)
    check_numeric_gradient(sym, [values], numeric_eps=eps)
    check_symbolic_forward(sym, [values], [1 / np.cbrt(values)])
@with_seed()
def test_custom_op():
    """End-to-end checks of the CustomOp machinery: a squaring op with dense
    and CSR storage (including storage-type inference), an op relying on the
    default storage inference, one declaring a pass-through backward storage
    inference, and an op with no inputs at all."""
    class Sqr(mx.operator.CustomOp):
        # Element-wise square; supports both dense and CSR inputs.
        def forward(self, is_train, req, in_data, out_data, aux):
            if in_data[0].stype == 'default':
                # The aux state is only touched on the dense path; backward
                # asserts it was written.
                aux[0][:] = 1
                self.assign(out_data[0], req[0], in_data[0]*in_data[0])
            else:
                # CSR path: square the stored values, reuse the sparsity pattern.
                inp = in_data[0]
                csr_m = inp.data * inp.data
                out = mx.nd.sparse.csr_matrix((csr_m, inp.indices, inp.indptr), shape=inp.shape)
                self.assign(out_data[0], req[0], out)
            if (in_data[0].stype == 'csr'):
                assert(isinstance(out_data[0], mx.nd.sparse.CSRNDArray))
        def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
            # d(x^2)/dx = 2x, applied element-wise (works for both storages).
            self.assign(in_grad[0], req[0], 2 * mx.nd.sparse.elemwise_mul(in_data[0], out_grad[0]))
            if in_data[0].stype == 'default':
                assert (aux[0].asnumpy() == 1).all()
    @mx.operator.register("sqr")
    class SqrProp(mx.operator.CustomOpProp):
        def __init__(self):
            super(SqrProp, self).__init__(need_top_grad=True)
        def list_arguments(self):
            return ['data']
        def list_outputs(self):
            return ['output']
        def list_auxiliary_states(self):
            return ['aux']
        def infer_shape(self, in_shape):
            # Output and aux have the same shape as the input.
            return in_shape, [in_shape[0]], [in_shape[0]]
        def infer_type(self, in_type):
            return in_type, [in_type[0]], [in_type[0]]
        def infer_storage_type(self, in_stype):
            # Storage type propagates: dense in -> dense out, csr in -> csr out.
            if in_stype[0] == 'default':
                return ['default'], ['default'], ['default']
            return ['csr'], ['csr'], ['csr']
        def infer_storage_type_backward(self, ograd_stype, in_stype,
                                        out_stype, igrad_stype, aux_stype):
            if in_stype[0] == 'default':
                return ['default'], ['default'], ['default'], ['default'], ['default']
            return ['default'], ['csr'], ['csr'], ['csr'], ['csr']
        def create_operator(self, ctx, shapes, dtypes):
            return Sqr()
    # Dense path: symbolic numeric-gradient checks, also through dtype casts.
    data = mx.symbol.Variable('data')
    aux = mx.symbol.Variable('aux')
    op = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
    x = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
    aux = mx.nd.zeros_like(x)
    check_numeric_gradient(op, [x], [aux])
    data = mx.symbol.cast(data, dtype='float64')
    op = mx.symbol.cast(op, dtype='float32')
    check_numeric_gradient(op, [x], [aux])
    # CSR path: same op through the sparse storage inference.
    data = mx.symbol.Variable('data', stype='csr')
    aux = mx.symbol.Variable('aux')
    op2 = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
    x = x.tostype('csr')
    aux = mx.nd.zeros_like(x)
    check_numeric_gradient(op2, [x], [aux], grad_stype_dict={"data": "csr"})
    # CSR path with autograd: compare against the built-in sparse square.
    x2 = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
    x2 = x2.tostype('csr')
    aux2 = mx.nd.zeros_like(x2)
    x2.attach_grad()
    with mx.autograd.record():
        output = mx.nd.Custom(x2, aux2, name='sqr', op_type='sqr')
    output.backward()
    expected_output = mx.nd.sparse.square(x2)
    expected_grad = 2 * x2
    rtol = 1e-4
    atol = 1e-6
    assert_almost_equal(output, expected_output, rtol=rtol, atol=atol)
    assert_almost_equal(x2.grad, expected_grad, rtol=rtol, atol=atol)
    # test for backward compatibility, i.e. the correctness of default implementation of
    # infer storage in custom operator
    class Mult(mx.operator.CustomOp):
        # Element-wise product with analytic gradients.
        def forward(self, is_train, req, in_data, out_data, aux):
            self.assign(out_data[0], req[0], in_data[0]*in_data[1])
        def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
            # NOTE: gradients intentionally omit out_grad; the assertions
            # below check against in_data directly.
            self.assign(in_grad[0], req[0], in_data[1])
            self.assign(in_grad[1], req[1], in_data[0])
    @mx.operator.register("mult")
    class MultProp(mx.operator.CustomOpProp):
        def __init__(self):
            super(MultProp, self).__init__(need_top_grad=True)
        def list_arguments(self):
            return ['lhs', 'rhs']
        def list_outputs(self):
            return ['output']
        def infer_shape(self, in_shape):
            return in_shape, [in_shape[0]], []
        def create_operator(self, ctx, shapes, dtypes):
            return Mult()
    lhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
    rhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
    lhs.attach_grad()
    rhs.attach_grad()
    with mx.autograd.record():
        y = mx.nd.Custom(lhs, rhs, name='mult', op_type='mult')
    y.backward()
    assert_almost_equal(rhs, lhs.grad, rtol=rtol, atol=atol)
    assert_almost_equal(lhs, rhs.grad, rtol=rtol, atol=atol)
    # Same op, but with a pass-through infer_storage_type_backward and
    # need_top_grad=False.
    class MultNoGrad(mx.operator.CustomOp):
        def forward(self, is_train, req, in_data, out_data, aux):
            self.assign(out_data[0], req[0], in_data[0]*in_data[1])
        def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
            self.assign(in_grad[0], req[0], in_data[1])
            self.assign(in_grad[1], req[1], in_data[0])
    @mx.operator.register("mult_no_grad")
    class MultNoGradProp(mx.operator.CustomOpProp):
        def __init__(self):
            super(MultNoGradProp, self).__init__(need_top_grad=False)
        def list_arguments(self):
            return ['lhs', 'rhs']
        def list_outputs(self):
            return ['output']
        def infer_shape(self, in_shape):
            return in_shape, [in_shape[0]], []
        def create_operator(self, ctx, shapes, dtypes):
            return MultNoGrad()
        def infer_storage_type_backward(self, ograd_stype, in_stype, out_stype, igrad_stype, aux_stype):
            # Pass-through: keep whatever storage types were proposed.
            return ograd_stype, in_stype, out_stype, igrad_stype, aux_stype
    with mx.autograd.record():
        y2 = mx.nd.Custom(lhs, rhs, name="mult_no_grad", op_type="mult_no_grad")
    y2.backward()
    assert_almost_equal(rhs, lhs.grad, rtol=rtol, atol=atol)
    assert_almost_equal(lhs, rhs.grad, rtol=rtol, atol=atol)
    # An operator with no inputs: shape/type come from constructor kwargs.
    class NoInputOp(mx.operator.CustomOp):
        def __init__(self, length, depth):
            super(NoInputOp, self).__init__()
            self.output = np.ones(shape=(length, depth), dtype=np.float32)
        def forward(self, is_train, req, in_data, out_data, aux):
            self.assign(out_data[0], req[0], self.output)
        def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
            pass
    @mx.operator.register("no_input_op")
    class NoInputOpProp(mx.operator.CustomOpProp):
        def __init__(self, length, depth):
            super(NoInputOpProp, self).__init__()
            # Custom-op kwargs arrive as strings; convert explicitly.
            self.length = int(length)
            self.depth = int(depth)
        def list_arguments(self):
            return []
        def list_outputs(self):
            return ['output']
        def infer_shape(self, in_shape):
            return [], [(self.length, self.depth)], []
        def infer_type(self, in_type):
            return [], [np.float32], []
        def create_operator(self, ctx, shapes, dtypes):
            return NoInputOp(length=self.length, depth=self.depth)
    with mx.autograd.record():
        x = mx.nd.Custom(length=10, depth=10, op_type="no_input_op")
    assert_almost_equal(x, np.ones(shape=(10, 10), dtype=np.float32))
@with_seed()
def test_custom_op_fork():
    # test custom operator fork
    # see https://github.com/apache/incubator-mxnet/issues/14396
    class AdditionOP(mx.operator.CustomOp):
        """Element-wise addition as a custom operator."""
        def __init__(self):
            super(AdditionOP, self).__init__()
        def forward(self, is_train, req, in_data, out_data, aux):
            out_data[0][:] = in_data[0] + in_data[1]
        def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
            # d(a+b)/da = d(a+b)/db = 1, so both grads equal the output grad.
            in_grad[0][:] = out_grad[0]
            in_grad[1][:] = out_grad[0]
    @mx.operator.register("AdditionOP")
    class AdditionOPProp(mx.operator.CustomOpProp):
        def __init__(self):
            super(AdditionOPProp, self).__init__()
        def list_arguments(self):
            return ['a', 'b']
        def list_outputs(self):
            return ['output']
        def infer_shape(self, in_shape):
            return in_shape, [in_shape[0]]
        def create_operator(self, ctx, shapes, dtypes):
            return AdditionOP()
    if not sys.platform.startswith('win'):  # no fork in windows
        def custom_add():
            a = mx.nd.array([1, 2, 3])
            b = mx.nd.array([4, 5, 6])
            c = mx.nd.Custom(a, b, op_type='AdditionOP')
            assert_almost_equal((a + b).asnumpy(), c.asnumpy())
        # Run once in this process, then again in a forked child; the fork
        # must not deadlock or crash (regression test for issue 14396).
        custom_add()
        from multiprocessing import Process
        p = Process(target=custom_add)
        p.daemon = True
        p.start()
        p.join(5)
        # The child must have finished within the timeout and exited cleanly.
        assert not p.is_alive() and p.exitcode == 0
def _build_dot_custom(fun_forward, name):
    """Register a custom dot-product-shaped operator under `name` whose
    forward pass delegates to ``fun_forward(in_data, out_data)`` and whose
    backward pass is a no-op.

    Used by test_custom_op_exc to inject failures at different stages of
    operator execution.
    """
    class Dot(mx.operator.CustomOp):
        def __init__(self):
            super(Dot, self).__init__()
        def forward(self, is_train, req, in_data, out_data, aux):
            # Entire forward behavior is supplied by the caller.
            fun_forward(in_data, out_data)
        def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
            pass
    @mx.operator.register(name)
    class DotProp(mx.operator.CustomOpProp):
        def __init__(self):
            super(DotProp, self).__init__()
        def list_arguments(self):
            return ['a', 'b']
        def list_outputs(self):
            return ['output']
        def infer_shape(self, in_shape):
            # Matrix-product output shape: (rows of a, cols of b).
            return in_shape, [(in_shape[0][0], in_shape[1][1])]
        def create_operator(self, ctx, shapes, dtypes):
            return Dot()
@with_seed()
def test_custom_op_exc():
    """Errors raised inside a custom operator — whether in the Python body,
    while pushing to the engine, or during asynchronous execution — must
    surface as MXNetError on wait_to_read()."""
    # test except handling
    # see https://github.com/apache/incubator-mxnet/pull/14693
    # 1. error in python code
    def custom_exc1():
        def f(in_data, out_data):
            assert False
            out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
        _build_dot_custom(f, 'Dot1')
        a = mx.nd.zeros((4, 1))
        b = mx.nd.zeros((1, 4))
        c = mx.nd.Custom(a, b, op_type='Dot1')
        c.wait_to_read()
    assert_raises(MXNetError, custom_exc1)
    # 2. error in pushing operator to engine
    def custom_exc2():
        def f(in_data, out_data):
            out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
        _build_dot_custom(f, 'Dot2')
        a = mx.nd.zeros((4, 2))
        b = mx.nd.zeros((1, 4))
        # trigger error by invalid input shapes of operands
        c = mx.nd.Custom(a, b, op_type='Dot2')
        c.wait_to_read()
    assert_raises(MXNetError, custom_exc2)
    # 3. error in real execution
    if default_context().device_type == 'cpu':
        # Variant with an explicit wait inside the forward function.
        def custom_exc3():
            def f(in_data, out_data):
                dot = mx.nd.dot(in_data[0], in_data[1])
                # input to Cholesky factorization should be
                # symmetric positive-definite, error will be
                # triggered in op execution on cpu
                out_data[0][:] = mx.nd.linalg.potrf(dot)
                out_data[0].wait_to_read()
            _build_dot_custom(f, 'Dot3')
            a = mx.nd.zeros((2, 1))
            b = mx.nd.zeros((1, 2))
            c = mx.nd.Custom(a, b, op_type='Dot3')
            c.wait_to_read()
        assert_raises(MXNetError, custom_exc3)
        # Variant without the inner wait: the failure is only observed
        # asynchronously by the caller's wait_to_read().
        def custom_exc4():
            def f(in_data, out_data):
                dot = mx.nd.dot(in_data[0], in_data[1])
                # input to Cholesky factorization should be
                # symmetric positive-definite, error will be
                # triggered in op execution on cpu
                out_data[0][:] = mx.nd.linalg.potrf(dot)
            _build_dot_custom(f, 'Dot4')
            a = mx.nd.zeros((2, 1))
            b = mx.nd.zeros((1, 2))
            c = mx.nd.Custom(a, b, op_type='Dot4')
            c.wait_to_read()
        assert_raises(MXNetError, custom_exc4)
@with_seed()
def test_psroipooling():
    """Numeric gradient check of contrib.PSROIPooling over a grid of class
    counts, group sizes, and image sizes."""
    for num_rois in [1, 2]:
        for num_classes, num_group in itertools.product([2, 3], [2, 3]):
            for image_height, image_width in itertools.product([168, 224], [168, 224]):
                for grad_nodes in [['im_data']]:
                    spatial_scale = 0.0625
                    # np.int was removed in NumPy >= 1.24; the builtin int
                    # truncates identically here.
                    feat_height = int(image_height * spatial_scale)
                    feat_width = int(image_width * spatial_scale)
                    im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
                    rois_data = np.zeros([num_rois, 5])
                    # Columns 1..4 are (x0, y0, x1, y1); sort so x0 <= x1, y0 <= y1.
                    rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1))
                    rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1))
                    im_data_var = mx.symbol.Variable(name="im_data")
                    rois_data_var = mx.symbol.Variable(name="rois_data")
                    op = mx.sym.contrib.PSROIPooling(data=im_data_var, rois=rois_data_var, spatial_scale=spatial_scale,
                                                     group_size=num_group, pooled_size=num_group,
                                                     output_dim=num_classes, name='test_op')
                    rtol, atol = 1e-2, 1e-3
                    check_numeric_gradient(op, [im_data, rois_data], rtol=rtol, atol=atol,
                                           grad_nodes=grad_nodes)
@with_seed()
def test_psroipooling_with_type():
    """PSROIPooling consistency across float64/float32/float16 input types."""
    arg_params = {
        'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
    # plain psroipooling
    sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
    # One context entry per dtype, otherwise identical.
    ctx_list = [{'ctx': mx.cpu(0),
                 'psroipool_data': (1, 18, 14, 14),
                 'psroipool_rois': (2, 5),
                 'type_dict': {'psroipool_data': dtype, 'psroipool_rois': dtype}}
                for dtype in (np.float64, np.float32, np.float16)]
    # Gradients flow into the data input only; ROIs receive none.
    check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
                                               'psroipool_rois': 'null'}, arg_params=arg_params)
@with_seed()
def test_deformable_convolution():
    """Numeric gradient check of contrib.DeformableConvolution (gpu only)."""
    configs = itertools.product(
        [1, 2],                                 # batch size
        itertools.product([4, 8], [1, 2]),      # channels x deformable groups
        itertools.product([5, 6], [5, 6]),      # input height x width
        [(1, 1), (2, 2)],                       # dilation
        [['im_data'], ['offset_data'], ['weight']])
    for batch, (channels, deform_groups), (in_h, in_w), dilate, grad_nodes in configs:
        # Padding equals dilation, so the output keeps the input size.
        out_h, out_w = in_h, in_w
        im_data = np.random.rand(batch, channels, in_h, in_w)
        # Offsets scaled into (0.1, 0.9) so samples stay inside the image.
        offset_data = \
            np.random.rand(batch, deform_groups * 3 * 3 * 2, out_h, out_w)\
            * 0.8 + 0.1
        weight = np.random.normal(0, 0.001, (channels, channels, 3, 3))
        bias = np.zeros(channels)
        im_var = mx.symbol.Variable(name="im_data")
        offset_var = mx.symbol.Variable(name="offset_data")
        weight_var = mx.symbol.Variable(name="weight")
        bias_var = mx.symbol.Variable(name="bias")
        op = mx.sym.contrib.DeformableConvolution(name='test_op', data=im_var,
                                                  offset=offset_var,
                                                  weight=weight_var, bias=bias_var,
                                                  num_filter=channels, pad=dilate,
                                                  kernel=(3, 3), stride=(1, 1), dilate=dilate,
                                                  num_deformable_group=deform_groups)
        if grad_nodes[0] == 'offset_data':
            # wider tolerance needed for coordinate differential
            rtol, atol = 1.0, 1e-2
        else:
            rtol, atol = 0.05, 1e-3
        # By now we only have gpu implementation
        if default_context().device_type == 'gpu':
            check_numeric_gradient(op, [im_data, offset_data, weight, bias], rtol=rtol, atol=atol,
                                   grad_nodes=grad_nodes, ctx=mx.gpu(0))
def _validate_sample_location(input_rois, input_offset, spatial_scale, pooled_w, pooled_h, sample_per_part, part_size, output_dim, num_classes, trans_std, feat_h, feat_w):
    """Re-draw deformable-PSROIPooling offsets until every bilinear sampling
    location is safely away from integer grid lines (where the op is
    non-differentiable), so numeric gradient checks stay stable.

    Returns a copy of `input_offset` with any problematic entries replaced
    by freshly drawn random offsets.
    """
    num_rois = input_rois.shape[0]
    output_offset = input_offset.copy()
    # simulate deformable psroipooling forward function
    for roi_idx in range(num_rois):
        sub_rois = input_rois[roi_idx, :].astype(np.float32)
        img_idx, x0, y0, x1, y1 = int(sub_rois[0]), sub_rois[1], sub_rois[2], sub_rois[3], sub_rois[4]
        # ROI corners mapped into feature-map coordinates (the -0.5 centers
        # the samples on pixel centers).
        roi_start_w = round(x0) * spatial_scale - 0.5
        roi_start_h = round(y0) * spatial_scale - 0.5
        roi_end_w = round(x1 + 1) * spatial_scale - 0.5
        roi_end_h = round(y1 + 1) * spatial_scale - 0.5
        roi_w, roi_h = roi_end_w - roi_start_w, roi_end_h - roi_start_h
        bin_size_w, bin_size_h = roi_w / pooled_w, roi_h / pooled_h
        sub_bin_size_w, sub_bin_size_h = bin_size_w / sample_per_part, bin_size_h / sample_per_part
        for c_top in range(output_dim):
            # Offsets are shared per class (2 channels: x and y).
            channel_each_cls = output_dim / num_classes
            class_id = int(c_top / channel_each_cls)
            for ph in range(pooled_h):
                for pw in range(pooled_w):
                    part_h = int(math.floor(float(ph) / pooled_h * part_size))
                    part_w = int(math.floor(float(pw) / pooled_w * part_size))
                    trans_x = input_offset[roi_idx, class_id * 2, part_h, part_w] * trans_std
                    trans_y = input_offset[roi_idx, class_id * 2 + 1, part_h, part_w] * trans_std
                    bin_h_start, bin_w_start = ph * bin_size_h + roi_start_h, pw * bin_size_w + roi_start_w
                    # Re-draw the offset until all sub-samples of this bin are
                    # differentiable sampling locations.
                    need_check = True
                    while need_check:
                        pass_check = True
                        for ih in range(sample_per_part):
                            for iw in range(sample_per_part):
                                h = bin_h_start + trans_y * roi_h + ih * sub_bin_size_h
                                w = bin_w_start + trans_x * roi_w + iw * sub_bin_size_w
                                # Samples outside the feature map are ignored by the op.
                                if w < -0.5 or w > feat_w - 0.5 or h < -0.5 or h > feat_h - 0.5:
                                    continue
                                w = min(max(w, 0.1), feat_w - 1.1)
                                h = min(max(h, 0.1), feat_h - 1.1)
                                # if the following condiiton holds, the sampling location is not differentiable
                                # therefore we need to re-do the sampling process
                                if h - math.floor(h) < 1e-3 or math.ceil(h) - h < 1e-3 or w - math.floor(w) < 1e-3 or math.ceil(w) - w < 1e-3:
                                    trans_x, trans_y = random.random() * trans_std, random.random() * trans_std
                                    pass_check = False
                                    break
                            if not pass_check:
                                break
                        if pass_check:
                            # Store the accepted offset back (undo the trans_std scaling).
                            output_offset[roi_idx, class_id * 2 + 1, part_h, part_w] = trans_y / trans_std
                            output_offset[roi_idx, class_id * 2, part_h, part_w] = trans_x / trans_std
                            need_check = False
    return output_offset
@unittest.skip("Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/11713")
@with_seed()
def test_deformable_psroipooling():
    """Numeric gradient check of contrib.DeformablePSROIPooling (gpu only)."""
    sample_per_part = 4
    trans_std = 0.1
    for num_rois in [1, 2]:
        for num_classes, num_group in itertools.product([2, 3], [2, 3]):
            for image_height, image_width in itertools.product([160, 224], [160, 224]):
                for grad_nodes in [['im_data'], ['offset_data']]:
                    spatial_scale = 0.0625
                    stride = int(1 / spatial_scale)
                    # np.int was removed in NumPy >= 1.24; builtin int truncates identically.
                    feat_height = int(image_height * spatial_scale)
                    feat_width = int(image_width * spatial_scale)
                    im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
                    rois_data = np.zeros([num_rois, 5])
                    # Keep ROIs at least one stride away from the image border.
                    rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1 - 2 * stride)) + stride
                    rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1 - 2 * stride)) + stride
                    offset_data = np.random.rand(num_rois, 2*num_classes, num_group, num_group)
                    # at certain points, the bilinear interpolation function may be non-differentiable
                    # to avoid this, we check whether the input locates on the valid points
                    offset_data = _validate_sample_location(rois_data, offset_data, spatial_scale, num_group, num_group,
                                                            sample_per_part, num_group, num_classes, num_classes, trans_std, feat_height, feat_width)
                    im_data_var = mx.symbol.Variable(name="im_data")
                    rois_data_var = mx.symbol.Variable(name="rois_data")
                    offset_data_var = mx.symbol.Variable(name="offset_data")
                    # Pass sample_per_part/trans_std variables (previously the
                    # literals 4 and 0.1 were duplicated here) so the op stays
                    # consistent with _validate_sample_location.
                    op = mx.sym.contrib.DeformablePSROIPooling(data=im_data_var, rois=rois_data_var,
                                                               trans=offset_data_var, spatial_scale=spatial_scale,
                                                               sample_per_part=sample_per_part, group_size=num_group,
                                                               pooled_size=num_group, output_dim=num_classes,
                                                               trans_std=trans_std, no_trans=False, name='test_op')
                    rtol, atol = 1e-2, 1e-3
                    # By now we only have gpu implementation
                    if default_context().device_type == 'gpu':
                        check_numeric_gradient(op, [im_data, rois_data, offset_data], rtol=rtol, atol=atol,
                                               grad_nodes=grad_nodes, ctx=mx.gpu(0))
def _gemm_test_helper(dtype, grad_check, rtol_fw = 1e-7, atol_fw = 1e-9):
    """Exercise linalg.gemm and linalg.gemm2 for every transpose-flag
    combination, batched inputs, and non-default `axis` values.

    Parameters
    ----------
    dtype : numpy dtype used for all test matrices.
    grad_check : when 1, numeric gradient checks are run as well.
    rtol_fw, atol_fw : tolerances for the forward checks.
    """
    num_eps = 1e-6
    rtol_bw = 1e-5
    atol_bw = 1e-6
    data1 = mx.symbol.Variable('data1')
    data2 = mx.symbol.Variable('data2')
    data3 = mx.symbol.Variable('data3')

    def check_fw(sym, location, expected):
        # Forward output must match the numpy reference.
        check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
                               atol=atol_fw, dtype=dtype)

    def check_grad(sym, location):
        check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
                               atol=atol_bw, dtype=dtype)

    def rep_3x(a, m, n):
        # Tile one (m, n) matrix into a (3, 1, m, n) batch.
        return np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))

    m1 = np.random.uniform(1, 10, (2, 3)).astype(dtype)
    m2 = np.random.uniform(1, 10, (3, 2)).astype(dtype)
    m3 = np.random.uniform(1, 10, (3, 3)).astype(dtype)
    m4 = np.random.uniform(1, 10, (2, 2)).astype(dtype)
    m1_t = np.transpose(m1)
    m2_t = np.transpose(m2)

    # gemm: alpha * op(A) . op(B) + beta * C, all transpose combinations.
    expected = 4. * np.dot(m1, m2) + 7. * m4
    sym_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
    check_fw(sym_gemm, [m1, m2, m4], [expected])
    if grad_check == 1:
        check_grad(sym_gemm, [m1, m2, m4])
    expected = 4. * np.dot(m1_t, m2_t) + 7. * m3
    sym_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
                                  transpose_a=True, transpose_b=True)
    check_fw(sym_gemm, [m1, m2, m3], [expected])
    if grad_check == 1:
        check_grad(sym_gemm, [m1, m2, m3])
    expected = 4. * np.dot(m1_t, m1) + 7. * m3
    sym_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
                                  transpose_a=True)
    check_fw(sym_gemm, [m1, m1, m3], [expected])
    if grad_check == 1:
        check_grad(sym_gemm, [m1, m1, m3])
    expected = 4. * np.dot(m1, m1_t) + 7. * m4
    sym_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
                                  transpose_b=True)
    check_fw(sym_gemm, [m1, m1, m4], [expected])
    if grad_check == 1:
        check_grad(sym_gemm, [m1, m1, m4])

    # Batched gemm over a (3, 1, ., .) batch.
    a = rep_3x(m1, 2, 3)
    b = rep_3x(m2, 3, 2)
    c = rep_3x(m4, 2, 2)
    r = rep_3x(4. * np.dot(m1, m2) + 7. * m4, 2, 2)
    sym_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
    check_fw(sym_gemm, [a, b, c], [r])
    if grad_check == 1:
        check_grad(sym_gemm, [a, b, c])
    # Matrix rows described by a different axis.
    a2, b2, c2, r2 = (np.copy(np.swapaxes(x, 0, 2)) for x in (a, b, c, r))
    sym_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis=0)
    check_fw(sym_gemm, [a2, b2, c2], [r2])
    if grad_check == 1:
        check_grad(sym_gemm, [a2, b2, c2])
    a2, b2, c2, r2 = (np.copy(np.swapaxes(x, 1, 2)) for x in (a, b, c, r))
    sym_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis=-3)
    check_fw(sym_gemm, [a2, b2, c2], [r2])
    if grad_check == 1:
        check_grad(sym_gemm, [a2, b2, c2])

    # gemm2: alpha * op(A) . op(B), same pattern of checks.
    expected = 4. * np.dot(m1, m2)
    sym_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
    check_fw(sym_gemm, [m1, m2], [expected])
    if grad_check == 1:
        check_grad(sym_gemm, [m1, m2])
    expected = 4. * np.dot(m1_t, m2_t)
    sym_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True,
                                   transpose_b=True)
    check_fw(sym_gemm, [m1, m2], [expected])
    if grad_check == 1:
        check_grad(sym_gemm, [m1, m2])
    expected = 4. * np.dot(m1_t, m1)
    sym_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True)
    check_fw(sym_gemm, [m1, m1], [expected])
    if grad_check == 1:
        check_grad(sym_gemm, [m1, m1])
    expected = 4. * np.dot(m1, m1_t)
    sym_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_b=True)
    check_fw(sym_gemm, [m1, m1], [expected])
    if grad_check == 1:
        check_grad(sym_gemm, [m1, m1])

    # Batched gemm2 plus the axis variants.
    a = rep_3x(m1, 2, 3)
    b = rep_3x(m2, 3, 2)
    r = rep_3x(4. * np.dot(m1, m2), 2, 2)
    sym_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
    check_fw(sym_gemm, [a, b], [r])
    if grad_check == 1:
        check_grad(sym_gemm, [a, b])
    a2, b2, r2 = (np.copy(np.swapaxes(x, 0, 2)) for x in (a, b, r))
    sym_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis=0)
    check_fw(sym_gemm, [a2, b2], [r2])
    if grad_check == 1:
        check_grad(sym_gemm, [a2, b2])
    a2, b2, r2 = (np.copy(np.swapaxes(x, 1, 2)) for x in (a, b, r))
    sym_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis=-3)
    check_fw(sym_gemm, [a2, b2], [r2])
    if grad_check == 1:
        check_grad(sym_gemm, [a2, b2])
# Test gemm separately from the other linear-algebra operators.
@with_seed()
def test_gemm():
    """Run the gemm helper in float64 with gradient checks, then float32
    forward-only with TensorCore conversion disabled (and, on GPU, once
    with it enabled)."""
    flag = "MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"
    _gemm_test_helper(np.float64, True)
    os.environ[flag] = "0"
    _gemm_test_helper(np.float32, False, rtol_fw=1e-5, atol_fw=1e-7)
    if default_context().device_type == 'gpu':
        os.environ[flag] = "1"
        _gemm_test_helper(np.float32, False, rtol_fw=2e-5, atol_fw=2e-7)
        os.environ[flag] = "0"
# Helper functions for test_laop
def _make_symm_symbol(a, ndims):
    """Return the symbol 0.5 * (A + A^T), symmetrizing the trailing two axes
    of the `ndims`-dimensional symbol `a`."""
    assert ndims >= 2
    # Permutation that swaps the last two axes and leaves the rest alone.
    axes = list(range(ndims))
    axes[-2], axes[-1] = axes[-1], axes[-2]
    return 0.5 * (a + mx.sym.transpose(a, axes=tuple(axes)))
def _make_triangle_symm(a, ndims, m, lower, dtype=np.float32):
    """Zero out one triangle of the trailing m x m matrices of symbol `a`,
    keeping the lower triangle + diagonal when `lower` is True, otherwise
    the upper triangle + diagonal. The mask is built symbolically so it can
    be part of the computation graph."""
    assert ndims >= 2
    # The last two dimensions must both be m
    # Create mask for lower triangle and diagonal
    index = mx.sym.arange(start=0, stop=m, step=1, dtype=np.int32)
    lt_mask = mx.sym.one_hot(index, depth=m, dtype=dtype)
    for j in range(1, m):
        # Add sub-diagonal j: j zero rows stacked on top of the first m-j
        # unit rows, accumulating a full lower-triangular mask of ones.
        part1 = mx.sym.zeros(shape=(j, m), dtype=dtype)
        index = mx.sym.arange(start=0, stop=m-j, step=1, dtype=np.int32)
        part2 = mx.sym.one_hot(index, depth=m, dtype=dtype)
        lt_mask = lt_mask + mx.sym.concat(*[part1, part2], dim=0)
    if not lower:
        # Upper triangle requested: transpose the lower-triangular mask.
        lt_mask = mx.sym.reshape(lt_mask, shape=(m, m))
        lt_mask = mx.sym.transpose(lt_mask, axes=(1, 0))
    # Reshape so the mask broadcasts over all leading (batch) dimensions.
    shp = tuple([1]*(ndims-2) + [m, m])
    lt_mask = mx.sym.reshape(lt_mask, shape=shp)
    return mx.sym.broadcast_mul(a, lt_mask)
# @ankkhedia: Getting rid of fixed seed as flakiness could not be reproduced
# tracked at https://github.com/apache/incubator-mxnet/issues/11718
@with_seed()
def test_laop():
    """Forward and numeric-gradient tests for linalg.potrf, potri, trsm,
    trmm and sumlogdiag: first on trivial 1x1 matrices (batched as
    (4,4,1,1)), then on a fixed 4x4 Cholesky example where
    matrix = trian * trian^T, for both lower=True and lower=False."""
    dtype = np.float64
    rtol_fw = 1e-7
    atol_fw = 1e-9
    num_eps = 2e-6
    rtol_bw = 1e-5
    atol_bw = 1e-5
    # enable numerical checking of gradients
    grad_check = 1
    data1 = mx.symbol.Variable('data1')
    data2 = mx.symbol.Variable('data2')
    # Replicate an m x n matrix 3 times into a (3, 1, m, n) batch.
    rep_3x = lambda a, m, n :\
        np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
    def check_fw_grad(sym, location, expected):
        # Forward comparison plus (optionally) finite-difference gradient check.
        check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
                               atol=atol_fw, dtype=dtype)
        if grad_check == 1:
            check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
                                   atol=atol_bw, dtype=dtype)
    # SPD matrix with known Cholesky factor `trian` (matrix = trian trian^T).
    matrix = np.array([[9., 3., -6., 12.],
                       [3., 26., -7., -11.],
                       [-6., -7., 9., 7.],
                       [12., -11., 7., 65.]])
    trian = np.array([[3., 0., 0., 0.],
                      [1., 5., 0., 0.],
                      [-2., -1., 2., 0.],
                      [4., -3., 6., 2.]])
    # NOTE(review): `pow` shadows the builtin; harmless in this local scope.
    pow = np.array([[2., 1., 1., 1.],
                    [1., 4., 1., 1.],
                    [1., 1., 8., 1.],
                    [1., 1., 1., 16.]])
    # Hand-computed inverse of `matrix` (used to validate potri).
    inv = np.array([[8.95/3., 0.05/3., 2.65, -2.5/3.],
                    [0.05/3., 0.05, 0.05, 0.],
                    [2.65, 0.05, 2.5, -0.75],
                    [-2.5/3., 0., -0.75, 0.25]])
    ident = np.eye(4)
    shape = (4, 4, 1, 1)
    ones = mx.nd.ones(shape).asnumpy()
    for lower in [True, False]:
        upper = not lower
        # Tests with trivial 1x1 matrices.
        data_in = np.random.uniform(1, 10, shape)
        # test potrf
        # Note: Have to symmetrize input, for gradient test to work
        res_potrf = np.sqrt(data_in)
        test_potrf = mx.sym.linalg.potrf(data1, lower=lower)
        check_fw_grad(test_potrf, [data_in], [res_potrf])
        # test potri
        res_potri = np.divide(ones, data_in * data_in)
        test_potri = mx.sym.linalg.potri(data1, lower=lower)
        check_fw_grad(test_potri, [data_in], [res_potri])
        # test trsm
        trian_in = data_in * 7.
        test_trsm = mx.sym.linalg.trsm(data1, data2, alpha=7., lower=lower)
        check_fw_grad(test_trsm, [trian_in, data_in], [ones])
        # test trmm
        trian_in = np.divide(ones, trian_in)
        test_trmm = mx.sym.linalg.trmm(data1, data2, alpha=7., transpose=True,
                                       rightside=True, lower=lower)
        check_fw_grad(test_trmm, [trian_in, data_in], [ones])
        # test sumlogdiag
        res_sumlogdiag = np.reshape(np.log(data_in), (4, 4))
        test_sumlogdiag = mx.sym.linalg.sumlogdiag(data1)
        check_fw_grad(test_sumlogdiag, [data_in], [res_sumlogdiag])
        # more elaborate example of Cholesky factorization
        # For upper=True the expected factor is the transpose of `trian`.
        low_trian = trian
        if upper:
            trian = np.transpose(trian)
        # test potrf
        test_potrf = mx.sym.linalg.potrf(_make_symm_symbol(data1, ndims=4), lower=lower)
        a = rep_3x(matrix, 4, 4)
        r = rep_3x(trian, 4, 4)
        check_fw_grad(test_potrf, [a], [r])
        #test potri
        data1_ltri = _make_triangle_symm(
            data1, ndims=4, m=4, lower=lower, dtype=dtype)
        test_potri = mx.sym.linalg.potri(data1_ltri, lower=lower)
        a = rep_3x(trian, 4, 4)
        r = rep_3x(inv, 4, 4)
        check_fw_grad(test_potri, [a], [r])
        # test trsm
        test_trsm = mx.sym.linalg.trsm(data1_ltri, data2, alpha=7., transpose=upper, lower=lower)
        b = rep_3x(matrix, 4, 4)
        r = rep_3x(7. * np.transpose(low_trian), 4, 4)
        check_fw_grad(test_trsm, [a, b], [r])
        test_trsm2 = mx.sym.linalg.trsm(
            data1_ltri, data2, alpha=-2., rightside=True, transpose=lower, lower=lower)
        r = rep_3x(-2. * low_trian, 4, 4)
        check_fw_grad(test_trsm2, [a, b], [r])
        test_trsm3 = mx.sym.linalg.trsm(
            data1_ltri, data2, alpha=0.5, transpose=lower, lower=lower)
        b = rep_3x(np.transpose(low_trian), 4, 4)
        r = rep_3x(0.5 * ident, 4, 4)
        check_fw_grad(test_trsm3, [a, b], [r])
        test_trsm4 = mx.sym.linalg.trsm(
            data1_ltri, data2, alpha=-0.5, rightside=True, transpose=upper, lower=lower)
        b = rep_3x(low_trian, 4, 4)
        r = rep_3x(-0.5 * ident, 4, 4)
        check_fw_grad(test_trsm4, [a, b], [r])
        # test trmm
        test_trmm = mx.sym.linalg.trmm(
            data1_ltri, data2, alpha=7., transpose=True, rightside=True, lower=lower)
        a = [a, rep_3x(matrix, 4, 4)]
        r = rep_3x(7. * np.dot(matrix, trian.T), 4, 4)
        check_fw_grad(test_trmm, a, [r])
        test_trmm2 = mx.sym.linalg.trmm(data1_ltri, data2, alpha=-2., lower=lower)
        r = rep_3x(-2. * np.dot(trian, matrix), 4, 4)
        check_fw_grad(test_trmm2, a, [r])
        test_trmm3 = mx.sym.linalg.trmm(data1_ltri, data2, rightside=True, lower=lower)
        r = rep_3x(np.dot(matrix, trian), 4, 4)
        check_fw_grad(test_trmm3, a, [r])
        test_trmm4 = mx.sym.linalg.trmm(
            data1_ltri, data2, alpha=1.2, transpose=True, lower=lower)
        r = rep_3x(1.2 * np.dot(trian.T, matrix), 4, 4)
        check_fw_grad(test_trmm4, a, [r])
        # test sumlogdiag
        r = np.reshape(np.tile(10. * np.log(np.array([2.])), 3), (3,))
        check_fw_grad(test_sumlogdiag, [rep_3x(pow, 4, 4)], [r])
# Tests for operators linalg.syrk, linalg.gelqf
def _gelqf_combined_symbol(a):
    """Symbol computing (Q Q^T, L Q) from the LQ factorization of `a`;
    both outputs should reconstruct identity and `a` respectively."""
    lq_q, lq_l = mx.sym.linalg.gelqf(a)
    qqt = mx.sym.linalg.syrk(lq_q, transpose=False, alpha=1., name='Q_times_Qt')
    lxq = mx.sym.linalg.trmm(lq_l, lq_q, alpha=1., name='L_times_Q')
    return mx.sym.Group([qqt, lxq])
# NOTE: If we leave the unused output dangling, things break if dtype=np.float64. Namely, the
# backward gradient for the unused output is of dtype np.float32 then.
# ==> Very annoying!
def _gelqf_first_output(a):
    """Return Q from gelqf, with L tied into the graph (at zero weight) so
    its gradient stays well-typed instead of dangling."""
    q_out, l_out = mx.sym.linalg.gelqf(a)
    # Zero-valued scalar built from L keeps the second output connected.
    anchor = mx.sym.sum(mx.sym.BlockGrad(l_out), axis=(), keepdims=True) * 0.0
    return mx.sym.broadcast_add(q_out, anchor)
def _gelqf_second_output(a):
    """Return L from gelqf, with Q tied into the graph (at zero weight) so
    its gradient stays well-typed instead of dangling."""
    q_out, l_out = mx.sym.linalg.gelqf(a)
    # Zero-valued scalar built from Q keeps the first output connected.
    anchor = mx.sym.sum(mx.sym.BlockGrad(q_out), axis=(), keepdims=True) * 0.0
    return mx.sym.broadcast_add(l_out, anchor)
def _syevd_combined_symbol(a):
    """Symbol computing (U U^T, U^T diag(lam) U) from the syevd of `a`;
    the outputs should reconstruct identity and `a` respectively."""
    evec, evals = mx.sym.linalg.syevd(a)
    uut = mx.sym.linalg.syrk(evec, transpose=False, alpha=1., name='U_times_Ut')
    # Scale each eigenvector row by its eigenvalue, then recombine.
    scaled = mx.sym.broadcast_mul(mx.sym.reshape(evals, shape=(-2, 1)), evec)
    recon = mx.sym.linalg.gemm2(evec, scaled, alpha=1., transpose_a=True,
                                transpose_b=False, name='Ut_L_U')
    return mx.sym.Group([uut, recon])
@with_seed()
def test_laop_2():
    """Forward and numeric-gradient tests for linalg.syrk (several m, n,
    alpha combinations, single and batched) and linalg.gelqf (several
    m <= n shapes; CPU only, since the GPU path needs cuda8)."""
    dtype = np.float64
    rtol_fw = 1e-7
    atol_fw = 1e-9
    num_eps = 1e-6
    rtol_bw = 1e-5
    atol_bw = 1e-6
    # enable numerical checking of gradients
    grad_check = 1
    data1 = mx.symbol.Variable('data1')
    check_fw = lambda sym, location, expected :\
        check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
                               atol=atol_fw, dtype=dtype)
    check_grad = lambda sym, location:\
        check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
                               atol=atol_bw, dtype=dtype)
    # Replicate an m x n matrix 3 times into a (3, 1, m, n) batch.
    rep_3x = lambda a, m, n :\
        np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
    # Tests for linalg.syrk
    mnalpha_lst = [(2, 3, 1.), (5, 3, -2.), (1, 6, 5.), (3, 3, 0.5), (4, 1, 10.), (1, 1, 1.)]
    for m, n, alpha in mnalpha_lst:
        #print('syrk: m={}, n={}, alpha={}'.format(m, n, alpha))
        data_in1 = np.random.uniform(1, 10, (m, n))
        res_syrk1 = alpha * np.dot(data_in1, data_in1.T)
        test_syrk1 = mx.sym.linalg.syrk(data1, transpose=False, alpha=alpha)
        check_fw(test_syrk1, [data_in1], [res_syrk1])
        if grad_check == 1:
            check_grad(test_syrk1, [data_in1])
        res_syrk2 = alpha * np.dot(data_in1.T, data_in1)
        test_syrk2 = mx.sym.linalg.syrk(data1, transpose=True, alpha=alpha)
        check_fw(test_syrk2, [data_in1], [res_syrk2])
        if grad_check == 1:
            check_grad(test_syrk2, [data_in1])
        # Batch mode (3x the same thing)
        a_batch = rep_3x(data_in1, m, n)
        r1_batch = rep_3x(res_syrk1, m, m)
        check_fw(test_syrk1, [a_batch], [r1_batch])
        if grad_check == 1:
            check_grad(test_syrk1, [a_batch])
        r2_batch = rep_3x(res_syrk2, n, n)
        check_fw(test_syrk2, [a_batch], [r2_batch])
        if grad_check == 1:
            check_grad(test_syrk2, [a_batch])
    # Tests for linalg.gelqf
    # Currently disabled on GPU as they need cuda8
    # and MxNet builds use cuda 7.5
    if not (default_context() == mx.cpu()):
        return
    test_gelqf2 = _gelqf_combined_symbol(data1)  # Outputs (dot(Q, Q.T), dot(L, Q))
    test_gelqf_q = _gelqf_first_output(data1)  # Output Q (L is not dangling)
    test_gelqf_l = _gelqf_second_output(data1)  # Output L (Q is not dangling)
    mn_lst = [(4, 4), (1, 1), (5, 20), (1, 10), (15, 50)]
    for m, n in mn_lst:
        #print('gelqf: m={}, n={}'.format(m, n))
        data_in1 = np.random.normal(0., 10., (m, n))
        # Q Q^T must be identity, L Q must reconstruct the input.
        res_eye = np.eye(m)
        res_a = data_in1
        check_fw(test_gelqf2, [data_in1], [res_eye, res_a])
        if grad_check == 1:
            # A => Q
            check_grad(test_gelqf_q, [data_in1])
            # A => L
            check_grad(test_gelqf_l, [data_in1])
        # Batch mode (3x the same thing)
        a_batch = rep_3x(data_in1, m, n)
        reye_batch = rep_3x(res_eye, m, m)
        ra_batch = a_batch
        check_fw(test_gelqf2, [a_batch], [reye_batch, ra_batch])
        if grad_check == 1:
            # A => Q
            check_grad(test_gelqf_q, [a_batch])
            # A => L
            check_grad(test_gelqf_l, [a_batch])
# Tests for operator linalg.syevd
def _syevd_first_output(a):
    """Return U from syevd, with lam tied into the graph (at zero weight)
    so its gradient stays well-typed instead of dangling."""
    evec, evals = mx.sym.linalg.syevd(a)
    anchor = mx.sym.sum(mx.sym.BlockGrad(evals), axis=(), keepdims=True) * 0.0
    return mx.sym.broadcast_add(evec, anchor)
def _syevd_second_output(a):
    """Return lam from syevd, with U tied into the graph (at zero weight)
    so its gradient stays well-typed instead of dangling."""
    evec, evals = mx.sym.linalg.syevd(a)
    anchor = mx.sym.sum(mx.sym.BlockGrad(evec), axis=(), keepdims=True) * 0.0
    return mx.sym.broadcast_add(evals, anchor)
def _syevd_forward(a):
    """NumPy reference for syevd on matrix `a`: returns (u, lam) where the
    eigenvalues lam are ascending and u holds the matching eigenvectors as
    rows, each sign-normalized in place via _syevd_forw_eigvec_sign."""
    eigvals, eigvecs = np.linalg.eig(a)
    order = np.argsort(eigvals)
    lam_sorted = eigvals[order]
    u_rows = eigvecs[:, order].T
    for row in u_rows:
        _syevd_forw_eigvec_sign(row)
    return u_rows, lam_sorted
def _syevd_forw_eigvec_sign(v):
    """Normalize the sign of eigenvector `v` in place: flip it so the
    entry with the largest magnitude is positive."""
    peak = np.argmax(np.abs(v))
    if v[peak] < 0.:
        v *= -1.0
def _syevd_backward(grad_u, grad_l, u, l):
    """NumPy reference gradient of syevd w.r.t. the input matrix, given the
    output gradients grad_u (n x n) and grad_l (n,) and the forward results
    u (eigenvector rows) and l (eigenvalues). Assumes distinct eigenvalues."""
    n = l.size
    assert grad_l.size == n
    assert grad_u.shape == (n, n)
    assert u.shape == (n, n)
    guut = np.dot(grad_u, u.T)
    # Diagonal carries the eigenvalue gradients ...
    mid = np.diag(grad_l)
    # ... off-diagonal terms are the antisymmetric part of guut, scaled by
    # the corresponding eigenvalue gaps.
    for row in range(1, n):
        for col in range(0, row):
            val = (guut[row, col] - guut[col, row]) / (2. * (l[row] - l[col]))
            mid[row, col] = val
            mid[col, row] = val
    return np.dot(np.dot(u.T, mid), u)
# Seed set because the test is not robust enough to operate on random data
@with_seed(1896893923)
def test_laop_3():
    """Forward, analytic-backward and numeric-gradient tests for
    linalg.syevd on random symmetric matrices of several sizes, single and
    batched (CPU only, since the GPU path needs cuda8). The analytic
    backward pass is checked against the NumPy reference _syevd_backward."""
    # Currently disabled on GPU as syevd needs cuda8
    # and MxNet builds use cuda 7.5
    if not (default_context() == mx.cpu()):
        return
    dtype = np.float64
    rtol_fw = 1e-6
    atol_fw = 1e-6
    num_eps = 1e-4
    rtol_bw = 1e-2
    atol_bw = 1e-2
    # enable numerical checking of gradients
    grad_check = 1
    data1 = mx.symbol.Variable('data1')
    check_fw = lambda sym, location, expected :\
        check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
                               atol=atol_fw, dtype=dtype)
    check_grad = lambda sym, location:\
        check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
                               atol=atol_bw, dtype=dtype)
    # Replicate an m x n matrix 3 times into a (3, 1, m, n) batch.
    rep_3x = lambda a, m, n :\
        np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
    check_bw = lambda sym, location, out_grads, expected :\
        check_symbolic_backward(sym, location, out_grads, expected,
                                rtol=rtol_fw, atol=atol_fw, dtype=dtype)
    # Tests for linalg.syevd
    test_syevd2 = _syevd_combined_symbol(data1)  # Outputs (U U^T, U^T (diag L) U)
    data1_s2 = _make_symm_symbol(data1, ndims=2)
    test_syevd_u_2 = _syevd_first_output(data1_s2)
    test_syevd_l_2 = _syevd_second_output(data1_s2)
    data1_s4 = _make_symm_symbol(data1, ndims=4)
    test_syevd_u_4 = _syevd_first_output(data1_s4)
    test_syevd_l_4 = _syevd_second_output(data1_s4)
    n_lst = [4, 1, 2, 10, 14]
    for n in n_lst:
        #print('\n** syevd: n={}'.format(n))
        # Symmetrize the random input; syevd requires a symmetric matrix.
        data_in1 = np.random.normal(0., 10., (n, n))
        data_in1 = 0.5 * (data_in1 + data_in1.T)
        res_eye = np.eye(n)
        res_a = data_in1
        check_fw(test_syevd2, [data_in1], [res_eye, res_a])
        # Check backward
        grad_u = np.random.normal(0., 2., (n, n))
        grad_l = np.random.normal(0., 2., (n,))
        bw_u, bw_l = _syevd_forward(data_in1)
        grad_a = _syevd_backward(grad_u, grad_l, bw_u, bw_l)
        check_bw(mx.sym.linalg.syevd(data1), [data_in1], [grad_u, grad_l], [grad_a])
        if grad_check == 1:
            # A => U
            check_grad(test_syevd_u_2, [data_in1])
            # A => L
            check_grad(test_syevd_l_2, [data_in1])
        # Batch mode (3x the same thing)
        a_batch = rep_3x(data_in1, n, n)
        reye_batch = rep_3x(res_eye, n, n)
        ra_batch = a_batch
        check_fw(test_syevd2, [a_batch], [reye_batch, ra_batch])
        if grad_check == 1:
            # A => U
            check_grad(test_syevd_u_4, [a_batch])
            # A => L
            check_grad(test_syevd_l_4, [a_batch])
# @piyushghai - Removing the fixed seed for this test.
# Issue for flakiness is tracked at - https://github.com/apache/incubator-mxnet/issues/11721
@with_seed()
def test_laop_4():
    """Forward check of linalg.syevd on a fixed 2x2 matrix against
    hand-computed eigenvectors/eigenvalues, in float64 and float32
    (CPU only, since the GPU path needs cuda8)."""
    # Currently disabled on GPU as syevd needs cuda8
    # and MxNet builds use cuda 7.5
    if not (default_context() == mx.cpu()):
        return
    rtol_fw = 1e-6
    atol_fw = 1e-6
    data1 = mx.symbol.Variable('data1')

    def check_fw(sym, location, expected, dtype):
        check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
                               atol=atol_fw, dtype=dtype)

    a_np = np.array([[1., 2.], [2., 4.]])
    u_np = np.array([[0.89442718, -0.44721359], [0.44721359, 0.89442718]])
    l_np = np.array([0., 5.])
    test_syevd = mx.sym.linalg.syevd(data1)
    # Run the same fixed case in both precisions.
    for run_dtype in [np.float64, np.float32]:
        check_fw(test_syevd, [a_np], [u_np, l_np], run_dtype)
@with_seed()
def test_laop_5():
    """Tests for diagonal and triangular matrix extraction and generation:
    linalg.extractdiag / makediag / extracttrian / maketrian.

    Covers all square sizes 1..4, batched and non-batched inputs, every
    legal diagonal offset, and both values of `lower` for zero offset.
    Decorated with @with_seed() (like the other linalg tests) so the random
    input is reproducible from the logged seed on failure — the original
    lacked it, making failures impossible to replay.
    """
    # tests for diagonal and triangular matrix extraction and generation
    data = mx.symbol.Variable('data')
    # test complete range of small matrices to cover corner cases
    for n in range(1, 5):
        # test batched and non-batched processing
        for b in range(3):
            shape = (n, n) if b == 0 else (b, n, n)
            data_in = np.random.uniform(1, 10, shape)
            # test all legal offsets of the diagonal
            for offs in range(1-n, n):
                # test extraction of diagonal
                test_diag = mx.sym.linalg.extractdiag(data, offset=offs)
                res_diag = np.diagonal(data_in, offset=offs) if b==0 else np.diagonal(data_in, axis1=1, axis2=2, offset=offs)
                check_symbolic_forward(test_diag, [data_in], [res_diag])
                check_numeric_gradient(test_diag, [data_in])
                # test generation of diagonal matrix
                test_diag2 = mx.sym.linalg.makediag(data, offset=offs)
                res_diag2 = None
                if b == 0:
                    res_diag2 = np.diagflat(res_diag, k=offs)
                else:
                    for i in range(b):
                        res = np.reshape(np.diagflat(res_diag[i], k=offs), (1, n, n))
                        res_diag2 = res if res_diag2 is None else np.concatenate((res_diag2, res), axis=0)
                check_symbolic_forward(test_diag2, [res_diag], [res_diag2])
                check_numeric_gradient(test_diag2, [res_diag])
                # check both settings for parameter "lower" in case of zero offset
                lower_vals = [True] if offs != 0 else [True, False]
                for lower in lower_vals:
                    # test extraction of triangle by doing a full roundtrip as the intermediate extracted
                    # triangle has different orderings than numpy.
                    test_trian = mx.sym.linalg.extracttrian(data, offset=offs, lower=lower)
                    test_trian = mx.sym.linalg.maketrian(test_trian, offset=offs, lower=lower)
                    extracts_lower = (offs < 0) or ((offs == 0) and lower)
                    res_trian = None
                    if b == 0:
                        res_trian = np.tril(data_in, offs) if extracts_lower else np.triu(data_in, offs)
                    else:
                        for i in range(b):
                            res = np.tril(data_in[i], offs) if extracts_lower else np.triu(data_in[i], offs)
                            res = np.reshape(res, (1, n, n))
                            res_trian = res if res_trian is None else np.concatenate((res_trian, res), axis=0)
                    check_symbolic_forward(test_trian, [data_in], [res_trian])
                    check_numeric_gradient(test_trian, [data_in])
# Tests for linalg.inverse
@with_seed()
@unittest.skip("Test crashes https://github.com/apache/incubator-mxnet/issues/15975")
def test_laop_6():
    """Tests for linalg.inverse, linalg.det and linalg.slogdet on a batch of
    well-conditioned SPD matrices (I + v v^T). Currently skipped — the test
    crashes, tracked in issue #15975."""
    dtype = np.float64
    rtol_fw = 1e-7
    atol_fw = 1e-9
    num_eps = 1e-6
    rtol_bw = 1e-4
    atol_bw = 1e-6
    data = mx.symbol.Variable('data')
    check_fw = lambda sym, location, expected:\
        check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
                               atol=atol_fw, dtype=dtype)
    check_grad = lambda sym, location:\
        check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
                               atol=atol_bw, dtype=dtype)
    ## det(I + dot(v, v.T)) = 1 + dot(v.T, v) >= 1, so it's always invertible;
    ## det is away from zero, so the value of logdet is stable
    v = np.random.random(4)
    a = np.eye(4) + np.outer(v, v)
    a = np.tile(a, (3, 1, 1))
    # Row-swap permutation: multiplying by it flips the determinant's sign.
    permute_mat = np.eye(4)[[1, 0, 2, 3]]
    # test matrix inverse
    r = np.eye(4)
    r = np.tile(r, (3, 1, 1))
    test_inverse = mx.sym.linalg.inverse(data)
    # A * inverse(A) must be the identity.
    test_eye = mx.sym.linalg.gemm2(data, test_inverse)
    check_fw(test_eye, [a], [r])
    check_grad(test_inverse, [a])
    # test matrix determinant
    # det
    r = np.linalg.det(a)
    test_det = mx.sym.linalg.det(data)
    check_fw(test_det, [a], [r])
    check_grad(test_det, [a])
    # test slogdet
    r1 = np.array([1., 1., 1.])
    r2 = np.log(np.abs(np.linalg.det(a)))
    test_sign, test_logabsdet = mx.sym.linalg.slogdet(data)
    check_fw(test_sign, [a], [r1])
    # The permuted input has negative determinant, so the sign flips.
    check_fw(test_sign, [np.dot(a, permute_mat)], [-r1])
    check_fw(test_logabsdet, [a], [r2])
    check_grad(test_logabsdet, [a])
@with_seed()
def test_stack():
    """Check mx.sym.stack against np.stack over 100 random configurations
    of rank, stacking axis (positive and negative) and input count."""
    for _ in range(100):
        rank = random.randint(1, 5)
        stack_axis = random.randint(0, rank)
        if random.randint(0, 1):
            # Exercise negative axis values as well.
            stack_axis = stack_axis - rank - 1
        num_inputs = random.randint(1, 3)
        shape = [random.randint(1, 5) for _ in range(rank)]
        arrays = [np.random.uniform(size=shape) for _ in range(num_inputs)]
        expected = np.stack(arrays, axis=stack_axis)
        variables = [mx.sym.var('x%d' % i) for i in range(num_inputs)]
        stacked = mx.sym.stack(*variables, axis=stack_axis)
        check_symbolic_forward(stacked, arrays, [expected])
        check_numeric_gradient(stacked, arrays)
@with_seed()
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/14288")
def test_dropout():
    """Tests for the Dropout operator: output scaling and zero statistics
    for several ratios, backward behaviour, mode='always' (permanent)
    dropout, axis-wise (broadcast) dropout, and identity passthrough in
    inference mode — each where applicable with cudnn on and off."""
    def zero_count(array, ratio):
        # Count zeros; NaNs count as zeros but are only legal for ratio == 1,
        # where 1/ratio scaling makes the surviving values NaN-prone.
        zeros = 0
        for i in array:
            if i == 0:
                zeros += 1
            elif math.isnan(i):
                assert ratio == 1  # Only valid for ratio = 1
                zeros += 1
        return zeros
    def check_correctness(executor, input, ratio):
        # Statistical check: the 1/(1-p) scaling should keep the output sum
        # close to the input sum, and the zero count should match the ratio.
        input = input.ravel()
        output = executor.outputs[0].asnumpy().ravel()
        input_sum = np.sum(input)
        output_sum = np.sum(output)
        # Make sure input zeroes are none (test data setup check)
        assert zero_count(input, ratio) == 0
        # count number of zeroes in output
        output_zeroes = zero_count(output, ratio)
        # Hopefully should be within ratio/2 %
        error = abs(output_sum - input_sum) / input_sum
        if ratio == 1.0:
            assert output_zeroes == len(input)
        elif ratio > 0.2:
            assert output_zeroes > 0
            assert error < (ratio/2)
        elif ratio == 0:
            assert output_zeroes == 0
    def check_dropout_ratio(ratio, shape, cudnn_off=True):
        # test dropout
        x = mx.sym.var('data')
        y = mx.sym.Dropout(x, p=ratio, cudnn_off=cudnn_off)
        exe = y.simple_bind(ctx=default_context(), data=shape)
        # Surviving elements are scaled by 1/ratio; ratio == 1 yields NaN.
        if ratio == 1:
            max_value = float('nan')
        else:
            max_value = 1 if ratio == 0 else 1/ratio
        if ratio == 1:
            min_value = float('nan')
        else:
            min_value = 1 if ratio == 0 else 0
        exe.arg_arrays[0][:] = 1
        exe.forward(is_train=True)
        if not math.isnan(max_value):
            assert exe.outputs[0].asnumpy().max() > 0
        else:
            assert math.isnan(exe.outputs[0].asnumpy().max())
        if not math.isnan(min_value):
            assert exe.outputs[0].asnumpy().min() == min_value
        else:
            assert math.isnan(exe.outputs[0].asnumpy().min())
        check_correctness(exe, exe.arg_arrays[0].asnumpy(), ratio)
        if ratio == 0.5:
            # Backward passes dropout's mask through; inference is identity.
            exe.backward([mx.nd.ones(shape)])
            assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
            exe.forward(is_train=False)
            assert (exe.outputs[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
            exe.backward([mx.nd.ones(shape)], is_train=False)
            assert (exe.grad_arrays[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
            # test permanent dropout
            x = mx.sym.var('data')
            y = mx.sym.Dropout(x, p=ratio, mode='always', cudnn_off=cudnn_off)
            exe = y.simple_bind(ctx=default_context(), data=shape)
            exe.arg_arrays[0][:] = 1
            exe.forward(is_train=True)
            assert exe.outputs[0].asnumpy().max() == max_value
            assert exe.outputs[0].asnumpy().min() == min_value
            exe.backward([mx.nd.ones(shape)])
            assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
            # mode='always' drops even with is_train=False.
            exe.forward(is_train=False)
            assert exe.outputs[0].asnumpy().max() == max_value
            assert exe.outputs[0].asnumpy().min() == min_value
            exe.backward([mx.nd.ones(shape)], is_train=False)
            assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
    def get_slice(x, axis, idx):
        # Build an index tuple selecting slice `idx` along `axis`.
        ix = ()
        for i in range(x.ndim):
            if i == axis:
                ix += (idx,)
            else:
                ix += (slice(None, None, None),)
        return x[ix]
    def check_dropout_axes(ratio, shape, axes, cudnn_off=True):
        # With `axes` the mask is broadcast: all slices along each masked
        # axis must be identical.
        compactshape = list(shape)
        for axis in axes:
            compactshape[axis] = 1
        compactx = mx.random.uniform(shape=tuple(compactshape))
        broadcastx = compactx.broadcast_to(shape)
        dropouty = mx.nd.Dropout(broadcastx, p=ratio, axes=axes, cudnn_off=cudnn_off)
        for axis in axes:
            target = get_slice(dropouty, axis, 0).asnumpy()
            for i in range(1, shape[axis]):
                assert(get_slice(dropouty, axis, i).asnumpy() == target).all()
    def check_passthrough(ratio, shape, cudnn_off=True):
        # test inference_mode forward and then backward
        a = mx.random.uniform(shape=shape)
        a.attach_grad()
        with mx.autograd.record(train_mode=False):
            b = mx.nd.Dropout(a, ratio, cudnn_off=cudnn_off)  # dropout acts as identity
        b.backward()
        assert_almost_equal(a.grad.asnumpy(), mx.nd.ones_like(b).asnumpy())
    shape = (100, 100)
    check_dropout_ratio(0.5, shape)
    check_dropout_ratio(0.0, shape)
    check_dropout_ratio(1.0, shape)
    check_dropout_ratio(0.75, shape)
    check_dropout_ratio(0.25, shape)
    check_dropout_ratio(0.5, shape, cudnn_off=False)
    check_dropout_ratio(0.0, shape, cudnn_off=False)
    check_dropout_ratio(1.0, shape, cudnn_off=False)
    check_dropout_ratio(0.75, shape, cudnn_off=False)
    check_dropout_ratio(0.25, shape, cudnn_off=False)
    check_passthrough(0.5, shape)
    check_passthrough(0.0, shape)
    check_passthrough(1.0, shape)
    check_passthrough(0.5, shape, cudnn_off=False)
    check_passthrough(0.0, shape, cudnn_off=False)
    check_passthrough(1.0, shape, cudnn_off=False)
    nshape = (10, 10, 10, 10)
    with mx.autograd.train_mode():
        check_dropout_axes(0.25, nshape, axes = (0,))
        check_dropout_axes(0.25, nshape, axes = (1,))
        check_dropout_axes(0.25, nshape, axes = (2,))
        check_dropout_axes(0.25, nshape, axes = (3,))
        check_dropout_axes(0.25, nshape, axes = (0, 1))
        check_dropout_axes(0.25, nshape, axes = (0, 2))
        check_dropout_axes(0.25, nshape, axes = (0, 3))
        check_dropout_axes(0.25, nshape, axes = (1, 2))
        check_dropout_axes(0.25, nshape, axes = (1, 3))
        check_dropout_axes(0.25, nshape, axes = (2, 3))
        check_dropout_axes(0.25, nshape, axes = (0, 1, 2))
        check_dropout_axes(0.25, nshape, axes = (0, 2, 3))
        check_dropout_axes(0.25, nshape, axes = (1, 2, 3))
        check_dropout_axes(0.25, nshape, axes = (0,), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (1,), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (2,), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (3,), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (0, 1), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (0, 2), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (0, 3), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (1, 2), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (1, 3), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (2, 3), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (0, 1, 2), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (0, 2, 3), cudnn_off=False)
        check_dropout_axes(0.25, nshape, axes = (1, 2, 3), cudnn_off=False)
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/11290")
@with_seed()
def test_scatter_gather_nd():
    """Tests for gather_nd / scatter_nd / _backward_gather_nd across all
    numeric dtypes, including the accumulating behaviour of the backward
    gather for duplicate indices and int64 precision."""
    def check(data, idx):
        # gather_nd forward must match NumPy fancy indexing, and its
        # backward must scatter the head gradient back to `data`'s shape.
        data.attach_grad()
        with mx.autograd.record():
            y = mx.nd.gather_nd(data, idx)
            y.backward(y)
        npidx = tuple(i.asnumpy() for i in idx)
        assert (data.asnumpy()[npidx] == y.asnumpy()).all()
        npdata = np.zeros_like(data.asnumpy())
        npdata[npidx] = y.asnumpy()
        assert (npdata == data.grad.asnumpy()).all()
        assert (mx.nd._internal._backward_gather_nd(y, idx, shape=data.shape).asnumpy() == data.grad.asnumpy()).all()
    for dtype in ['int32', 'int64', 'float16', 'float32', 'float64']:
        data = mx.nd.arange(360, dtype=dtype).reshape((3,4,5,6))
        idx = mx.nd.array([[1,1,2], [3, 3, 0], [3,2,1]], dtype='int32')
        check(data, idx)
        idx = mx.nd.array([[1,1,2], [3,3,0], [3,2,1], [5,2,4]], dtype='int32')
        check(data, idx)
        data = mx.nd.array([2, 3, 0], dtype=dtype)
        idx = mx.nd.array([[1, 1, 0], [0, 1, 0]], dtype='int32')
        assert (mx.nd.scatter_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [2, 3]]).all()
        # Duplicate indices: _backward_gather_nd accumulates (2 + 3 = 5),
        # unlike scatter_nd which overwrites.
        data = mx.nd.array([2, 3, 0], dtype=dtype)
        idx = mx.nd.array([[1, 1, 0], [1, 1, 0]], dtype='int32')
        assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [0, 5]]).all()
        data_npy = np.random.randint(0, 10, (100,))
        data = mx.nd.array(data_npy, dtype=dtype)
        idx = mx.nd.zeros(shape=(1, 100), dtype='int32')
        assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data_npy.sum())
        if dtype == 'int64':
            # Large magnitudes: must accumulate exactly in 64-bit integers.
            data = mx.nd.array([2123162361283621, -31231236374787,
                                -112372937128970, -1378278798172378], dtype=dtype)
            idx = mx.nd.array([[0, 0, 0, 0]], dtype='int32')
            assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data.asnumpy().sum())
def compare_forw_backw_unary_op(
        name, forward_mxnet_call, forward_numpy_call,
        backward_numpy_call, shape, input_low, input_high, rtol, atol,
        dtype=np.float32):
    """Compare one unary mxnet operator against its NumPy reference at the
    given dtype: forward values, then the backward gradient under a random
    head gradient. Inputs are drawn uniformly from [input_low, input_high)."""
    op_name = 'unary_op={}, dtype={}'.format(name, dtype)
    data = mx.symbol.Variable(op_name + '_data', dtype=dtype)
    # Adding zeros_like keeps the output a named op on `data` without
    # changing any values.
    op_ex = mx.sym.broadcast_add(
        forward_mxnet_call(data), mx.sym.zeros_like(data),
        name=op_name)
    # Forward comparison.
    data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
    check_symbolic_forward(op_ex, [data_np], [forward_numpy_call(data_np)],
                           rtol=rtol, atol=atol, dtype=dtype)
    # Backward comparison with a random head gradient.
    res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
    check_symbolic_backward(op_ex, [data_np], [res_grad],
                            [backward_numpy_call(data_np) * res_grad],
                            rtol=rtol, atol=atol, dtype=dtype)
def finite_diff_unary_op(
        name, forward_mxnet_call, shape, input_low, input_high, rtol, atol,
        num_eps):
    """Finite-difference gradient check of a unary mxnet operator on a
    random input from [input_low, input_high)."""
    # Finite difference tests are done in float64
    dtype = np.float64
    data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
    data = mx.symbol.Variable('data', dtype=dtype)
    op_name = 'unary_op={}, dtype={}'.format(name, dtype)
    # zeros_like wrapper keeps the graph a named op on `data`.
    op_ex = mx.sym.broadcast_add(
        forward_mxnet_call(data), mx.sym.zeros_like(data),
        name=op_name)
    check_numeric_gradient(op_ex, [data_np], numeric_eps=num_eps, rtol=rtol,
                           atol=atol, dtype=dtype)
def np_smooth_l1(x, sigma):
    """NumPy reference for the smooth-L1 loss:
    0.5 * (sigma*x)^2 where |x| < 1/sigma^2, else |x| - 0.5/sigma^2."""
    inv_ssq = 1. / sigma / sigma
    ax = np.abs(x)
    quadratic = 0.5 * np.square(x * sigma)
    linear = ax - 0.5 * inv_ssq
    return np.where(ax < inv_ssq, quadratic, linear)
def np_smooth_l1_grad(x, sigma):
    """Derivative of np_smooth_l1: sigma^2 * x inside the quadratic zone
    (|x| < 1/sigma^2), sign(x) outside."""
    ssq = sigma * sigma
    return np.where(np.abs(x) < 1. / ssq, ssq * x, np.sign(x))
# Tests for unary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
# Seed set because the test is not robust enough to operate on random data
@with_seed(192837465)
def test_unary_math_operators():
    """Table-driven test of unary math operators. Each table entry is
    [mxnet call, NumPy forward reference, NumPy derivative reference,
    input_low, input_high]; input ranges keep each function inside its
    well-conditioned domain. Every op is compared to NumPy per dtype and
    finite-difference-checked in float64. gamma/gammaln are added only if
    SciPy is importable, and use looser tolerances."""
    have_scipy = True
    try:
        from scipy import special as scipy_special
    except:
        print("Could not import scipy. Skipping unit tests for special functions")
        have_scipy = False
    shape=(9, 10)
    dtype_l = [np.float64, np.float32, np.float16]
    rtol_l = [1e-7, 1e-6, 1e-2]
    rtol_less_l = [1e-6, 1e-5, 1e-2]
    atol_l = [1e-7, 1e-6, 1e-2]
    atol_less_l = [1e-6, 1e-5, 1e-2]
    rtol_fd = 1e-5
    atol_fd = 1e-6
    num_eps = 1e-6
    unary_ops = {
        'arccos' : [lambda x: mx.sym.arccos(x),
                    lambda x: np.arccos(x),
                    lambda x: -1. / np.sqrt(1. - x ** 2.),
                    -0.95, 0.95],
        'arccosh': [lambda x: mx.sym.arccosh(x),
                    lambda x: np.arccosh(x),
                    lambda x: 1. / np.sqrt(x ** 2 - 1.),
                    1.05, 10.0],
        'arcsin': [lambda x: mx.sym.arcsin(x),
                   lambda x: np.arcsin(x),
                   lambda x: 1. / np.sqrt(1. - x ** 2),
                   -0.95, 0.95],
        'arcsinh': [lambda x: mx.sym.arcsinh(x),
                    lambda x: np.arcsinh(x),
                    lambda x: 1. / np.sqrt(x**2 + 1.),
                    -5.0, 5.0],
        'arctan': [lambda x: mx.sym.arctan(x),
                   lambda x: np.arctan(x),
                   lambda x: 1. / (x ** 2. + 1.),
                   -5.0, 5.0],
        'arctanh': [lambda x: mx.sym.arctanh(x),
                    lambda x: np.arctanh(x),
                    lambda x: 1. / (1. - x ** 2),
                    -0.95, 0.95],
        'cbrt': [lambda x: mx.sym.cbrt(x),
                 lambda x: np.cbrt(x),
                 lambda x: 1. / (3. * np.cbrt(x) ** 2),
                 -10.0, 10.0],
        'cos': [lambda x: mx.sym.cos(x),
                lambda x: np.cos(x),
                lambda x: -np.sin(x),
                -5.0, 5.0],
        'cosh': [lambda x: mx.sym.cosh(x),
                 lambda x: np.cosh(x),
                 lambda x: np.sinh(x),
                 -2.0, 2.0],
        'exp': [lambda x: mx.sym.exp(x),
                lambda x: np.exp(x),
                lambda x: np.exp(x),
                -4.0, 4.0],
        'expm1': [lambda x: mx.sym.expm1(x),
                  lambda x: np.expm1(x),
                  lambda x: np.exp(x),
                  -0.1, 0.1],
        'log': [lambda x: mx.sym.log(x),
                lambda x: np.log(x),
                lambda x: 1. / x,
                0.01, 100.0],
        'log10': [lambda x: mx.sym.log10(x),
                  lambda x: np.log10(x),
                  lambda x: 1. / (x * np.log(10.)),
                  0.01, 100.0],
        'log2': [lambda x: mx.sym.log2(x),
                 lambda x: np.log2(x),
                 lambda x: 1. / (x * np.log(2.)),
                 0.01, 100.0],
        'log1p': [lambda x: mx.sym.log1p(x),
                  lambda x: np.log1p(x),
                  lambda x: 1. / (1. + x),
                  -0.1, 0.1],
        'rcbrt': [lambda x: mx.sym.rcbrt(x),
                  lambda x: 1. / np.cbrt(x),
                  lambda x: -1. / (3. * x * np.cbrt(x)),
                  0.01, 100.0],
        'reciprocal': [lambda x: mx.sym.reciprocal(x),
                       lambda x: 1. / x,
                       lambda x: -1. / (x ** 2),
                       0.01, 100.0],
        'relu': [lambda x: mx.sym.relu(x),
                 lambda x: np.maximum(x, 0.),
                 lambda x: 1. * (x > 0.),
                 -5.0, 5.0],
        'rsqrt': [lambda x: mx.sym.rsqrt(x),
                  lambda x: 1. / np.sqrt(x),
                  lambda x: -0.5 / (x * np.sqrt(x)),
                  0.01, 100.0],
        'sigmoid': [lambda x: mx.sym.sigmoid(x),
                    lambda x: 1. / (np.exp(-x) + 1.),
                    lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
                    -3.0, 3.0],
        'softsign': [lambda x: mx.sym.softsign(x),
                     lambda x: x / (1. + np.abs(x)),
                     lambda x: 1. / np.square(1. + np.abs(x)),
                     -3.0, 3.0],
        'sin': [lambda x: mx.sym.sin(x),
                lambda x: np.sin(x),
                lambda x: np.cos(x),
                -5.0, 5.0],
        'sinh': [lambda x: mx.sym.sinh(x),
                 lambda x: np.sinh(x),
                 lambda x: np.cosh(x),
                 -2.0, 2.0],
        'sqrt': [lambda x: mx.sym.sqrt(x),
                 lambda x: np.sqrt(x),
                 lambda x: 0.5 / np.sqrt(x),
                 0.01, 100.0],
        'tan': [lambda x: mx.sym.tan(x),
                lambda x: np.tan(x),
                lambda x: np.tan(x) ** 2 + 1.,
                -1.5, 1.5],
        'tanh': [lambda x: mx.sym.tanh(x),
                 lambda x: np.tanh(x),
                 lambda x: 1. - np.tanh(x) ** 2,
                 -4.0, 4.0],
        'smooth_l1_sig1': [lambda x: mx.sym.smooth_l1(x, scalar=1.),
                           lambda x: np_smooth_l1(x, 1.),
                           lambda x: np_smooth_l1_grad(x, 1.),
                           -2.0, 2.0],
        'smooth_l1_sig_default': [lambda x: mx.sym.smooth_l1(x),
                                  lambda x: np_smooth_l1(x, 1.),
                                  lambda x: np_smooth_l1_grad(x, 1.),
                                  -2.0, 2.0],
        'smooth_l1_sig2': [lambda x: mx.sym.smooth_l1(x, scalar=2.),
                           lambda x: np_smooth_l1(x, 2.),
                           lambda x: np_smooth_l1_grad(x, 2.),
                           -1.0, 1.0]
    }
    if have_scipy:
        unary_ops['gamma'] = [lambda x: mx.sym.gamma(x),
                              lambda x: scipy_special.gamma(x),
                              lambda x: scipy_special.gamma(x) * scipy_special.psi(x),
                              0.01, 5.0]
        unary_ops['gammaln'] = [lambda x: mx.sym.gammaln(x),
                                lambda x: scipy_special.gammaln(x),
                                lambda x: scipy_special.psi(x),
                                0.01, 20.0]
    # Loop over operators
    for name, op in unary_ops.items():
        # Loop over dtype's
        for ind in range(len(dtype_l)):
            dtype = dtype_l[ind]
            # Special functions are checked with relaxed tolerances.
            if name == 'gammaln' or name == 'gamma':
                rtol = rtol_less_l[ind]
                atol = atol_less_l[ind]
            else:
                rtol = rtol_l[ind]
                atol = atol_l[ind]
            compare_forw_backw_unary_op(
                name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
                dtype)
        # Finite difference testing
        finite_diff_unary_op(
            name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
def compare_forw_backw_binary_op(
        name, forward_mxnet_call, forward_numpy_call,
        backward1_numpy_call, backward2_numpy_call, shape, input1_low,
        input1_high, input2_low, input2_high, rtol, atol, dtype=np.float32):
    """Compare an MXNet binary operator against NumPy reference formulas.

    Checks the forward result and the gradients w.r.t. both inputs on
    random uniform data drawn from the given per-input ranges.

    Parameters
    ----------
    name : str
        Operator name, used only for diagnostics in the symbol name.
    forward_mxnet_call : callable
        (sym, sym) -> sym building the MXNet expression under test.
    forward_numpy_call, backward1_numpy_call, backward2_numpy_call : callable
        NumPy references for the forward value and d/dinput1, d/dinput2.
    shape : tuple of int
        Shape of both random input arrays.
    input1_low, input1_high, input2_low, input2_high : float
        Uniform sampling ranges for each input.
    rtol, atol : float
        Comparison tolerances.
    dtype : numpy dtype, optional
        Dtype of the inputs (default np.float32).
    """
    # Named functions instead of lambdas bound to identifiers (flake8 E731).
    def check_fw(sym, location, expected):
        check_symbolic_forward(sym, location, expected, rtol=rtol,
                               atol=atol, dtype=dtype)

    def check_bw(sym, location, out_grads, expected):
        check_symbolic_backward(sym, location, out_grads, expected,
                                rtol=rtol, atol=atol, dtype=dtype)

    op_name = 'binary_op={}, dtype={}'.format(name, dtype)
    data1 = mx.symbol.Variable(op_name + '_data1', dtype=dtype)
    data2 = mx.symbol.Variable(op_name + '_data2', dtype=dtype)
    # Comparison: Forward expression
    data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
    data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
    res_np = forward_numpy_call(data1_np, data2_np)
    # broadcast_add with zeros_like leaves the forward value unchanged;
    # presumably it wraps the op in a composite graph — behavior kept as-is.
    op_ex = mx.sym.broadcast_add(
        forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
        name=op_name)
    check_fw(op_ex, [data1_np, data2_np], [res_np])
    # Comparison: Backward expression (chain rule with a random head gradient)
    res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
    data1_grad = backward1_numpy_call(data1_np, data2_np) * res_grad
    data2_grad = backward2_numpy_call(data1_np, data2_np) * res_grad
    check_bw(op_ex, [data1_np, data2_np], [res_grad], [data1_grad, data2_grad])
def finite_diff_binary_op(
        name, forward_mxnet_call, shape, input1_low, input1_high, input2_low,
        input2_high, rtol, atol, num_eps):
    """Finite-difference gradient check for a binary operator.

    Runs ``check_numeric_gradient`` in float64 on random uniform inputs
    drawn from the given per-input ranges.

    Parameters
    ----------
    name : str
        Operator name, used only for the diagnostic symbol name.
    forward_mxnet_call : callable
        (sym, sym) -> sym building the MXNet expression under test.
    shape : tuple of int
        Shape of both random input arrays.
    input1_low, input1_high, input2_low, input2_high : float
        Uniform sampling ranges for each input.
    rtol, atol : float
        Comparison tolerances.
    num_eps : float
        Step size for the numeric gradient.
    """
    # Finite difference tests are done in float64
    dtype = np.float64

    # Named function instead of a lambda bound to an identifier (flake8 E731).
    def check_grad(sym, location):
        check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
                               atol=atol, dtype=dtype)

    data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
    data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
    data1 = mx.symbol.Variable('data1', dtype=dtype)
    data2 = mx.symbol.Variable('data2', dtype=dtype)
    op_name = 'binary_op={}, dtype={}'.format(name, dtype)
    # Same zeros_like wrapper as compare_forw_backw_binary_op, kept for
    # consistency; it does not change the forward value.
    op_ex = mx.sym.broadcast_add(
        forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
        name=op_name)
    check_grad(op_ex, [data1_np, data2_np])
# Tests for binary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
@with_seed()
def test_binary_math_operators():
    """Test binary math ops (hypot, pow, power) against NumPy references.

    For each operator and each dtype, forward output and both input
    gradients are compared to the NumPy formulas below; a float64
    finite-difference gradient check then runs once per operator.
    """
    shape=(9, 10)
    # Per-dtype tolerances; float16 gets the loosest bounds.
    dtype_l = [np.float64, np.float32, np.float16]
    rtol_l = [1e-7, 1e-6, 1e-2]
    atol_l = [1e-7, 1e-6, 1e-2]
    rtol_fd = 1e-5
    atol_fd = 1e-6
    num_eps = 1e-6
    # name -> [mxnet op, numpy forward, d/dx, d/dy, x_low, x_high, y_low, y_high]
    binary_ops = {
        'hypot' : [lambda x, y: mx.sym.hypot(x, y),
                   lambda x, y: np.hypot(x, y),
                   lambda x, y: x / np.hypot(x, y),
                   lambda x, y: y / np.hypot(x, y),
                   -5.0, 5.0, -5.0, 5.0],
        'pow': [lambda x, y: mx.sym.pow(x, y),
                lambda x, y: np.power(x, y),
                lambda x, y: np.power(x, y - 1.) * y,
                lambda x, y: np.power(x, y) * np.log(x),
                0.2, 5.0, -4.0, 4.0],
        'power': [lambda x, y: mx.sym.power(x, y),
                  lambda x, y: np.power(x, y),
                  lambda x, y: np.power(x, y - 1.) * y,
                  lambda x, y: np.power(x, y) * np.log(x),
                  0.2, 5.0, -4.0, 4.0]
    }
    # Loop over operators
    for name, op in binary_ops.items():
        # Loop over dtype's
        for ind in range(len(dtype_l)):
            dtype = dtype_l[ind]
            compare_forw_backw_binary_op(
                name, op[0], op[1], op[2], op[3], shape, op[4], op[5], op[6],
                op[7], rtol_l[ind], atol_l[ind], dtype)
        # Finite difference testing
        finite_diff_binary_op(
            name, op[0], shape, op[4], op[5], op[6], op[7], rtol_fd, atol_fd,
            num_eps)
@with_seed()
def test_softmax():
    """Exercise softmax forward/backward helpers on a few shapes."""
    ctx = default_context()
    for shp, keep_shape in [((3, 4), False), ((3, 4), True), ((3, 4, 2), True)]:
        check_softmax_with_shape(shp, ctx, preserve_shape=keep_shape)
    check_softmax_grad(ctx)
    check_smoothed_softmax_grad(ctx)
@with_seed()
def test_softmax_output_normalization():
    """SoftmaxOutput gradient scaling for every normalization mode.

    For each combination of multi_output / use_ignore / normalization, the
    autograd gradient of SoftmaxOutput is compared to a hand-computed
    (softmax - one_hot) gradient, masked for ignored labels and scaled
    according to the normalization mode.
    """
    def _softmaxoutput_normalization(multi_output, use_ignore, normalization):
        grad_scale = np.random.random()
        batch_size = 8
        num_labels = 6
        H, W = 3, 3
        ignore_label = np.random.randint(0, num_labels) if use_ignore else -1
        # multi_output lays labels out per spatial position (NCHW data).
        if multi_output:
            data_shape = (batch_size, num_labels, H, W)
            label_shape = (batch_size, H, W)
        else:
            data_shape = (batch_size, num_labels)
            label_shape = (batch_size, )
        data = mx.nd.random.uniform(-1, 1, shape=data_shape)
        label = mx.nd.random.randint(
            0, num_labels, shape=label_shape).astype('float32')
        data.attach_grad()
        kwargs = dict(grad_scale=grad_scale,
                      normalization=normalization, multi_output=multi_output)
        if use_ignore:
            kwargs.update(use_ignore=True, ignore_label=ignore_label)
        with mx.autograd.record():
            out = mx.nd.SoftmaxOutput(data=data, label=label, **kwargs)
            out.backward(mx.nd.ones_like(data))
        # Reference forward: softmax over the class axis (axis 1).
        exp_data = mx.nd.exp(data)
        softmax_data = exp_data / exp_data.sum(1, keepdims=True)
        # NOTE: an unused `argmax_data` local was removed here (dead code).
        assert_almost_equal(out.asnumpy(), softmax_data.asnumpy())
        # Reference gradient: softmax(x) - one_hot(label).
        one_hot_label = mx.nd.one_hot(label, num_labels)
        if multi_output:
            one_hot_label = one_hot_label.transpose((0, 3, 1, 2))
        data_grad = softmax_data - one_hot_label
        # Ignored labels contribute zero gradient.
        if use_ignore:
            if multi_output:
                data_grad *= (label !=
                              ignore_label).reshape((batch_size, 1, H, W))
            else:
                data_grad *= (label != ignore_label).reshape((batch_size, 1))
        # 'null' -> no normalization, 'batch' -> divide by batch size,
        # 'valid' -> divide by the number of non-ignored labels (min 1).
        valid_cnt = 1
        if normalization == 'batch':
            valid_cnt = batch_size
        elif normalization == 'valid':
            valid_cnt = mx.nd.maximum(1, (label != ignore_label).sum())
        scale = grad_scale / valid_cnt
        # multi_output additionally averages over spatial positions,
        # except in 'valid' mode where valid_cnt already counts them.
        if multi_output:
            if normalization != 'valid':
                scale /= H * W
        data_grad *= scale
        assert_almost_equal(data.grad.asnumpy(), data_grad.asnumpy())

    for multi_output in [False, True]:
        for use_ignore in [False, True]:
            for normalization in ['null', 'batch', 'valid']:
                _softmaxoutput_normalization(
                    multi_output, use_ignore, normalization)
@with_seed()
def test_slice():
    """mx.nd.slice / mx.sym.slice forward and backward vs NumPy basic slicing."""
    def test_slice_forward_backward(a, index):
        a_np = a.asnumpy()
        begin = []
        end = []
        step = []
        # Decompose the tuple of python slices into begin/end/step lists
        # (None entries are passed through to mx.nd.slice unchanged).
        for slice_i in index:
            begin.append(slice_i.start)
            end.append(slice_i.stop)
            step.append(slice_i.step)
        b = mx.nd.slice(a, begin=begin, end=end, step=step)
        b_np = a_np[index]
        assert same(b.asnumpy(), b_np)
        data = mx.sym.Variable('data')
        slice_sym = mx.sym.slice(data, begin=begin, end=end, step=step)
        # Backward of slice scatters the out-grad into the sliced region;
        # everything outside the slice receives zero gradient.
        expected_in_grad = np.zeros_like(a_np)
        expected_in_grad[index] = b_np
        check_symbolic_backward(slice_sym, [a_np], [b_np], [expected_in_grad])
    shape = (16, 14, 17, 20)
    arr = mx.nd.arange(np.prod(shape)).reshape(shape=shape)
    # Mix of full, partial, strided, and negative-stride slice tuples.
    index_list = [(slice(None),), (slice(None), slice(None)), (slice(1, 10),), (slice(1, 10), slice(3, 9)),
                  (slice(1, 10), slice(2, 5), slice(3, 6), slice(7, 10)),
                  (slice(1, 10, 2), slice(2, 9, 3), slice(3, 6, 5), slice(7, 10, 2)),
                  (slice(None, None, -1), slice(None, None, -1), slice(None, None, -1)),
                  (slice(10, 0, -2), slice(5, 2, -1), slice(7, None, 3), slice(None, 12, 4))]
    for index in index_list:
        test_slice_forward_backward(arr, index)
    # check numeric gradient
    in_data = np.arange(36).reshape(2, 2, 3, 3)
    data = mx.sym.Variable('data')
    slice_sym = mx.sym.slice(data, begin=[0, None], end=[1, None], step=[2, -1])
    check_numeric_gradient(slice_sym, [in_data])
def test_slice_partial_infer():
    """Partial shape inference through slice / slice_axis with unknown dims.

    Legacy shape semantics mark an unknown dimension with 0; inside
    mx.np_shape() the marker is -1.  Both must propagate through slicing.
    """
    def check_slice_partial_infer(data, begin, end, step, expected_out_shape):
        out = mx.sym.slice(data, begin=begin, end=end, step=step)
        assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
    def check_slice_axis_partial_infer(data, axis, begin, end, expected_out_shape):
        out = mx.sym.slice_axis(data, axis=axis, begin=begin, end=end)
        assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
    # Legacy semantics: 0 is the unknown-dimension marker.
    var1 = mx.sym.var(name="data", shape=(0, 20))
    check_slice_partial_infer(var1, (None, None), (None, 10), [], (0, 10))
    check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (0, 5))
    check_slice_partial_infer(var1, (None, 3), (None, 10), [], (0, 7))
    check_slice_partial_infer(var1, (None, 3), (5, 10), [], (0, 7))
    check_slice_partial_infer(var1, (2, 3), (None, 10), [], (0, 7))
    check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (0, 7))
    check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (0, 3))
    var1 = mx.sym.var(name="data", shape=(10, 0))
    check_slice_axis_partial_infer(var1, 0, 0, 5, (5, 0))
    check_slice_axis_partial_infer(var1, 1, 0, 5, (10, 0))
    # NumPy shape semantics: -1 is the unknown-dimension marker.
    with mx.np_shape():
        var1 = mx.sym.var(name="data", shape=(-1, 20))
        check_slice_partial_infer(var1, (None, None), (None, 10), [], (-1, 10))
        check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (-1, 5))
        check_slice_partial_infer(var1, (None, 3), (None, 10), [], (-1, 7))
        check_slice_partial_infer(var1, (None, 3), (5, 10), [], (-1, 7))
        check_slice_partial_infer(var1, (2, 3), (None, 10), [], (-1, 7))
        check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (-1, 7))
        check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (-1, 3))
        var1 = mx.sym.var(name='data', shape=(10, -1))
        check_slice_axis_partial_infer(var1, 0, 0, 5, (5, -1))
        check_slice_axis_partial_infer(var1, 1, 0, 5, (10, -1))
@with_seed()
def test_float16_min_max():
    """Regression test for https://github.com/apache/incubator-mxnet/issues/9007:
    min/max reductions must survive the full float16 range."""
    fp16 = np.finfo('float16')
    a = mx.nd.array([fp16.min, fp16.max], dtype='float16')
    assert a.dtype == np.float16
    assert mx.nd.min(a).asscalar() == fp16.min
    assert mx.nd.max(a).asscalar() == fp16.max
@with_seed()
@mx.use_np_shape
def test_zero_size_min_max():
    """min/max on a zero-size NDArray must raise MXNetError."""
    # Helpers renamed from `min`/`max`: the originals shadowed the builtins.
    def reduce_min():
        a = mx.nd.zeros(shape=(5, 0))
        a.min()

    def reduce_max():
        a = mx.nd.zeros(shape=(5, 0))
        a.max()

    assert_raises(MXNetError, reduce_min)
    assert_raises(MXNetError, reduce_max)
@with_seed()
def test_squeeze_op():
    """Check mx.nd.squeeze against np.squeeze, plus numeric gradients."""
    def check_squeeze_op(shape, axis=None):
        arr = mx.nd.random.uniform(low=-10.0, high=10.0, shape=shape)
        arr_np = arr.asnumpy()
        if axis is None:
            actual = mx.nd.squeeze(arr).asnumpy()
            expected = np.squeeze(arr_np)
        else:
            actual = mx.nd.squeeze(arr, axis=axis).asnumpy()
            expected = np.squeeze(arr_np, axis=axis)
        if actual.shape == (1,):
            # Exception: an all-ones shape like (1, 1, 1) squeezes to (1,),
            # not to a 0-d scalar, so keep axis 0 in the reference too.
            expected = np.squeeze(arr_np, axis=tuple(range(1, len(shape))))
        assert same(actual, expected)
    # forward checks over single axes, axis tuples, and the default (all axes)
    for ax in (0, 2, 4, (0, 4), (0, 2, 4), None):
        check_squeeze_op((1, 5, 1, 3, 1), ax)
    check_squeeze_op((1, 1, 1, 1))
    # gradient checks via finite differences
    data = mx.symbol.Variable('data')
    shape = (1, 2, 1, 3, 1)
    data_tmp = np.ones(shape)
    for ax in (None, 2, (2, 4)):
        if ax is None:
            sym = mx.sym.squeeze(data)
        else:
            sym = mx.sym.squeeze(data, axis=ax)
        check_numeric_gradient(sym, [data_tmp])
@with_seed()
def test_adaptive_avg_pool_op():
    """contrib.AdaptiveAvgPooling2D vs a pure-Python reference implementation."""
    def py_adaptive_avg_pool(x, height, width):
        # 2D per frame adaptive avg pool
        def adaptive_avg_pool_frame(x, y):
            isizeH, isizeW = x.shape
            osizeH, osizeW = y.shape
            for oh in range(osizeH):
                # Each output cell averages the input window
                # [floor(oh*iH/oH), ceil((oh+1)*iH/oH)) x the analogous
                # column range; windows may overlap when sizes don't divide.
                istartH = int(np.floor(1.0 * (oh * isizeH) / osizeH))
                iendH = int(np.ceil(1.0 * (oh + 1) * isizeH / osizeH))
                kH = iendH - istartH
                for ow in range(osizeW):
                    istartW = int(np.floor(1.0 * (ow * isizeW) / osizeW))
                    iendW = int(np.ceil(1.0 * (ow + 1) * isizeW / osizeW))
                    kW = iendW - istartW
                    xsum = 0
                    for ih in range(kH):
                        for iw in range(kW):
                            xsum += x[istartH+ih][istartW+iw]
                    y[oh][ow] = xsum / kH / kW
        B,C,_,_ = x.shape
        y = np.empty([B,C,height, width], dtype=x.dtype)
        for b in range(B):
            for c in range(C):
                adaptive_avg_pool_frame(x[b][c], y[b][c])
        return y
    def check_adaptive_avg_pool_op(shape, output_height, output_width=None):
        x = mx.nd.random.uniform(shape=shape)
        # A scalar output_size means a square (H == W) output.
        if output_width is None:
            y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=output_height)
            npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_height)
        else:
            y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=(output_height, output_width))
            npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_width)
        assert_almost_equal(y.asnumpy(), npy)
    # Cover every output size from 1x1 up to the 10x10 input itself.
    shape = (2, 2, 10, 10)
    for i in range(1, 11):
        check_adaptive_avg_pool_op(shape, i)
        for j in range(1, 11):
            check_adaptive_avg_pool_op(shape, i, j)
@with_seed()
def test_bilinear_resize_op():
    """contrib.BilinearResize2D: forward, backward, size modes and
    align_corners handling, all checked against pure-Python references."""
    def py_bilinear_resize(x, outputHeight, outputWidth):
        # Reference forward pass; uses the (in-1)/(out-1) coordinate mapping.
        batch, channel, inputHeight, inputWidth = x.shape
        if outputHeight == inputHeight and outputWidth == inputWidth:
            return x
        y = np.empty([batch, channel, outputHeight, outputWidth])
        rheight = 1.0 * (inputHeight - 1) / (outputHeight - 1) if outputHeight > 1 else 0.0
        rwidth = 1.0 * (inputWidth - 1) / (outputWidth - 1) if outputWidth > 1 else 0.0
        for h2 in range(outputHeight):
            h1r = 1.0 * h2 * rheight
            h1 = int(np.floor(h1r))
            h1lambda = h1r - h1
            # h1p selects the next row unless we're on the last input row.
            h1p = 1 if h1 < (inputHeight - 1) else 0
            for w2 in range(outputWidth):
                w1r = 1.0 * w2 * rwidth
                w1 = int(np.floor(w1r))
                w1lambda = w1r - w1
                w1p = 1 if w1 < (inputWidth - 1) else 0
                for b in range(batch):
                    for c in range(channel):
                        # Standard bilinear blend of the 2x2 neighborhood.
                        y[b][c][h2][w2] = (1-h1lambda)*((1-w1lambda)*x[b][c][h1][w1] + \
                            w1lambda*x[b][c][h1][w1+w1p]) + \
                            h1lambda*((1-w1lambda)*x[b][c][h1+h1p][w1] + \
                            w1lambda*x[b][c][h1+h1p][w1+w1p])
        return y
    def py_bilinear_resize_backward(x, incoming_grads, mode='size'):
        # Reference backward pass: scatter each incoming gradient back to the
        # four source pixels with the same bilinear weights as the forward.
        data1 = np.zeros_like(x)
        data2 = incoming_grads
        batchsize = data1.shape[0]
        channels = data1.shape[1]
        height1 = data1.shape[2]
        width1 = data1.shape[3]
        height2 = data2.shape[2]
        width2 = data2.shape[3]
        rheight = float(height1 - 1) / (height2 - 1) if (height2 > 1) else 0
        rwidth = float(width1 - 1) / (width2 - 1) if (width2 > 1) else 0
        # special case: just copy
        if height1 == height2 and width1 == width2:
            data1 += data2
            return [data1]
        for h2 in range(0, height2):
            for w2 in range(0, width2):
                h1r = rheight * h2
                h1 = int(h1r)
                h1p = 1 if (h1 < height1 - 1) else 0
                h1lambda = h1r - h1
                h0lambda = 1 - h1lambda
                #
                w1r = rwidth * w2
                w1 = int(w1r)
                w1p = 1 if (w1 < width1 - 1) else 0
                w1lambda = w1r - w1
                w0lambda = 1 - w1lambda
                #
                for n in range(0, batchsize):
                    for c in range(0, channels):
                        d2val = data2[n][c][h2][w2]
                        data1[n][c][h1][w1] += h0lambda * w0lambda * d2val
                        data1[n][c][h1][w1 + w1p] += h0lambda * w1lambda * d2val
                        data1[n][c][h1 + h1p][w1] += h1lambda * w0lambda * d2val
                        data1[n][c][h1 + h1p][w1 + w1p] += h1lambda * w1lambda * d2val
        # In 'like' mode the op has a second (shape-reference) input, whose
        # gradient is all zeros.
        if mode == 'like':
            return data1, np.zeros_like(incoming_grads)
        return [data1]
    def check_bilinear_resize_op(shape, height, width):
        # Same target via explicit size and via scale factors must agree.
        x = mx.nd.random.uniform(shape=shape)
        y = mx.nd.contrib.BilinearResize2D(x, height=height, width=width)
        assert_almost_equal(y, py_bilinear_resize(x.asnumpy(), height, width))
        x_scale = width / shape[-1]
        y_scale = height / shape[-2]
        y = mx.nd.contrib.BilinearResize2D(x, scale_height=y_scale, scale_width=x_scale)
        assert_almost_equal(y.asnumpy(), py_bilinear_resize(x.asnumpy(), height, width))
    def check_bilinear_resize_align_corners_op():
        # Fixed 3x2 input upsampled to 6x4, checked against precomputed
        # outputs for both align_corners settings.
        img_shape = [1, 1, 3, 2]
        data = [64, 32, 32, 64, 50, 100]
        target_height = 6
        target_width = 4
        expected_data = {}
        # align_corners = False
        expected_data[0] = [
            64.000, 56.000, 40.000, 32.000, 56.000, 52.000, 44.000, 40.000, 40.000, 44.000, 52.000, 56.000,
            36.500, 45.625, 63.875, 73.000, 45.500, 56.875, 79.625, 91.000, 50.000, 62.500, 87.500, 100.000
        ]
        # align_corners = True
        expected_data[1] = [
            64.000, 53.333, 42.667, 32.000, 51.200, 49.067, 46.933, 44.800, 38.400, 44.800, 51.200, 57.600,
            35.600, 47.467, 59.333, 71.200, 42.800, 57.067, 71.333, 85.600, 50.000, 66.667, 83.333, 100.000
        ]
        x = np.array(data, dtype=np.float32).reshape(img_shape)
        x_nd = mx.nd.array(x)
        y0 = np.array(expected_data[0]).reshape((1, 1, target_height, target_width))
        y0_nd = mx.nd.contrib.BilinearResize2D(x_nd, height=target_height, width=target_width, mode='size', align_corners=False)
        assert_almost_equal(y0, y0_nd.asnumpy(), atol=1e-3)
        y1 = np.array(expected_data[1]).reshape((1, 1, target_height, target_width))
        y1_nd = mx.nd.contrib.BilinearResize2D(x_nd, height=target_height, width=target_width, mode='size', align_corners=True)
        assert_almost_equal(y1, y1_nd.asnumpy(), atol=1e-3)
    def check_bilinear_resize_modes_op(shape, scale_height=None, scale_width=None, shape_1=None, mode=None):
        # Compute the output size that each mode promises, run the op, and
        # verify shape, forward value, backward value and numeric gradient.
        x = mx.nd.random.uniform(shape=shape)
        original_h = shape[2]
        original_w = shape[3]
        if mode == 'odd_scale':
            assert scale_height is not None and scale_width is not None
            new_h = int(original_h * scale_height) if (original_h % 2) == 0 else \
                int((original_h - 1) * scale_height) + 1
            new_w = int(original_w * scale_width) if (original_w % 2) == 0 \
                else int((original_w - 1) * scale_width) + 1
            y = mx.nd.contrib.BilinearResize2D(x, scale_height=scale_height,
                                               scale_width=scale_width,
                                               mode='odd_scale')
        elif mode == 'to_even_down':
            new_h = original_h if (original_h % 2) == 0 else original_h - 1
            new_w = original_w if (original_w % 2) == 0 else original_w - 1
            y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_down')
        elif mode == 'to_even_up':
            new_h = original_h if (original_h % 2) == 0 else original_h + 1
            new_w = original_w if (original_w % 2) == 0 else original_w + 1
            y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_up')
        elif mode == 'to_odd_down':
            new_h = original_h if (original_h % 2) == 1 else original_h - 1
            new_w = original_w if (original_w % 2) == 1 else original_w - 1
            y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_down')
        elif mode == 'to_odd_up':
            new_h = original_h if (original_h % 2) == 1 else original_h + 1
            new_w = original_w if (original_w % 2) == 1 else original_w + 1
            y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_up')
        elif mode == 'like':
            # 'like' takes its output H/W from a second reference input.
            x_1 = mx.nd.random.uniform(shape=shape_1)
            new_h = x_1.shape[2]
            new_w = x_1.shape[3]
            y = mx.nd.contrib.BilinearResize2D(x, x_1, mode='like')
        new_shape_desired = np.array([shape[0], shape[1], new_h, new_w], dtype='int')
        new_shape_got = np.array(y.shape, dtype='int')
        data_sym = mx.sym.var('data')
        data_np = x.asnumpy()
        expected = py_bilinear_resize(data_np, new_h, new_w)
        out_grads = np.ones([shape[0], shape[1], new_h, new_w])
        expected_backward = py_bilinear_resize_backward(data_np, out_grads, mode)
        assert_array_equal(new_shape_desired, new_shape_got, "Desired and got shapes are not equal. {} vs {}".format(
            str(new_shape_desired.tolist()), str(new_shape_got.tolist())))
        assert_almost_equal(y.asnumpy(), expected, 1e-3, 0)
        if mode != 'like':
            resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, None, scale_height=scale_height, scale_width=scale_width, mode=mode)
            check_symbolic_forward(resize_sym, [data_np], [expected], rtol=1e-3, atol=1e-5)
            check_symbolic_backward(resize_sym, [data_np], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
            check_numeric_gradient(resize_sym, [data_np], rtol=1e-2, atol=1e-4)
        else:
            data_sym_like = mx.sym.var('data_like')
            resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, data_sym_like, mode=mode)
            date_np_like = x_1.asnumpy()
            check_symbolic_forward(resize_sym, [data_np, date_np_like], [expected], rtol=1e-3, atol=1e-5)
            check_symbolic_backward(resize_sym, [data_np, date_np_like], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
            check_numeric_gradient(resize_sym, [data_np, date_np_like], rtol=1e-2, atol=1e-4)
    shape = (2, 2, 10, 10)
    check_bilinear_resize_op(shape, 5, 5)
    check_bilinear_resize_op(shape, 10, 10)
    check_bilinear_resize_op(shape, 15, 15)
    check_bilinear_resize_op(shape, 3, 7)
    check_bilinear_resize_op(shape, 13, 17)
    # Exercise all sizing modes on an even-sized input...
    shape = (2, 2, 20, 20)
    check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
    check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
    check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
    check_bilinear_resize_modes_op(shape, mode='to_even_down')
    check_bilinear_resize_modes_op(shape, mode='to_even_up')
    check_bilinear_resize_modes_op(shape, mode='to_odd_down')
    check_bilinear_resize_modes_op(shape, mode='to_odd_up')
    # ...and on an odd-sized input.
    shape = (2, 2, 21, 21)
    check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
    check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
    check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
    check_bilinear_resize_modes_op(shape, mode='to_even_down')
    check_bilinear_resize_modes_op(shape, mode='to_even_up')
    check_bilinear_resize_modes_op(shape, mode='to_odd_down')
    check_bilinear_resize_modes_op(shape, mode='to_odd_up')
    # 'like' mode in both directions (down-size and up-size).
    shape_0 = (2, 2, 21, 21)
    shape_1 = (2, 2, 10, 10)
    check_bilinear_resize_modes_op(shape_0, shape_1=shape_1, mode='like')
    check_bilinear_resize_modes_op(shape_1, shape_1=shape_0, mode='like')
    check_bilinear_resize_align_corners_op()
def test_multi_proposal_op():
    """contrib.MultiProposal must match per-sample contrib.Proposal results.

    Forward: batched MultiProposal output is compared against running the
    single-sample Proposal op once per batch element.  Backward: the op is
    non-differentiable, so all input gradients must be zero.

    NOTE(review): unlike the surrounding tests this one has no @with_seed()
    decorator — possibly intentional, worth confirming.
    """
    # parameters
    feature_stride = 16
    scales = (8, 16, 32)
    ratios = (0.5, 1, 2)
    rpn_pre_nms_top_n = 12000
    rpn_post_nms_top_n = 2000
    threshold = 0.7
    rpn_min_size = 16
    batch_size = 20
    # feature-map side length for a ~1000px image at stride 16
    feat_len = (1000 + 15) // 16
    H, W = feat_len, feat_len
    num_anchors = len(scales) * len(ratios)
    count_anchors = H * W * num_anchors
    '''
    cls_prob: (batch_size, 2 * num_anchors, H, W)
    bbox_pred: (batch_size, 4 * num_anchors, H, W)
    im_info: (batch_size, 3)
    '''
    cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = np.float32)
    bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = np.float32)
    im_info = mx.nd.empty((batch_size, 3), dtype = np.float32)
    cls_prob = mx.nd.array(np.random.random(cls_prob.shape))
    bbox_pred = mx.nd.array(np.random.random(bbox_pred.shape))
    # Randomize per-image (height, width, scale) rows of im_info.
    for i in range(batch_size):
        im_size = np.random.randint(100, feat_len * feature_stride, size = (2,))
        im_scale = np.random.randint(70, 100) / 100.0
        im_info[i, :] = [im_size[0], im_size[1], im_scale]
    def get_sub(arr, i):
        # Slice out sample i while keeping a leading batch dim of 1.
        new_shape = list(arr.shape)
        new_shape[0] = 1
        res = arr[i].reshape(new_shape)
        return res
    def check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
        # Reference: run single-sample Proposal once per batch element.
        single_proposal = []
        single_score = []
        for i in range(batch_size):
            rois, score = mx.nd.contrib.Proposal(
                cls_prob = get_sub(cls_prob, i),
                bbox_pred = get_sub(bbox_pred, i),
                im_info = get_sub(im_info, i),
                feature_stride = feature_stride,
                scales = scales,
                ratios = ratios,
                rpn_pre_nms_top_n = rpn_pre_nms_top_n,
                rpn_post_nms_top_n = rpn_post_nms_top_n,
                threshold = threshold,
                rpn_min_size = rpn_min_size, output_score = True)
            single_proposal.append(rois)
            single_score.append(score)
        multi_proposal, multi_score = mx.nd.contrib.MultiProposal(
            cls_prob = cls_prob,
            bbox_pred = bbox_pred,
            im_info = im_info,
            feature_stride = feature_stride,
            scales = scales,
            ratios = ratios,
            rpn_pre_nms_top_n = rpn_pre_nms_top_n,
            rpn_post_nms_top_n = rpn_post_nms_top_n,
            threshold = threshold,
            rpn_min_size = rpn_min_size, output_score = True)
        single_proposal = mx.nd.stack(*single_proposal).reshape(multi_proposal.shape)
        single_score = mx.nd.stack(*single_score).reshape(multi_score.shape)
        single_proposal_np = single_proposal.asnumpy()
        multi_proposal_np = multi_proposal.asnumpy()
        single_score_np = single_score.asnumpy()
        multi_score_np = multi_score.asnumpy()
        # check rois x1,y1,x2,y2
        assert np.allclose(single_proposal_np[:, 1:], multi_proposal_np[:, 1:])
        # check rois batch_idx (column 0 must hold the sample index)
        for i in range(batch_size):
            start = i * rpn_post_nms_top_n
            end = start + rpn_post_nms_top_n
            assert (multi_proposal_np[start:end, 0] == i).all()
        # check score
        assert np.allclose(single_score_np, multi_score_np)
    def check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
        # The proposal op is non-differentiable: all gradients must be zero.
        im_info_sym = mx.sym.Variable('im_info')
        cls_prob_sym = mx.sym.Variable('cls_prob')
        bbox_pred_sym = mx.sym.Variable('bbox_pred')
        sym = mx.sym.contrib.MultiProposal(
            cls_prob = cls_prob_sym,
            bbox_pred = bbox_pred_sym,
            im_info = im_info_sym,
            feature_stride = feature_stride,
            scales = scales,
            ratios = ratios,
            rpn_pre_nms_top_n = rpn_pre_nms_top_n,
            rpn_post_nms_top_n = rpn_post_nms_top_n,
            threshold = threshold,
            rpn_min_size = rpn_min_size, output_score = False)
        location = [cls_prob.asnumpy(), bbox_pred.asnumpy(), im_info.asnumpy()]
        expected = [np.zeros_like(e) for e in location]
        out_grads = [np.ones((rpn_post_nms_top_n, 5))]
        check_symbolic_backward(sym, location, out_grads, expected)
    check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
    check_forward(rpn_pre_nms_top_n, 1500)
    check_forward(1000, 500)
    check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
@with_seed()
def test_quadratic_function():
    """contrib.quadratic (a*x^2 + b*x + c): imperative/symbolic forward,
    analytic backward, and finite-difference gradient checks."""
    def quad_np(x, a, b, c):
        return a * x**2 + b * x + c

    # Random coefficients, drawn in the same order as before for seed parity.
    a, b, c = (np.random.random_sample() for _ in range(3))
    data = mx.symbol.Variable('data')
    quad_sym = mx.sym.contrib.quadratic(data=data, a=a, b=b, c=c)
    for dtype in [np.float16, np.float32, np.float64]:
        tol = 1e-2 if dtype is np.float16 else 1e-5
        for ndim in range(1, 6):
            shape = rand_shape_nd(ndim, 5)
            data_np = np.random.randn(*shape).astype(dtype)
            expected = quad_np(data_np, a, b, c)
            grad_expected = 2 * a * data_np + b  # analytic d/dx
            # imperative forward
            output = mx.nd.contrib.quadratic(mx.nd.array(data_np), a=a, b=b, c=c)
            assert_almost_equal(output, expected, rtol=tol, atol=tol)
            # symbolic forward
            check_symbolic_forward(quad_sym, [data_np], [expected], rtol=tol, atol=tol)
            # symbolic backward against the analytic gradient
            check_symbolic_backward(quad_sym, [data_np], [np.ones(expected.shape)],
                                    [grad_expected], rtol=tol, atol=tol)
            # finite-difference gradient check
            check_numeric_gradient(quad_sym, [data_np], atol=0.001)
def allclose_function(contexts):
    """Compare mx.nd.contrib.allclose against np.allclose on each context.

    Tolerances are randomly perturbed per dtype; one table row is printed
    per shape.  On a mismatch the violating elements are dumped for both
    sides before asserting failure.
    """
    def getRandom(base, percent = 1.):
        # Perturb `base` by up to +/- `percent` percent.
        return base * (1 + percent * (2 * np.random.random_sample() - 1.) / 100)

    title = 'exp'
    for ctx in contexts:
        title += ' cpu' if ctx == mx.cpu() else ' gpu'
    title += ' nElem shape'
    num_ctx = len(contexts)
    result = [False, False]
    for dtype in [np.float16, np.float32, np.float64]:
        rtol = getRandom(1e-2 if dtype is np.float16 else 1e-5)
        atol = getRandom(1e-4 if dtype is np.float16 else 1e-7)
        print('\nnumpy.{}: atol = {} rtol = {}'.format(dtype.__name__, atol, rtol))
        print(title)
        for ndim in range(1, 10):
            shape = rand_shape_nd(ndim, 8)
            a_np = np.random.randn(*shape).astype(dtype)
            # b is a tiny perturbation of a, so closeness is borderline.
            b_np = (a_np + np.random.randn(*shape).astype(dtype) / 10000000).astype(dtype)
            expected = np.allclose(a_np, b_np, rtol, atol)
            for n, ctx in enumerate(contexts):
                a_ctx = mx.nd.array(a_np, dtype = dtype, ctx=ctx)
                b_ctx = mx.nd.array(b_np, dtype = dtype, ctx=ctx)
                output = mx.nd.contrib.allclose(a_ctx, b_ctx, rtol=rtol, atol=atol)
                result[n] = output.asnumpy() == 1
                if expected != result[n]:
                    # Preparing the output of elements of the array, which are considered as "not close" AND
                    # corresponding elements of comparison CPU/GPU/Python vectors, which are considered as "close"
                    v_ctx = 'CPU' if ctx == mx.cpu() else 'GPU'
                    if expected:
                        v_cmp = 'Python'
                        a_b = a_ctx.asnumpy()
                        b_b = b_ctx.asnumpy()
                        a_g = np.asarray(a_np)
                        b_g = np.asarray(b_np)
                    else:
                        v_cmp = v_ctx
                        v_ctx = 'Python'
                        a_b = np.asarray(a_np)
                        b_b = np.asarray(b_np)
                        a_g = a_ctx.asnumpy()
                        b_g = b_ctx.asnumpy()
                    print('\n *** Violations found on %s, but not on %s side ***' % (v_ctx, v_cmp))
                    frmt = " a[{0:d}]: b[{0:d}]:" \
                           " abs(a[{0:d}]-b[{0:d}]) - atol + rtol*abs(b[{0:d}]):"
                    # Define the indices of all violations and corresponding values of coordinates
                    bad_indexes = np.abs(a_b - b_b) >= atol + rtol * abs(b_b)
                    a_values = [a_b[bad_indexes], a_g[bad_indexes]]
                    b_values = [b_b[bad_indexes], b_g[bad_indexes]]
                    idx = np.asarray(np.where(bad_indexes == True))
                    idx = idx.reshape(1, idx.size)
                    idx_flat = np.asarray(np.where(bad_indexes.flatten() == True)).flatten()
                    for i in range(len(a_values[0])):
                        flat_idx = idx_flat[i]
                        # NOTE(review): idx has shape (1, size) after reshape,
                        # so idx[i] is only valid for i == 0 — diagnostic-path
                        # indexing looks suspect; confirm before relying on it.
                        print('{}: index = {} flat_index = {}'.format('%4d'%i, idx[i], flat_idx))
                        print(frmt.format(flat_idx))
                        for j in range(2):
                            diff = np.abs(a_values[j][i]-b_values[j][i]) - atol + rtol*abs(b_values[j][i])
                            print('{}: {} {} {}'.format('%6s'%v_ctx, a_values[j][i], b_values[j][i], diff))
            if num_ctx == 1:
                print(' {0:d} {1:d} {2:10d} {3:}'.format(expected, result[0], np.prod(shape), shape))
            else:
                print(' {0:d} {1:d} {2:d} {3:10d} {4:}'.format(expected, result[0], result[1], np.prod(shape), shape))
            if expected != result[0] or num_ctx > 1 and expected != result[1]:
                assert False
@with_seed()
def test_allclose_function():
    """Run the allclose comparison harness on the default context only."""
    contexts = [default_context()]
    allclose_function(contexts)
@with_seed()
def test_histogram():
    """mx.nd.histogram / mx.sym.histogram against np.histogram.

    Covers both calling conventions: an integer bin count with an explicit
    range, and an explicit (monotonic) bin-edge array.
    """
    def f(x, bins=10, range=None):
        return np.histogram(x, bins, range=range)
    for ndim in range(1, 6):
        shape = rand_shape_nd(ndim)
        x = rand_ndarray(shape, stype='default', dtype=np.float64)
        # Explicit, uneven bin edges shared by the nd and np calls.
        mx_bins = mx.nd.array([-1.0, 0.5, 2.0, 4.5, 50.0], dtype=np.float64)
        np_bins = mx_bins.asnumpy()
        bin_cnt = random.randint(2, 10)
        bin_range = (-2.5, 2.5)
        mx_histo1, mx_bins1 = mx.nd.histogram(x, bins=bin_cnt, range=bin_range)
        np_histo1, np_bins1 = f(x.asnumpy(), bins=bin_cnt, range=bin_range)
        assert_almost_equal(mx_bins1, np_bins1)
        assert_almost_equal(mx_histo1, np_histo1, rtol=1e-3, atol=1e-5)
        mx_histo2, mx_bins2 = mx.nd.histogram(x, bins=mx_bins)
        np_histo2, np_bins2 = f(x.asnumpy(), bins=np_bins)
        assert_almost_equal(mx_histo2, np_histo2, rtol=1e-3, atol=1e-5)
        assert_almost_equal(mx_bins2, np_bins2, rtol=1e-3, atol=1e-5)
        # Symbolic path: bind and forward both variants, compare to NumPy.
        data = mx.sym.Variable("data")
        bins = mx.sym.Variable("bins")
        histo1 = mx.sym.histogram(a=data, bins=bin_cnt, range=bin_range)
        histo2 = mx.sym.histogram(a=data, bins=bins)
        executor1 = histo1.bind(ctx=default_context(), args={"data" : x})
        executor1.forward(is_train=False)
        assert_almost_equal(np_histo1, executor1.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo1", "FORWARD_histo1"), equal_nan=False)
        executor2 = histo2.bind(ctx=default_context(), args={"data" : x, "bins" : mx_bins})
        executor2.forward(is_train=False)
        assert_almost_equal(np_histo2, executor2.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo2", "FORWARD_histo2"), equal_nan=False)
def test_op_output_names_monitor():
    """Verify the executor monitor reports the expected *output* names.

    With monitor_all=False the callback should see only op outputs (not
    inputs or weights).  Each symbol below is forward-executed and the
    recorded names are compared against the expected list.
    """
    def check_name(op_sym, expected_names):
        output_names = []

        def get_output_names_callback(name, arr):
            output_names.append(py_str(name))

        op_exe = op_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
        op_exe.set_monitor_callback(get_output_names_callback, monitor_all=False)
        try:
            op_exe.forward()
            mx.nd.waitall()
        except mx.base.MXNetError:
            # skip errors since test is to check output names
            pass
        for output_name, expected_name in zip(output_names, expected_names):
            assert output_name == expected_name

    is_windows = sys.platform.startswith('win')
    if (is_windows):
        # Windows doesn't support set environment variable on the fly, so disable it for now
        pass
    else:
        # Disable subgraph in case subgraph will replace symbol
        os.environ['MXNET_SUBGRAPH_BACKEND'] = "NONE"
    data = mx.sym.Variable('data', shape=(10, 3, 10, 10))
    conv_sym = mx.sym.Convolution(data, kernel=(2, 2), num_filter=1, name='conv')
    check_name(conv_sym, ['conv_output'])
    deconv_sym = mx.sym.Deconvolution(data, kernel=(2, 2), num_filter=1, name='deconv')
    check_name(deconv_sym, ['deconv_output'])
    fc_sym = mx.sym.FullyConnected(data, num_hidden=10, name='fc')
    check_name(fc_sym, ['fc_output'])
    lrn_sym = mx.sym.LRN(data, nsize=1, name='lrn')
    check_name(lrn_sym, ['lrn_output', 'lrn_tmp_norm'])
    act_sym = mx.sym.Activation(data, act_type='relu', name='act')
    check_name(act_sym, ['act_output'])
    cc_sym = mx.sym.concat(data, data, dim=0, name='concat')
    check_name(cc_sym, ['concat_output'])
    sm_sym = mx.sym.softmax(data, name='softmax')
    check_name(sm_sym, ['softmax_output'])
    sa_sym = mx.sym.SoftmaxActivation(data, name='softmax')
    check_name(sa_sym, ['softmax_output'])
    us_sym = mx.sym.UpSampling(data, scale=2, sample_type='nearest',
                               name='upsampling')
    check_name(us_sym, ['upsampling_output'])
    us_sym = mx.sym.Pooling(data, kernel=(2, 2), pool_type='avg',
                            name='pooling')
    check_name(us_sym, ['pooling_output'])
    # BUG FIX: the env var is only set on non-Windows platforms above, so an
    # unconditional `del os.environ[...]` raised KeyError on Windows.
    # pop() with a default is safe on both platforms.
    os.environ.pop('MXNET_SUBGRAPH_BACKEND', None)
def test_op_all_names_monitor():
    """Verify that ``set_monitor_callback(..., monitor_all=True)`` reports the
    expected input *and* output tensor names for a representative set of ops.
    """
    def check_name(op_sym, expected_names):
        # Collect every name the monitor callback reports during one forward pass.
        output_names = []
        def get_output_names_callback(name, arr):
            output_names.append(py_str(name))
        op_exe = op_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
        op_exe.set_monitor_callback(get_output_names_callback, monitor_all=True)
        try:
            op_exe.forward()
            mx.nd.waitall()
        except mx.base.MXNetError:
            # skip errors since test is to check all names
            pass
        for output_name, expected_name in zip(output_names, expected_names):
            assert output_name == expected_name
    is_windows = sys.platform.startswith('win')
    if (is_windows):
        # Windows doesn't support set environment variable on the fly, so disable it for now
        pass
    else:
        # Disable subgraph in case subgraph will replace symbol
        os.environ['MXNET_SUBGRAPH_BACKEND'] = "NONE"
    data = mx.sym.Variable('data', shape=(10, 3, 10, 10))
    conv_sym = mx.sym.Convolution(data, kernel=(2, 2), num_filter=1, name='conv')
    check_name(conv_sym, ['data', 'conv_data', 'conv_weight', 'conv_weight', 'conv_bias', 'conv_bias', 'conv_output'])
    deconv_sym = mx.sym.Deconvolution(data, kernel=(2, 2), num_filter=1, name='deconv')
    check_name(deconv_sym, ['data', 'deconv_data', 'deconv_weight', 'deconv_weight', 'deconv_output'])
    fc_sym = mx.sym.FullyConnected(data, num_hidden=10, name='fc')
    check_name(fc_sym, ['data', 'fc_data', 'fc_weight', 'fc_weight', 'fc_bias', 'fc_bias', 'fc_output'])
    lrn_sym = mx.sym.LRN(data, nsize=1, name='lrn')
    check_name(lrn_sym, ['data', 'lrn_data', 'lrn_output', 'lrn_tmp_norm'])
    act_sym = mx.sym.Activation(data, act_type='relu', name='act')
    check_name(act_sym, ['data', 'act_input0', 'act_output'])
    cc_sym = mx.sym.concat(data, data, dim=0, name='concat')
    check_name(cc_sym, ['data', 'concat_arg0', 'data', 'concat_arg1', 'concat_output'])
    sm_sym = mx.sym.softmax(data, name='softmax')
    check_name(sm_sym, ['data', 'softmax_data', 'softmax_output'])
    length = mx.sym.Variable("length", shape=(10, 10, 10))
    sm_sym = mx.sym.softmax(data, length, axis=1, use_length=True, name='softmax')
    check_name(sm_sym, ['data', 'softmax_data', 'length', 'softmax_length', 'softmax_output'])
    sa_sym = mx.sym.SoftmaxActivation(data, name='softmax')
    check_name(sa_sym, ['data', 'softmax_input0', 'softmax_output'])
    us_sym = mx.sym.UpSampling(data, scale=2, sample_type='nearest',
                               name='upsampling')
    check_name(us_sym, ['data', 'upsampling_arg0', 'upsampling_output'])
    us_sym = mx.sym.Pooling(data, kernel=(2, 2), pool_type='avg',
                            name='pooling')
    check_name(us_sym, ['data', 'pooling_data', 'pooling_output'])
    # Use pop() with a default instead of `del`: on Windows the variable was
    # never set above, so a bare `del` would raise KeyError.
    os.environ.pop('MXNET_SUBGRAPH_BACKEND', None)
@with_seed()
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/13915")
def test_activation():
    """Compare mx.sym.Activation forward/backward against NumPy references.

    Each table entry supplies: the symbol factory, the NumPy forward function,
    the analytic gradient, and the [low, high] input sampling range.
    Tolerances are relaxed per dtype (float64 -> float16).
    """
    shapes = [(9,), (9, 10), (9, 10, 10), (1, 9, 10, 10)]
    dtype_l = [np.float64, np.float32, np.float16]
    rtol_l = [1e-7, 1e-6, 1e-2]
    atol_l = [1e-7, 1e-6, 1e-2]
    rtol_fd = 1e-5
    atol_fd = 1e-6
    num_eps = 1e-6
    unary_ops = {
        'relu': [lambda x: mx.sym.Activation(x, act_type='relu'),
                 lambda x: np.maximum(x, 0.),
                 lambda x: 1. * (x > 0.),
                 -5.0, 5.0],
        'sigmoid': [lambda x: mx.sym.Activation(x, act_type='sigmoid'),
                    lambda x: 1. / (np.exp(-x) + 1.),
                    lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
                    -3.0, 3.0],
        'tanh': [lambda x: mx.sym.Activation(x, act_type='tanh'),
                 lambda x: np.tanh(x),
                 lambda x: 1. - np.tanh(x) ** 2,
                 -4.0, 4.0],
        'softrelu': [lambda x: mx.sym.Activation(x, act_type='softrelu'),
                     lambda x: np.log(1. + np.exp(x)),
                     lambda x: 1. - 1 / (1 + np.exp(x)),
                     -3.0, 3.0],
        'softsign': [lambda x: mx.sym.Activation(x, act_type='softsign'),
                     lambda x: x / (1. + np.abs(x)),
                     lambda x: 1. / np.square(1. + np.abs(x)),
                     -3.0, 3.0],
    }
    # Loop over operators
    for name, op in unary_ops.items():
        # Loop over shapes
        for shape in shapes:
            # Loop over dtypes together with their matching tolerances
            for dtype, rtol, atol in zip(dtype_l, rtol_l, atol_l):
                compare_forw_backw_unary_op(
                    name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
                    dtype)
            # Finite difference testing
            finite_diff_unary_op(
                name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
@with_seed()
def test_ravel():
    """Round-trip ravel_multi_index / unravel_index against NumPy.

    be aware that check_symbolic_forward will use float type internally
    for the arrays and that limits the representable flat index range.
    Taking dim==4 and a range of [0,..,100] for the data can already
    cause precision issues and break this test.
    """
    for dim in [1, 2, 3, 4]:
        data = np.random.randint(50, size=(dim, 500))
        # Shape just large enough to contain every sampled coordinate.
        shape = tuple(np.add(np.amax(data, axis=1), [1]))
        a = mx.sym.Variable('a')
        ravel_npy = np.ravel_multi_index(data, shape)
        b = mx.sym.ravel_multi_index(a, shape=shape)
        check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
        c = mx.sym.unravel_index(a, shape=shape)
        check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
        # Test with leading dimension set to -1.
        # (Removed a dead `shape2 = shape` assignment that was immediately
        # overwritten by the line below.)
        shape2 = (-1,)+shape[1:]
        b = mx.sym.ravel_multi_index(a, shape=shape2)
        check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
        c = mx.sym.unravel_index(a, shape=shape2)
        check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
def test_context_num_gpus():
    """num_gpus() must return a non-negative count on both GPU and CPU hosts."""
    try:
        # The test runs on GPU and CPU hosts alike, so only a lower bound
        # can be asserted here.
        gpu_count = mx.context.num_gpus()
        assert gpu_count >= 0
    except mx.MXNetError as e:
        # On a CPU-only host CUDA sometimes cannot determine the number of
        # GPUs; tolerate that specific failure and re-raise anything else.
        if "CUDA" not in str(e):
            raise
@with_seed()
def test_op_roi_align():
    """Validate mx.nd.contrib.ROIAlign against a pure-NumPy reference.

    Checks forward values and gradients w.r.t. the input data (the reference
    returns a zero gradient for the rois), for plain and position-sensitive
    pooling, plus a numeric-gradient check on the symbolic op.
    """
    # The reference implementation is kept in float32 throughout to match
    # the operator's arithmetic exactly.
    T = np.float32
    def assert_same_dtype(dtype_a, dtype_b):
        '''
        Assert whether the two data type are the same
        Parameters
        ----------
        dtype_a, dtype_b: type
            Input data types to compare
        '''
        assert dtype_a == dtype_b,\
            TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
    def bilinear_interpolate(bottom, height, width, y, x):
        # Sample `bottom` bilinearly at the (possibly fractional) point (y, x).
        # Returns the value and the gradient contributions as a list of
        # (row, col, weight) triples for the four sampled corners.
        if y < -1.0 or y > height or x < -1.0 or x > width:
            # Completely outside the feature map: zero value, no gradient.
            return T(0.0), []
        x = T(max(0.0, x))
        y = T(max(0.0, y))
        x_low = int(x)
        y_low = int(y)
        if x_low >= width - 1:
            # Clamp to the last column; both corners collapse onto it.
            x_low = x_high = width - 1
            x = T(x_low)
        else:
            x_high = x_low + 1
        if y_low >= height - 1:
            # Clamp to the last row; both corners collapse onto it.
            y_low = y_high = height - 1
            y = T(y_low)
        else:
            y_high = y_low + 1
        # Fractional offsets (l*) and their complements (h*).
        ly = y - T(y_low)
        lx = x - T(x_low)
        hy = T(1.0) - ly
        hx = T(1.0) - lx
        v1 = bottom[y_low, x_low]
        v2 = bottom[y_low, x_high]
        v3 = bottom[y_high, x_low]
        v4 = bottom[y_high, x_high]
        w1 = hy * hx
        w2 = hy * lx
        w3 = ly * hx
        w4 = ly * lx
        # Guard against accidental promotion out of float32.
        assert_same_dtype(w1.dtype, T)
        assert_same_dtype(w2.dtype, T)
        assert_same_dtype(w3.dtype, T)
        assert_same_dtype(w4.dtype, T)
        val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
        assert_same_dtype(val.dtype, T)
        grad = [(y_low, x_low, w1), (y_low, x_high, w2),
                (y_high, x_low, w3), (y_high, x_high, w4)
                ]
        return val, grad
    def roialign_forward_backward(data, rois, pooled_size, spatial_scale, sampling_ratio,
                                  position_sensitive, dy):
        # NumPy reference forward + backward pass for (PS-)ROIAlign.
        # Returns (out, [d_data, d_rois]); d_rois stays zero.
        N, C, H, W = data.shape
        R = rois.shape[0]
        PH, PW = pooled_size
        assert rois.ndim == 2,\
            ValueError(
                'The ndim of rois should be 2 rather than %d' % rois.ndim)
        assert rois.shape[1] == 5,\
            ValueError(
                'The length of the axis 1 of rois should be 5 rather than %d' % rois.shape[1])
        assert_same_dtype(data.dtype, T)
        assert_same_dtype(rois.dtype, T)
        # Position-sensitive pooling consumes PH*PW input channels per
        # output channel.
        C_out = C // PH // PW if position_sensitive else C
        out = np.zeros((R, C_out, PH, PW), dtype=T)
        dx = np.zeros_like(data)
        drois = np.zeros_like(rois)
        for r in range(R):
            # rois row: [batch_index, start_w, start_h, end_w, end_h].
            batch_ind = int(rois[r, 0])
            sw, sh, ew, eh = rois[r, 1:5] * T(spatial_scale)
            # ROI extents are floored at 1 pixel.
            roi_w = T(max(ew - sw, 1.0))
            roi_h = T(max(eh - sh, 1.0))
            bin_h = roi_h / T(PH)
            bin_w = roi_w / T(PW)
            bdata = data[batch_ind]
            if sampling_ratio > 0:
                roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
            else:
                # Adaptive sampling grid: roughly one sample per input pixel.
                roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
                roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
            count = T(roi_bin_grid_h * roi_bin_grid_w)
            for c in range(C_out):
                for ph in range(PH):
                    for pw in range(PW):
                        val = T(0.0)
                        # For PS pooling each output bin reads its own channel.
                        c_in = c * PH * PW + ph * PW + pw if position_sensitive else c
                        for iy in range(roi_bin_grid_h):
                            y = sh + T(ph) * bin_h + (T(iy) + T(0.5)) * \
                                bin_h / T(roi_bin_grid_h)
                            for ix in range(roi_bin_grid_w):
                                x = sw + T(pw) * bin_w + (T(ix) + T(0.5)) * \
                                    bin_w / T(roi_bin_grid_w)
                                v, g = bilinear_interpolate(
                                    bdata[c_in], H, W, y, x)
                                assert_same_dtype(v.dtype, T)
                                val += v
                                # compute grad
                                for qy, qx, qw in g:
                                    assert_same_dtype(qw.dtype, T)
                                    dx[batch_ind, c_in, qy, qx] += dy[r,
                                                                      c, ph, pw] * qw / count
                        # Output is the average over the sampling grid.
                        out[r, c, ph, pw] = val / count
        assert_same_dtype(out.dtype, T)
        return out, [dx, drois]
    def test_roi_align_value(sampling_ratio=0, position_sensitive=False):
        # Forward + backward against the NumPy reference with random rois.
        ctx = default_context()
        dtype = np.float32
        dlen = 224
        N, C, H, W = 5, 3, 16, 16
        R = 7
        pooled_size = (3, 4)
        # PS pooling needs PH*PW times more input channels.
        C = C * pooled_size[0] * pooled_size[1] if position_sensitive else C
        spatial_scale = H * 1.0 / dlen
        data = mx.nd.array(
            np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
        # Random boxes specified as center/size, converted to corner format.
        center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
        wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
        batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
        pos = mx.nd.concat(center_xy - wh / 2, center_xy + wh / 2, dim=1)
        rois = mx.nd.concat(batch_ind, pos, dim=1)
        data.attach_grad()
        rois.attach_grad()
        with mx.autograd.record():
            output = mx.nd.contrib.ROIAlign(data, rois, pooled_size=pooled_size,
                                            spatial_scale=spatial_scale, sample_ratio=sampling_ratio,
                                            position_sensitive=position_sensitive)
        C_out = C // pooled_size[0] // pooled_size[1] if position_sensitive else C
        dy = mx.nd.random.uniform(-1, 1, (R, C_out) +
                                  pooled_size, ctx=ctx, dtype=dtype)
        output.backward(dy)
        real_output, [dx, drois] = roialign_forward_backward(data.asnumpy(), rois.asnumpy(), pooled_size,
                                                             spatial_scale, sampling_ratio,
                                                             position_sensitive, dy.asnumpy())
        assert_almost_equal(output, real_output, atol=1e-3)
        assert_almost_equal(data.grad, dx, atol=1e-3)
        # ROIAlign propagates no gradient to the rois (drois is all zeros).
        assert_almost_equal(rois.grad, drois, atol=1e-3)
    # modified from test_roipooling()
    def test_roi_align_autograd(sampling_ratio=0):
        # Numeric-gradient check of the symbolic op with fixed rois.
        ctx = default_context()
        data = mx.symbol.Variable(name='data')
        rois = mx.symbol.Variable(name='rois')
        test = mx.symbol.contrib.ROIAlign(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1,
                                          sample_ratio=sampling_ratio)
        x1 = np.random.rand(4, 1, 12, 12).astype('float64')
        x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2],
                       [1, 3.1, 1.1, 5.2, 10.2]], dtype='float64')
        check_numeric_gradient(sym=test, location=[x1, x2],
                               grad_nodes={'data': 'write', 'rois': 'null'},
                               numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
        check_numeric_gradient(sym=test, location=[x1, x2],
                               grad_nodes={'data': 'add', 'rois': 'null'},
                               numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
    test_roi_align_value()
    test_roi_align_value(sampling_ratio=2)
    test_roi_align_value(position_sensitive=True)
    test_roi_align_autograd()
@with_seed()
def test_op_rroi_align():
    """Validate mx.nd.contrib.RROIAlign (rotated-ROI align, forward only)
    against a pure-NumPy reference. CPU only; the GPU path is skipped."""
    # Keep the reference in float32 to match the operator's arithmetic.
    T = np.float32
    def assert_same_dtype(dtype_a, dtype_b):
        '''
        Assert whether the two data type are the same
        Parameters
        ----------
        dtype_a, dtype_b: type
            Input data types to compare
        '''
        assert dtype_a == dtype_b,\
            TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
    def bilinear_interpolate(bottom, height, width, y, x):
        # Sample `bottom` bilinearly at (y, x); value only, no gradient
        # (unlike the ROIAlign test, this test checks only the forward pass).
        if y < -1.0 or y > height or x < -1.0 or x > width:
            return T(0.0)
        x = T(max(0.0, x))
        y = T(max(0.0, y))
        x_low = int(x)
        y_low = int(y)
        if x_low >= width - 1:
            # Clamp to the last column; both corners collapse onto it.
            x_low = x_high = width - 1
            x = T(x_low)
        else:
            x_high = x_low + 1
        if y_low >= height - 1:
            # Clamp to the last row; both corners collapse onto it.
            y_low = y_high = height - 1
            y = T(y_low)
        else:
            y_high = y_low + 1
        # Fractional offsets (l*) and their complements (h*).
        ly = y - T(y_low)
        lx = x - T(x_low)
        hy = T(1.0) - ly
        hx = T(1.0) - lx
        v1 = bottom[y_low, x_low]
        v2 = bottom[y_low, x_high]
        v3 = bottom[y_high, x_low]
        v4 = bottom[y_high, x_high]
        w1 = hy * hx
        w2 = hy * lx
        w3 = ly * hx
        w4 = ly * lx
        # Guard against accidental promotion out of float32.
        assert_same_dtype(w1.dtype, T)
        assert_same_dtype(w2.dtype, T)
        assert_same_dtype(w3.dtype, T)
        assert_same_dtype(w4.dtype, T)
        val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
        assert_same_dtype(val.dtype, T)
        return val
    def rroialign_forward(data, rois, pooled_size, spatial_scale, sampling_ratio):
        # NumPy reference forward pass for rotated ROIAlign.
        # rois rows: [batch_index, center_w, center_h, width, height, angle_deg].
        N, C, H, W = data.shape
        R = rois.shape[0]
        PH, PW = pooled_size
        assert rois.ndim == 2,\
            ValueError(
                'The ndim of rois should be 2 rather than %d' % rois.ndim)
        assert rois.shape[1] == 6,\
            ValueError(
                'The length of the axis 1 of rois should be 6 rather than %d' % rois.shape[1])
        assert_same_dtype(data.dtype, T)
        assert_same_dtype(rois.dtype, T)
        out = np.zeros((R, C, PH, PW), dtype=T)
        for r in range(R):
            batch_ind = int(rois[r, 0])
            roi_center_w, roi_center_h, roi_w, roi_h = rois[r, 1:5] * T(spatial_scale)
            # Rotation angle is given in degrees; convert to radians.
            roi_theta = T(rois[r,5] * np.pi / 180.0)
            # ROI extents are floored at 1 pixel.
            roi_w = T(max(roi_w, 1.0))
            roi_h = T(max(roi_h, 1.0))
            bin_h = roi_h / T(PH)
            bin_w = roi_w / T(PW)
            bdata = data[batch_ind]
            if sampling_ratio > 0:
                roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
            else:
                # Adaptive sampling grid: roughly one sample per input pixel.
                roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
                roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
            count = T(roi_bin_grid_h * roi_bin_grid_w)
            # Sampling coordinates are computed relative to the ROI center,
            # then rotated by theta and translated back.
            roi_start_h = T(-roi_h / 2.0)
            roi_start_w = T(-roi_w / 2.0)
            for c in range(C):
                for ph in range(PH):
                    for pw in range(PW):
                        val = T(0.0)
                        for iy in range(roi_bin_grid_h):
                            yy = roi_start_h + T(ph) * bin_h + (T(iy) + T(0.5)) * \
                                bin_h / T(roi_bin_grid_h)
                            for ix in range(roi_bin_grid_w):
                                xx = roi_start_w + T(pw) * bin_w + (T(ix) + T(0.5)) * \
                                    bin_w / T(roi_bin_grid_w)
                                # Rotate (xx, yy) by theta around the ROI center.
                                x = xx * np.cos(roi_theta, dtype=T) + yy * np.sin(roi_theta, dtype=T) + roi_center_w
                                y = yy * np.cos(roi_theta, dtype=T) - xx * np.sin(roi_theta, dtype=T) + roi_center_h
                                v = bilinear_interpolate(
                                    bdata[c], H, W, y, x)
                                assert_same_dtype(v.dtype, T)
                                val += v
                        # Output is the average over the sampling grid.
                        out[r, c, ph, pw] = val / count
        assert_same_dtype(out.dtype, T)
        return out
    def test_rroi_align_value(sampling_ratio=-1):
        ctx = default_context()
        if ctx.device_type == 'gpu':
            print('skipped testing rroi align for gpu since it is not supported yet')
            return
        dtype = np.float32
        dlen = 224
        N, C, H, W = 5, 3, 16, 16
        R = 7
        pooled_size = (3, 4)
        spatial_scale = H * 1.0 / dlen
        data = mx.nd.array(
            np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
        # Random rotated boxes: center, size, and angle in [0, 180) degrees.
        center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
        wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
        theta = mx.nd.random.uniform(0, 180, (R,1), ctx=ctx, dtype=dtype)
        batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
        pos = mx.nd.concat(center_xy, wh, theta, dim=1)
        rois = mx.nd.concat(batch_ind, pos, dim=1)
        output = mx.nd.contrib.RROIAlign(data, rois, pooled_size=pooled_size,
                                         spatial_scale=spatial_scale, sampling_ratio=sampling_ratio)
        real_output = rroialign_forward(data.asnumpy(), rois.asnumpy(), pooled_size,
                                        spatial_scale, sampling_ratio)
        assert_almost_equal(output.asnumpy(), real_output, atol=1e-3)
    test_rroi_align_value()
    test_rroi_align_value(sampling_ratio=2)
@with_seed()
def test_diag():
    """Check mx.nd.diag / mx.sym.diag against numpy.diag / numpy.diagonal
    for 1-D, 2-D and 4-D inputs, forward and backward (numeric gradient)."""
    # Test 2d input
    h = np.random.randint(2,9)
    w = np.random.randint(2,9)
    a_np = np.random.random((h, w)).astype(np.float32)
    a = mx.nd.array(a_np).astype('float32')
    for k in [0, 1, -1, np.random.randint(-min(h,w) + 1, min(h,w))]:
        assert_almost_equal(mx.nd.diag(a, k=k), np.diag(a_np, k=k))
    # invalid k: an offset beyond the matrix extent must raise
    k = max(h,w) + 1
    assertRaises(MXNetError, mx.nd.diag, a, k=k)
    # Test 2d backward, k=0
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data)
    check_numeric_gradient(diag_sym, [a_np])
    # Test 2d backward, k=1
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data, k=1)
    check_numeric_gradient(diag_sym, [a_np])
    # Test 2d backward, k=-1
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data, k=-1)
    check_numeric_gradient(diag_sym, [a_np])
    # test 1d input (diag of a vector builds a matrix, like numpy.diag)
    d = np.random.randint(2,9)
    a_np = np.random.random((d))
    a = mx.nd.array(a_np)
    # k is random
    k = np.random.randint(-d,d)
    assert_almost_equal(mx.nd.diag(a, k=k), np.diag(a_np, k=k))
    # Test 1d backward, k=0
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data)
    check_numeric_gradient(diag_sym, [a_np])
    # Test 1d backward, k=1
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data, k=1)
    check_numeric_gradient(diag_sym, [a_np])
    # Test 1d backward, k=-1
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data, k=-1)
    check_numeric_gradient(diag_sym, [a_np])
    # Test 4d input (diag with axis1/axis2 behaves like numpy.diagonal)
    x1 = np.random.randint(3,9)
    x2 = np.random.randint(3,9)
    x3 = np.random.randint(3,9)
    x4 = np.random.randint(3,9)
    a_np = np.random.random((x1, x2, x3, x4)).astype(np.float32)
    a = mx.nd.array(a_np).astype('float32')
    # k = 0, axis1=0, axis2=1
    r = mx.nd.diag(data=a, k=0, axis1=0, axis2=1)
    assert_almost_equal(r, np.diagonal(a_np, offset=0, axis1=0, axis2=1))
    # k = 1, axis1=1, axis2=0
    r = mx.nd.diag(data=a, k=1, axis1=1, axis2=0)
    assert_almost_equal(r, np.diagonal(a_np, offset=1, axis1=1, axis2=0))
    # k = -1, axis1=1, axis2=3
    r = mx.nd.diag(data=a, k=-1, axis1=1, axis2=3)
    assert_almost_equal(r, np.diagonal(a_np, offset=-1, axis1=1, axis2=3))
    # k = 2, axis1=-2, axis2=0 (negative axes count from the end)
    r = mx.nd.diag(data=a, k=2, axis1=-2, axis2=0)
    assert_almost_equal(r, np.diagonal(a_np, offset=2, axis1=-2, axis2=0))
    # Test 4d backward, k=0, axis1=3, axis2=0
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data, k=0, axis1=3, axis2=0)
    check_numeric_gradient(diag_sym, [a_np])
    # Test 4d backward, k=1, axis1=1, axis2=2
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data, k=1, axis1=1, axis2=2)
    check_numeric_gradient(diag_sym, [a_np])
    # Test 4d backward, k=-1, axis1=2, axis2=0
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data, k=-1, axis1=2, axis2=0)
    check_numeric_gradient(diag_sym, [a_np])
    # Test 4d backward, k=-2, axis1=1, axis2=-1
    data = mx.sym.Variable('data')
    diag_sym = mx.sym.diag(data=data, k=-2, axis1=1, axis2=-1)
    check_numeric_gradient(diag_sym, [a_np])
@with_seed()
def test_depthtospace():
    """depth_to_space: compare against a NumPy reference, check forward,
    backward and numeric gradients, and verify invalid inputs raise."""
    def f(x, blocksize):
        # NumPy reference: unfold block*block groups of channels into
        # blocksize-by-blocksize spatial neighborhoods (inverse of
        # space_to_depth).
        b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
        tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
        tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
        y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])
        return y
    block = random.randint(2, 4)
    rand_mul1 = random.randint(1, 4)
    n = random.randint(1, 5)
    # Channel count must be a multiple of block**2 for depth_to_space.
    c = block * block * rand_mul1
    h = random.randint(1, 5)
    w = random.randint(1, 5)
    shape_inp = (n, c, h, w)
    data = rand_ndarray(shape_inp, 'default')
    data_np = data.asnumpy()
    expected = f(data_np, block)
    output = mx.nd.depth_to_space(data, block)
    assert_almost_equal(output, expected, atol=1e-3, rtol=1e-3)
    shape_out = (n, c // (block ** 2), h * block, w * block)
    data = mx.sym.Variable('data')
    dts_sym = mx.sym.depth_to_space(data, block)
    check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
    check_symbolic_forward(dts_sym, [data_np], [expected])
    check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
    def test_invalid_depth_dim():
        # Channel count not divisible by block**2 must be rejected.
        invalid_shape_inp = (n, block - 1, h, w)
        data = rand_ndarray(invalid_shape_inp, 'default')
        assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
    def test_invalid_space_dim():
        # A zero-sized spatial dimension must be rejected.
        invalid_shape_inp = (n, block ** 2, 0, block + 1)
        data = rand_ndarray(invalid_shape_inp, 'default')
        assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
    def test_invalid_block_size():
        # A block size of 0 must be rejected (shadows the outer `block`).
        block = 0
        invalid_shape_inp = (n , c, h, w)
        data = rand_ndarray(invalid_shape_inp, 'default')
        assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
    test_invalid_depth_dim()
    test_invalid_space_dim()
    test_invalid_block_size()
@with_seed()
def test_spacetodepth():
    """space_to_depth: compare against a NumPy reference, check forward,
    backward and numeric gradients, and verify invalid inputs raise."""
    def f(x, blocksize):
        # NumPy reference: fold blocksize-by-blocksize spatial neighborhoods
        # into the channel axis (inverse of depth_to_space).
        b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
        tmp = np.reshape(x, [b, c, h // blocksize, blocksize, w // blocksize, blocksize])
        tmp = np.transpose(tmp, [0, 3, 5, 1, 2, 4])
        y = np.reshape(tmp, [b, c * (blocksize**2), h // blocksize, w // blocksize])
        return y
    block = random.randint(2, 4)
    rand_mul1 = random.randint(1, 4)
    rand_mul2 = random.randint(1, 4)
    n = random.randint(1, 5)
    c = random.randint(1, 5)
    # Spatial dimensions must be multiples of the block size.
    h = block * rand_mul1
    w = block * rand_mul2
    shape_inp = (n, c, h, w)
    data = rand_ndarray(shape_inp, 'default')
    data_np = data.asnumpy()
    expected = f(data_np, block)
    output = mx.nd.space_to_depth(data, block)
    assert_almost_equal(output, expected, atol=1e-3, rtol=1e-3)
    shape_out = (n, c * (block ** 2), h // block, w // block)
    data = mx.sym.Variable('data')
    dts_sym = mx.sym.space_to_depth(data, block)
    check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
    check_symbolic_forward(dts_sym, [data_np], [expected])
    check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
    def test_invalid_space_dim():
        # A spatial dimension not divisible by the block size must be rejected.
        invalid_shape_inp = (n , c, block - 1, w)
        data = rand_ndarray(invalid_shape_inp, 'default')
        assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
    def test_invalid_block_size():
        # A block size of 0 must be rejected (shadows the outer `block`).
        block = 0
        invalid_shape_inp = (n, c, h, w)
        data = rand_ndarray(invalid_shape_inp, 'default')
        assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
    def test_invalid_depth_dim():
        # A zero-sized channel dimension must be rejected.
        invalid_shape_inp = (n, 0, h, w)
        data = rand_ndarray(invalid_shape_inp, 'default')
        assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
    test_invalid_space_dim()
    test_invalid_block_size()
    test_invalid_depth_dim()
@with_seed()
def test_softmax_cross_entropy():
    """softmax_cross_entropy must match a NumPy softmax + cross-entropy reference."""
    def reference_ce(probs, one_hot):
        # Total (not averaged) cross entropy over the whole batch.
        return np.sum(-np.log(probs) * one_hot)
    data = mx.sym.Variable('data')
    label = mx.sym.Variable('label')
    sym = mx.sym.softmax_cross_entropy(data=data, label=label)
    num_labels = random.randint(100, 200)
    batch_size = random.randint(100, 200)
    np_data = rand_ndarray((batch_size, num_labels), stype='default').asnumpy()
    np_sm = np_softmax(np_data)
    np_label = np.random.randint(0, num_labels, (batch_size, ))
    # One-hot encode the integer labels for the reference computation.
    np_one_hot_label = np.zeros((batch_size, num_labels))
    np_one_hot_label[np.arange(batch_size), np_label] = 1.
    expected = np.array([reference_ce(np_sm, np_one_hot_label)])
    check_symbolic_forward(sym, {'data' : np_data, 'label' : np_label},
                           [expected], rtol=1e-3, atol=1e-5)
@with_seed()
def test_split_v2():
    """split_v2 forward/backward must agree with numpy.split on random splits."""
    ndim = random.randint(2, 6)
    shape = rand_shape_nd(ndim)
    axis = random.randint(-ndim, ndim - 1)
    axis_size = shape[axis]
    # Pick a sorted tuple of split points strictly inside the axis.
    n_points = random.randint(0, axis_size - 1)
    split_points = tuple(sorted(random.sample(list(range(1, axis_size)), n_points)))
    mx_data = rand_ndarray(shape)
    np_data = mx_data.asnumpy()
    np_out = np.split(np_data, indices_or_sections=split_points, axis=axis)
    data = mx.sym.Variable("data")
    sym = mx.sym.split_v2(data, indices_or_sections=split_points, axis=axis)
    check_symbolic_forward(sym, {"data": mx_data}, np_out, rtol=1e-3, atol=1e-5)
    # The backward of split is the concatenation of the output gradients.
    out_grad = [np.ones(piece.shape) for piece in np_out]
    check_symbolic_backward(sym, {"data": mx_data}, out_grad,
                            [np.concatenate(out_grad, axis=axis)])
@with_seed()
def test_moments():
    """mx.nd/sym.moments must match numpy mean/var over random axes,
    with and without keepdims, plus a numeric-gradient check."""
    dim = random.randint(2, 5)
    shape = rand_shape_nd(dim, dim=5)
    axes = [i for i in range(dim)]
    # Reduce over a random non-empty subset of the axes.
    test_dims = random.sample(axes, random.randint(1, dim))
    test_axes = tuple(sorted(test_dims))
    np_a = np.random.uniform(-1.0, 1.0, shape)
    a = mx.nd.array(np_a)
    for keepdims in [True, False]:
        eps = 1e-3
        # Push near-zero values away from zero so the numeric gradient below
        # stays stable. NOTE(review): this mutates np_a in place AFTER `a`
        # was created, so `a` still holds the unclamped values; that is
        # harmless here because only the SHAPES of mx_mean/mx_var are
        # asserted — values are verified via the symbolic path on np_a.
        np_a[abs(np_a) < eps] = 2 * eps
        np_mean = np.mean(np_a, axis=test_axes, keepdims=keepdims)
        np_var = np.var(np_a, axis=test_axes, keepdims=keepdims)
        mx_mean, mx_var = mx.nd.moments(a, keepdims=keepdims, axes=test_axes)
        N = np_a.size / np_mean.size
        mx_sym = mx.sym.Variable("data")
        mx_moments = mx.sym.moments(mx_sym, axes=test_axes, keepdims=keepdims)
        # Sum the two outputs so one symbol exercises both mean and var heads.
        mx_test_sym = mx.sym.elemwise_add(mx_moments[0], mx_moments[1])
        if len(np_mean.shape) == 0:
            # Full reduction yields a NumPy scalar; reshape to mx's shape.
            np_mean = np_mean.reshape(mx_mean.shape)
            np_var = np_var.reshape(mx_var.shape)
        assert np_mean.shape == mx_mean.shape
        assert np_var.shape == mx_var.shape
        check_symbolic_forward(mx_test_sym, [np_a], [np_mean + np_var], rtol=1e-3, atol=1e-5)
        check_numeric_gradient(mx_test_sym, [np_a], numeric_eps=eps, rtol=1e-2, atol=2e-4)
@with_seed()
def test_invalid_kernel_size():
    """Correlation must reject a kernel size as large as the full input."""
    bad_kernel_size = 28
    lhs = mx.nd.array(np.random.rand(1, 1, 28, 28))
    rhs = mx.nd.array(np.random.rand(1, 1, 28, 28))
    assert_exception(mx.nd.Correlation, MXNetError, lhs, rhs,
                     kernel_size=bad_kernel_size)
@with_seed()
def test_valid_kernel_size():
    """Correlation with an admissible kernel size must not raise."""
    good_kernel_size = 9
    lhs = mx.nd.array(np.random.rand(1, 1, 28, 28))
    rhs = mx.nd.array(np.random.rand(1, 1, 28, 28))
    mx.nd.Correlation(lhs, rhs, kernel_size=good_kernel_size)
@with_seed()
def test_valid_max_pooling_pad_type_same():
    """'same' pooling convention must give output length ceil(input / stride)."""
    import math
    input_data = mx.nd.array(np.random.rand(1, 1, 10))
    stride = 2
    kernel = 2
    pooled = mx.nd.Pooling(input_data,
                           kernel=kernel,
                           stride=stride,
                           pad=(0, 0, 0),
                           pool_type='max',
                           name='pooling',
                           pooling_convention="same")
    expected_len = math.ceil(input_data.shape[2] / stride)
    assert(expected_len == pooled.shape[2])
@with_seed()
def test_invalid_max_pooling_pad_type_same():
    """Explicit padding must be rejected under the 'same' pooling convention."""
    # Removed an unused `import math` (only the sibling "valid" test uses it).
    input_data = mx.nd.array(np.random.rand(1, 1, 10))
    stride = 2
    kernel = 2
    pad = 2
    assert_exception(
        mx.nd.Pooling,
        MXNetError,
        input_data,
        stride=stride,
        kernel=kernel,
        pad=pad,
        pool_type='max',
        name='pooling',
        pooling_convention="same")
@with_seed()
def test_image_normalize():
    """mx.sym.image.normalize: forward/backward for 3-D (CHW) and 4-D (NCHW)
    inputs with per-channel and scalar mean/std.

    NOTE: chained `[:]` indexing on a NumPy array is a no-op (each `[:]`
    returns the whole array), so `x[:][:][c]` is exactly `x[c]` — i.e. it
    selects channel c of a CHW array; for the 4-D case `x[b][:][:][c]` is
    `x[b][c]`, batch b / channel c. The expressions below rely on that.
    """
    # Part 1 - Test 3D input with 3D mean/std
    shape_3d = (3, 28, 28)
    mean = (0, 1, 2)
    std = (3, 2, 1)
    data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
    data_expected_3d = data_in_3d.asnumpy()
    # Per-channel (x - mean) / std; see the chained-[:] note above.
    data_expected_3d[:][:][0] = data_expected_3d[:][:][0] / 3.0
    data_expected_3d[:][:][1] = (data_expected_3d[:][:][1] - 1.0) / 2.0
    data_expected_3d[:][:][2] = data_expected_3d[:][:][2] - 2.0
    data = mx.symbol.Variable('data')
    img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
    # check forward
    check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
                           rtol=1e-5, atol=1e-5)
    # Gradient is 1/std_dev
    grad_expected_3d = np.ones(shape_3d)
    grad_expected_3d[:][:][0] = 1 / 3.0
    grad_expected_3d[:][:][1] = 1 / 2.0
    grad_expected_3d[:][:][2] = 1 / 1.0
    # check backward
    check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
                            expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
    # check backward using finite difference
    check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
    # Part 2 - Test 4D input with 3D mean/std
    shape_4d = (2, 3, 28, 28)
    data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
    data_expected_4d = data_in_4d.asnumpy()
    # Same per-channel normalization, applied to each batch element.
    data_expected_4d[0][:][:][0] = data_expected_4d[0][:][:][0] / 3.0
    data_expected_4d[0][:][:][1] = (data_expected_4d[0][:][:][1] - 1.0) / 2.0
    data_expected_4d[0][:][:][2] = data_expected_4d[0][:][:][2] - 2.0
    data_expected_4d[1][:][:][0] = data_expected_4d[1][:][:][0] / 3.0
    data_expected_4d[1][:][:][1] = (data_expected_4d[1][:][:][1] - 1.0) / 2.0
    data_expected_4d[1][:][:][2] = data_expected_4d[1][:][:][2] - 2.0
    # check forward (reuses img_norm_sym from Part 1)
    check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
                           rtol=1e-5, atol=1e-5)
    # Gradient is 1/std_dev
    grad_expected_4d = np.ones(shape_4d)
    grad_expected_4d[0][:][:][0] = 1 / 3.0
    grad_expected_4d[0][:][:][1] = 1 / 2.0
    grad_expected_4d[0][:][:][2] = 1 / 1.0
    grad_expected_4d[1][:][:][0] = 1 / 3.0
    grad_expected_4d[1][:][:][1] = 1 / 2.0
    grad_expected_4d[1][:][:][2] = 1 / 1.0
    # check backward
    check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
                            expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
    # check backward using finite difference
    check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
    # Part 3 - Test 3D input with scalar mean/std
    shape_3d = (3, 28, 28)
    mean = 1.0
    std = 2.0
    data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
    data_expected_3d = data_in_3d.asnumpy()
    # Scalar mean/std applies uniformly to every element.
    data_expected_3d[:][:][:] = (data_expected_3d[:][:][:] - 1.0) / 2.0
    data = mx.symbol.Variable('data')
    img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
    # check forward
    check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
                           rtol=1e-5, atol=1e-5)
    # Gradient is 1/std_dev
    grad_expected_3d = np.ones(shape_3d)
    grad_expected_3d[:][:][:] = 1 / 2.0
    # check backward
    check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
                            expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
    # check backward using finite difference
    check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
    # Part 4 - Test 4D input with scalar mean/std
    shape_4d = (2, 3, 28, 28)
    data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
    data_expected_4d = data_in_4d.asnumpy()
    data_expected_4d[:][:][:][:] = (data_expected_4d[:][:][:][:] - 1.0) / 2.0
    # check forward (reuses img_norm_sym from Part 3)
    check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
                           rtol=1e-5, atol=1e-5)
    # Gradient is 1/std_dev
    grad_expected_4d = np.ones(shape_4d)
    grad_expected_4d[:][:][:][:] = 1 / 2.0
    # check backward
    check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
                            expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
    # check backward using finite difference
    check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
@with_seed()
def test_index_array():
    """mx.sym.contrib.index_array must produce per-element index coordinates
    (like np.mgrid stacked on the last axis), with a zero backward gradient,
    including scalar and zero-size inputs under np_shape semantics."""
    def test_index_array_default():
        # Full index array: output shape is input shape + (ndim,).
        for shape in [(10,), (7, 5, 29), (5, 7, 11, 13, 17, 19)]:
            data = mx.symbol.Variable("data")
            index_array = mx.sym.contrib.index_array(data)
            input_array = np.ones(shape)
            mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
            expected = np.stack(mgrid, axis=-1)
            check_symbolic_forward(index_array, [input_array], [expected])
            check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
    @mx.use_np_shape
    def test_index_array_default_zero_dim():
        # Scalar input (shape ()) yields an empty index vector of shape (0,).
        data = mx.symbol.Variable("data")
        index_array = mx.sym.contrib.index_array(data)
        input_array = np.ones(())
        expected = np.zeros((0,))
        check_symbolic_forward(index_array, [input_array], [expected])
        check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
    @mx.use_np_shape
    def test_index_array_default_zero_size():
        # Zero-size input keeps zero-size output with the ndim axis appended.
        data = mx.symbol.Variable("data")
        index_array = mx.sym.contrib.index_array(data)
        input_array = np.ones((0, 0, 0))
        expected = np.zeros((0, 0, 0, 3))
        check_symbolic_forward(index_array, [input_array], [expected])
        check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
    def test_index_array_select_axes():
        # The `axes` argument selects (and orders) which coordinates to emit;
        # negative axes count from the end.
        shape = (5, 7, 11, 13, 17, 19)
        for axes in [(3,), (4, 1), (5, 1, 3), (-1,), (-5, -1, -3)]:
            data = mx.symbol.Variable("data")
            index_array = mx.sym.contrib.index_array(data, axes=axes)
            input_array = np.ones(shape)
            mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
            expected = np.stack(mgrid, axis=-1)[..., axes]
            check_symbolic_forward(index_array, [input_array], [expected])
            check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
    @mx.use_np_shape
    def test_index_array_select_axes_zero_size():
        # Axis selection on a zero-size input: last axis is len(axes).
        data = mx.symbol.Variable("data")
        index_array = mx.sym.contrib.index_array(data, axes=(2, 1))
        input_array = np.ones((0, 0, 0, 0))
        expected = np.zeros((0, 0, 2))
        check_symbolic_forward(index_array, [input_array], [expected])
        check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
    test_index_array_default()
    test_index_array_default_zero_dim()
    test_index_array_default_zero_size()
    test_index_array_select_axes()
    test_index_array_select_axes_zero_size()
@with_seed()
def test_scalar_tensor_creation():
    """Scalar (shape ()) tensors are rejected unless np_shape semantics are on."""
    # Legacy shape semantics: scalar shapes must raise.
    assertRaises(MXNetError, mx.nd.zeros, shape=())
    assertRaises(MXNetError, mx.nd.ones, shape=())
    # NumPy shape semantics: scalar tensors are allowed and match numpy.
    with mx.np_shape():
        scalar_mx = mx.nd.ones(shape=())
        scalar_np = np.ones((), dtype=scalar_mx.dtype)
        assert same(scalar_mx.asnumpy(), scalar_np)
@with_seed()
def test_zero_size_tensor_creation():
    """Zero-size shapes are rejected unless np_shape semantics are on."""
    # Legacy shape semantics: a 0 in the shape must raise.
    assertRaises(MXNetError, mx.nd.zeros, shape=(0, 1, 3, 0))
    assertRaises(MXNetError, mx.nd.ones, shape=(0, 1, 3, 0))
    # NumPy shape semantics: zero-size tensors are allowed and match numpy.
    with mx.np_shape():
        zero_mx = mx.nd.ones(shape=(0, 1, 0, 4))
        zero_np = np.ones(shape=zero_mx.shape, dtype=zero_mx.dtype)
        assert same(zero_mx.asnumpy(), zero_np)
@with_seed()
def test_concat_with_zero_size_tensor():
    """Concat must handle zero-size inputs under np_shape semantics."""
    with mx.np_shape():
        # Zero-size parts along the concat axis contribute nothing.
        parts = [mx.nd.ones((0, 8, 12)), mx.nd.ones((3, 8, 12)), mx.nd.ones((0, 8, 12))]
        out = mx.nd.Concat(*parts, dim=0)
        assert out.shape == (3, 8, 12)
        # Axis-1 extents still sum even when the leading axis is zero.
        parts = [mx.nd.ones((0, 3, 10)), mx.nd.ones((0, 4, 10)), mx.nd.ones((0, 5, 10))]
        out = mx.nd.Concat(*parts, dim=1)
        assert out.shape == (0, 12, 10)
@with_seed()
def test_np_shape_decorator():
    """mx.use_np_shape must preserve function metadata and enable scalar /
    zero-size tensors both on its own and combined with mx.np_shape()."""
    @mx.use_np_shape
    def check_scalar_one():
        """Generate scalar one tensor"""
        return mx.nd.ones(shape=())
    # functools.wraps-style metadata must survive the decorator.
    assert check_scalar_one.__name__ == "check_scalar_one"
    assert check_scalar_one.__doc__ == "Generate scalar one tensor"
    assert check_scalar_one().shape == ()
    # The decorator must win regardless of the surrounding np_shape state.
    for active in [True, False]:
        with mx.np_shape(active=active):
            assert check_scalar_one.__name__ == "check_scalar_one"
            assert check_scalar_one.__doc__ == "Generate scalar one tensor"
            assert check_scalar_one().shape == ()
    @mx.use_np_shape
    def check_concat(shape1, shape2, axis):
        data1 = mx.nd.ones(shape1)
        data2 = mx.nd.ones(shape2)
        ret = mx.nd.Concat(data1, data2, dim=axis)
        expected_ret = np.concatenate((data1.asnumpy(), data2.asnumpy()), axis=axis)
        assert ret.shape == expected_ret.shape
    check_concat((0, 3, 4), (5, 3, 4), 0)
    check_concat((8, 0, 5), (8, 7, 5), 1)
    check_concat((8, 0, 0), (8, 0, 0), 2)
    # Repeat under both np_shape states (mirroring the scalar checks above);
    # the original loop body never used `active`, making it a pure duplicate.
    for active in [True, False]:
        with mx.np_shape(active=active):
            check_concat((0, 3, 4), (5, 3, 4), 0)
            check_concat((8, 0, 5), (8, 7, 5), 1)
            check_concat((8, 0, 0), (8, 0, 0), 2)
@with_seed()
def test_add_n():
    """add_n over several arrays equals their elementwise sum, even when the
    output buffer aliases one of the inputs."""
    shape = (2, 2)
    num_inputs = 5
    arrays = [mx.nd.random.uniform(shape=shape) for _ in range(num_inputs)]
    # Accumulate the reference sum *before* add_n runs, because add_n below
    # writes its result into arrays[0] (out= aliases an input).
    expected = mx.nd.zeros(shape=shape)
    for arr in arrays:
        expected = expected + arr
    actual = mx.nd.add_n(*arrays, out=arrays[0])
    assert_almost_equal(expected.asnumpy(), actual.asnumpy(), atol=1e-5)
def test_get_all_registered_operators():
    """The operator registry returns a non-empty list containing Activation."""
    registered = get_all_registered_operators()
    ok_(isinstance(registered, list))
    ok_(len(registered) > 0)
    ok_('Activation' in registered)
def test_get_operator_arguments():
    """Activation's registered signature exposes its two typed arguments."""
    args_info = get_operator_arguments('Activation')
    ok_(isinstance(args_info, OperatorArguments))
    ok_(args_info.names == ['data', 'act_type'])
    expected_types = [
        'NDArray-or-Symbol',
        "{'relu', 'sigmoid', 'softrelu', 'softsign', 'tanh'}, required",
    ]
    ok_(args_info.types == expected_types)
    ok_(args_info.narg == 2)
def test_transpose_infer_shape_back():
    """Shape inference propagates backwards through transpose."""
    # The fully-unknown operand's shape must be deduced from the known one.
    known = mx.sym.ones(shape=[2, 3])
    unknown = mx.sym.ones(shape=[-1, -1])
    summed = known + mx.sym.transpose(unknown)
    exe = summed.bind(mx.cpu(), args={})
    outputs = exe.forward()
    assert outputs[0].shape == (2, 3)
def test_transpose_infer_shape_mixed():
    """Partially-known shapes on both operands combine through transpose."""
    lhs = mx.sym.ones(shape=[2, -1])
    rhs = mx.sym.ones(shape=[3, -1])
    summed = lhs + mx.sym.transpose(rhs)
    exe = summed.bind(mx.cpu(), args={})
    outputs = exe.forward()
    assert outputs[0].shape == (2, 3)
@with_seed()
def test_sample_normal_default_shape():
    """sample_normal's output shape for default, empty, int and tuple shapes.

    Regression test for https://github.com/apache/incubator-mxnet/issues/16135
    """
    mu = mx.nd.array([10.0])
    sigma = mx.nd.array([0.5])
    # Omitted or empty shape: one sample per (mu, sigma) pair.
    assert mx.nd.sample_normal(mu=mu, sigma=sigma).shape == (1,)
    assert mx.nd.sample_normal(mu=mu, sigma=sigma, shape=()).shape == (1,)
    # Integer or 1-tuple shape: that many samples per pair.
    assert mx.nd.sample_normal(mu=mu, sigma=sigma, shape=1).shape == (1, 1)
    assert mx.nd.sample_normal(mu=mu, sigma=sigma, shape=(1,)).shape == (1, 1)
def test_large_tensor_disabled_err_msg():
    """Creating tensors/samples beyond the 2^32-element limit must raise when
    large-tensor support is compiled out.

    NOTE(review): the nested check_* functions below are defined but never
    invoked, so this test currently executes nothing at runtime.  Presumably
    they are meant to be called here; confirm intent before wiring them up,
    since doing so changes what the suite actually runs.
    """
    LARGE_X = 4300000000  # > 2^32 elements along one axis
    MEDIUM_X = 1000000000
    SMALL_Y = 1
    shape = (2, LARGE_X)

    def check_nd_array():
        x = np.arange(0, LARGE_X)
        assertRaises(MXNetError, mx.nd.array, x)

    def check_nd_ones():
        assertRaises(MXNetError, mx.nd.ones, shape)

    def check_nd_zeros():
        assertRaises(MXNetError, mx.nd.zeros, shape)

    def check_nd_full():
        val = 1
        assertRaises(Exception, mx.nd.full, shape, val)

    def check_nd_arange():
        start = 0
        stop = LARGE_X
        assertRaises(Exception, mx.nd.arange, start, stop)

    def check_nd_random():
        shape = (2, LARGE_X)

        def check_random_exp():
            lam = 4
            assertRaises(MXNetError, mx.nd.random_exponential, lam, shape)

        def check_random_gamma():
            alpha = 9
            beta = 0.5
            assertRaises(MXNetError, mx.nd.random_gamma, alpha, beta, shape)

        def check_random_normal():
            loc = 0
            scale = 1
            assertRaises(MXNetError, mx.nd.random_normal, loc, scale, shape)

        def check_random_poisson():
            lam = 4
            # BUG FIX: previously passed the undefined name `alpha` as an
            # extra argument (NameError if called); random_poisson takes
            # (lam, shape).
            assertRaises(MXNetError, mx.nd.random_poisson, lam, shape)

        def check_random_randint():
            low = 0
            high = 1000000
            assertRaises(MXNetError, mx.nd.random_randint, low, high, shape)

        def check_random_uniform():
            low = 0
            high = 1  # was misspelled `hight` and never used
            # BUG FIX: previously referenced the undefined names
            # `alpha`/`beta` (NameError if called); random_uniform takes
            # (low, high, shape).
            assertRaises(MXNetError, mx.nd.random_uniform, low, high, shape)
def check_multihead_attention_selfatt(dtype):
    """Compare the fused interleaved self-attention operators
    (contrib.interleaved_matmul_selfatt_qk / _valatt) against a reference
    implementation built from FullyConnected/reshape/transpose/batch_dot.

    Checks forward outputs, attention scores, and every parameter gradient
    for the given dtype.
    """
    def convert_weight(F, q_weight, k_weight, v_weight, num_heads):
        # Interleave the per-head Q/K/V projection weights into the single
        # fused matrix layout the interleaved_matmul_selfatt ops expect.
        q_weight = F.reshape(q_weight, shape=(num_heads, -1, 0), reverse=True)
        k_weight = F.reshape(k_weight, shape=(num_heads, -1, 0), reverse=True)
        v_weight = F.reshape(v_weight, shape=(num_heads, -1, 0), reverse=True)
        all_weights = F.concat(q_weight, k_weight, v_weight, dim=-2)
        all_weights = F.reshape(all_weights, shape=(-1, 0), reverse=True)
        return all_weights

    def convert_bias(F, q_bias, k_bias, v_bias, num_heads):
        # Interleave the per-head Q/K/V biases to match the fused weights.
        q_bias = F.reshape(q_bias, shape=(num_heads, -1))
        k_bias = F.reshape(k_bias, shape=(num_heads, -1))
        v_bias = F.reshape(v_bias, shape=(num_heads, -1))
        all_bias = F.stack(q_bias, k_bias, v_bias, axis=1)
        all_bias = F.reshape(all_bias, shape=(-1,))
        return all_bias

    batch_size = 2
    qkv_length = 7  # length of a sequence
    qkv_dim = 9  # dimension of encoding
    num_heads = 3  # number of attention head
    head_dim = 5  # head size
    out_dim = 13 * num_heads
    qkv_units = num_heads * head_dim

    # Random parameters shared by the fused and the reference executors so
    # their results are directly comparable.
    arg_params = {
        'qkv': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
        'q_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
        'k_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
        'v_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
        'q_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
        'k_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
        'v_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
        'out_weight': mx.nd.array(np.random.rand(*(out_dim, qkv_units)).astype(dtype) * 0.1, dtype=dtype),
        'out_bias': mx.nd.array(np.random.rand(*(out_dim,)).astype(dtype) * 0.1, dtype=dtype),
    }

    # --- Optimized path: fused interleaved attention operators. ---
    qkv = mx.sym.Variable('qkv')
    # 'sonde' is an all-zeros probe added to the attention score so the score
    # can be exposed as an output and receive an explicit head gradient.
    sonde = mx.sym.Variable('sonde')
    q_weight = mx.sym.Variable('q_weight')
    k_weight = mx.sym.Variable('k_weight')
    v_weight = mx.sym.Variable('v_weight')
    q_bias = mx.sym.Variable('q_bias')
    k_bias = mx.sym.Variable('k_bias')
    v_bias = mx.sym.Variable('v_bias')
    out_weight = mx.sym.Variable('out_weight')
    out_bias = mx.sym.Variable('out_bias')
    qkv_weight = convert_weight(mx.sym, q_weight, k_weight, v_weight, num_heads)
    qkv_bias = convert_bias(mx.sym, q_bias, k_bias, v_bias, num_heads)
    # Fused ops work on time-major layout: (batch, len, dim) -> (len, batch, dim).
    qkv = mx.sym.transpose(qkv, axes=(1, 0, 2))
    qkv_proj = mx.sym.FullyConnected(qkv, weight=qkv_weight, bias=qkv_bias, flatten=False,
                                     num_hidden=qkv_units * 3, no_bias=False)
    att_score = mx.sym.contrib.interleaved_matmul_selfatt_qk(
        qkv_proj, heads=num_heads)
    att_score = att_score + sonde
    weighted_value = mx.sym.contrib.interleaved_matmul_selfatt_valatt(
        qkv_proj, att_score, heads=num_heads)
    output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
                                   num_hidden=out_dim, no_bias=False)
    output = mx.sym.transpose(output, axes=(1, 0, 2))  # back to batch-major
    output = mx.sym.Group([output, att_score])
    executor = output.simple_bind(ctx=default_context(),
                                  qkv=(batch_size, qkv_length, qkv_dim),
                                  q_weight=(qkv_units, qkv_dim),
                                  q_bias=(qkv_units,),
                                  k_weight=(qkv_units, qkv_dim),
                                  k_bias=(qkv_units,),
                                  v_weight=(qkv_units, qkv_dim),
                                  v_bias=(qkv_units,),
                                  type_dict={'qkv': dtype,
                                             'q_weight': dtype,
                                             'k_weight': dtype,
                                             'v_weight': dtype,
                                             'q_bias': dtype,
                                             'k_bias': dtype,
                                             'v_bias': dtype,
                                             'sonde': dtype},
                                  grad_req='write', force_rebind=True)
    output_shape = executor.outputs[0].shape
    output_grads = np.random.rand(*output_shape).astype(dtype) * 0.1
    executor.copy_params_from(arg_params, {})
    executor.arg_dict['sonde'][:] = 0.
    executor.arg_dict['sonde'].wait_to_read()
    executor.forward(is_train=True)
    output_opti = executor.outputs[0].asnumpy()
    att_score_opti = executor.outputs[1].asnumpy()
    executor.backward([mx.nd.array(output_grads, dtype=dtype),
                       mx.nd.zeros(att_score_opti.shape, dtype=dtype)])
    grads_opti = {k: v.asnumpy() for k, v in executor.grad_dict.items()}

    # --- Reference path: explicit reshape/transpose/batch_dot attention. ---
    qkv = mx.sym.Variable('qkv')
    sonde = mx.sym.Variable('sonde')
    q_weight = mx.sym.Variable('q_weight')
    k_weight = mx.sym.Variable('k_weight')
    v_weight = mx.sym.Variable('v_weight')
    q_bias = mx.sym.Variable('q_bias')
    k_bias = mx.sym.Variable('k_bias')
    v_bias = mx.sym.Variable('v_bias')
    out_weight = mx.sym.Variable('out_weight')
    out_bias = mx.sym.Variable('out_bias')
    q = mx.sym.FullyConnected(qkv, weight=q_weight, bias=q_bias, flatten=False,
                              num_hidden=qkv_units, no_bias=False)
    k = mx.sym.FullyConnected(qkv, weight=k_weight, bias=k_bias, flatten=False,
                              num_hidden=qkv_units, no_bias=False)
    v = mx.sym.FullyConnected(qkv, weight=v_weight, bias=v_bias, flatten=False,
                              num_hidden=qkv_units, no_bias=False)
    # Split heads: (batch, length, units) -> (batch*heads, length, head_dim).
    q = mx.sym.reshape(q, shape=(0, 0, num_heads, -1))
    q = mx.sym.transpose(q, axes=(0, 2, 1, 3))
    q = mx.sym.reshape(q, shape=(-1, 0, 0), reverse=True)
    k = mx.sym.reshape(k, shape=(0, 0, num_heads, -1))
    k = mx.sym.transpose(k, axes=(0, 2, 1, 3))
    k = mx.sym.reshape(k, shape=(-1, 0, 0), reverse=True)
    q = mx.sym.contrib.div_sqrt_dim(q)  # scaled dot-product attention
    att_score = mx.sym.batch_dot(q, k, transpose_b=True)
    att_score = att_score + sonde
    v = mx.sym.reshape(v, shape=(0, 0, num_heads, -1))
    v = mx.sym.transpose(v, axes=(0, 2, 1, 3))
    v = mx.sym.reshape(v, shape=(-1, 0, 0), reverse=True)
    weighted_value = mx.sym.batch_dot(att_score, v)
    # Merge heads back: (batch*heads, length, head_dim) -> (batch, length, units).
    weighted_value = mx.sym.reshape(weighted_value, shape=(-1, num_heads, 0, 0),
                                    reverse=True)
    weighted_value = mx.sym.transpose(weighted_value, axes=(0, 2, 1, 3))
    weighted_value = mx.sym.reshape(weighted_value, shape=(0, 0, -1))
    output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
                                   num_hidden=out_dim, no_bias=False)
    output = mx.sym.Group([output, att_score])
    executor = output.simple_bind(ctx=default_context(),
                                  qkv=(batch_size, qkv_length, qkv_dim),
                                  type_dict={'qkv': dtype},
                                  grad_req='write', force_rebind=True)
    executor.copy_params_from(arg_params, {})
    executor.arg_dict['sonde'][:] = 0.
    executor.arg_dict['sonde'].wait_to_read()
    executor.forward(is_train=True)
    output_orig = executor.outputs[0].asnumpy()
    att_score_orig = executor.outputs[1].asnumpy()
    executor.backward([mx.nd.array(output_grads, dtype=dtype),
                       mx.nd.zeros(att_score_orig.shape, dtype=dtype)])
    grads_orig = {k : v.asnumpy() for k, v in executor.grad_dict.items()}
    # Fused and reference paths must agree on scores, outputs and gradients.
    assert_allclose(att_score_orig, att_score_opti, rtol=1e-2, atol=1e-3)
    assert_allclose(output_orig, output_opti, rtol=1e-2, atol=1e-3)
    for k in grads_opti.keys():
        assert(grads_orig[k].dtype == grads_opti[k].dtype)
        assert(grads_orig[k].shape == grads_opti[k].shape)
        assert_allclose(grads_orig[k], grads_opti[k], rtol=1e-2, atol=1e-3)
@with_seed()
def test_multihead_attention_selfatt():
    """Run the fused-vs-reference self-attention check for each supported dtype."""
    dtypes = ['float32']
    if default_context().device_type == 'gpu':
        dtypes.append('float16')  # half precision only exercised on GPU
    for dtype in dtypes:
        check_multihead_attention_selfatt(dtype=dtype)
def check_multihead_attention_encdec(dtype):
    """Compare the fused interleaved encoder-decoder attention operators
    (contrib.interleaved_matmul_encdec_qk / _valatt) against a reference
    implementation built from FullyConnected/reshape/transpose/batch_dot.

    Checks forward outputs, attention scores, and every parameter gradient
    for the given dtype.
    """
    def convert_weight(F, k_weight, v_weight, num_heads):
        # Interleave the per-head K/V projection weights into the single
        # fused matrix layout the interleaved_matmul_encdec ops expect.
        k_weight = F.reshape(k_weight, shape=(num_heads, -1, 0), reverse=True)
        v_weight = F.reshape(v_weight, shape=(num_heads, -1, 0), reverse=True)
        all_weights = F.concat(k_weight, v_weight, dim=-2)
        all_weights = F.reshape(all_weights, shape=(-1, 0), reverse=True)
        return all_weights

    def convert_bias(F, k_bias, v_bias, num_heads):
        # Interleave the per-head K/V biases to match the fused weights.
        k_bias = F.reshape(k_bias, shape=(num_heads, -1))
        v_bias = F.reshape(v_bias, shape=(num_heads, -1))
        all_bias = F.stack(k_bias, v_bias, axis=1)
        all_bias = F.reshape(all_bias, shape=(-1,))
        return all_bias

    batch_size = 2
    qkv_length = 7  # length of a sequence
    qkv_dim = 9  # dimension of encoding
    num_heads = 3  # number of attention head
    head_dim = 5  # head size
    out_dim = 13 * num_heads
    qkv_units = num_heads * head_dim

    # Random parameters shared by both executors for direct comparison.
    arg_params = {
        'q': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
        'kv': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
        'q_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
        'k_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
        'v_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
        'q_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
        'k_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
        'v_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
        'out_weight': mx.nd.array(np.random.rand(*(out_dim, qkv_units)).astype(dtype) * 0.1, dtype=dtype),
        'out_bias': mx.nd.array(np.random.rand(*(out_dim,)).astype(dtype) * 0.1, dtype=dtype),
    }

    # --- Optimized path: fused interleaved enc-dec attention operators. ---
    q = mx.sym.Variable('q')
    kv = mx.sym.Variable('kv')
    # 'sonde' is an all-zeros probe added to the attention score so the score
    # can be exposed as an output and receive an explicit head gradient.
    sonde = mx.sym.Variable('sonde')
    q_weight = mx.sym.Variable('q_weight')
    k_weight = mx.sym.Variable('k_weight')
    v_weight = mx.sym.Variable('v_weight')
    q_bias = mx.sym.Variable('q_bias')
    k_bias = mx.sym.Variable('k_bias')
    v_bias = mx.sym.Variable('v_bias')
    out_weight = mx.sym.Variable('out_weight')
    out_bias = mx.sym.Variable('out_bias')
    kv_weight = convert_weight(mx.sym, k_weight, v_weight, num_heads)
    kv_bias = convert_bias(mx.sym, k_bias, v_bias, num_heads)
    # Fused ops work on time-major layout: (batch, len, dim) -> (len, batch, dim).
    kv = mx.sym.transpose(kv, axes=(1, 0, 2))
    kv_proj = mx.sym.FullyConnected(kv, weight=kv_weight, bias=kv_bias, flatten=False,
                                    num_hidden=qkv_units * 2, no_bias=False)
    q = mx.sym.transpose(q, axes=(1, 0, 2))
    q_proj = mx.sym.FullyConnected(q, weight=q_weight, bias=q_bias, flatten=False,
                                   num_hidden=qkv_units, no_bias=False)
    att_score = mx.sym.contrib.interleaved_matmul_encdec_qk(
        q_proj, kv_proj, heads=num_heads)
    att_score = att_score + sonde
    weighted_value = mx.sym.contrib.interleaved_matmul_encdec_valatt(
        kv_proj, att_score, heads=num_heads)
    output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
                                   num_hidden=out_dim, no_bias=False)
    output = mx.sym.transpose(output, axes=(1, 0, 2))  # back to batch-major
    output = mx.sym.Group([output, att_score])
    executor = output.simple_bind(ctx=default_context(),
                                  q=(batch_size, qkv_length, qkv_dim),
                                  kv=(batch_size, qkv_length, qkv_dim),
                                  q_weight=(qkv_units, qkv_dim),
                                  q_bias=(qkv_units,),
                                  k_weight=(qkv_units, qkv_dim),
                                  k_bias=(qkv_units,),
                                  v_weight=(qkv_units, qkv_dim),
                                  v_bias=(qkv_units,),
                                  out_weight=(out_dim, qkv_units),
                                  out_bias=(out_dim,),
                                  type_dict={'q': dtype,
                                             'kv': dtype,
                                             'q_weight': dtype,
                                             'q_bias': dtype,
                                             'k_weight': dtype,
                                             'k_bias': dtype,
                                             'v_weight': dtype,
                                             'v_bias': dtype,
                                             'out_weight': dtype,
                                             'out_bias': dtype,
                                             },
                                  grad_req='write', force_rebind=True)
    output_shape = executor.outputs[0].shape
    output_grads = np.random.rand(*output_shape).astype(dtype) * 0.1
    executor.copy_params_from(arg_params, {})
    executor.arg_dict['sonde'][:] = 0.
    executor.arg_dict['sonde'].wait_to_read()
    executor.forward(is_train=True)
    output_opti = executor.outputs[0].asnumpy()
    att_score_opti = executor.outputs[1].asnumpy()
    executor.backward([mx.nd.array(output_grads, dtype=dtype), mx.nd.zeros(att_score_opti.shape, dtype=dtype)])
    grads_opti = {k: v.asnumpy() for k, v in executor.grad_dict.items()}

    # --- Reference path: explicit reshape/transpose/batch_dot attention. ---
    q = mx.sym.Variable('q')
    kv = mx.sym.Variable('kv')
    sonde = mx.sym.Variable('sonde')
    q_weight = mx.sym.Variable('q_weight')
    k_weight = mx.sym.Variable('k_weight')
    v_weight = mx.sym.Variable('v_weight')
    q_bias = mx.sym.Variable('q_bias')
    k_bias = mx.sym.Variable('k_bias')
    v_bias = mx.sym.Variable('v_bias')
    out_weight = mx.sym.Variable('out_weight')
    out_bias = mx.sym.Variable('out_bias')
    q = mx.sym.FullyConnected(q, weight=q_weight, bias=q_bias, flatten=False,
                              num_hidden=qkv_units, no_bias=False)
    k = mx.sym.FullyConnected(kv, weight=k_weight, bias=k_bias, flatten=False,
                              num_hidden=qkv_units, no_bias=False)
    v = mx.sym.FullyConnected(kv, weight=v_weight, bias=v_bias, flatten=False,
                              num_hidden=qkv_units, no_bias=False)
    # Split heads: (batch, length, units) -> (batch*heads, length, head_dim).
    q = mx.sym.reshape(q, shape=(0, 0, num_heads, -1))
    q = mx.sym.transpose(q, axes=(0, 2, 1, 3))
    q = mx.sym.reshape(q, shape=(-1, 0, 0), reverse=True)
    k = mx.sym.reshape(k, shape=(0, 0, num_heads, -1))
    k = mx.sym.transpose(k, axes=(0, 2, 1, 3))
    k = mx.sym.reshape(k, shape=(-1, 0, 0), reverse=True)
    q = mx.sym.contrib.div_sqrt_dim(q)  # scaled dot-product attention
    att_score = mx.sym.batch_dot(q, k, transpose_b=True)
    att_score = att_score + sonde
    v = mx.sym.reshape(v, shape=(0, 0, num_heads, -1))
    v = mx.sym.transpose(v, axes=(0, 2, 1, 3))
    v = mx.sym.reshape(v, shape=(-1, 0, 0), reverse=True)
    weighted_value = mx.sym.batch_dot(att_score, v)
    # Merge heads back: (batch*heads, length, head_dim) -> (batch, length, units).
    weighted_value = mx.sym.reshape(weighted_value, shape=(-1, num_heads, 0, 0),
                                    reverse=True)
    weighted_value = mx.sym.transpose(weighted_value, axes=(0, 2, 1, 3))
    weighted_value = mx.sym.reshape(weighted_value, shape=(0, 0, -1))
    output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
                                   num_hidden=out_dim, no_bias=False)
    output = mx.sym.Group([output, att_score])
    executor = output.simple_bind(ctx=default_context(),
                                  q=(batch_size, qkv_length, qkv_dim),
                                  kv=(batch_size, qkv_length, qkv_dim),
                                  type_dict={'q': dtype,
                                             'kv': dtype},
                                  grad_req='write', force_rebind=True)
    executor.copy_params_from(arg_params, {})
    executor.arg_dict['sonde'][:] = 0.
    executor.arg_dict['sonde'].wait_to_read()
    executor.forward(is_train=True)
    output_orig = executor.outputs[0].asnumpy()
    att_score_orig = executor.outputs[1].asnumpy()
    executor.backward([mx.nd.array(output_grads, dtype=dtype), mx.nd.zeros(att_score_orig.shape, dtype=dtype)])
    grads_orig = {k : v.asnumpy() for k, v in executor.grad_dict.items()}
    # Fused and reference paths must agree on scores, outputs and gradients.
    assert_allclose(att_score_orig, att_score_opti, rtol=1e-2, atol=1e-3)
    assert_allclose(output_orig, output_opti, rtol=1e-2, atol=1e-3)
    for k in grads_opti.keys():
        assert(grads_orig[k].dtype == grads_opti[k].dtype)
        assert(grads_orig[k].shape == grads_opti[k].shape)
        assert_allclose(grads_orig[k], grads_opti[k], rtol=1e-2, atol=1e-3)
@with_seed()
def test_multihead_attention_encdec():
    """Run the fused-vs-reference enc-dec attention check for each supported dtype."""
    dtypes = ['float32']
    if default_context().device_type == 'gpu':
        dtypes.append('float16')  # half precision only exercised on GPU
    for dtype in dtypes:
        check_multihead_attention_encdec(dtype=dtype)
@with_seed()
def test_im2col_col2im():
    """Validate im2col/col2im: convolution equivalence, image reconstruction,
    and that each operator's gradient is its counterpart."""
    def compute_output_size(spatial, kernel, stride=1, dilate=1, pad=0):
        # Standard convolution output-size formula for one spatial dimension.
        pad_size = spatial + 2 * pad
        dilated_kernel = dilate * (kernel - 1) + 1
        return (pad_size - dilated_kernel) // stride + 1

    def build_kwargs(kernel, stride=1, dilate=1, pad=0):
        # Expand scalar hyper-parameters into the 2-D tuples the ops expect.
        return {'kernel': (kernel, kernel),
                'stride': (stride, stride),
                'dilate': (dilate, dilate),
                'pad': (pad, pad)}

    # use im2col to compute convolution
    def test_conv_compute(input_shape, num_filter, kernel, stride=1, dilate=1, pad=0):
        batch_size = input_shape[0]
        channel = input_shape[1]
        kwargs = build_kwargs(kernel, stride, dilate, pad)
        data = mx.nd.uniform(shape=input_shape)
        col = mx.nd.im2col(data, **kwargs)
        w = mx.nd.uniform(shape=(num_filter, channel, kernel, kernel))
        # Convolution as a matrix product of the unfolded input with the
        # flattened filters, then folded back to (N, C_out, H_out, W_out).
        c1 = mx.nd.dot(col.transpose((0, 2, 1)), w.reshape(num_filter, -1).T).transpose((0, 2, 1))
        hos = compute_output_size(input_shape[2], kernel, stride, dilate, pad)
        wos = compute_output_size(input_shape[3], kernel, stride, dilate, pad)
        c1 = c1.reshape((batch_size, num_filter, hos, wos))
        c2 = mx.nd.Convolution(data, num_filter=num_filter, weight=w, no_bias=True, **kwargs)
        assert_almost_equal(c1.asnumpy(), c2.asnumpy(), rtol=1e-5, atol=1e-5)

    test_conv_compute(
        input_shape = (5, 3, 30, 20),
        num_filter = 10,
        kernel = 3
    )
    test_conv_compute(
        input_shape = (5, 3, 30, 20),
        num_filter = 10,
        kernel = 3,
        stride = 2
    )
    test_conv_compute(
        input_shape = (5, 3, 30, 20),
        num_filter = 10,
        kernel = 3,
        stride = 2,
        dilate = 2
    )
    test_conv_compute(
        input_shape = (5, 3, 30, 20),
        num_filter = 10,
        kernel = 3,
        stride = 2,
        dilate = 2,
        pad = 1
    )

    # use composite of im2col and col2im to reconstruct image
    def test_reconstruct(input_shape, kernel, stride=1, dilate=1, pad=0):
        batch_size = input_shape[0]
        channel = input_shape[1]
        kwargs = build_kwargs(kernel, stride, dilate, pad)
        data = mx.nd.uniform(shape=input_shape)
        col = mx.nd.im2col(data, **kwargs)
        # col2im sums overlapping patches, so folding the unfolded input must
        # equal the image scaled by each pixel's patch count (col2im of ones).
        im1 = mx.nd.col2im(col, input_shape[2:], **kwargs)
        im2 = mx.nd.col2im(mx.nd.ones_like(col), input_shape[2:], **kwargs) * data
        assert_almost_equal(im1.asnumpy(), im2.asnumpy(), rtol=1e-5, atol=1e-5)

    test_reconstruct(
        input_shape = (5, 3, 30, 20),
        kernel = 3
    )
    test_reconstruct(
        input_shape = (5, 3, 30, 20),
        kernel = 3,
        stride = 2
    )
    test_reconstruct(
        input_shape = (5, 3, 30, 20),
        kernel = 3,
        stride = 2,
        dilate = 2
    )
    test_reconstruct(
        input_shape = (5, 3, 30, 20),
        kernel = 3,
        stride = 2,
        dilate = 2,
        pad = 1
    )

    # test gradient
    # the grad of im2col is col2im, and vice versa
    def test_grad(input_shape, kernel, stride=1, dilate=1, pad=0):
        # im2col
        data = mx.sym.Variable('data')
        kwargs = build_kwargs(kernel, stride, dilate, pad)
        sym = mx.sym.im2col(data, **kwargs)
        im = mx.nd.uniform(shape=input_shape)
        col = mx.nd.im2col(im, **kwargs)
        col_shape = col.shape
        expected = mx.nd.col2im(col, input_shape[2:], **kwargs)
        check_symbolic_backward(sym, [im.asnumpy()], [col.asnumpy()], [expected.asnumpy()])
        # col2im
        data = mx.sym.Variable('data')
        sym = mx.sym.col2im(data, input_shape[2:], **kwargs)
        col = mx.nd.uniform(shape=col_shape)
        im = mx.nd.col2im(col, input_shape[2:], **kwargs)
        expected = mx.nd.im2col(im, **kwargs)
        check_symbolic_backward(sym, [col.asnumpy()], [im.asnumpy()], [expected.asnumpy()])

    test_grad(
        input_shape = (5, 3, 30, 20),
        kernel = 3
    )
    test_grad(
        input_shape = (5, 3, 30, 20),
        kernel = 3,
        stride = 2
    )
    test_grad(
        input_shape = (5, 3, 30, 20),
        kernel = 3,
        stride = 2,
        dilate = 2
    )
    test_grad(
        input_shape = (5, 3, 30, 20),
        kernel = 3,
        stride = 2,
        dilate = 2,
        pad = 1
    )
# Run the whole module under nose when executed directly.
if __name__ == '__main__':
    import nose
    nose.runmodule()
| 43.302507 | 173 | 0.574282 |
177ebe5eb503079eaf0f01a150c891e72a8028d3 | 10,032 | py | Python | tests/test_conf.py | kevinlai219/Mezzanine-Django | a92cde236448e19cdbf853b5cb34f99a60972147 | [
"BSD-2-Clause"
] | 3,053 | 2015-01-01T08:16:01.000Z | 2022-03-31T12:06:03.000Z | tests/test_conf.py | kevinlai219/Mezzanine-Django | a92cde236448e19cdbf853b5cb34f99a60972147 | [
"BSD-2-Clause"
] | 818 | 2015-01-03T05:44:45.000Z | 2022-03-08T00:06:23.000Z | tests/test_conf.py | kevinlai219/Mezzanine-Django | a92cde236448e19cdbf853b5cb34f99a60972147 | [
"BSD-2-Clause"
] | 1,352 | 2015-01-01T06:17:14.000Z | 2022-03-26T16:21:54.000Z | import warnings
from unittest import skipUnless
from django.conf import settings as django_settings
from django.utils.encoding import force_str
from mezzanine.conf import register_setting, registry, settings
from mezzanine.conf.context_processors import TemplateSettings
from mezzanine.conf.models import Setting
from mezzanine.utils.tests import TestCase
class ConfTests(TestCase):
@skipUnless(False, "Only run manually - see Github issue #1126")
def test_threading_race(self):
import multiprocessing.pool
import random
from django.db import connections
type_modifiers = {
int: lambda s: s + 1,
float: lambda s: s + 1.0,
bool: lambda s: not s,
str: lambda s: s + "test",
bytes: lambda s: s + b"test",
}
# Store a non-default value for every editable setting in the database
editable_settings = {}
for setting in registry.values():
if setting["editable"]:
modified = type_modifiers[setting["type"]](setting["default"])
Setting.objects.create(name=setting["name"], value=modified)
editable_settings[setting["name"]] = modified
# Make our child threads use this thread's connections. Recent SQLite
# do support access from multiple threads for in-memory databases, but
# Django doesn't support it currently - so we have to resort to this
# workaround, taken from Django's LiveServerTestCase.
# See Django ticket #12118 for discussion.
connections_override = {}
for conn in connections.all():
# If using in-memory sqlite databases, pass the connections to
# the server thread.
if conn.vendor == "sqlite" and conn.settings_dict["NAME"] == ":memory:":
# Explicitly enable thread-shareability for this connection
conn._old_allow_thread_sharing = conn.allow_thread_sharing
conn.allow_thread_sharing = True
connections_override[conn.alias] = conn
def initialise_thread():
for alias, connection in connections_override.items():
connections[alias] = connection
thread_pool = multiprocessing.pool.ThreadPool(8, initialise_thread)
def retrieve_setting(setting_name):
return setting_name, getattr(settings, setting_name)
def choose_random_setting(length=5000):
choices = list(editable_settings)
for _ in range(length):
yield random.choice(choices)
try:
for setting in thread_pool.imap_unordered(
retrieve_setting, choose_random_setting()
):
name, retrieved_value = setting
self.assertEqual(retrieved_value, editable_settings[name])
finally:
for conn in connections_override.values():
conn.allow_thread_sharing = conn._old_allow_thread_sharing
del conn._old_allow_thread_sharing
Setting.objects.all().delete()
def test_settings(self):
"""
Test that an editable setting can be overridden with a DB
value and that the data type is preserved when the value is
returned back out of the DB. Also checks to ensure no
unsupported types are defined for editable settings.
"""
settings.clear_cache()
# Find an editable setting for each supported type.
names_by_type = {}
for setting in registry.values():
if setting["editable"] and setting["type"] not in names_by_type:
names_by_type[setting["type"]] = setting["name"]
# Create a modified value for each setting and save it.
values_by_name = {}
for (setting_type, setting_name) in names_by_type.items():
setting_value = registry[setting_name]["default"]
if setting_type in (int, float):
setting_value += 1
elif setting_type is bool:
setting_value = not setting_value
elif setting_type is str:
setting_value += "test"
elif setting_type is bytes:
setting_value += b"test"
else:
setting = f"{setting_name}: {setting_type}"
self.fail("Unsupported setting type for %s" % setting)
values_by_name[setting_name] = setting_value
Setting.objects.create(name=setting_name, value=setting_value)
# Load the settings and make sure the DB values have persisted.
for (name, value) in values_by_name.items():
self.assertEqual(getattr(settings, name), value)
def test_editable_override(self):
"""
Test that an editable setting is always overridden by a settings.py
setting of the same name.
"""
settings.clear_cache()
Setting.objects.all().delete()
django_settings.FOO = "Set in settings.py"
Setting.objects.create(name="FOO", value="Set in database")
first_value = settings.FOO
settings.SITE_TITLE # Triggers access?
second_value = settings.FOO
self.assertEqual(first_value, second_value)
def test_invalid_value_warning(self):
"""
Test that a warning is raised when a database setting has an invalid
value, i.e. one that can't be converted to the correct Python type.
"""
settings.clear_cache()
register_setting(name="INVALID_INT_SETTING", editable=True, default=0)
Setting.objects.create(name="INVALID_INT_SETTING", value="zero")
with warnings.catch_warnings():
warning_re = r"The setting \w+ should be of type"
warnings.filterwarnings("error", warning_re, UserWarning)
with self.assertRaises(UserWarning):
settings.INVALID_INT_SETTING
self.assertEqual(settings.INVALID_INT_SETTING, 0)
def test_unregistered_setting(self):
"""
Test that accessing any editable setting will delete all Settings
with no corresponding registered setting from the database.
"""
settings.clear_cache()
register_setting(name="REGISTERED_SETTING", editable=True, default="")
Setting.objects.create(name="UNREGISTERED_SETTING", value="")
with self.assertRaises(AttributeError):
settings.UNREGISTERED_SETTING
qs = Setting.objects.filter(name="UNREGISTERED_SETTING")
self.assertEqual(qs.count(), 1)
# This triggers Settings._load(), which deletes unregistered Settings
settings.REGISTERED_SETTING
self.assertEqual(qs.count(), 0)
def test_conflicting_setting(self):
"""
Test that conflicting settings raise a warning and use the settings.py
value instead of the value from the database.
"""
settings.clear_cache()
register_setting(name="CONFLICTING_SETTING", editable=True, default=1)
Setting.objects.create(name="CONFLICTING_SETTING", value=2)
settings.CONFLICTING_SETTING = 3
with warnings.catch_warnings():
warning_re = (
"These settings are defined in both " r"settings\.py and the database"
)
warnings.filterwarnings("error", warning_re, UserWarning)
with self.assertRaises(UserWarning):
settings.CONFLICTING_SETTING
self.assertEqual(settings.CONFLICTING_SETTING, 3)
del settings.CONFLICTING_SETTING
def test_modeltranslation_configuration(self):
"""
Test that modeltranslation is properly configured in settings.
"""
if settings.USE_MODELTRANSLATION:
self.assertTrue(settings.USE_I18N)
def test_editable_caching(self):
"""
Test the editable setting caching behavior.
"""
# Ensure usage with no current request does not break caching
from mezzanine.core.request import _thread_local
try:
del _thread_local.request
except AttributeError:
pass
setting = Setting.objects.create(name="SITE_TITLE", value="Mezzanine")
original_site_title = settings.SITE_TITLE
setting.value = "Foobar"
setting.save()
new_site_title = settings.SITE_TITLE
setting.delete()
self.assertNotEqual(original_site_title, new_site_title)
class TemplateSettingsTests(TestCase):
    """Behavior of the TemplateSettings wrapper exposed to templates."""

    def test_allowed(self):
        # A whitelisted setting is reachable as attribute and as key.
        # We choose a setting that will definitely exist:
        ts = TemplateSettings(settings, ["INSTALLED_APPS"])
        expected = settings.INSTALLED_APPS
        self.assertEqual(ts.INSTALLED_APPS, expected)
        self.assertEqual(ts["INSTALLED_APPS"], expected)

    def test_not_allowed(self):
        # A non-whitelisted setting must not leak through either access path.
        ts = TemplateSettings(settings, [])
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            self.assertRaises(AttributeError, lambda: ts.INSTALLED_APPS)
            self.assertRaises(KeyError, lambda: ts["INSTALLED_APPS"])

    def test_add(self):
        # Values stored via item assignment become readable via both paths.
        ts = TemplateSettings(settings, ["INSTALLED_APPS"])
        ts["EXTRA_THING"] = "foo"
        self.assertEqual(ts.EXTRA_THING, "foo")
        self.assertEqual(ts["EXTRA_THING"], "foo")

    def test_repr(self):
        # Empty whitelist -> empty dict repr.
        self.assertEqual(repr(TemplateSettings(settings, [])), "{}")
        # Known settings appear; unknown names are silently dropped.
        shown = repr(TemplateSettings(settings, ["DEBUG", "SOME_NON_EXISTANT_SETTING"]))
        self.assertIn("'DEBUG': False", shown)
        # Manually added values show up in the repr as well.
        ts = TemplateSettings(settings, [])
        ts["EXTRA_THING"] = "foo"
        self.assertIn("'EXTRA_THING'", repr(ts))
        self.assertIn("'foo'", repr(ts))

    def test_force_str(self):
        # Stringifying the wrapper must not emit any warnings.
        ts = TemplateSettings(settings, [])
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter("always")
            self.assertEqual(force_str(ts), "{}")
            self.assertEqual(len(caught), 0)
| 38.436782 | 86 | 0.639952 |
1faa5bf671e591096dfe504db664ecda3e725326 | 2,661 | py | Python | test/aqua/test_bernstein_vazirani.py | stefan-woerner/aqua | 12e1b867e254977d9c5992612a7919d8fe016cb4 | [
"Apache-2.0"
] | 504 | 2018-12-15T16:34:03.000Z | 2022-03-26T11:24:53.000Z | test/aqua/test_bernstein_vazirani.py | stefan-woerner/aqua | 12e1b867e254977d9c5992612a7919d8fe016cb4 | [
"Apache-2.0"
] | 746 | 2018-12-16T16:44:42.000Z | 2021-07-10T16:59:43.000Z | test/aqua/test_bernstein_vazirani.py | stefan-woerner/aqua | 12e1b867e254977d9c5992612a7919d8fe016cb4 | [
"Apache-2.0"
] | 421 | 2018-12-22T14:49:00.000Z | 2022-03-04T09:47:07.000Z | # This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Bernstein Vazirani """
import unittest
import itertools
import math
from test.aqua import QiskitAquaTestCase
from ddt import ddt, idata, unpack
from qiskit import BasicAer
from qiskit.transpiler import PassManagerConfig
from qiskit.transpiler.preset_passmanagers import level_0_pass_manager
from qiskit.aqua import QuantumInstance
from qiskit.aqua.components.oracles import TruthTableOracle
from qiskit.aqua.algorithms import BernsteinVazirani
# Parameter grids for the ddt-driven Bernstein-Vazirani tests below; the
# cartesian product of these lists is fed to @idata.
BITMAPS = ['00111100', '01011010']  # truth-table oracle bitmaps under test
MCT_MODES = ['basic', 'basic-dirty-ancilla', 'advanced', 'noancilla']  # TruthTableOracle mct_mode values
OPTIMIZATIONS = [True, False]  # oracle circuit optimization on/off
SIMULATORS = ['statevector_simulator', 'qasm_simulator']  # BasicAer backend names
@ddt
class TestBernsteinVazirani(QiskitAquaTestCase):
    """ Test Bernstein Vazirani """

    @idata(itertools.product(BITMAPS, MCT_MODES, OPTIMIZATIONS, SIMULATORS))
    @unpack
    def test_bernstein_vazirani(self, bv_input, mct_mode, optimization, simulator):
        """ Test Bernstein Vazirani """
        nbits = int(math.log(len(bv_input), 2))
        # compute the ground-truth classically
        # Reading the truth-table entries at one-hot inputs (indices 2**i)
        # spells out the hidden bit string the algorithm should recover.
        parameter = ""
        for i in reversed(range(nbits)):
            bit = bv_input[2 ** i]
            parameter += bit
        backend = BasicAer.get_backend(simulator)
        oracle = TruthTableOracle(bv_input, optimization=optimization, mct_mode=mct_mode)
        algorithm = BernsteinVazirani(oracle)
        quantum_instance = QuantumInstance(backend)
        result = algorithm.run(quantum_instance=quantum_instance)
        # print(result['circuit'].draw(line_length=10000))
        self.assertEqual(result['result'], parameter)

    def test_with_pass_manager(self):
        """ Test Bernstein Vazirani using PassManager """
        # Run under an explicit level-0 pass manager restricted to a basic
        # gate set, to check transpilation does not break the algorithm.
        quantum_instance = QuantumInstance(
            BasicAer.get_backend('qasm_simulator'),
            pass_manager=level_0_pass_manager(
                PassManagerConfig(basis_gates=['cx', 'u1', 'u2', 'u3'])))
        alg = BernsteinVazirani(oracle=TruthTableOracle(bitmaps="01100110"),
                                quantum_instance=quantum_instance)
        result = alg.run()
        self.assertEqual(result['result'], '011')
if __name__ == '__main__':
    unittest.main()  # allow running this test module directly
| 38.565217 | 89 | 0.710635 |
70b11d5419e45f0ec3006493e75cfaea5a7d4418 | 10,432 | py | Python | melime/explainers/explainer.py | elian204/melime | aef885fa4b6b02f7bf7294140d78a85fe546b622 | [
"MIT"
] | 48 | 2020-09-15T02:26:46.000Z | 2021-09-03T17:08:53.000Z | melime/explainers/explainer.py | elian204/melime | aef885fa4b6b02f7bf7294140d78a85fe546b622 | [
"MIT"
] | 1 | 2020-11-03T04:14:27.000Z | 2020-11-05T16:32:25.000Z | melime/explainers/explainer.py | elian204/melime | aef885fa4b6b02f7bf7294140d78a85fe546b622 | [
"MIT"
] | 3 | 2020-09-20T16:52:11.000Z | 2021-09-25T10:04:27.000Z | import warnings
from collections import defaultdict
import numpy as np
from scipy.stats import multivariate_normal
from sklearn import metrics
from melime.explainers.local_models.local_model_statistics import BasicStatistics
from melime.explainers.local_models.local_model_linear import RidgeMod, HuberRegressorMod, SGDRegressorMod
from melime.explainers.local_models.local_model_tree import Tree
# Registry of interpretable local models selectable by name in Explainer.
standard_local_models = {
    "BasicStatistics": BasicStatistics,
    "SGD": SGDRegressorMod,
    "Ridge": RidgeMod,
    "HuberRegressor": HuberRegressorMod,
    "Tree": Tree,
}
# Names of the built-in sample-weighting kernels accepted by Explainer.
standard_weight_kernel = ["gaussian"]
def transformer_identity(x):
    """Default feature transformation: return the input unchanged."""
    return x
class Explainer:
    def __init__(
        self,
        model_predict,
        generator,
        local_model="BasicStatistics",
        feature_names=None,
        target_names=None,
        transformer=None,
        random_state=None,
        verbose=False,
    ):
        """
        Produce a local explanation for an instance from a ML model.

        :param model_predict: prediction function of the model to be explained.
        :param generator: manifold-estimation object used to sample data.
        :param local_model: name (a key of ``standard_local_models``) or an
            instance of the interpretable model used to generate the explanation.
        :param feature_names: optional names of the explanation features.
        :param target_names: optional names of the targets; defaults to ["target"].
        :param transformer: transformation applied to the features to obtain the
            features used to explain; defaults to the identity transformation.
        :param random_state: seed for random condition.
        :param verbose: if True, print progress information on screen.
        """
        self.feature_names = feature_names
        # Bug fix: avoid a mutable default argument (previously ["target"]).
        self.target_names = ["target"] if target_names is None else target_names
        self.model_predict = model_predict
        self.generator = generator
        self.random_state = random_state
        # Bug fix: the original only assigned ``self.transformer`` when the
        # argument was None, so passing a custom transformer raised
        # AttributeError on first use.
        self.transformer = transformer_identity if transformer is None else transformer
        self.verbose = verbose
        if isinstance(local_model, str):
            self.local_model_name = local_model
            if local_model in standard_local_models:
                self.local_algorithm = standard_local_models[self.local_model_name]
            else:
                raise Exception(
                    f"local_model should be in the list {[*standard_local_models]:}. "
                    + "You can also use our own linear model inheriting from LocalModelBase."
                )
        else:
            # A ready-made local-model class/factory was supplied directly.
            self.local_algorithm = local_model
            self.local_model_name = "custom"
        self.predictions_index = set()
        self.predictions_stat = {
            "count": defaultdict(int),
            "mean_probability": defaultdict(float),
            "std_probability": defaultdict(float),
        }

    def explain_instance(
        self,
        x_explain,
        r=None,
        class_index=0,
        n_samples=500,
        tol_importance=0.001,
        tol_error=0.001,
        local_mini_batch_max=100,
        weight_kernel=None,
        test_batch=False,
        scale_data=False,
        include_x_explain_train=True,
    ):
        """
        Generate an explanation for an instance from a ML model.

        :param x_explain: instance to be explained.
        :param r: radius of the ball of the neighborhood.
        :param class_index: class for which an explanation will be created.
        :param n_samples: number of samples for each local mini-batch.
        :param tol_importance: convergence tolerance on the change of importances.
        :param tol_error: convergence tolerance on the local-model error.
        :param local_mini_batch_max: max number of local mini-batches.
        :param weight_kernel: None, "gaussian", or a callable producing weights.
        :param test_batch: if True, also score the local model on a fixed batch.
        :param scale_data: forwarded to the local model.
        :param include_x_explain_train: if True, append x_explain to each batch.
        :return: (fitted local model, ContrafactualExaples with extreme samples).
        """
        # Map the instance into the explanation feature space.
        if self.generator.transformer:
            chi_explain = self.generator.transform(x_explain)
        else:
            chi_explain = self.transformer(x_explain)
        shape_input = list(x_explain.shape[1:])
        # Resolve the sample-weighting kernel.
        if weight_kernel is None:
            self.weight_kernel = None
        elif isinstance(weight_kernel, str):
            if weight_kernel == "gaussian":
                self.weight_kernel = multivariate_normal(mean=chi_explain[0], cov=0.5 * r ** 2.0).pdf
            else:
                raise Exception(
                    f"weight_kernel should be in the list {' '.join(standard_weight_kernel):}. "
                    + "You can also use our own kernel."
                )
        else:
            self.weight_kernel = weight_kernel
        diff_importance = None
        error_local_model = None
        # Reference prediction of the model for the instance itself.
        y_p_explain = self.model_predict(x_explain)
        if len(y_p_explain.shape) == 2:
            y_p_explain = y_p_explain[0][class_index]
        else:
            y_p_explain = y_p_explain[0]
        self.local_model = self.local_algorithm(
            x_explain,
            chi_explain,
            y_p_explain,
            feature_names=self.feature_names,
            target_names=self.target_names,
            class_index=class_index,
            r=r,
            tol_importance=tol_importance,
            tol_error=tol_error,
            scale_data=scale_data,
        )
        con_fav_samples = ContrafactualExaples()
        self.generator.generated_data = None
        if test_batch:
            # Fixed held-out batch used to score the local model each iteration.
            # NOTE(review): when generator.transformer is set, sample_radius
            # returns an (x, chi) tuple (see the loop below) — confirm this
            # branch for that case.
            x_test_set = self.generator.sample_radius(x_explain, r=r, n_samples=n_samples)
            chi_test_set = self.transformer(x_test_set)
            y_test_set = self.model_predict(x_test_set)
        for step in range(local_mini_batch_max):
            # Draw a fresh neighborhood sample (and its transformed features).
            if self.generator.transformer:
                x_set, chi_set = self.generator.sample_radius(x_explain, r=r, n_samples=n_samples)
            else:
                x_set = self.generator.sample_radius(x_explain, r=r, n_samples=n_samples)
                chi_set = self.transformer(x_set)
            if x_set is None:
                warnings.warn("New sample set is None!")
                break
            elif x_set.shape[0] == 0:
                warnings.warn("New sample set is empty, try increase the r value!")
                break
            # Include the x_explain each local-mini-batch
            if include_x_explain_train:
                x_set = np.append(x_set, x_explain.reshape([1] + [*x_set[0].shape]), axis=0)
                chi_set = np.append(chi_set, chi_explain.reshape([1] + [*chi_set[0].shape]), axis=0)
            if self.weight_kernel is not None:
                weight_set = self.weight_kernel(chi_set)
            else:
                weight_set = None
            # Black-box prediction for the sampled neighborhood.
            y_p = self.model_predict(x_set.reshape([-1] + shape_input))
            if len(y_p.shape) != 1:
                y_p = y_p[:, class_index]
            self.local_model.partial_fit(chi_set, y_p, weight_set)
            if test_batch:
                self.calc_error(chi_test_set, y_test_set)
            diff_importance, error_local_model, converged_lc = self.local_model.measure_convergence(chi_set, y_p)
            con_fav_samples.insert_many(x_set, y_p)
            # self.plot_convergence(x_set, y_p, diff_importance, error_local_model)
            if self.verbose:
                print("########################")
                print(" Local-Mini-Batch", step)
                print("\tdiff_importance", "error_local_model")
                print("\t", diff_importance, error_local_model)
            if converged_lc:
                break
        if not self.local_model.convergence:
            warnings.warn(
                "Convergence tolerance (tol) was not achieved!\n"
                + f"Current difference in the importance {diff_importance}/{tol_importance}\n"
                + f"Current Error: {error_local_model}/{tol_importance}"
            )
        return self.local_model, con_fav_samples

    def calc_error(self, chi_set, y_set, weight_set=None):
        """
        Score the current local model on a given set.

        Bug fix: the original body referenced undefined names
        (``chi_test_set``/``y_test_set``/``weight_set``) and raised NameError
        when called; it now uses its parameters, with optional sample weights.

        :return: (explained variance, mean squared error) of the local model.
        """
        y_pred = self.local_model.model.predict(chi_set)
        v1 = metrics.explained_variance_score(y_set, y_pred, sample_weight=weight_set)
        v2 = metrics.mean_squared_error(y_set, y_pred, sample_weight=weight_set)
        return v1, v2

    def plot_convergence(self, x_set, y_p, diff_importance, error_local_model):
        """Debug helper: scatter-plot the sampled data against the local model."""
        from matplotlib import pyplot as plt

        fig, axs = plt.subplots(2, 2, figsize=(6, 6))
        axs[0, 0].scatter(x_set[:, 0], x_set[:, 1], c=y_p, s=10)
        axs[0, 0].scatter([x_set[0, 0]], [x_set[0, 1]], s=20, c="red")
        axs[1, 0].scatter(x_set[:, 0], x_set[:, 1], c=self.local_model.predict(x_set))
        axs[0, 1].scatter(x_set[:, 0], self.local_model.predict(x_set), c="green")
        axs[0, 1].scatter(x_set[:, 0], y_p, c="red", s=10)
        axs[1, 1].scatter(x_set[:, 1], self.local_model.predict(x_set), c="green")
        axs[1, 1].scatter(x_set[:, 1], y_p, c="red", s=10)
        print(self.local_model.importance)
        print("diff_importance", "Errors")
        print(diff_importance, error_local_model)
        plt.show()
class ContrafactualExaples(object):
    """
    Keep the ``n_max`` samples with the highest predictions (favorable) and the
    ``n_max`` samples with the lowest predictions (contrary) seen so far.

    Bug fix: the original only re-sorted the lists once they were full, so
    while filling up (and on the first comparisons afterwards) the element
    compared against (``ys[-1]``) was not necessarily the worst kept one and
    better samples could be discarded.  The lists are now kept sorted after
    every insertion.
    """

    def __init__(self, n_max=5):
        self.n_max = n_max
        self.y_con = list()
        self.y_fav = list()
        self.samples_con = list()
        self.samples_fav = list()

    def insert_many(self, samples, ys):
        """Consider several (sample, y) pairs."""
        for sample, y in zip(samples, ys):
            self.insert(sample, y)

    def insert(self, sample, y):
        """Consider a single (sample, y) pair for both top lists."""
        # Favorable: keep the n_max highest y values, descending.
        self._keep_top(self.y_fav, self.samples_fav, sample, y, reverse=True)
        # Contrary: keep the n_max lowest y values, ascending.
        self._keep_top(self.y_con, self.samples_con, sample, y, reverse=False)

    def _keep_top(self, ys, samples, sample, y, reverse):
        """Append (sample, y), re-sort both lists in place, truncate to n_max."""
        ys.append(y)
        samples.append(sample)
        order = sorted(range(len(ys)), key=ys.__getitem__, reverse=reverse)[: self.n_max]
        ys[:] = [ys[i] for i in order]
        samples[:] = [samples[i] for i in order]

    def print_results(self):
        print("Contrary:")
        for e, ee in zip(self.samples_con, self.y_con):
            print(e, ee)
        print("Favarable:")
        for e, ee in zip(self.samples_fav, self.y_fav):
            print(e, ee)
| 38.637037 | 116 | 0.60765 |
a3ae33038916d377340203f630801f05687cc260 | 3,903 | py | Python | examples/swmr_multiprocess.py | benjaminhwilliams/h5py | fd0753a184194ae2f9a47d5a19fc83d3ec017bbc | [
"BSD-3-Clause"
] | 1 | 2018-07-13T06:29:17.000Z | 2018-07-13T06:29:17.000Z | examples/swmr_multiprocess.py | benjaminhwilliams/h5py | fd0753a184194ae2f9a47d5a19fc83d3ec017bbc | [
"BSD-3-Clause"
] | null | null | null | examples/swmr_multiprocess.py | benjaminhwilliams/h5py | fd0753a184194ae2f9a47d5a19fc83d3ec017bbc | [
"BSD-3-Clause"
] | 1 | 2021-04-08T11:11:04.000Z | 2021-04-08T11:11:04.000Z | """
Demonstrate the use of h5py in SWMR mode to write to a dataset (appending)
from one process while monitoring the growing dataset from another process.
Usage:
swmr_multiprocess.py [FILENAME [DATASETNAME]]
FILENAME: name of file to monitor. Default: swmrmp.h5
DATASETNAME: name of dataset to monitor in DATAFILE. Default: data
This script will start up two processes: a writer and a reader. The writer
will open/create the file (FILENAME) in SWMR mode, create a dataset and start
appending data to it. After each append the dataset is flushed and an event
sent to the reader process. Meanwhile the reader process will wait for events
from the writer and when triggered it will refresh the dataset and read the
current shape of it.
"""
import sys, time
import h5py
import numpy as np
import logging
from multiprocessing import Process, Event
class SwmrReader(Process):
    """
    Monitor process: waits for events from the writer and, on each event,
    refreshes the SWMR dataset and logs its current shape.
    """
    def __init__(self, event, fname, dsetname, timeout = 2.0):
        super(SwmrReader, self).__init__()
        self._event = event          # shared Event signalled by the writer after each flush
        self._fname = fname
        self._dsetname = dsetname
        self._timeout = timeout      # seconds to wait for an event before stopping
    def run(self):
        self.log = logging.getLogger('reader')
        self.log.info("Waiting for initial event")
        # The writer sets the event once the file exists and SWMR mode is enabled.
        assert self._event.wait( self._timeout )
        self._event.clear()
        self.log.info("Opening file %s", self._fname)
        # swmr=True: open for concurrent reading while the writer keeps appending.
        f = h5py.File(self._fname, 'r', libver='latest', swmr=True)
        assert f.swmr_mode
        dset = f[self._dsetname]
        try:
            # monitor and read loop; exits when no event arrives within timeout
            while self._event.wait( self._timeout ):
                self._event.clear()
                self.log.debug("Refreshing dataset")
                # refresh() makes the data the writer flushed visible here
                dset.refresh()
                shape = dset.shape
                self.log.info("Read dset shape: %s"%str(shape))
        finally:
            f.close()
class SwmrWriter(Process):
    """
    Writer process: creates the file and dataset, enables SWMR mode, then
    repeatedly appends data, flushing and signalling the reader after each write.
    """
    def __init__(self, event, fname, dsetname):
        super(SwmrWriter, self).__init__()
        self._event = event          # shared Event used to notify the reader
        self._fname = fname
        self._dsetname = dsetname
    def run(self):
        self.log = logging.getLogger('writer')
        self.log.info("Creating file %s", self._fname)
        f = h5py.File(self._fname, 'w', libver='latest')
        try:
            arr = np.array([1,2,3,4])
            # chunked with an unlimited first dimension so the dataset can grow
            dset = f.create_dataset(self._dsetname, chunks=(2,), maxshape=(None,), data=arr)
            assert not f.swmr_mode
            self.log.info("SWMR mode")
            # SWMR mode must be switched on only after all objects are created
            f.swmr_mode = True
            assert f.swmr_mode
            self.log.debug("Sending initial event")
            self._event.set()
            # Write loop: extend the dataset and append one batch per iteration
            for i in range(5):
                new_shape = ((i+1) * len(arr), )
                self.log.info("Resizing dset shape: %s"%str(new_shape))
                dset.resize( new_shape )
                self.log.debug("Writing data")
                dset[i*len(arr):] = arr
                #dset.write_direct( arr, np.s_[:], np.s_[i*len(arr):] )
                self.log.debug("Flushing data")
                # flush makes the newly written data visible to SWMR readers
                dset.flush()
                self.log.info("Sending event")
                self._event.set()
        finally:
            f.close()
if __name__ == "__main__":
    logging.basicConfig(format='%(levelname)10s %(asctime)s %(name)10s %(message)s',level=logging.INFO)
    # Optional positional arguments: file name and dataset name.
    fname = 'swmrmp.h5'
    dsetname = 'data'
    if len(sys.argv) > 1:
        fname = sys.argv[1]
    if len(sys.argv) > 2:
        dsetname = sys.argv[2]
    event = Event()
    reader = SwmrReader(event, fname, dsetname)
    writer = SwmrWriter(event, fname, dsetname)
    logging.info("Starting reader")
    reader.start()
    # Bug fix: this message previously said "Starting reader" (copy-paste error).
    logging.info("Starting writer")
    writer.start()
    logging.info("Waiting for writer to finish")
    writer.join()
    logging.info("Waiting for reader to finish")
    reader.join()
| 33.93913 | 106 | 0.59339 |
329eb39501cef2040f5b76b927c58b73a8302d78 | 3,275 | py | Python | lib/modules/python/situational_awareness/network/active_directory/dscl_get_users.py | vinnybod/Empire | 6ad0bcd171952da93f059348e4ae00e20154dce7 | [
"BSD-3-Clause"
] | 3 | 2020-03-24T04:37:00.000Z | 2021-04-07T06:05:16.000Z | lib/modules/python/situational_awareness/network/active_directory/dscl_get_users.py | vinnybod/Empire | 6ad0bcd171952da93f059348e4ae00e20154dce7 | [
"BSD-3-Clause"
] | null | null | null | lib/modules/python/situational_awareness/network/active_directory/dscl_get_users.py | vinnybod/Empire | 6ad0bcd171952da93f059348e4ae00e20154dce7 | [
"BSD-3-Clause"
] | null | null | null | from builtins import object
class Module(object):
    """Empire module: enumerate Active Directory users on macOS via ``dscl``."""

    def __init__(self, mainMenu, params=None):
        """
        :param mainMenu: main-menu object giving access to listeners/agent
            handlers and other external functionality.
        :param params: optional list of [name, value] pairs applied to the
            module options at instantiation (e.g. from the command line).
        """
        # metadata info about the module, not modified during runtime
        self.info = {
            # name for the module that will appear in module menus
            'Name': 'dscl Get-Users',
            # list of one or more authors for the module
            'Author': ['@424f424f'],
            # more verbose multi-line description of the module
            'Description': 'This module will use the current user context to query active directory for a list of users.',
            # True if the module needs to run in the background
            'Background' : False,
            # File extension to save the file as
            'OutputExtension' : "",
            # if the module needs administrative privileges
            'NeedsAdmin' : False,
            # True if the method doesn't touch disk/is reasonably opsec safe
            'OpsecSafe' : True,
            # the module language
            'Language' : 'python',
            # the minimum language version needed
            'MinLanguageVersion' : '2.6',
            # list of any references/other comments
            'Comments': ['']
        }
        # any options needed by the module, settable during runtime
        # format: value_name : {description, required, default_value}
        self.options = {
            'Agent' : {
                # The 'Agent' option is the only one that MUST be in a module
                'Description' : 'Agent to run on.',
                'Required' : True,
                'Value' : ''
            },
            'Domain' : {
                'Description' : 'Domain',
                'Required' : True,
                'Value' : ''
            }
        }
        # save off a copy of the mainMenu object to access external functionality
        # like listeners/agent handlers/etc.
        self.mainMenu = mainMenu
        # Apply any [name, value] pairs passed at instantiation.
        # Bug fix: the default was the mutable literal ``params=[]``.
        if params:
            for option, value in params:
                if option in self.options:
                    self.options[option]['Value'] = value

    def generate(self, obfuscate=False, obfuscationCommand=""):
        """
        Return the Python payload that lists AD users via ``dscl``.

        The script writes everything to stdout for proper parsing by Empire.
        """
        domain = self.options['Domain']['Value']
        script = """
import subprocess
cmd = \"""dscl "/Active Directory/%s/All Domains/" -list /Users\"""
print(subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).stdout.read())
""" % (domain)
        return script
ad780e8e3d955e9c2948c5f3bfa7c4ba2b59ada1 | 9,255 | py | Python | src/ralph/account/models.py | jjagodzinski/ralph | 000a22bcc934dc2051e7a09ab1e84bd1c25a9e73 | [
"Apache-2.0"
] | null | null | null | src/ralph/account/models.py | jjagodzinski/ralph | 000a22bcc934dc2051e7a09ab1e84bd1c25a9e73 | [
"Apache-2.0"
] | null | null | null | src/ralph/account/models.py | jjagodzinski/ralph | 000a22bcc934dc2051e7a09ab1e84bd1c25a9e73 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import functools
from django.conf import settings
from django.contrib.auth.models import Group
from django.core.urlresolvers import reverse, NoReverseMatch
from django.db import models as db
from django.http import HttpResponseForbidden
from django.utils.translation import ugettext_lazy as _
from dj.choices import Choices
from dj.choices.fields import ChoiceField
from lck.django.activitylog.models import MonitoredActivity
from lck.django.common.models import TimeTrackable, EditorTrackable
from lck.django.profile.models import (
BasicInfo,
ActivationSupport,
GravatarSupport,
)
from ralph.business.models import Venture, VentureRole
class AvailableHomePage(Choices):
    """Choices for the page a user lands on after login (Profile.home_page)."""
    _ = Choices.Choice
    default = _('Default home page')
    ventures = _("Ventures list")
    racks = _("Racks list")
    networks = _("Network list")
    reports = _("Reports")
    catalog = _("Catalog")
    cmdb_timeline = _("CMDB timeline")
class Perm(Choices):
    """
    Permission choices.  The GLOBAL group (per_venture=False) applies
    system-wide; the PER_VENTURE group (per_venture=True) is scoped to a
    venture when checked.
    """
    _ = Choices.Choice
    # Global permissions — not tied to any particular venture.
    GLOBAL = Choices.Group(0).extra(per_venture=False)
    read_dc_structure = _("read data center structure")
    edit_ventures_roles = _("edit ventures and roles")
    create_devices = _("create devices")
    delete_devices = _("delete devices")
    edit_device_info_generic = _("edit generic device info")
    edit_device_info_financial = _("edit financial device info")
    edit_device_info_support = _("edit device purchase info")
    run_discovery = _("run discovery")
    read_device_info_management = _("read device management_info")
    read_network_structure = _("read network structure")
    create_configuration_item = _("create configuration items")
    edit_configuration_item_info_generic = _("edit configuration items")
    edit_configuration_item_relations = _("edit configuration item relations")
    read_configuration_item_info_generic = _(
        "read configuration item info generic")
    read_configuration_item_info_puppet = _(
        "read configuration item info Puppet reports")
    read_configuration_item_info_git = _("read configuration item info GIT ")
    read_configuration_item_info_jira = _("read configuration item info jira")
    bulk_edit = _("edit all device info in bulk")
    read_domain_name = _("read domain name entries")
    edit_domain_name = _("edit domain name entries")
    create_device = _("create new devices manually")
    read_deployment = _("read deployment")
    # Per-venture permissions — may be bound to a specific venture/role.
    PER_VENTURE = Choices.Group(100) << {'per_venture': True}
    list_devices_generic = _("list venture devices")
    list_devices_financial = _("list venture devices financial info")
    read_device_info_generic = _("read generic device info")
    read_device_info_financial = _("read financial device info")
    read_device_info_support = _("read device purchase info")
    read_device_info_history = _("read device history info")
    read_device_info_reports = _("read device reports")
class Profile(BasicInfo, ActivationSupport, GravatarSupport,
              MonitoredActivity):
    """User profile model with permission-checking helpers."""
    class Meta:
        verbose_name = _("profile")
        verbose_name_plural = _("profiles")
    # Which page the user is redirected to after login.
    home_page = ChoiceField(
        choices=AvailableHomePage,
        default=AvailableHomePage.default,
    )
    # TODO: define fields below and add AUTH_LDAP_PROFILE_ATTR_MAP mappings
    company = db.CharField(max_length=64, blank=True)
    employee_id = db.CharField(max_length=64, blank=True)
    profit_center = db.CharField(max_length=1024, blank=True)
    cost_center = db.CharField(max_length=1024, blank=True)
    department = db.CharField(max_length=64, blank=True)
    manager = db.CharField(max_length=1024, blank=True)
    location = db.CharField(max_length=128, blank=True)
    def __unicode__(self):
        return self.nick
    def has_perm(self, perm, obj=None, role=None):
        """
        Check whether this profile holds ``perm``, optionally restricted to a
        venture (``obj``) and/or a venture role (``role``).

        Permissions bound directly to the profile or to any of its groups
        count; venture/role bindings match up to three ancestor levels of the
        venture/role tree.  Inactive users have no permissions; superusers
        have all of them.  Non-Choice permissions fall through to Django's
        default ``has_perm``.
        """
        if not self.is_active:
            return False
        if self.is_superuser:
            return True
        if isinstance(perm, Choices.Choice):
            groups = self.groups.all()
            if obj:
                # Scoped check: the binding may be global (venture=None) or
                # attached to the venture/role or any ancestor (3 levels up).
                return BoundPerm.objects.filter(
                    db.Q(venture=None) |
                    db.Q(venture=obj) |
                    db.Q(venture__parent=obj) |
                    db.Q(venture__parent__parent=obj) |
                    db.Q(venture__parent__parent__parent=obj),
                    db.Q(role=None) |
                    db.Q(role=role) |
                    db.Q(role__parent=role) |
                    db.Q(role__parent__parent=role) |
                    db.Q(role__parent__parent__parent=role),
                    db.Q(profile=self) | db.Q(group__in=groups),
                    perm=perm.id,
                ).exists()
            else:
                # Global check: only venture-less bindings qualify.
                return BoundPerm.objects.filter(
                    db.Q(role=None) | db.Q(role=role),
                    db.Q(profile=self) | db.Q(group__in=groups),
                    venture=None,
                    perm=perm.id,
                ).exists()
        return super(Profile, self).has_perm(perm, obj)
    def perm_ventures(self, perm):
        """Lists all ventures to which the user has permission."""
        if not self.is_active:
            return []
        groups = self.groups.all()
        # A venture-less binding grants the permission for every venture.
        if self.is_superuser or BoundPerm.objects.filter(
            db.Q(profile=self) | db.Q(group__in=groups),
            perm=perm.id,
            venture=None,
        ).exists():
            return Venture.objects.all()
        return Venture.objects.filter(
            db.Q(boundperm__profile=self) | db.Q(boundperm__group__in=groups),
            boundperm__perm=perm.id,
        )
    def filter_by_perm(self, query, perm):
        """Filters a device search query according to the permissions."""
        profile = self
        if not profile.is_active:
            return query.none()
        # A global grant (or superuser) means no filtering is needed.
        if profile.is_superuser or profile.has_perm(perm):
            return query
        groups = self.groups.all()
        # Keep devices whose venture (or a venture ancestor, up to 3 levels)
        # carries a matching binding for this profile or one of its groups.
        return query.filter(
            db.Q(venture__boundperm__profile=profile,
                 venture__boundperm__perm=perm.id) |
            db.Q(venture__parent__boundperm__profile=profile,
                 venture__parent__boundperm__perm=perm.id) |
            db.Q(venture__parent__parent__boundperm__profile=profile,
                 venture__parent__parent__boundperm__perm=perm.id) |
            db.Q(venture__parent__parent__parent__boundperm__profile=profile,
                 venture__parent__parent__parent__boundperm__perm=perm.id) |
            db.Q(venture__boundperm__group__in=groups,
                 venture__boundperm__perm=perm.id) |
            db.Q(venture__parent__boundperm__group__in=groups,
                 venture__parent__boundperm__perm=perm.id) |
            db.Q(venture__parent__parent__boundperm__group__in=groups,
                 venture__parent__parent__boundperm__perm=perm.id) |
            db.Q(venture__parent__parent__parent__boundperm__group__in=groups,
                 venture__parent__parent__parent__boundperm__perm=perm.id)
        ).distinct()
class BoundPerm(TimeTrackable, EditorTrackable):
    """
    A single permission grant: binds a Perm choice to a profile or a group,
    optionally scoped to a venture and/or a venture role.
    """
    profile = db.ForeignKey(
        Profile,
        verbose_name=_("profile"),
        null=True,
        blank=True,
        default=None,
    )
    group = db.ForeignKey(
        Group,
        verbose_name=_("group"),
        null=True,
        blank=True,
        default=None,
    )
    perm = ChoiceField(verbose_name=_("permission"), choices=Perm)
    # venture=None means the grant is global (applies to every venture).
    venture = db.ForeignKey(
        Venture,
        verbose_name=_("venture"),
        null=True,
        blank=True,
        default=None,
    )
    role = db.ForeignKey(
        VentureRole,
        verbose_name=_("venture role"),
        null=True,
        blank=True,
        default=None,
        help_text=_("if left empty, the permission applies to all roles "
                    "within the selected venture"),
    )
    class Meta:
        verbose_name = _("bound permission")
        verbose_name_plural = _("bound permissions")
def ralph_permission(perms):
    """
    View-method decorator that rejects requests lacking the given permissions.

    ``perms`` is a list of dicts, each with a ``'perm'`` (a Perm choice checked
    via the user's profile) and a ``'msg'`` (returned in an HTTP 403 response
    when the check fails), e.g.::

        perms = [
            {
                'perm': Perm.read_device_info_reports,
                'msg': _("You don't have permission to see reports.")
            }
        ]
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            user_profile = self.request.user.get_profile()
            for entry in perms:
                if not user_profile.has_perm(entry['perm']):
                    return HttpResponseForbidden(entry['msg'])
            return func(self, *args, **kwargs)
        return wrapper
    return decorator
def get_user_home_page_url(user):
    """
    Resolve the URL of the user's configured home page.

    A non-default choice maps directly to the URL named after it.  For the
    default choice, the project-wide HOME_PAGE_URL_NAME is used, falling back
    to the 'search' view when that name cannot be reversed.
    """
    profile = user.get_profile()
    if profile.home_page != AvailableHomePage.default:
        return reverse(profile.home_page.name)
    try:
        return reverse(settings.HOME_PAGE_URL_NAME, args=[])
    except NoReverseMatch:
        return reverse('search')
| 36.437008 | 78 | 0.645165 |
4f217dc4653e55bb7ef891b3d70fa9e84beb4cdd | 5,914 | py | Python | plugins/bot_mcFinder.py | BluesDawn576/BDbot_Plugins | d84f89bb382291486737133148eec58f6ed34efa | [
"MIT"
] | 2 | 2021-05-02T14:24:45.000Z | 2021-07-15T11:08:39.000Z | plugins/bot_mcFinder.py | BluesDawn576/BDbot_Plugins | d84f89bb382291486737133148eec58f6ed34efa | [
"MIT"
] | null | null | null | plugins/bot_mcFinder.py | BluesDawn576/BDbot_Plugins | d84f89bb382291486737133148eec58f6ed34efa | [
"MIT"
] | null | null | null | import const
import json
import requests
from botoy import Action, GroupMsg
from botoy.collection import MsgTypes
from botoy.decorators import ignore_botself, in_content, these_msgtypes
from botoy.refine import refine_pic_group_msg
from botoy.sugar import Text
@ignore_botself
@these_msgtypes(MsgTypes.TextMsg)
@in_content(const.PREFIX + "(mc|player)")
def receive_group_msg(ctx: GroupMsg):
    """
    Group-message handler for the "mc" (server status, with favicon picture)
    and "player" (online player list) commands.  Any failure is reported back
    to the group instead of being raised.
    """
    try:
        if ctx.Content.startswith(const.PREFIX + "mc"):
            # Drop the "<PREFIX>mc" prefix to get the host argument.
            # NOTE(review): the [3:] slice assumes const.PREFIX is exactly one
            # character long — confirm.
            text = ctx.Content[3:].lstrip()
            if text == "":
                # No host given: reply with a usage hint.
                Text(const.BOTNAME + "指令无效,{}mc <ip地址>".format(const.PREFIX))
                return
            msg_text, pic_base64 = get_server_info(text)
            # get_server_info returns 0 as the picture when the server is offline.
            if pic_base64 != 0:
                Action(ctx.CurrentQQ).sendGroupPic(
                    ctx.FromGroupId,
                    content = msg_text,
                    picBase64Buf = pic_base64
                )
            else:
                Text(msg_text)
        elif ctx.Content.startswith(const.PREFIX + "player"):
            # Drop the "<PREFIX>player" prefix to get the host argument.
            text = ctx.Content[7:].lstrip()
            if text == "":
                Text(const.BOTNAME + "指令无效,{}player <ip地址>".format(const.PREFIX))
                return
            Text(const.BOTNAME + get_server_player(text))
    except BaseException as err:
        # Report the failing file and line number back to the group.
        Text("执行指令时出错:\n{}\nline {}: {}".format(
            err.__traceback__.tb_frame.f_globals["__file__"],
            err.__traceback__.tb_lineno,
            err))
def get_response(msg_text):
    """Query the server-status API for the given host and return the decoded JSON."""
    resp = requests.get(f"https://api.bluesdawn.top/minecraft/server/api?host={msg_text}")
    resp.encoding = "utf-8"
    return json.loads(resp.text)
def get_server_info(msg_text):
    """
    Build the status message for a server.

    Returns a two-element list [message_text, favicon].  ``favicon`` is a
    base64 PNG string when the server is online (the server's own favicon, or
    a built-in fallback image), or the integer 0 when the server is offline —
    the caller uses that 0 to decide between a picture and a plain text reply.
    """
    res_json = get_response(msg_text)
    res_status = res_json["status"]
    if res_status == "Online":
        res_favicon = res_json["favicon"]
        res_version = res_json["version"]["version"]
        res_motd = res_json["motd"]["clean"]
        res_mods = res_json["mods"]
        res_player = res_json["players"]
        res_ping = res_json["queryinfo"]["processed"]
        if res_version:
            ver = res_version
        else:
            ver = "未知"
        # MOTD may come as a rich object ("extra"), a simple object ("text"),
        # or a bare value — NOTE(review): shapes inferred from these branches;
        # confirm against the API.
        if "extra" in res_motd:
            motd = res_motd["extra"][0]["text"]
        elif "text" in res_motd:
            motd = res_motd["text"]
        else:
            motd = res_motd
        if res_mods["type"]:
            mods_text = "\n[{}服,包含{}个模组]".format(res_mods["type"], len(res_mods["modlist"]))
        else:
            mods_text = ""
        if res_favicon:
            # The API prefixes the favicon with a data-URI header; strip it.
            pic_base64 = res_favicon.replace("data:image/png;base64,","")
        else:
            # Fallback: built-in placeholder icon (base64 PNG).
            pic_base64 = "iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAACXBIWXMAAA7EAAAOxAGVKw4bAAAFyGlUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPD94cGFja2V0IGJlZ2luPSLvu78iIGlkPSJXNU0wTXBDZWhpSHpyZVN6TlRjemtjOWQiPz4gPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iQWRvYmUgWE1QIENvcmUgNS42LWMxNDggNzkuMTY0MDM2LCAyMDE5LzA4LzEzLTAxOjA2OjU3ICAgICAgICAiPiA8cmRmOlJERiB4bWxuczpyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLXN5bnRheC1ucyMiPiA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIiB4bWxuczp4bXA9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC8iIHhtbG5zOnhtcE1NPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvbW0vIiB4bWxuczpzdEV2dD0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wL3NUeXBlL1Jlc291cmNlRXZlbnQjIiB4bWxuczpkYz0iaHR0cDovL3B1cmwub3JnL2RjL2VsZW1lbnRzLzEuMS8iIHhtbG5zOnBob3Rvc2hvcD0iaHR0cDovL25zLmFkb2JlLmNvbS9waG90b3Nob3AvMS4wLyIgeG1wOkNyZWF0b3JUb29sPSJBZG9iZSBQaG90b3Nob3AgMjEuMCAoV2luZG93cykiIHhtcDpDcmVhdGVEYXRlPSIyMDIxLTA0LTIzVDE5OjAxOjU3KzA4OjAwIiB4bXA6TWV0YWRhdGFEYXRlPSIyMDIxLTA0LTIzVDE5OjAxOjU3KzA4OjAwIiB4bXA6TW9kaWZ5RGF0ZT0iMjAyMS0wNC0yM1QxOTowMTo1NyswODowMCIgeG1wTU06SW5zdGFuY2VJRD0ieG1wLmlpZDoyOTdmMjE4Ni01Mjc0LTIzNDMtOGZiOC04NTEwNjRkMTgyMWYiIHhtcE1NOkRvY3VtZW50SUQ9ImFkb2JlOmRvY2lkOnBob3Rvc2hvcDo3NTBmZGM1ZS00MzNiLTkzNDEtYTgzNC0zNWE3OWRkNzZhMjIiIHhtcE1NOk9yaWdpbmFsRG9jdW1lbnRJRD0ieG1wLmRpZDo1ODJiYWFkZi0xMzM1LTQxNGItYjc0Yy0xNzIxNDNmMGVjNjMiIGRjOmZvcm1hdD0iaW1hZ2UvcG5nIiBwaG90b3Nob3A6Q29sb3JNb2RlPSIzIj4gPHhtcE1NOkhpc3Rvcnk+IDxyZGY6U2VxPiA8cmRmOmxpIHN0RXZ0OmFjdGlvbj0iY3JlYXRlZCIgc3RFdnQ6aW5zdGFuY2VJRD0ieG1wLmlpZDo1ODJiYWFkZi0xMzM1LTQxNGItYjc0Yy0xNzIxNDNmMGVjNjMiIHN0RXZ0OndoZW49IjIwMjEtMDQtMjNUMTk6MDE6NTcrMDg6MDAiIHN0RXZ0OnNvZnR3YXJlQWdlbnQ9IkFkb2JlIFBob3Rvc2hvcCAyMS4wIChXaW5kb3dzKSIvPiA8cmRmOmxpIHN0RXZ0OmFjdGlvbj0ic2F2ZWQiIHN0RXZ0Omluc3RhbmNlSUQ9InhtcC5paWQ6Mjk3ZjIxODYtNTI3NC0yMzQzLThmYjgtODUxMDY0ZDE4MjFmIiBzdEV2dDp3aGVuPSIyMDIxLTA0LTIzVDE5OjAxOjU3KzA4OjAwIiBzdEV2dDpzb2Z0d2FyZUFnZW50PSJBZG9iZSBQaG90b3Nob3AgMjEuMCAoV2luZG93cykiIHN0RXZ0OmNoYW5nZWQ9Ii8iLz4gPC9yZGY6U2VxPiA8L3htcE1NOkhpc3Rvcnk+IDwvcmRmOkRlc2NyaXB0aW9uPiA8L3JkZjpSREY+IDwveDp4bXBtZXRhPiA8P3hwYWNrZXQgZW5kPSJyIj8+HpfG2wAAACtJREFUKJFj/G/8nwEbYDzLiFWcCasoHjCqgRiAPbAZGBhwxc8g9MNw0AAAa+IFUOu3r0oAAAAASUVORK5CYII="
        text = "状态:在线\n人数:{}/{}\nMOTD:{}\n版本:{}\nPing:{}{}".format(
            res_player["online"],
            res_player["max"],
            motd,
            ver,
            res_ping,
            mods_text
        )
        return [const.BOTNAME + text, pic_base64]
    else:
        text = "连接超时或离线"
        return [const.BOTNAME + text, 0]
def get_server_player(msg_text):
    """Return a Chinese-language text report of the players on the queried server.

    Queries the server via `get_response` (defined elsewhere in this module)
    and formats the online/max counts plus a newline-separated player roster.
    """
    res_json = get_response(msg_text)
    if res_json["status"] != "Online":
        # Server unreachable or offline.
        return "连接超时或离线"
    res_player = res_json["players"]
    if res_player["online"] > 0:
        roster = res_player["list"]
        # NOTE(review): membership test `"0" in roster` is kept verbatim from the
        # original — presumably it detects whether the API returned a usable
        # player list; confirm against the query API's response schema.
        if "0" in roster:
            names = ""
            for entry in roster:
                names += entry["name"] + "\n"
            player = "\n" + names
        else:
            player = "无法获取到玩家列表"
    else:
        player = "没有玩家在线"
    return "状态:在线\n人数:{}/{}\n玩家:{}".format(
        res_player["online"],
        res_player["max"],
        player
    )
839a5775860a3f732d91f421ce719f61f8823499 | 21,559 | py | Python | src/benchmarks/gc/src/dead_code.py | svick/Performance | 8f782f846a16da0388fa51c6327a575741f279be | [
"MIT"
] | null | null | null | src/benchmarks/gc/src/dead_code.py | svick/Performance | 8f782f846a16da0388fa51c6327a575741f279be | [
"MIT"
] | null | null | null | src/benchmarks/gc/src/dead_code.py | svick/Performance | 8f782f846a16da0388fa51c6327a575741f279be | [
"MIT"
] | null | null | null | # Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the MIT license.
# See the LICENSE file in the project root for more information.
duration_msec # unused variable (jupyter_notebook.py:331)
alloced_mb # unused variable (jupyter_notebook.py:332)
gen0_in_mb # unused variable (jupyter_notebook.py:381)
gen0_out_mb # unused variable (jupyter_notebook.py:382)
_.SuspensionPercent # unused property (jupyter_notebook.py:473)
_.PctPauseFromSuspend # unused property (jupyter_notebook.py:481)
Keys # unused variable (src\analysis\clr_types.py:42)
ContainsKey # unused function (src\analysis\clr_types.py:45)
nt_symbol_path # unused variable (src\analysis\clr_types.py:81)
Dispose # unused function (src\analysis\clr_types.py:103)
AllEvents # unused variable (src\analysis\clr_types.py:106)
Dispose # unused function (src\analysis\clr_types.py:110)
Ticks # unused variable (src\analysis\clr_types.py:135)
StartTime # unused variable (src\analysis\clr_types.py:149)
EndTime # unused variable (src\analysis\clr_types.py:150)
Threads # unused variable (src\analysis\clr_types.py:154)
SegmentSize # unused variable (src\analysis\clr_types.py:159)
GetGCPauseTimePercentage # unused function (src\analysis\clr_types.py:161)
ProcessDuration # unused variable (src\analysis\clr_types.py:165)
TotalHeapSize # unused variable (src\analysis\clr_types.py:170)
TotalPromoted # unused variable (src\analysis\clr_types.py:171)
Depth # unused variable (src\analysis\clr_types.py:172)
GenerationSize0 # unused variable (src\analysis\clr_types.py:173)
TotalPromotedSize0 # unused variable (src\analysis\clr_types.py:174)
GenerationSize1 # unused variable (src\analysis\clr_types.py:175)
TotalPromotedSize1 # unused variable (src\analysis\clr_types.py:176)
GenerationSize2 # unused variable (src\analysis\clr_types.py:177)
TotalPromotedSize2 # unused variable (src\analysis\clr_types.py:178)
GenerationSize3 # unused variable (src\analysis\clr_types.py:179)
TotalPromotedSize3 # unused variable (src\analysis\clr_types.py:180)
FinalizationPromotedSize # unused variable (src\analysis\clr_types.py:181)
FinalizationPromotedCount # unused variable (src\analysis\clr_types.py:182)
PinnedObjectCount # unused variable (src\analysis\clr_types.py:183)
SinkBlockCount # unused variable (src\analysis\clr_types.py:184)
GCHandleCount # unused variable (src\analysis\clr_types.py:185)
CondemnedGeneration # unused variable (src\analysis\clr_types.py:191)
Gen0ReductionCount # unused variable (src\analysis\clr_types.py:192)
MemoryPressure # unused variable (src\analysis\clr_types.py:195)
HasMemoryPressure # unused variable (src\analysis\clr_types.py:196)
MemoryPressure # unused variable (src\analysis\clr_types.py:247)
HasMemoryPressure # unused variable (src\analysis\clr_types.py:248)
VersionRecognized # unused variable (src\analysis\clr_types.py:249)
HasFreeListAllocated # unused variable (src\analysis\clr_types.py:251)
HasFreeListRejected # unused variable (src\analysis\clr_types.py:253)
CondemnReasons0 # unused variable (src\analysis\clr_types.py:254)
CondemnReasons1 # unused variable (src\analysis\clr_types.py:255)
HasCondemnReasons1 # unused variable (src\analysis\clr_types.py:256)
Version # unused variable (src\analysis\clr_types.py:257)
WaitReason # unused variable (src\analysis\clr_types.py:282)
GcWorkingThreadId # unused variable (src\analysis\clr_types.py:294)
GcWorkingThreadPriority # unused variable (src\analysis\clr_types.py:295)
SwitchSpans # unused variable (src\analysis\clr_types.py:296)
SampleSpans # unused variable (src\analysis\clr_types.py:297)
ObjectsRevisited # unused variable (src\analysis\clr_types.py:324)
Valid # unused variable (src\analysis\clr_types.py:334)
Allocated # unused variable (src\analysis\clr_types.py:336)
FreeListConsumed # unused variable (src\analysis\clr_types.py:338)
BGCCurrentPhase # unused variable (src\analysis\clr_types.py:363)
AllocedSinceLastGCBasedOnAllocTickMB # unused variable (src\analysis\clr_types.py:372)
GenOutMB # unused function (src\analysis\clr_types.py:405)
FindFirstHighestCondemnedHeap # unused function (src\analysis\clr_types.py:419)
IsLowEphemeral # unused function (src\analysis\clr_types.py:422)
IsNotCompacting # unused function (src\analysis\clr_types.py:425)
GetCondemnedReasons # unused function (src\analysis\clr_types.py:430)
reasons_info # unused variable (src\analysis\clr_types.py:430)
TotalPinnedPlugSize # unused variable (src\analysis\clr_types.py:435)
TotalUserPinnedPlugSize # unused variable (src\analysis\clr_types.py:436)
HeapStats # unused variable (src\analysis\clr_types.py:438)
FreeList # unused variable (src\analysis\clr_types.py:442)
NeedLoadedDotNetRuntimes # unused function (src\analysis\clr_types.py:474)
source # unused variable (src\analysis\clr_types.py:476)
Processes # unused function (src\analysis\clr_types.py:502)
source # unused variable (src\analysis\clr_types.py:504)
Analyze # unused function (src\analysis\clr_types.py:511)
issues # unused variable (src\analysis\clr_types.py:514)
param # unused variable (src\analysis\clr_types.py:514)
reader # unused variable (src\analysis\clr_types.py:548)
anEvent # unused variable (src\analysis\clr_types.py:555)
etlOrEtlxFilePath # unused variable (src\analysis\clr_types.py:560)
AbstractHistDict # unused class (src\analysis\clr_types.py:584)
identifier # unused variable (src\analysis\clr_types.py:585)
TimeQPC # unused variable (src\analysis\clr_types.py:591)
threadID # unused variable (src\analysis\clr_types.py:595)
timeQPC # unused variable (src\analysis\clr_types.py:595)
processID # unused variable (src\analysis\clr_types.py:598)
processID # unused variable (src\analysis\clr_types.py:603)
timeQPC # unused variable (src\analysis\clr_types.py:603)
ForEach # unused function (src\analysis\clr_types.py:618)
action # unused variable (src\analysis\clr_types.py:619)
endMSec # unused variable (src\analysis\clr_types.py:626)
startMSec # unused variable (src\analysis\clr_types.py:626)
gc_instance # unused variable (src\analysis\clr_types.py:635)
markIdleStolen # unused variable (src\analysis\clr_types.py:635)
gcInstance # unused variable (src\analysis\clr_types.py:642)
eventList # unused variable (src\analysis\clr_types.py:644)
markIdleStolen # unused variable (src\analysis\clr_types.py:645)
LoadTraceAndGetStacks # unused function (src\analysis\clr_types.py:656)
symReader # unused variable (src\analysis\clr_types.py:659)
traceLog # unused variable (src\analysis\clr_types.py:659)
DoesStackSampleContainFunction # unused function (src\analysis\clr_types.py:663)
stackSource # unused variable (src\analysis\clr_types.py:666)
functionName # unused variable (src\analysis\clr_types.py:668)
collectEventNames # unused variable (src\analysis\clr_types.py:675)
collectPerHeapHistoryTimes # unused variable (src\analysis\clr_types.py:675)
tracePath # unused variable (src\analysis\clr_types.py:675)
tracePath # unused variable (src\analysis\clr_types.py:682)
timeSpan # unused variable (src\analysis\clr_types.py:683)
includeRegex # unused variable (src\analysis\clr_types.py:684)
excludeRegex # unused variable (src\analysis\clr_types.py:685)
threadID # unused variable (src\analysis\clr_types.py:686)
maxEvents # unused variable (src\analysis\clr_types.py:687)
useTraceLog # unused variable (src\analysis\clr_types.py:688)
inputTracePath # unused variable (src\analysis\clr_types.py:695)
outputTracePath # unused variable (src\analysis\clr_types.py:695)
timeSpan # unused variable (src\analysis\clr_types.py:695)
symbol_path # unused variable (src\analysis\clr_types.py:702)
IsEESuspended # unused variable (src\analysis\clr_types.py:721)
MSecPerPhase # unused variable (src\analysis\clr_types.py:726)
JoinPhase # unused variable (src\analysis\clr_types.py:737)
Span # unused variable (src\analysis\clr_types.py:741)
Phase # unused variable (src\analysis\clr_types.py:753)
Phase # unused variable (src\analysis\clr_types.py:764)
ThreadIDToTotalStolenMSec # unused variable (src\analysis\clr_types.py:777)
StagesByPhase # unused function (src\analysis\clr_types.py:779)
ThreadIDToTotalStolenMSec # unused variable (src\analysis\clr_types.py:805)
ThreadIDToTotalStolenMSec # unused variable (src\analysis\clr_types.py:857)
AnalyzeSingleGc # unused function (src\analysis\clr_types.py:867)
get_parts # unused function (src\analysis\core_analysis.py:62)
get_parts # unused function (src\analysis\core_analysis.py:127)
num_samples # unused variable (src\analysis\core_analysis.py:282)
_.num_samples # unused attribute (src\analysis\core_analysis.py:327)
alloc_soh # unused variable (src\analysis\enums.py:33)
lowmemory # unused variable (src\analysis\enums.py:35)
alloc_loh # unused variable (src\analysis\enums.py:37)
oos_soh # unused variable (src\analysis\enums.py:38)
oos_loh # unused variable (src\analysis\enums.py:39)
induced_noforce # unused variable (src\analysis\enums.py:40)
gcstress # unused variable (src\analysis\enums.py:41)
lowmemory_blocking # unused variable (src\analysis\enums.py:42)
induced_compacting # unused variable (src\analysis\enums.py:43)
lowmemory_host # unused variable (src\analysis\enums.py:44)
pm_full_gc # unused variable (src\analysis\enums.py:45)
lowmemory_host_blocking # unused variable (src\analysis\enums.py:46)
expand_reuse_normal # unused variable (src\analysis\enums.py:52)
expand_reuse_bestfit # unused variable (src\analysis\enums.py:53)
expand_new_set_ep # unused variable (src\analysis\enums.py:54)
expand_new_seg # unused variable (src\analysis\enums.py:55)
expand_no_memory # unused variable (src\analysis\enums.py:56)
expand_next_full_gc # unused variable (src\analysis\enums.py:57)
high_frag # unused variable (src\analysis\enums.py:67)
no_gaps # unused variable (src\analysis\enums.py:68)
loh_forced # unused variable (src\analysis\enums.py:69)
last_gc # unused variable (src\analysis\enums.py:70)
induced_compacting # unused variable (src\analysis\enums.py:71)
fragmented_gen0 # unused variable (src\analysis\enums.py:72)
high_mem_load # unused variable (src\analysis\enums.py:73)
high_mem_frag # unused variable (src\analysis\enums.py:74)
vhigh_mem_frag # unused variable (src\analysis\enums.py:75)
no_gc_mode # unused variable (src\analysis\enums.py:76)
_.using_concurrent # unused property (src\analysis\enums.py:91)
_.using_server # unused property (src\analysis\enums.py:95)
BGCPhase # unused class (src\analysis\enums.py:185)
BGC1stNonConcurrent # unused variable (src\analysis\enums.py:186)
BGC1stConcurrent # unused variable (src\analysis\enums.py:187)
BGC2ndNonConcurrent # unused variable (src\analysis\enums.py:188)
BGC2ndConcurrent # unused variable (src\analysis\enums.py:189)
Concurrent # unused variable (src\analysis\enums.py:194)
init_cpu_mapping # unused variable (src\analysis\enums.py:214)
rescan_dependent_handles # unused variable (src\analysis\enums.py:219)
start_bgc # unused variable (src\analysis\enums.py:232)
restart_ee # unused variable (src\analysis\enums.py:233)
concurrent_overflow # unused variable (src\analysis\enums.py:234)
suspend_ee # unused variable (src\analysis\enums.py:235)
bgc_after_ephemeral # unused variable (src\analysis\enums.py:236)
allow_fgc # unused variable (src\analysis\enums.py:237)
bgc_sweep # unused variable (src\analysis\enums.py:238)
suspend_ee_verify # unused variable (src\analysis\enums.py:239)
restart_ee_verify # unused variable (src\analysis\enums.py:240)
set_state_free # unused variable (src\analysis\enums.py:241)
after_absorb # unused variable (src\analysis\enums.py:244)
after_reset # unused variable (src\analysis\enums.py:246)
after_ephemeral_sweep # unused variable (src\analysis\enums.py:247)
after_profiler_heap_walk # unused variable (src\analysis\enums.py:248)
minimal_gc # unused variable (src\analysis\enums.py:249)
after_commit_soh_no_gc # unused variable (src\analysis\enums.py:250)
expand_loh_no_gc # unused variable (src\analysis\enums.py:251)
final_no_gc # unused variable (src\analysis\enums.py:252)
disable_software_write_watch # unused variable (src\analysis\enums.py:253)
restarting # unused variable (src\analysis\enums.py:260)
stolen # unused variable (src\analysis\enums.py:262)
idle_for_no_good_reason # unused variable (src\analysis\enums.py:263)
MarkStack # unused variable (src\analysis\enums.py:277)
MarkFQ # unused variable (src\analysis\enums.py:278)
MarkHandles # unused variable (src\analysis\enums.py:279)
MarkOlder # unused variable (src\analysis\enums.py:280)
MarkSizedRef # unused variable (src\analysis\enums.py:281)
MarkOverflow # unused variable (src\analysis\enums.py:282)
CondemnedReasonsGroup # unused class (src\analysis\enums.py:287)
Initial_Generation # unused variable (src\analysis\enums.py:291)
Final_Generation # unused variable (src\analysis\enums.py:292)
Alloc_Exceeded # unused variable (src\analysis\enums.py:293)
Time_Tuning # unused variable (src\analysis\enums.py:294)
Induced # unused variable (src\analysis\enums.py:299)
Low_Ephemeral # unused variable (src\analysis\enums.py:300)
Expand_Heap # unused variable (src\analysis\enums.py:301)
Fragmented_Ephemeral # unused variable (src\analysis\enums.py:302)
Fragmented_Gen1_To_Gen2 # unused variable (src\analysis\enums.py:303)
Fragmented_Gen2 # unused variable (src\analysis\enums.py:304)
Fragmented_Gen2_High_Mem # unused variable (src\analysis\enums.py:305)
GC_Before_OOM # unused variable (src\analysis\enums.py:306)
Too_Small_For_BGC # unused variable (src\analysis\enums.py:307)
Ephemeral_Before_BGC # unused variable (src\analysis\enums.py:308)
Internal_Tuning # unused variable (src\analysis\enums.py:309)
Max # unused variable (src\analysis\enums.py:310)
init # unused variable (src\analysis\enums.py:315)
ThreadWaitReason # unused class (src\analysis\enums.py:326)
Executive # unused variable (src\analysis\enums.py:327)
FreePage # unused variable (src\analysis\enums.py:328)
PageIn # unused variable (src\analysis\enums.py:329)
SystemAllocation # unused variable (src\analysis\enums.py:330)
ExecutionDelay # unused variable (src\analysis\enums.py:331)
Suspended # unused variable (src\analysis\enums.py:332)
UserRequest # unused variable (src\analysis\enums.py:333)
EventPairHigh # unused variable (src\analysis\enums.py:334)
EventPairLow # unused variable (src\analysis\enums.py:335)
LpcReceive # unused variable (src\analysis\enums.py:336)
LpcReply # unused variable (src\analysis\enums.py:337)
VirtualMemory # unused variable (src\analysis\enums.py:338)
PageOut # unused variable (src\analysis\enums.py:339)
Unknown # unused variable (src\analysis\enums.py:340)
processor_number # unused variable (src\analysis\gui_join_analysis.py:88)
_.span_msec # unused property (src\analysis\gui_join_analysis.py:326)
_.span_msec # unused property (src\analysis\gui_join_analysis.py:337)
n_starts # unused variable (src\analysis\gui_join_analysis.py:352)
n_ends # unused variable (src\analysis\gui_join_analysis.py:353)
join_id # unused variable (src\analysis\gui_join_analysis.py:359)
_.n_starts # unused attribute (src\analysis\gui_join_analysis.py:399)
_.n_ends # unused attribute (src\analysis\gui_join_analysis.py:422)
median_join_msec # unused variable (src\analysis\gui_join_analysis.py:459)
maximum_join_msec # unused variable (src\analysis\gui_join_analysis.py:460)
minimum_join_msec # unused variable (src\analysis\gui_join_analysis.py:461)
absolute # unused variable (src\analysis\gui_join_analysis.py:467)
percentage # unused variable (src\analysis\gui_join_analysis.py:468)
deviation_from_median_join_stage_duration # unused variable (src\analysis\gui_join_analysis.py:474)
median_heap_join_msec # unused variable (src\analysis\gui_join_analysis.py:481)
minimum_heap_join_msec # unused variable (src\analysis\gui_join_analysis.py:482)
maximum_heap_join_msec # unused variable (src\analysis\gui_join_analysis.py:483)
median_phase_join_msec # unused variable (src\analysis\gui_join_analysis.py:490)
max_phase_join_msec # unused variable (src\analysis\gui_join_analysis.py:491)
min_phase_join_msec # unused variable (src\analysis\gui_join_analysis.py:492)
deviation_from_median_join_msec # unused variable (src\analysis\gui_join_analysis.py:493)
statistics_over_all_joins # unused variable (src\analysis\gui_join_analysis.py:499)
statistics_over_individual_joins # unused variable (src\analysis\gui_join_analysis.py:500)
statistics_over_individual_gc_phases # unused variable (src\analysis\gui_join_analysis.py:501)
heap_num # unused variable (src\analysis\gui_join_analysis.py:717)
stolen # unused variable (src\analysis\gui_stolen_cpu_analysis.py:28)
stolen_cpu_breakdown # unused variable (src\analysis\gui_stolen_cpu_analysis.py:38)
gc_occurrences # unused variable (src\analysis\gui_stolen_cpu_analysis.py:54)
timestamp # unused variable (src\analysis\gui_stolen_cpu_analysis.py:85)
interrupting_thread_duration_ms # unused variable (src\analysis\gui_stolen_cpu_analysis.py:86)
tid # unused variable (src\analysis\gui_stolen_cpu_analysis.py:89)
stolen_cpu_instances # unused variable (src\analysis\gui_stolen_cpu_analysis.py:97)
loose # unused variable (src\analysis\join_analysis.py:23)
_.rw # unused property (src\analysis\mem_utils.py:72)
_.r_only # unused property (src\analysis\mem_utils.py:76)
print_maps # unused function (src\analysis\mem_utils.py:164)
parse_maps # unused function (src\analysis\mem_utils.py:204)
parse_valgrind_err # unused function (src\analysis\mem_utils.py:220)
addr # unused variable (src\analysis\mem_utils.py:427)
followed_by # unused variable (src\analysis\mem_utils.py:429)
_.abs_count_diff # unused property (src\analysis\mem_utils.py:617)
max_stdev_fraction # unused function (src\analysis\report.py:137)
_stats_list_for_proc # unused function (src\analysis\run_metrics.py:154)
_.Action1 # unused property (src\analysis\setup_clr.py:255)
_.startup_flags # unused property (src\analysis\types.py:228)
serialize_run_metric # unused function (src\analysis\types.py:386)
deserialize_run_metric # unused function (src\analysis\types.py:393)
single_gc_metric_must_exist_for_name # unused function (src\analysis\types.py:412)
single_heap_metric_must_exist_for_name # unused function (src\analysis\types.py:430)
_.Gen0UserAllocatedMB # unused property (src\analysis\types.py:778)
_.LOHUserAllocatedMB # unused property (src\analysis\types.py:784)
_.Gen0SizeBeforeMB # unused property (src\analysis\types.py:788)
_.Gen1SizeBeforeMB # unused property (src\analysis\types.py:792)
_.LOHSizeBeforeMB # unused property (src\analysis\types.py:800)
_.Gen0BudgetMB # unused property (src\analysis\types.py:804)
_.Gen1BudgetMB # unused property (src\analysis\types.py:808)
_.Gen0SizeAfterMB # unused property (src\analysis\types.py:820)
_.Gen1SizeAfterMB # unused property (src\analysis\types.py:824)
_.LOHSizeAfterMB # unused property (src\analysis\types.py:832)
_.Gen0FreeListSpaceBeforeMB # unused property (src\analysis\types.py:836)
_.Gen1FreeListSpaceBeforeMB # unused property (src\analysis\types.py:840)
_.LOHFreeListSpaceBeforeMB # unused property (src\analysis\types.py:848)
_.Gen0FreeListSpaceAfterMB # unused property (src\analysis\types.py:852)
_.Gen1FreeListSpaceAfterMB # unused property (src\analysis\types.py:856)
_.LOHFreeListSpaceAfterMB # unused property (src\analysis\types.py:864)
_.Gen0FreeObjSpaceBeforeMB # unused property (src\analysis\types.py:868)
_.Gen1FreeObjSpaceBeforeMB # unused property (src\analysis\types.py:872)
_.Gen2FreeObjSpaceBeforeMB # unused property (src\analysis\types.py:876)
_.LOHFreeObjSpaceBeforeMB # unused property (src\analysis\types.py:880)
_.Gen0FreeObjSpaceAfterMB # unused property (src\analysis\types.py:884)
_.Gen1FreeObjSpaceAfterMB # unused property (src\analysis\types.py:888)
_.Gen2FreeObjSpaceAfterMB # unused property (src\analysis\types.py:892)
_.LOHFreeObjSpaceAfterMB # unused property (src\analysis\types.py:896)
allow_out_of_date # unused variable (src\commonlib\get_built.py:46)
empty_set # unused function (src\commonlib\collection_util.py:38)
group_by # unused function (src\commonlib\collection_util.py:54)
filter_together # unused function (src\commonlib\collection_util.py:113)
_.y_min # unused property (src\commonlib\collection_util.py:403)
_.y_max # unused property (src\commonlib\collection_util.py:407)
reverse # unused function (src\commonlib\collection_util.py:445)
_.average_bytes # unused property (src\commonlib\host_info.py:47)
l1 # unused variable (src\commonlib\host_info.py:55)
l2 # unused variable (src\commonlib\host_info.py:56)
l3 # unused variable (src\commonlib\host_info.py:57)
hostname # unused variable (src\commonlib\host_info.py:64)
cache_info # unused variable (src\commonlib\host_info.py:68)
to_json # unused function (src\commonlib\parse_and_serialize.py:65)
_.sort_base_mapping_type_on_output # unused attribute (src\commonlib\parse_and_serialize.py:256)
_.__qualname__ # unused attribute (src\commonlib\type_utils.py:48)
bytes_to_kb # unused function (src\commonlib\util.py:468)
_float_range_around # unused function (src\exec\generate_tests.py:212)
_survive_bench_file # unused function (src\exec\generate_tests.py:270)
_gcsmall_benchyaml # unused function (src\exec\generate_tests.py:724)
NonTemporaryDirectory # unused function (src\exec\run_single_test.py:190)
_run_single_test_linux_perfcollect # unused function (src\exec\run_single_test.py:813)
| 65.330303 | 100 | 0.800501 |
6314b49979a051388a2fac6dfd3077481813b075 | 1,604 | py | Python | tutorial/snippets/views.py | jjfeore/restful-tutorial | 13580cabfd3d5478001838784400575978a2ce10 | [
"MIT"
] | null | null | null | tutorial/snippets/views.py | jjfeore/restful-tutorial | 13580cabfd3d5478001838784400575978a2ce10 | [
"MIT"
] | null | null | null | tutorial/snippets/views.py | jjfeore/restful-tutorial | 13580cabfd3d5478001838784400575978a2ce10 | [
"MIT"
] | null | null | null | from snippets.models import Snippet
from snippets.serializers import SnippetSerializer
from rest_framework import generics, permissions, renderers, viewsets
from django.contrib.auth.models import User
from snippets.serializers import UserSerializer
from snippets.permissions import IsOwnerOrReadOnly
from rest_framework.decorators import api_view, detail_route
from rest_framework.response import Response
from rest_framework.reverse import reverse
@api_view(['GET'])
def api_root(request, format=None):
    """Entry point of the browsable API: links to the two top-level list views."""
    links = {
        'users': reverse('user-list', request=request, format=format),
        'snippets': reverse('snippet-list', request=request, format=format),
    }
    return Response(links)
class UserViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only user endpoints.

    `ReadOnlyModelViewSet` automatically provides the `list` and
    `retrieve` (detail) actions; no create/update/delete is exposed.
    """
    # All users are exposed; filtering/pagination is left to DRF defaults.
    queryset = User.objects.all()
    serializer_class = UserSerializer
class SnippetViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for code snippets, plus a rendered-HTML extra action.

    `ModelViewSet` automatically supplies `list`, `create`, `retrieve`,
    `update` and `destroy`; the additional `highlight` action serves the
    snippet's highlighted HTML.
    """
    queryset = Snippet.objects.all()
    serializer_class = SnippetSerializer
    # Anyone may read; only an authenticated owner may modify a snippet.
    permission_classes = (
        permissions.IsAuthenticatedOrReadOnly,
        IsOwnerOrReadOnly,
    )

    def perform_create(self, serializer):
        # Record the requesting user as the owner of the new snippet.
        serializer.save(owner=self.request.user)

    @detail_route(renderer_classes=[renderers.StaticHTMLRenderer])
    def highlight(self, request, *args, **kwargs):
        # Serve the pre-rendered, syntax-highlighted HTML for one snippet.
        return Response(self.get_object().highlighted)
| 34.12766 | 75 | 0.740648 |
30c7da58c84496a2696836abf398d28211a692bc | 576 | py | Python | react-track/app/tracks/migrations/0002_track_posted_by.py | julesc00/react-track | 792dd9d076f7eaac230034a06dd4593d2cfdca51 | [
"MIT"
] | null | null | null | react-track/app/tracks/migrations/0002_track_posted_by.py | julesc00/react-track | 792dd9d076f7eaac230034a06dd4593d2cfdca51 | [
"MIT"
] | null | null | null | react-track/app/tracks/migrations/0002_track_posted_by.py | julesc00/react-track | 792dd9d076f7eaac230034a06dd4593d2cfdca51 | [
"MIT"
] | null | null | null | # Generated by Django 3.1 on 2020-08-29 22:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration (see header): adds a nullable `posted_by`
    # foreign key from Track to the project's swappable user model.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('tracks', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='track',
            name='posted_by',
            # null=True keeps pre-existing Track rows valid; deleting the
            # user cascades and removes their tracks.
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| 26.181818 | 121 | 0.671875 |
b181051f671fad64626e39ce3c2a564326a57ceb | 2,862 | py | Python | merkle-tree/merkle_tree.py | ltucker284/basic-chain | 71820fbc963d534bdfb82054e08d757d3518bb99 | [
"Apache-2.0"
] | 2 | 2020-05-10T14:05:34.000Z | 2021-09-08T04:37:15.000Z | merkle-tree/merkle_tree.py | ltucker284/basic-chain | 71820fbc963d534bdfb82054e08d757d3518bb99 | [
"Apache-2.0"
] | 2 | 2021-03-10T01:08:20.000Z | 2021-05-10T20:37:57.000Z | merkle-tree/merkle_tree.py | ltucker284/basic-chain | 71820fbc963d534bdfb82054e08d757d3518bb99 | [
"Apache-2.0"
] | 2 | 2021-03-19T08:16:05.000Z | 2021-09-08T04:37:24.000Z | #######################################
# The following class was cloned from the following repo: https://github.com/Concerned-HumanDev/Simple-Merkle-Tree-in-Python.git
# All the credit for creating this Merkle Tree example goes to the owner of the repo: Jae Duk Seo
# Code has been updated to Python 3.
#######################################
import hashlib,json
from collections import OrderedDict
class merkle_tree:
    """Builds a simple Merkle tree over a list of strings.

    Adapted from https://github.com/Concerned-HumanDev/Simple-Merkle-Tree-in-Python
    (credit: Jae Duk Seo). Every node that gets hashed is recorded in
    `past_transaction`, an ordered mapping of node content -> sha256 hex
    digest, inserted bottom level first.
    """

    def __init__(self, list1=None):
        # `list1` holds the current level of node contents (the leaves at first).
        self.list1 = list1
        # Ordered ledger: node content -> sha256 hex digest, oldest entry first.
        self.past_transaction = OrderedDict()

    def create_tree(self):
        """Hash the current level pairwise and recurse until one node remains."""
        level = self.list1
        ledger = self.past_transaction
        next_level = []
        for pos in range(0, len(level), 2):
            left = level[pos]
            left_digest = hashlib.sha256(left.encode('utf-8')).hexdigest()
            ledger[left] = left_digest
            if pos + 1 != len(level):
                right = level[pos + 1]
                right_digest = hashlib.sha256(right.encode('utf-8')).hexdigest()
                ledger[right] = right_digest
                # A parent node's content is the concatenation of its two
                # children's hex digests.
                next_level.append(left_digest + right_digest)
            else:
                # Odd element out is promoted upward via its own digest alone.
                next_level.append(left_digest)
        if len(level) != 1:
            self.list1 = next_level
            self.past_transaction = ledger
            self.create_tree()

    def Get_past_transaction(self):
        """Return the ordered ledger of every node hashed so far."""
        return self.past_transaction

    def Get_root_leaf(self):
        """Return the *content* (not the digest) of the last node hashed — the root's key."""
        all_keys = list(self.past_transaction)
        return all_keys[-1]
# if __name__ == "__main__":
# Merk_tree = merkle_tree()
# transaction = ['a', 'b', 'c', 'd']
# Merk_tree.list1 = transaction
# Merk_tree.create_tree()
# past_transaction = Merk_tree.Get_past_transaction()
# print("Past Transaction: ", Merk_tree.Get_past_transaction())
# print("Final root of the tree: ", Merk_tree.Get_root_leaf())
| 38.16 | 128 | 0.606918 |
e4ba8c2f1af19919508d4bc28008d3a4370f8724 | 441 | py | Python | production/main.py | RaeChen07/SteamDataAnalysis | ac3fe30ec56f71ac4eb05366dac892aba60375c9 | [
"MIT"
] | null | null | null | production/main.py | RaeChen07/SteamDataAnalysis | ac3fe30ec56f71ac4eb05366dac892aba60375c9 | [
"MIT"
] | null | null | null | production/main.py | RaeChen07/SteamDataAnalysis | ac3fe30ec56f71ac4eb05366dac892aba60375c9 | [
"MIT"
] | null | null | null | """
Main script: runs all the other analysis scripts and generates all the graphs.
"""
import os
import multi_playtime
import multi_ach
import machine_learning
import engagement_trend
def main():
    """Run every analysis script in order, writing all plots under ./output."""
    if not os.path.isdir("output"):
        # The plot scripts expect this directory to already exist.
        os.mkdir("output")
    # Execute each analysis module's entry point in sequence.
    for analysis in (multi_playtime, multi_ach, machine_learning, engagement_trend):
        analysis.main()
# Allow running this module directly as the pipeline entry point.
if __name__ == "__main__":
    main()
| 16.961538 | 71 | 0.693878 |
cbb0b39705b70430ae4ce49ad696c912b40d6a3a | 1,591 | py | Python | lingvo/core/tshape_test.py | Singed-jj/lingvo | a2a4ac8bd835ffc2f95fc38ee3e9bc17c30fcc56 | [
"Apache-2.0"
] | 1 | 2020-08-04T08:39:35.000Z | 2020-08-04T08:39:35.000Z | lingvo/core/tshape_test.py | Singed-jj/lingvo | a2a4ac8bd835ffc2f95fc38ee3e9bc17c30fcc56 | [
"Apache-2.0"
] | null | null | null | lingvo/core/tshape_test.py | Singed-jj/lingvo | a2a4ac8bd835ffc2f95fc38ee3e9bc17c30fcc56 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lingvo.core.tshape."""
import lingvo.compat as tf
from lingvo.core import test_utils
from lingvo.core.tshape import Shape
class TshapeTest(test_utils.TestCase):
  """Exercises symbolic Shape construction, substitution and concatenation."""

  def testShape(self):
    base = Shape([3, 3, 'd0', 'd1'])
    derived = Shape([5, 5, base[2], 'd2'])
    # Symbolic dimensions appear as None in the static TensorShape.
    self.assertEqual(derived.ToTensorShape().as_list(), [5, 5, None, None])
    resolved = derived.Subs({base[2]: 8, base[3]: 16, derived[3]: 32})
    self.assertEqual(resolved.ToTensorShape().as_list(), [5, 5, 8, 32])
    # Slicing (__getitem__) yields another Shape.
    tail = base[-2:]
    self.assertIsInstance(tail, Shape)
    # Shapes unpack into individual symbolic dimensions.
    dim0, dim1 = base[-2:]
    self.assertEqual((dim0 * dim1).subs({dim0: 3, dim1: 5}), 15)
    # Concatenation via __add__ ...
    self.assertEqual(str(base + derived), '[3, 3, _d0, _d1, 5, 5, _d0, _d2]')
    # ... and via __radd__ with a plain list on the left.
    self.assertEqual(str([7] + base), '[7, 3, 3, _d0, _d1]')
    self.assertEqual(str(base[-2:] + derived[-1:]), '[_d0, _d1, _d2]')
# Runs the test suite when this file is executed directly.
if __name__ == '__main__':
  tf.test.main()
| 31.82 | 80 | 0.626021 |
da165023ec13da6e8e4f09bde02911481d804f7c | 8,111 | py | Python | tensorflow_datasets/core/file_format_adapter.py | manda-creator/datasets | 040bccda79b096dc428e66e7d0a6dece7b22b8eb | [
"Apache-2.0"
] | null | null | null | tensorflow_datasets/core/file_format_adapter.py | manda-creator/datasets | 040bccda79b096dc428e66e7d0a6dece7b22b8eb | [
"Apache-2.0"
] | null | null | null | tensorflow_datasets/core/file_format_adapter.py | manda-creator/datasets | 040bccda79b096dc428e66e7d0a6dece7b22b8eb | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""`tfds.file_adapter.FileFormatAdapter`s for GeneratorBasedBuilder.
FileFormatAdapters implement methods to write and read data from a
particular file format.
Currently, a single FileAdapter is available:
* TFRecordExampleAdapter: To store the pre-processed dataset as .tfrecord file
```python
return TFRecordExampleAdapter({
"x": tf.io.FixedLenFeature(tuple(), tf.int64)
})
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import random
import string
from absl import logging
import numpy as np
import six
import tensorflow.compat.v2 as tf
from tensorflow_datasets.core import example_parser
from tensorflow_datasets.core import example_serializer
from tensorflow_datasets.core import lazy_imports_lib
from tensorflow_datasets.core import utils
__all__ = [
"FileFormatAdapter",
"TFRecordExampleAdapter",
]
@six.add_metaclass(abc.ABCMeta)
class FileFormatAdapter(object):
  """Provides writing and reading methods for a file format.

  Abstract base class: a concrete adapter (e.g. `TFRecordExampleAdapter`)
  knows how to serialize examples to files and how to read them back as a
  `tf.data.Dataset`.
  """
  def __init__(self, example_specs):
    """Constructor.
    Args:
      example_specs: Nested `dict` of `tfds.features.TensorInfo`, corresponding
        to the structure of data to write/read.
    """
    # The base class keeps no state; subclasses use example_specs to build
    # their serializer/parser.
    del example_specs
  @abc.abstractmethod
  def write_from_generator(self, generator, output_files):
    """Write to files from generators_and_filenames.
    Args:
      generator: generator yielding dictionaries of feature name to value.
      output_files: `list<str>`, output files to write files to.
    """
    raise NotImplementedError
  def write_from_pcollection(
      self, pcollection, file_path_prefix=None, num_shards=None):
    """Write the PCollection to file.
    Args:
      pcollection: `beam.PCollection`, the PCollection containing the examples
        to write.
      file_path_prefix: `str`, output files to write files to.
      num_shards: `int`, number of output shards to produce.
    """
    # Optional hook: only adapters that support Apache Beam override this.
    # TODO(tfds): Should try to unify the write_from_generator signatures:
    # * Have the FileFormatAdapter to add the prefix when reading/writing
    raise NotImplementedError
  @abc.abstractmethod
  def dataset_from_filename(self, filename):
    """Returns a `tf.data.Dataset` whose elements are dicts given a filename."""
    raise NotImplementedError
  @abc.abstractproperty
  def filetype_suffix(self):
    """Returns a str file type suffix (e.g. "tfrecord")."""
    raise NotImplementedError
class TFRecordExampleAdapter(FileFormatAdapter):
  """Writes/Reads serialized Examples protos to/from TFRecord files.

  Constraints on generators:
  * The generator must yield feature dictionaries (`dict<str feature_name,
    feature_value>`).
  * The allowed feature types are `int`, `float`, and `str` (or `bytes` in
    Python 3; `unicode` strings will be encoded in `utf-8`), or lists thereof.
  """
  def __init__(self, example_specs):
    super(TFRecordExampleAdapter, self).__init__(example_specs)
    # One object to turn feature dicts into serialized tf.Example protos,
    # and one to parse them back.
    self._serializer = example_serializer.ExampleSerializer(example_specs)
    self._parser = example_parser.ExampleParser(example_specs)
  def write_from_generator(self, generator, output_files):
    # Serialize lazily so only one example is held in memory at a time.
    serialized_records = (
        self._serializer.serialize_example(example) for example in generator)
    _write_tfrecords_from_generator(
        serialized_records, output_files, shuffle=True)
  def write_from_pcollection(self, pcollection, file_path_prefix, num_shards):
    beam = lazy_imports_lib.lazy_imports.apache_beam
    # WARNING: WriteToTFRecord do not support long in python2 with the default,
    # beam implementation, so need to convert the long value (from the proto
    # field) into int, otherwise, the number of shards will be random.
    shard_count = int(num_shards)
    serialized = (
        pcollection
        | "SerializeDict" >> beam.Map(self._serializer.serialize_example)
        | "Shuffle" >> beam.Reshuffle())
    return serialized | "WriteToExamples" >> beam.io.WriteToTFRecord(
        file_path_prefix=".".join([file_path_prefix, self.filetype_suffix]),
        num_shards=shard_count,
    )
  def dataset_from_filename(self, filename):
    records = tf.compat.v1.data.TFRecordDataset(
        filename, buffer_size=int(16 * 1e6))
    return records.map(
        self._parser.parse_example,
        num_parallel_calls=tf.data.experimental.AUTOTUNE)
  @property
  def filetype_suffix(self):
    return "tfrecord"
def do_files_exist(filenames):
  """Whether any of the given paths already exists on disk."""
  # Checks every path (no short-circuit), mirroring the original behavior.
  existence_flags = [tf.io.gfile.exists(fname) for fname in filenames]
  return any(existence_flags)
@contextlib.contextmanager
def _close_on_exit(handles):
  """Context manager yielding `handles`; closes each handle on exit.

  Handles are closed in their original order, whether the `with` body
  succeeds or raises.
  """
  try:
    yield handles
  finally:
    for single_handle in handles:
      single_handle.close()
def get_incomplete_path(filename):
  """Returns a temporary filename based on filename.

  The result is `<filename>.incomplete` followed by six random characters
  drawn from uppercase ASCII letters and digits.
  """
  alphabet = string.ascii_uppercase + string.digits
  suffix_chars = [random.choice(alphabet) for _ in range(6)]
  return "{}.incomplete{}".format(filename, "".join(suffix_chars))
@contextlib.contextmanager
def _incomplete_files(filenames):
  """Create temporary files for filenames and rename on exit.

  Yields one ".incomplete*" temp path per target filename. If the `with`
  body completes without raising, each temp file is renamed to its final
  name; on failure, leftover temp files are deleted so no partial output
  remains under the final names.
  """
  tmp_files = [get_incomplete_path(f) for f in filenames]
  try:
    yield tmp_files
    # Success path: move each finished temp file into place.
    for tmp, output in zip(tmp_files, filenames):
      tf.io.gfile.rename(tmp, output)
  finally:
    # Runs on both success and failure; only temp files that were not
    # renamed above (i.e. after an error) still exist and get removed.
    for tmp in tmp_files:
      if tf.io.gfile.exists(tmp):
        tf.io.gfile.remove(tmp)
@contextlib.contextmanager
def incomplete_dir(dirname):
  """Create temporary dir for dirname and rename on exit.

  The directory is created under a randomized ".incomplete*" name and only
  renamed to `dirname` if the `with` body succeeds; otherwise the partial
  tree is removed.
  """
  tmp_dir = get_incomplete_path(dirname)
  tf.io.gfile.makedirs(tmp_dir)
  try:
    yield tmp_dir
    tf.io.gfile.rename(tmp_dir, dirname)
  finally:
    # After a successful rename the temp path no longer exists; this only
    # fires for the failure case.
    if tf.io.gfile.exists(tmp_dir):
      tf.io.gfile.rmtree(tmp_dir)
def _shuffle_tfrecord(path, random_gen):
  """Shuffle a single record file in memory: read all, permute, rewrite."""
  # Load every serialized record from the file into memory.
  record_iter = tf.compat.v1.io.tf_record_iterator(path)
  records = list(
      utils.tqdm(record_iter, desc="Reading...", unit=" examples",
                 leave=False))
  # Permute in place with the caller-provided RNG (seeded upstream for
  # deterministic output).
  random_gen.shuffle(records)
  # Overwrite the file with the shuffled records.
  with tf.io.TFRecordWriter(path) as writer:
    progress = utils.tqdm(
        records, desc="Writing...", unit=" examples", leave=False)
    for record in progress:
      writer.write(record)
def _write_tfrecords_from_generator(generator, output_files, shuffle=True):
  """Writes generated str records to output_files in round-robin order.

  Args:
    generator: iterable of serialized records, one per example.
    output_files: `list<str>`, destination shard paths.
    shuffle: if True, each finished shard is shuffled in memory with a
      fixed seed so the output is deterministic across runs.

  Raises:
    ValueError: if any of the output files already exists.
  """
  if do_files_exist(output_files):
    raise ValueError(
        "Pre-processed files already exists: {}.".format(output_files))
  # Writes go to ".incomplete*" temp files; they are renamed to the final
  # names only if everything below succeeds (see _incomplete_files).
  with _incomplete_files(output_files) as tmp_files:
    # Write all shards
    writers = [tf.io.TFRecordWriter(fname) for fname in tmp_files]
    with _close_on_exit(writers) as writers:
      logging.info("Writing TFRecords")
      _round_robin_write(writers, generator)
    # Shuffle each shard
    if shuffle:
      # WARNING: Using np instead of Python random because Python random
      # produce different values between Python 2 and 3 and between
      # architectures
      random_gen = np.random.RandomState(42)
      for path in utils.tqdm(
          tmp_files, desc="Shuffling...", unit=" shard", leave=False):
        _shuffle_tfrecord(path, random_gen=random_gen)
def _round_robin_write(writers, generator):
  """Write records from generator round-robin across writers."""
  num_writers = len(writers)
  progress = utils.tqdm(generator, unit=" examples", leave=False)
  for index, record in enumerate(progress):
    # Record k goes to shard k mod num_writers, balancing shard sizes.
    writers[index % num_writers].write(record)
| 32.186508 | 80 | 0.725681 |
2cf6d1a725545c357021892afc0e2227a4fb9359 | 757 | py | Python | scripts/get_mean_score.py | Faldict/data_diet | 335fb6ce50235ff9aed6cb9ae8513fea9afce252 | [
"Apache-2.0"
] | null | null | null | scripts/get_mean_score.py | Faldict/data_diet | 335fb6ce50235ff9aed6cb9ae8513fea9afce252 | [
"Apache-2.0"
] | null | null | null | scripts/get_mean_score.py | Faldict/data_diet | 335fb6ce50235ff9aed6cb9ae8513fea9afce252 | [
"Apache-2.0"
] | null | null | null | # python get_mean_score.py <ROOT:str> <EXP:str> <N_RUNS:int> <STEP:int> <TYPE:str>
import numpy as np
import os
import sys

# Command-line arguments (see usage comment above):
#   <ROOT> <EXP> <N_RUNS> <STEP> <TYPE>
ROOT = sys.argv[1]          # experiment root directory
EXP = sys.argv[2]           # experiment name under <ROOT>/exps/
N_RUNS = int(sys.argv[3])   # number of runs to average over
STEP = int(sys.argv[4])     # checkpoint step whose scores are averaged
TYPE = sys.argv[5]          # score type selector

# Map the score type to the directory holding per-run score files.
# Unknown types deliberately fall back to forget scores (original behavior).
_PATH_NAMES = {
  'l2_error': 'error_l2_norm_scores',
  'grad_norm': 'grad_norm_scores',
}
path_name = _PATH_NAMES.get(TYPE, 'forget_scores')

exp_dir = ROOT + f'/exps/{EXP}'

# Load each run's score vector for the requested checkpoint and average
# elementwise across runs.
scores = []
for run in range(N_RUNS):
  load_path = exp_dir + f'/run_{run}/{path_name}/ckpt_{STEP}.npy'
  scores.append(np.load(load_path))
scores = np.stack(scores).mean(0)

save_dir = exp_dir + f'/{path_name}'
save_path = save_dir + f'/ckpt_{STEP}.npy'
# exist_ok avoids the check-then-create race of os.path.exists + makedirs.
os.makedirs(save_dir, exist_ok=True)
np.save(save_path, scores)
| 24.419355 | 82 | 0.696169 |
e5c5702d0f5c53b5a9a5b0c0a7f0d7da751ca83f | 6,199 | py | Python | architecture_view_sdk/model/topboard/sprint_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | 5 | 2019-07-31T04:11:05.000Z | 2021-01-07T03:23:20.000Z | architecture_view_sdk/model/topboard/sprint_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | null | null | null | architecture_view_sdk/model/topboard/sprint_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: sprint.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from architecture_view_sdk.model.topboard import product_basic_pb2 as architecture__view__sdk_dot_model_dot_topboard_dot_product__basic__pb2
from architecture_view_sdk.model.topboard import issue_basic_pb2 as architecture__view__sdk_dot_model_dot_topboard_dot_issue__basic__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='sprint.proto',
package='topboard',
syntax='proto3',
serialized_options=_b('ZBgo.easyops.local/contracts/protorepo-models/easyops/model/topboard'),
serialized_pb=_b('\n\x0csprint.proto\x12\x08topboard\x1a\x38\x61rchitecture_view_sdk/model/topboard/product_basic.proto\x1a\x36\x61rchitecture_view_sdk/model/topboard/issue_basic.proto\"\xca\x01\n\x06Sprint\x12\'\n\x07product\x18\x01 \x03(\x0b\x32\x16.topboard.ProductBasic\x12$\n\x06issues\x18\x02 \x03(\x0b\x32\x14.topboard.IssueBasic\x12\x0c\n\x04name\x18\x03 \x01(\t\x12\x12\n\ninstanceId\x18\x04 \x01(\t\x12\r\n\x05title\x18\x05 \x01(\t\x12\x0e\n\x06status\x18\x06 \x01(\t\x12\x0c\n\x04goal\x18\x07 \x01(\t\x12\x11\n\tstartTime\x18\x08 \x01(\t\x12\x0f\n\x07\x65ndTime\x18\t \x01(\tBDZBgo.easyops.local/contracts/protorepo-models/easyops/model/topboardb\x06proto3')
,
dependencies=[architecture__view__sdk_dot_model_dot_topboard_dot_product__basic__pb2.DESCRIPTOR,architecture__view__sdk_dot_model_dot_topboard_dot_issue__basic__pb2.DESCRIPTOR,])
_SPRINT = _descriptor.Descriptor(
name='Sprint',
full_name='topboard.Sprint',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='product', full_name='topboard.Sprint.product', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='issues', full_name='topboard.Sprint.issues', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='topboard.Sprint.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='instanceId', full_name='topboard.Sprint.instanceId', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='title', full_name='topboard.Sprint.title', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='topboard.Sprint.status', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='goal', full_name='topboard.Sprint.goal', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='startTime', full_name='topboard.Sprint.startTime', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='endTime', full_name='topboard.Sprint.endTime', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=141,
serialized_end=343,
)
_SPRINT.fields_by_name['product'].message_type = architecture__view__sdk_dot_model_dot_topboard_dot_product__basic__pb2._PRODUCTBASIC
_SPRINT.fields_by_name['issues'].message_type = architecture__view__sdk_dot_model_dot_topboard_dot_issue__basic__pb2._ISSUEBASIC
DESCRIPTOR.message_types_by_name['Sprint'] = _SPRINT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Sprint = _reflection.GeneratedProtocolMessageType('Sprint', (_message.Message,), {
'DESCRIPTOR' : _SPRINT,
'__module__' : 'sprint_pb2'
# @@protoc_insertion_point(class_scope:topboard.Sprint)
})
_sym_db.RegisterMessage(Sprint)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 46.609023 | 671 | 0.756251 |
583bccb9914bd55b39b146384346af82e9585a8b | 620 | py | Python | lib/models/utils.py | YaphetS-X/CenterNet-MobileNetV3 | 144891f00c252092e0ff617f42f3583d9f3282cd | [
"MIT"
] | 84 | 2019-09-10T08:37:53.000Z | 2022-02-02T15:12:52.000Z | lib/models/utils.py | 1qWERTy12/CenterNet-MobileNetV3 | 144891f00c252092e0ff617f42f3583d9f3282cd | [
"MIT"
] | 8 | 2019-09-05T06:19:14.000Z | 2021-07-13T03:39:18.000Z | lib/models/utils.py | 1qWERTy12/CenterNet-MobileNetV3 | 144891f00c252092e0ff617f42f3583d9f3282cd | [
"MIT"
] | 16 | 2019-09-25T01:08:55.000Z | 2021-11-15T11:33:09.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def _gather_feat(feat, ind, mask=None):
    # Gather rows of `feat` along dim 1 at the positions given by `ind`.
    # feat: (B, N, C); ind: (B, K) -> result (B, K, C). If `mask` (B, K) is
    # given, only the masked rows are kept, flattened to (num_kept, C).
    channels = feat.size(2)
    expanded_ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), channels)
    gathered = feat.gather(1, expanded_ind)
    if mask is not None:
        expanded_mask = mask.unsqueeze(2).expand_as(gathered)
        gathered = gathered[expanded_mask].view(-1, channels)
    return gathered
def _tranpose_and_gather_feat(feat, ind):
    # NOTE: the (misspelled) name is kept for API compatibility.
    # Rearranges a (B, C, H, W) feature map to (B, H*W, C) so that `ind`
    # (flat spatial indices) can be gathered per example.
    channels = feat.size(1)
    flat = feat.permute(0, 2, 3, 1).contiguous()
    flat = flat.view(flat.size(0), -1, channels)
    return _gather_feat(flat, ind)
| 26.956522 | 65 | 0.66129 |
fcd0981e6af9348d1f542141243250828f63d2ac | 1,792 | py | Python | haprestio/files/templates/haproxy.cfg.py | innofocus/haprestio | 6a9bf3a3d73fb3faa7cf1e5cfc757cc360fbafde | [
"MIT"
] | null | null | null | haprestio/files/templates/haproxy.cfg.py | innofocus/haprestio | 6a9bf3a3d73fb3faa7cf1e5cfc757cc360fbafde | [
"MIT"
] | null | null | null | haprestio/files/templates/haproxy.cfg.py | innofocus/haprestio | 6a9bf3a3d73fb3faa7cf1e5cfc757cc360fbafde | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Usage: haproxy.cfg.py <stats_user> <stats_password>
# Emits a generated haproxy.cfg on stdout (see the print at the bottom).
import multiprocessing, sys
# Credentials for the haproxy stats web page, taken from the command line.
haproxy_user = sys.argv[1]
haproxy_pass = sys.argv[2]
haproxy_cfg = """
global
log stdout format raw local0 info
#log /dev/log local0
#log /dev/log local1 notice
chroot /var/lib/haproxy
stats socket /run/haproxy.admin.sock mode 660 level admin
stats timeout 30s
user haproxy
group haproxy
daemon
maxconn 200000
nbproc "{nbproc}"
{cpumap}
ca-base /etc/ssl/certs
crt-base /etc/ssl/private
ssl-default-bind-ciphers ECDHE-RSA-AES128-GCM-SHA256:ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS:!3DES
ssl-default-bind-options no-sslv3
tune.ssl.default-dh-param 2048
defaults
default-server init-addr last,libc,none
option log-health-checks
mode http
option dontlognull
timeout connect 8000
timeout client 60000s
timeout server 60000s
#errorfile 400 /etc/haproxy/errors/400.http
#errorfile 403 /etc/haproxy/errors/403.http
#errorfile 408 /etc/haproxy/errors/408.http
#errorfile 500 /etc/haproxy/errors/500.http
#errorfile 502 /etc/haproxy/errors/502.http
#errorfile 503 /etc/haproxy/errors/503.http
#errorfile 504 /etc/haproxy/errors/504.http
listen stats
bind *:8282
mode http
bind-process {nbproc}
stats enable
stats uri /
stats realm Haproxy\ Statistics
stats show-desc "HAProxy WebStatistics"
stats show-node
stats show-legends
stats auth {haproxy_user}:{haproxy_pass}
stats admin if TRUE
"""
numcpu = multiprocessing.cpu_count()
# Build one "cpu-map <process> <cpu>" line per worker: haproxy process i+1
# is pinned to cpu i (haproxy process numbering is 1-based). Lines after the
# first carry the 4-space indent used inside the config template.
cpumap="cpu-map 1 0\n"
for i in range(1,numcpu):
    cpumap += "    cpu-map {} {}".format(i+1,i)
    if i < numcpu-1:
        cpumap += "\n"
# Render the template with the process count, cpu pinning and stats
# credentials, and emit the finished config on stdout.
print(haproxy_cfg.format(nbproc=numcpu, cpumap=cpumap, haproxy_user=haproxy_user,
                         haproxy_pass=haproxy_pass))
| 27.569231 | 159 | 0.726004 |
ed1a236b543784e1d734fb0543348805b8b48abf | 1,183 | py | Python | problems/663.Equal_Tree_Partition/li.py | subramp-prep/leetcode | d125201d9021ab9b1eea5e5393c2db4edd84e740 | [
"Unlicense"
] | null | null | null | problems/663.Equal_Tree_Partition/li.py | subramp-prep/leetcode | d125201d9021ab9b1eea5e5393c2db4edd84e740 | [
"Unlicense"
] | null | null | null | problems/663.Equal_Tree_Partition/li.py | subramp-prep/leetcode | d125201d9021ab9b1eea5e5393c2db4edd84e740 | [
"Unlicense"
] | null | null | null | # coding=utf-8
# Author: Jianghan LI
# Question: 663.Equal_Tree_Partition
# Complexity: O(N)
# Date: 2017-08-20
# Contest 46, 0:36:46 - 0:57:02, 2 wrong tries
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def checkEqualTree(self, root):
        """Return True if removing exactly one edge splits the tree into
        two trees with equal sums.

        :type root: TreeNode
        :rtype: bool

        Fixes vs. the original version:
        * tree_sum no longer mutates node.val (the old treeSum was
          destructive).
        * uses integer `total // 2` after the parity check instead of
          `tree / 2`, which is float division under Python 3 and can
          compare inexactly for |total| > 2**53.
        """
        # Post-order subtree sums; the whole tree's sum is appended last.
        subtree_sums = []

        def tree_sum(node):
            if not node:
                return 0
            s = node.val + tree_sum(node.left) + tree_sum(node.right)
            subtree_sums.append(s)
            return s

        total = tree_sum(root)
        if total % 2 != 0:
            return False
        half = total // 2  # exact: total is even at this point
        # Exclude the last entry (the whole tree): a real edge must be cut.
        # This also handles the total == 0 case correctly (e.g. [0,null,0]).
        return half in subtree_sums[:-1]
| 28.166667 | 107 | 0.579036 |
cbc7d14e8f82a9f79b8ae61500615f7a7902c6a4 | 1,174 | py | Python | web/frontend/urls.py | tcsvn/activity-assistant | eeb0ef72a046a8a781ff31b384edec8243dd22a7 | [
"MIT"
] | 45 | 2020-11-06T20:31:13.000Z | 2022-03-24T06:14:18.000Z | web/frontend/urls.py | tcsvn/activity-assistant | eeb0ef72a046a8a781ff31b384edec8243dd22a7 | [
"MIT"
] | 10 | 2020-12-14T00:17:11.000Z | 2022-02-06T19:39:01.000Z | web/frontend/urls.py | tcsvn/activity-assistant | eeb0ef72a046a8a781ff31b384edec8243dd22a7 | [
"MIT"
] | 3 | 2020-12-15T22:50:09.000Z | 2022-03-13T21:12:28.000Z | from django.conf.urls import url, include
from django.urls import path, re_path
from rest_framework.routers import DefaultRouter
from frontend import views
from rest_framework.schemas import get_schema_view
# URL routes for the frontend app; each view serves one page. The regexes
# are intentionally left unanchored at the end (e.g. r'^webhook'), exactly
# as before.
urlpatterns = [
    path('', views.SetupView.as_view(), name='setup'),
    re_path(r'^dashboard/', views.DashboardView.as_view(), name='dashboard'),
    re_path(r'^person/[0-9]+', views.PersonView.as_view(), name='person'),
    re_path(r'^dataset/$', views.DatasetView.as_view(), name='dataset'),
    re_path(r'^webhook', views.WebhookView.as_view(), name='webhook'),
    re_path(r'^config', views.ConfigView.as_view(), name='config'),
    re_path(r'^dataset/[0-9]+', views.DatasetAnalyticsView.as_view(),
            name='dataset_analytics'),
    # Routes kept for reference; currently disabled:
    #re_path(r'^activities/', views.ActivityView.as_view(), name='activity'),
    #re_path(r'^map', views.EditMapView.as_view(), name='map'),
    #re_path(r'^assign_activities_to_locations', views.AssignActivities.as_view(), name='assign_activities'),
    #re_path(r'^model_selection', views.ModelSelectionView.as_view(), name='model_selection'),
    #re_path(r'^model', views.ModelView.as_view(), name='model')
]
| 51.043478 | 109 | 0.722317 |
f40dc4422dcd56adc952a9d62c499582a434c57b | 14,448 | py | Python | rstbx/indexing_api/lattice.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | rstbx/indexing_api/lattice.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | rstbx/indexing_api/lattice.py | rimmartin/cctbx_project | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | [
"BSD-3-Clause-LBNL"
] | null | null | null | from __future__ import division
from six.moves import range
from rstbx.array_family import flex
from rstbx.indexing_api import dps_extended
from rstbx.indexing_api.sampling import hemisphere_shortcut
from rstbx.dps_core import Directional_FFT
import math,cmath
from scitbx import matrix
from libtbx.test_utils import approx_equal
import boost.python
class _(boost.python.injector, dps_extended):
  def index(self, raw_spot_input=None, reciprocal_space_vectors=None,
            panel_addresses=None):
    """Run 1D-FFT autoindexing on the observed spots.

    Exactly one of raw_spot_input (x, y mm; phi degrees, a vec3_double) or
    reciprocal_space_vectors must be given; the former is mapped to
    reciprocal space first. Side effects: fills in self.max_cell and
    self.recommended_grid_sampling_rad when unset, then seeds the
    hemisphere direction search.
    """
    assert [raw_spot_input, reciprocal_space_vectors].count(None) == 1
    self.raw_spot_input = raw_spot_input # deprecate record
    # must be x, y, phi in degrees, as a vec3_double
    if raw_spot_input is not None:
      if len(self.detector) > 1:
        # Multi-panel detector: each spot must name its panel.
        assert len(raw_spot_input) == len(panel_addresses)
      # some hard protection against floating point error
      assert len(raw_spot_input) > 7 # no chance of 1DFFT indexing with 7 or fewer spots
      self.panelID = panel_addresses
      reciprocal_space_vectors = self.raw_spot_positions_mm_to_reciprocal_space(
        self.raw_spot_input, self.detector, self.inv_wave, self.S0_vector, self.axis,
        self.panelID)
    else:
      # some hard protection against floating point error
      assert len(reciprocal_space_vectors) > 7 # no chance of 1DFFT indexing with 7 or fewer spots
    if self.max_cell is None:
      # Estimate the maximum cell edge from nearest-neighbor distances.
      from rstbx.indexing_api.nearest_neighbor import neighbor_analysis
      NN = neighbor_analysis(reciprocal_space_vectors)
      self.max_cell = NN.max_cell
    if self.recommended_grid_sampling_rad is None:
      rossmann_suggestion = 0.029 # radians; value used in Steller (1997)
      norms = reciprocal_space_vectors.norms()
      naive_obs_highest_resolution = 1./flex.max(norms)
      characteristic_grid = naive_obs_highest_resolution / self.max_cell
      # purely heuristic for now, figure out details later
      new_suggestion = 2. * characteristic_grid
      self.recommended_grid_sampling_rad = min(rossmann_suggestion,
          new_suggestion)
    self.setMaxcell(self.max_cell)
    self.setXyzData(reciprocal_space_vectors) # extended API
    hemisphere_shortcut(ai = self, # extended API
        characteristic_sampling = self.recommended_grid_sampling_rad,
        max_cell = self.max_cell
        )
  def sum_score_detail(self,reciprocal_space_vectors):
    """Evaluates the probability that the trial value of ( S0_vector | origin_offset ) is correct,
    given the current estimate and the observations. The trial value comes through the
    reciprocal space vectors, and the current estimate comes through the short list of
    DPS solutions. Actual return value is a sum of NH terms, one for each DPS solution, each ranging
    from -1.0 to 1.0"""
    nh = min ( self.getSolutions().size(), 20) # extended API
    solutions = self.getSolutions() #extended API
    sum_score = 0.0
    for t in range(nh):
      #if t!=unique:continue
      dfft = Directional_FFT(angle = solutions[t], xyzdata = reciprocal_space_vectors,
                             granularity = self.granularity, amax = self.amax, # extended API
                             F0_cutoff = 11)
      kval = dfft.kval();
      kmax = dfft.kmax();
      # Only solutions whose FFT peak exceeds a quarter of the spot count
      # contribute to the score.
      kval_cutoff = self.raw_spot_input.size()/4.0; # deprecate record
      if ( kval > kval_cutoff ):
        ff=dfft.fft_result;
        kbeam = ((-dfft.pmin)/dfft.delta_p) + 0.5;
        Tkmax = cmath.phase(ff[kmax]);
        # Cosine of the residual phase at the beam position; +1 when the
        # trial value lines up with this DPS solution, -1 when anti-phased.
        backmax = math.cos(Tkmax+(2*math.pi*kmax*kbeam/(2*ff.size()-1)) );
        ### Here it should be possible to calculate a gradient.
        ### Then minimize with respect to two coordinates. Use lbfgs? Have second derivatives?
        ### can I do something local to model the cosine wave?
        ### direction of wave travel. Period. phase.
        sum_score += backmax;
        #if t == unique:
        #  print t, kmax, dfft.pmin, dfft.delta_p, Tkmax,(2*math.pi*kmax*kbeam/(2*ff.size()-1))
    return sum_score
def get_S0_vector_score(self,trial_beam,unique):
trial_beam = matrix.col(trial_beam)
reciprocal_space_vectors = self.raw_spot_positions_mm_to_reciprocal_space(
self.raw_spot_input, self.detector, self.inv_wave, trial_beam, self.axis,
self.panelID)
return self.sum_score_detail(reciprocal_space_vectors)
  def optimize_S0_local_scope(self):
    """Local scope: find the optimal S0 vector closest to the input S0 vector
       (local minimum, simple minimization)"""
    ############ Implement a direct beam check right here #########################
    # NOTE(review): `unique` is only forwarded to get_S0_vector_score,
    # which ignores it.
    unique=0
    # construct two vectors that are perpendicular to the beam. Gives a basis for refining beam
    beamr0 = self.S0_vector.cross(self.axis).normalize()
    beamr1 = beamr0.cross(self.S0_vector).normalize()
    beamr2 = beamr1.cross(self.S0_vector).normalize()
    assert approx_equal(self.S0_vector.dot(beamr1), 0.)
    assert approx_equal(self.S0_vector.dot(beamr2), 0.)
    assert approx_equal(beamr2.dot(beamr1), 0.)
    # so the orthonormal vectors are self.S0_vector, beamr1 and beamr2
    # `grid` is only used by the optional plotting branch below.
    grid = 10
    # DO A SIMPLEX MINIMIZATION
    from scitbx.simplex import simplex_opt
    class test_simplex_method(object):
      def __init__(selfOO):
        selfOO.starting_simplex=[]
        selfOO.n = 2
        for ii in range(selfOO.n+1):
          selfOO.starting_simplex.append(flex.random_double(selfOO.n))
        selfOO.optimizer = simplex_opt( dimension=selfOO.n,
                                        matrix = selfOO.starting_simplex,
                                        evaluator = selfOO,
                                        tolerance=1e-7)
        selfOO.x = selfOO.optimizer.get_solution()
      def target(selfOO, vector):
        # Simplex coordinates are scaled by 0.0002 along beamr1/beamr2
        # (same units as S0_vector components — TODO confirm scale), then
        # renormalized to keep |S0| = 1/wavelength.
        newvec = matrix.col(self.S0_vector) + vector[0]*0.0002*beamr1 + vector[1]*0.0002*beamr2
        normal = newvec.normalize() * self.inv_wave
        return -self.get_S0_vector_score(normal,unique) # extended API
    MIN = test_simplex_method()
    #MIN = test_cma_es()
    print "MINIMUM=",list(MIN.x)
    newvec = matrix.col(self.S0_vector) + MIN.x[0]*0.0002*beamr1 + MIN.x[1]*0.0002*beamr2
    new_S0_vector = newvec.normalize() * self.inv_wave
    print "old S0:",list(self.S0_vector.elems)
    print "new S0",list(new_S0_vector.elems)
    # Optional diagnostic contour plot of the score surface (disabled).
    plot = False
    if plot:
      scores = flex.double()
      for x in range(-grid,grid+1):
        for y in range(-grid,grid+1):
          ref = matrix.col(self.S0_vector)
          newvec = ref + x*0.0002*beamr1 + y*0.0002*beamr2
          normal = newvec.normalize() * self.inv_wave
          scores.append( self.get_S0_vector_score(normal,unique) ) # extended API
      def show_plot(grid,excursi):
        excursi.reshape(flex.grid(grid, grid))
        from matplotlib import pyplot as plt
        plt.figure()
        CS = plt.contour([i*0.2 for i in range(grid)],[i*0.2 for i in range(grid)], excursi.as_numpy_array())
        plt.clabel(CS, inline=1, fontsize=10, fmt="%6.3f")
        plt.title("Score as to beam likelihood")
        plt.scatter([0.1*(grid-1)],[0.1*(grid-1)],color='g',marker='o')
        plt.scatter([0.1*(grid-1)+0.2*MIN.x[0]] , [0.1*(grid-1)+0.2*MIN.x[1]],color='r',marker='*')
        plt.axes().set_aspect("equal")
        plt.show()
      show_plot(2 * grid + 1, scores)
    return new_S0_vector
  @staticmethod
  def get_new_detector(old_detector,origin_offset):
    """Return a deep copy of `old_detector` translated by `origin_offset`.

    The original detector is never modified. For a hierarchical multi-panel
    detector the offset is applied once at the hierarchy root; otherwise it
    is applied to each panel's local frame individually.
    """
    import copy
    new_detector = copy.deepcopy(old_detector)
    if len(new_detector) > 1 and len(new_detector.hierarchy()) > 1:
      h = new_detector.hierarchy()
      h.set_local_frame(fast_axis=h.get_fast_axis(),
                        slow_axis=h.get_slow_axis(),
                        origin=matrix.col(h.get_origin()) + origin_offset)
    else:
      for panel in new_detector:
        panel.set_local_frame(fast_axis=panel.get_fast_axis(),
                              slow_axis=panel.get_slow_axis(),
                              origin=matrix.col(panel.get_origin()) + origin_offset)
    return new_detector
def get_origin_offset_score(self,trial_origin_offset):
trial_detector = dps_extended.get_new_detector(self.detector,trial_origin_offset)
reciprocal_space_vectors = self.raw_spot_positions_mm_to_reciprocal_space(
self.raw_spot_input, trial_detector, self.inv_wave, self.S0_vector, self.axis,
self.panelID)
return self.sum_score_detail(reciprocal_space_vectors)
  def optimize_origin_offset_local_scope(self):
    """Local scope: find the optimal origin-offset closest to the current overall detector position
       (local minimum, simple minimization)"""
    # construct two vectors that are perpendicular to the beam. Gives a basis for refining beam
    if self.axis is None:
      # No rotation axis available: fall back to the lab x-axis to build
      # the perpendicular basis.
      beamr0 = self.S0_vector.cross(matrix.col((1,0,0))).normalize()
    else:
      beamr0 = self.S0_vector.cross(self.axis).normalize()
    beamr1 = beamr0.cross(self.S0_vector).normalize()
    beamr2 = beamr1.cross(self.S0_vector).normalize()
    assert approx_equal(self.S0_vector.dot(beamr1), 0.)
    assert approx_equal(self.S0_vector.dot(beamr2), 0.)
    assert approx_equal(beamr2.dot(beamr1), 0.)
    # so the orthonormal vectors are self.S0_vector, beamr1 and beamr2
    # DO A SIMPLEX MINIMIZATION
    from scitbx.simplex import simplex_opt
    class test_simplex_method(object):
      def __init__(selfOO):
        selfOO.starting_simplex=[]
        selfOO.n = 2
        for ii in range(selfOO.n+1):
          selfOO.starting_simplex.append(flex.random_double(selfOO.n))
        selfOO.optimizer = simplex_opt( dimension=selfOO.n,
                                        matrix = selfOO.starting_simplex,
                                        evaluator = selfOO,
                                        tolerance=1e-7)
        selfOO.x = selfOO.optimizer.get_solution()
      def target(selfOO, vector):
        # Simplex coordinates scale by 0.2 along beamr1/beamr2; the plot
        # axes below are labelled mm, so these offsets are in mm.
        trial_origin_offset = vector[0]*0.2*beamr1 + vector[1]*0.2*beamr2
        return -self.get_origin_offset_score(trial_origin_offset)
    MIN = test_simplex_method()
    trial_origin_offset = MIN.x[0]*0.2*beamr1 + MIN.x[1]*0.2*beamr2
    #print "The Origin Offset best score is",self.get_origin_offset_score(trial_origin_offset)
    # Optional wide-scope diagnostic plot of the score surface.
    if self.horizon_phil.indexing.plot_search_scope:
      scope = self.horizon_phil.indexing.mm_search_scope
      plot_px_sz = self.detector[0].get_pixel_size()[0]
      grid = max(1,int(scope/plot_px_sz))
      scores = flex.double()
      for y in range(-grid,grid+1):
        for x in range(-grid,grid+1):
          new_origin_offset = x*plot_px_sz*beamr1 + y*plot_px_sz*beamr2
          scores.append( self.get_origin_offset_score(new_origin_offset) )
      def show_plot(widegrid,excursi):
        excursi.reshape(flex.grid(widegrid, widegrid))
        def igrid(x): return x - (widegrid//2)
        from matplotlib import pyplot as plt
        plt.figure()
        CS = plt.contour([igrid(i)*plot_px_sz for i in range(widegrid)],
                         [igrid(i)*plot_px_sz for i in range(widegrid)], excursi.as_numpy_array())
        plt.clabel(CS, inline=1, fontsize=10, fmt="%6.3f")
        plt.title("Wide scope search for detector origin offset")
        plt.scatter([0.0],[0.0],color='g',marker='o')
        plt.scatter([0.2*MIN.x[0]] , [0.2*MIN.x[1]],color='r',marker='*')
        plt.axes().set_aspect("equal")
        plt.xlabel("offset (mm) along beamr1 vector")
        plt.ylabel("offset (mm) along beamr2 vector")
        plt.show()
      show_plot(widegrid = 2 * grid + 1, excursi = scores)
    return dps_extended.get_new_detector(self.detector, trial_origin_offset)
  def get_basis_general(self):
    """
    In this function, self requires the following abstract interface:
       n_candidates() = number of candidate basis solutions presented
       __getitem__(i) = return the ith candidate basis vector of type rstbx_ext.Direction
       setOrientation(orientation) where orientation is a cctbx.crystal_orientation object.
         must represent the primitive setting.
       getOrientation()
       niggli() adjusts the stored orientation to the niggli setting
       getMosaicity() = mosaicity in degrees, from labelit, will be removed from interface
       hklobserved()
       combos()
       rmsdev()
       model_likelihood()
    """
    """side-effect: sets orientation matrix"""
    from rstbx.indexing_api.basis_choice import SelectBasisMetaprocedure as SBM
    pd = {}
    M = SBM(input_index_engine = self,input_dictionary = pd, horizon_phil = self.horizon_phil) # extended API
    print "Finished SELECT BASIS with solution M",M
    from rstbx.dps_core.lepage import iotbx_converter
    # Lattice-symmetry analysis of the reduced (minimum) cell; L[0] is the
    # highest-symmetry candidate, L[-1] the triclinic fallback.
    L = iotbx_converter(self.getOrientation().unit_cell().minimum_cell(),5.0) # extended API
    supergroup = L[0]
    triclinic = self.getOrientation().unit_cell() # extended API
    cb_op = supergroup['cb_op_inp_best'].c().as_double_array()[0:9]
    orient = self.getOrientation() # extended API
    # Re-express the orientation in the best setting, then impose the
    # supergroup's symmetry constraints and store it back on self.
    orient_best = orient.change_basis(matrix.sqr(cb_op).transpose())
    constrain_orient = orient_best.constrain(supergroup['system'])
    self.setOrientation(constrain_orient) # extended API
    L[-1]["orient"] = orient
    # Diagnostic printout (Python 2 print statements).
    if True:
      for subgroup in L:
        print subgroup.short_digest()
      print "\ntriclinic cell=%s volume(A^3)=%.3f"%(triclinic,triclinic.volume())
      print "\nafter symmetrizing to %s:"%supergroup.reference_lookup_symbol()
      #M.show_rms()
    return L
class DPS_primitive_lattice(dps_extended):
  """dps_extended engine configured for primitive-lattice indexing.

  adopt_init_args (libtbx) binds the constructor arguments
  (max_cell, recommended_grid_sampling_rad, horizon_phil) onto self.
  """
  def __init__(self, max_cell, recommended_grid_sampling_rad, horizon_phil):
    from libtbx import adopt_init_args
    adopt_init_args(self,locals())
    dps_extended.__init__(self)
class basis_choice_adapter(dps_extended):
  """Skeleton adapter around dps_extended; takes no configuration.

  NOTE(review): adopt_init_args is called with only ``self`` in locals(),
  so it appears to bind nothing extra here -- confirm before removing.
  """
  def __init__(self):
    from libtbx import adopt_init_args
    adopt_init_args(self,locals())
    dps_extended.__init__(self)
#start here.
#0) rationalize the L class
#) fully document. Have a map from here to there. Implement Richard's fix
#P) figure out where the ".constrain()" code is
#X) symmetry
#1) encapsulate the parameter refinement
#2) encapsulate the outlier rejection. Do not pass the autoindexengine to it
#3) encapsulate the get_basis_general command so it takes the canonical objects. not autoindexengine.
| 43.518072 | 111 | 0.667497 |
8d65327dfb2f5fefb222a7a020498133a6dae6ee | 55 | py | Python | GUI/apps/StatisticsApp.py | FeelsBright/SmartMed | 91dd91e51be82d7dc9779a3ad0a739f4214652be | [
"Apache-2.0"
] | null | null | null | GUI/apps/StatisticsApp.py | FeelsBright/SmartMed | 91dd91e51be82d7dc9779a3ad0a739f4214652be | [
"Apache-2.0"
] | null | null | null | GUI/apps/StatisticsApp.py | FeelsBright/SmartMed | 91dd91e51be82d7dc9779a3ad0a739f4214652be | [
"Apache-2.0"
] | null | null | null | from .App import App
class StatisticsApp(App):
pass
| 9.166667 | 25 | 0.745455 |
45d8af2a74052fe6222e28a91a97e188ba514225 | 948 | py | Python | data/db_repository/versions/008_migration.py | wgd3/fitgents | 171b63f79febd82c9a7e57717923e827c61db1f0 | [
"MIT"
] | null | null | null | data/db_repository/versions/008_migration.py | wgd3/fitgents | 171b63f79febd82c9a7e57717923e827c61db1f0 | [
"MIT"
] | null | null | null | data/db_repository/versions/008_migration.py | wgd3/fitgents | 171b63f79febd82c9a7e57717923e827c61db1f0 | [
"MIT"
] | null | null | null | from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
pre_meta = MetaData()
post_meta = MetaData()
users = Table('users', post_meta,
Column('user_id', Integer, primary_key=True, nullable=False),
Column('name', String(length=50)),
Column('email', String(length=50)),
Column('age', Integer),
Column('_password', LargeBinary(length=120)),
Column('_salt', String(length=120)),
Column('is_admin', Boolean),
)
def upgrade(migrate_engine):
    """Forward migration: add the boolean 'is_admin' column to 'users'."""
    # Upgrade operations go here. Don't create your own engine; bind
    # migrate_engine to your metadata
    pre_meta.bind = migrate_engine
    post_meta.bind = migrate_engine
    # Emit the ALTER TABLE adding the new column defined in post_meta above.
    post_meta.tables['users'].columns['is_admin'].create()
def downgrade(migrate_engine):
    """Reverse migration: drop the 'is_admin' column added by upgrade()."""
    # Operations to reverse the above upgrade go here.
    pre_meta.bind = migrate_engine
    post_meta.bind = migrate_engine
    post_meta.tables['users'].columns['is_admin'].drop()
| 29.625 | 68 | 0.712025 |
cf14c86e687b12e722e324841b79c70b3ab10b7b | 2,530 | py | Python | python/gemm_sparse_prod.py | alsam/scriptics-samples | 0169bcec96c29ba1e50d4d3ed5ad5afbb62a272b | [
"MIT"
] | null | null | null | python/gemm_sparse_prod.py | alsam/scriptics-samples | 0169bcec96c29ba1e50d4d3ed5ad5afbb62a272b | [
"MIT"
] | null | null | null | python/gemm_sparse_prod.py | alsam/scriptics-samples | 0169bcec96c29ba1e50d4d3ed5ad5afbb62a272b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#-*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2021 Alexander Samoilov
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import scipy.sparse, scipy.io

# Problem sizes for the C = A * B sparse GEMM test case:
# A is m x n, B is n x k, so the product C is m x k.
m = 150
n = 200
k = 250

# Random CSR operands at 1% density; C is their exact sparse product and
# serves as the reference answer for the generated test data below.
A=scipy.sparse.random(m, n, density=0.01, format='csr')
B=scipy.sparse.random(n, k, density=0.01, format='csr')

C=A.dot(B)
def out_mat(label, mat):
    """Print C++/thrust initialization source for CSR matrix *mat*.

    Emits declarations of three host_vectors (values, column indices, row
    pointers) plus a brace-scoped block that fills them from C array
    literals.  *label* is appended to every variable name so several
    matrices can coexist in the generated file.  Output format (including
    the spacing produced by print's default separator) is the contract here.
    """
    print('int nnz' + label + ' = ', mat.nnz, ';')
    print('thrust::host_vector<double> h_csrVal' + label + '(nnz' + label + ');')
    print('thrust::host_vector<int> h_csrColInd' + label + '(nnz' + label + ');')
    print('thrust::host_vector<int> h_csrRowPtr' + label + '(', len(mat.indptr), ');')
    print('{')
    print('  double tmp_h_csrVal' + label + '[] = {', ','.join(map(str, mat.data)), '};')
    print('  int tmp_h_csrColInd' + label + '[] = {', ','.join(map(str, mat.indices)), '};')
    print('  int tmp_h_csrRowPtr' + label + '[] = {', ','.join(map(str, mat.indptr)), '};')
    print('  h_csrVal' + label + '.assign(tmp_h_csrVal' + label + ', tmp_h_csrVal' + label + '+nnz' + label + ');')
    print('  h_csrColInd' + label + '.assign(tmp_h_csrColInd' + label + ', tmp_h_csrColInd' + label + '+nnz' + label + ');')
    print('  h_csrRowPtr' + label + '.assign(tmp_h_csrRowPtr' + label + ', tmp_h_csrRowPtr' + label + '+', len(mat.indptr), ');')
    print('}')
# Dump all three matrices as embeddable C++/thrust test data on stdout.
out_mat('A', A)
out_mat('B', B)
out_mat('C', C)

# Also persist the operands and the reference product in Matrix Market files.
scipy.io.mmwrite(target='A.mtx', a=A)
scipy.io.mmwrite(target='B.mtx', a=B)
scipy.io.mmwrite(target='C.mtx', a=C)
| 44.385965 | 131 | 0.666008 |
2d55c272e37f3971828bd6dba4db5d30ed3864fc | 776 | py | Python | python/fogcreek_encode.py | tbedford/code-snippets | 9afe36c2726829f14fa5ec11acb8214bed704938 | [
"MIT"
] | null | null | null | python/fogcreek_encode.py | tbedford/code-snippets | 9afe36c2726829f14fa5ec11acb8214bed704938 | [
"MIT"
] | null | null | null | python/fogcreek_encode.py | tbedford/code-snippets | 9afe36c2726829f14fa5ec11acb8214bed704938 | [
"MIT"
] | 1 | 2018-10-09T02:03:12.000Z | 2018-10-09T02:03:12.000Z | from random import randint
magic_string = "keyboarding_"
magic_list = list(magic_string)
base_string = "abcdefghijklmnopqrstuvwxyz_"
base_list = list(base_string)
encoded_dict = {}
count = 100
# encode magic list
for c in magic_list:
encoded_dict[c] = count
count = count - 1
if c in base_list:
base_list.remove(c)
# encode remaining base list
for c in base_list:
encoded_dict[c] = count
count = count - 1
keys = encoded_dict.keys()
encoded_string = ""
while len(keys) > 0:
key = keys[randint(0, len(keys) - 1)]
if encoded_dict[key] > 0:
encoded_string = encoded_string + key
encoded_dict[key] = encoded_dict[key] - 1
elif encoded_dict[key] == 0:
keys.remove(key)
print(encoded_string)
| 20.972973 | 49 | 0.662371 |
be9c09e4e01f45bec3b83b0cd523e2320b17cd37 | 840 | py | Python | stroppy/__main__.py | ciaron/stroppy | 081f14d50917bd7669b6554719791ceee538b96e | [
"MIT"
] | null | null | null | stroppy/__main__.py | ciaron/stroppy | 081f14d50917bd7669b6554719791ceee538b96e | [
"MIT"
] | null | null | null | stroppy/__main__.py | ciaron/stroppy | 081f14d50917bd7669b6554719791ceee538b96e | [
"MIT"
] | null | null | null | # __main__.py
# original from https://realpython.com/pypi-publish-python-package/
from configparser import ConfigParser
from importlib import resources # Python 3.7+
import sys
from reader import feed
from reader import viewer
def main():
    """Read the Real Python article feed

    CLI entry point: with an article ID argument, show that article;
    without arguments, list all available articles.
    """
    # Read URL of the Real Python feed from config file
    cfg = ConfigParser()
    # config.txt ships inside the 'reader' package itself.
    cfg.read_string(resources.read_text("reader", "config.txt"))
    url = cfg.get("feed", "url")

    # If an article ID is given, show the article
    if len(sys.argv) > 1:
        article = feed.get_article(url, sys.argv[1])
        viewer.show(article)

    # If no ID is given, show a list of all articles
    else:
        site = feed.get_site(url)
        titles = feed.get_titles(url)
        viewer.show_list(site, titles)


if __name__ == "__main__":
    main()
| 26.25 | 67 | 0.670238 |
4d4bb3ffe3cb4e9c0e97cc06c738d251e77c2da2 | 499 | py | Python | env/Lib/site-packages/plotly/validators/scatterpolargl/line/_shape.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 11,750 | 2015-10-12T07:03:39.000Z | 2022-03-31T20:43:15.000Z | venv/Lib/site-packages/plotly/validators/scatterpolargl/line/_shape.py | wakisalvador/constructed-misdirection | 74779e9ec640a11bc08d5d1967c85ac4fa44ea5e | [
"Unlicense"
] | 2,951 | 2015-10-12T00:41:25.000Z | 2022-03-31T22:19:26.000Z | venv/Lib/site-packages/plotly/validators/scatterpolargl/line/_shape.py | wakisalvador/constructed-misdirection | 74779e9ec640a11bc08d5d1967c85ac4fa44ea5e | [
"Unlicense"
] | 2,623 | 2015-10-15T14:40:27.000Z | 2022-03-28T16:05:50.000Z | import _plotly_utils.basevalidators
class ShapeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the ``scatterpolargl.line.shape`` property."""

    def __init__(
        self, plotly_name="shape", parent_name="scatterpolargl.line", **kwargs
    ):
        # Pop the validator configuration out of kwargs first so that any
        # caller-supplied overrides take precedence over these defaults.
        edit_type = kwargs.pop("edit_type", "calc")
        allowed_values = kwargs.pop("values", ["linear", "hv", "vh", "hvh", "vhv"])
        super(ShapeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=allowed_values,
            **kwargs
        )
| 33.266667 | 78 | 0.627255 |
e165457e997b3b03f13cabf436dfeaf68d43027e | 655 | py | Python | yawast/scanner/plugins/http/waf.py | sasqwatch/yawast | 41572108e4d46f76cb83eae6dd3727bc02bd46c7 | [
"BSD-3-Clause"
] | null | null | null | yawast/scanner/plugins/http/waf.py | sasqwatch/yawast | 41572108e4d46f76cb83eae6dd3727bc02bd46c7 | [
"BSD-3-Clause"
] | null | null | null | yawast/scanner/plugins/http/waf.py | sasqwatch/yawast | 41572108e4d46f76cb83eae6dd3727bc02bd46c7 | [
"BSD-3-Clause"
] | null | null | null | from typing import List, Dict
from yawast.reporting.enums import Vulnerabilities
from yawast.scanner.plugins.result import Result
def get_waf(headers: Dict, raw: str, url: str) -> List[Result]:
    """Detect well-known WAF/CDN products from HTTP response headers.

    Checks the ``Server`` header for Cloudflare and the ``X-CDN`` /
    ``X-Iinfo`` headers for Incapsula, returning one Result per detection.
    """
    findings: List[Result] = []

    # Cloudflare identifies itself via the Server header value.
    if headers.get("Server") == "cloudflare":
        findings.append(
            Result(
                "WAF Detected: Cloudflare", Vulnerabilities.WAF_CLOUDFLARE, url, raw
            )
        )

    # Incapsula injects X-CDN and/or X-Iinfo headers.
    if "X-CDN" in headers or "X-Iinfo" in headers:
        findings.append(
            Result("WAF Detected: Incapsula", Vulnerabilities.WAF_INCAPSULA, url, raw)
        )

    return findings
| 27.291667 | 88 | 0.60458 |
7341eeafc791717acfa5a2dbdb2f41adca8c5e76 | 1,137 | py | Python | corehq/ex-submodules/casexml/apps/stock/tests/mock_consumption.py | kkrampa/commcare-hq | d64d7cad98b240325ad669ccc7effb07721b4d44 | [
"BSD-3-Clause"
] | 1 | 2020-05-05T13:10:01.000Z | 2020-05-05T13:10:01.000Z | corehq/ex-submodules/casexml/apps/stock/tests/mock_consumption.py | kkrampa/commcare-hq | d64d7cad98b240325ad669ccc7effb07721b4d44 | [
"BSD-3-Clause"
] | 1 | 2019-12-09T14:00:14.000Z | 2019-12-09T14:00:14.000Z | corehq/ex-submodules/casexml/apps/stock/tests/mock_consumption.py | MaciejChoromanski/commcare-hq | fd7f65362d56d73b75a2c20d2afeabbc70876867 | [
"BSD-3-Clause"
] | 5 | 2015-11-30T13:12:45.000Z | 2019-07-01T19:27:07.000Z | from __future__ import absolute_import
from __future__ import unicode_literals
from casexml.apps.stock.models import ConsumptionMixin
from dimagi.utils import parsing as dateparse
from datetime import datetime, timedelta
from casexml.apps.stock.consumption import compute_daily_consumption_from_transactions, ConsumptionConfiguration
import collections
# Shorthand for dimagi's JSON datetime formatter (imported above as dateparse).
to_ts = dateparse.json_format_datetime
class MockTransaction(
        collections.namedtuple('MockTransaction', ['type', 'normalized_value', 'received_on']),
        ConsumptionMixin):
    """Immutable test stand-in for a stock transaction.

    A namedtuple carrying the fields consumption calculations read, mixed
    with ConsumptionMixin (behavior defined in casexml.apps.stock.models).
    """
    pass
def mock_transaction(action, value, age):
    """Build a MockTransaction of type *action* dated *age* days before ``now``."""
    return MockTransaction(action, value, ago(age))
# Reference time captured once at import; all mock transaction ages are
# computed relative to it so a single test run sees consistent dates.
now = datetime.utcnow()


def ago(days):
    """Return the UTC datetime *days* days before the module-level ``now``."""
    return now - timedelta(days=days)
# note that you must add inferred consumption transactions manually to txdata
def mock_consumption(txdata, window, params=None):
    """Compute daily consumption from *txdata* over the last *window* days.

    *params* may override the ConsumptionConfiguration defaults;
    ``min_window``/``min_periods`` both default to 0 so tiny fixtures work.
    """
    default_params = {'min_window': 0, 'min_periods': 0}
    params = params or {}
    default_params.update(params)
    config = ConsumptionConfiguration(**default_params)

    return compute_daily_consumption_from_transactions(
        txdata,
        ago(window),
        config,
    )
| 29.153846 | 112 | 0.770449 |
020eb7132b0c6e00069d6443b38510a2a01b17c2 | 101 | py | Python | pre_process.py | AnuragVanam/team-text-eater | 70920603100015ef25b4ac11e3198e8e766d38df | [
"Apache-2.0"
] | null | null | null | pre_process.py | AnuragVanam/team-text-eater | 70920603100015ef25b4ac11e3198e8e766d38df | [
"Apache-2.0"
] | null | null | null | pre_process.py | AnuragVanam/team-text-eater | 70920603100015ef25b4ac11e3198e8e766d38df | [
"Apache-2.0"
] | null | null | null |
import string
def pre_process(para):
result = para.translate(string.punctuation)
return(result) | 16.833333 | 45 | 0.772277 |
89bd556d9bd1056fc3ddaf7dae1b610a3812ca6d | 164 | py | Python | src/client/client.py | Kirishikesan/HoneySim | cf2e76816a9adc186b909addef8558cce2fc3055 | [
"MIT"
] | null | null | null | src/client/client.py | Kirishikesan/HoneySim | cf2e76816a9adc186b909addef8558cce2fc3055 | [
"MIT"
] | null | null | null | src/client/client.py | Kirishikesan/HoneySim | cf2e76816a9adc186b909addef8558cce2fc3055 | [
"MIT"
] | null | null | null | import socket
s = socket.socket()
port = 3000
s.connect(('127.0.0.1', port))
print (s.recv(1024) )
s.close() | 16.4 | 31 | 0.432927 |
cce233322695c19117937e8ad7ce2c0ff4661a50 | 3,062 | py | Python | dlcv/utils.py | Loonride/deeplens-cv | 9e5b31c1a269d364e4912ba8266415fa04277e11 | [
"MIT"
] | null | null | null | dlcv/utils.py | Loonride/deeplens-cv | 9e5b31c1a269d364e4912ba8266415fa04277e11 | [
"MIT"
] | null | null | null | dlcv/utils.py | Loonride/deeplens-cv | 9e5b31c1a269d364e4912ba8266415fa04277e11 | [
"MIT"
] | null | null | null | """This file is part of DeepLens which is released under MIT License and
is copyrighted by the University of Chicago. This project is developed by
the database group (chidata).
utils.py defines some utilities that can be used for debugging and manipulating
image streams.
"""
import cv2
import numpy as np
import itertools
#plays video stream through the system player
def play(vstream):
    """Render each frame of *vstream* in an OpenCV window until 'q' is pressed.

    Each item of *vstream* is expected to be a dict with the image under
    the 'data' key (the stream format used throughout this module).
    """
    for frame in vstream:
        cv2.imshow('Player', frame['data'])
        key = cv2.waitKey(3) & 0xFF  # mask to the low byte of the key code
        if key == ord('q'):
            break
#shows a single frame
def show(frame):
    """Display *frame* in a window named 'Debug' and block until a key is pressed."""
    cv2.imshow('Debug',frame)
    cv2.waitKey(0)
#overlays a bounding box with labels over a frame
def overlay(frame, bbs):
    """Return a copy of *frame* with each labelled bounding box drawn onto it.

    *bbs* is an iterable of ``(label, bb)`` pairs where ``bb`` is indexed as
    ``(x0, x1, y0, y1)`` (matching the slicing convention used below).
    The input frame is left untouched.
    """
    annotated = np.copy(frame)
    for label, bb in bbs:
        top_left = (bb[0], bb[2])
        bottom_right = (bb[1], bb[3])
        cv2.rectangle(annotated, top_left, bottom_right, (0, 255, 0), 2)
        cv2.putText(annotated, label, top_left, cv2.FONT_HERSHEY_SIMPLEX,
                    1.0, (0, 255, 0), lineType=cv2.LINE_AA)
    return annotated
#crop and replace primitives
def bb_crop(frame, box):
    """Return an independent copy of the sub-image of *frame* covered by *box*.

    *box* exposes x0/x1/y0/y1 pixel bounds; rows are sliced by y, columns by x.
    """
    window = frame[box.y0:box.y1, box.x0:box.x1]
    return np.copy(window)
def bb_replace(frame1, box, frame2):
    """Return a copy of *frame1* whose *box* region is overwritten with *frame2*.

    *frame1* is not modified; *frame2* must match the box's height/width.
    """
    patched = np.array(frame1)  # fresh copy so the caller's frame stays intact
    patched[box.y0:box.y1, box.x0:box.x1] = frame2
    return patched
#matches frames against each other
def image_match(im1, im2, hess_thresh=150, dist_threshold=1000, accept=0.75):
    """Decide whether two images match using BRISK keypoint descriptors.

    For every keypoint in *im1*, the nearest descriptor of *im2* (L2
    distance, capped at *dist_threshold*) is found by brute force; the
    images "match" when at least the *accept* fraction of im1's keypoints
    found some close neighbor.  Returns False when im1 has no keypoints.
    """
    brisk = cv2.BRISK_create(thresh=hess_thresh)
    (kps1, descs1) = brisk.detectAndCompute(im1, None)
    (kps2, descs2) = brisk.detectAndCompute(im2, None)
    match_cnt = 0
    for i,k in enumerate(kps1):
        best_match = None  # (index into kps2, smallest distance seen) or None
        # NOTE(review): 'k' here shadows the outer loop variable (both unused).
        for j,k in enumerate(kps2):
            distance = np.linalg.norm(descs2[j]-descs1[i])
            if distance < dist_threshold:
                if best_match == None:
                    best_match = (j, distance)
                else:
                    best_match = (j,min(best_match[1], distance))
        # bool -> int: counts keypoints of im1 that found a close-enough neighbor.
        match_cnt += (best_match != None)
    if len(kps1) == 0:
        return False
    # Python 3 true division; the ratio is compared against the accept fraction.
    return (match_cnt/len(kps1) >= accept)
def labels_to_intervals(labels_list):
    """Convert per-frame label sets into maximal contiguous time intervals.

    Args:
        labels_list: one set of labels per frame, e.g.
            [{'person'}, {'person'}, {'person'}, {'surfboard', 'person'}]

    Returns:
        A set of ``(label, start, end)`` tuples where the label occurs on
        every frame in ``[start, end)`` (end exclusive) and the interval is
        maximal, e.g. {('cat', 3, 9), ('dog', 5, 8), ('people', 0, 6)}.
    """
    # Collect the frame indices on which each label appears.
    frames_by_label = {}
    for frame_idx, frame_labels in enumerate(labels_list):
        for tag in frame_labels:
            frames_by_label.setdefault(tag, set()).add(frame_idx)

    # Scan each label's sorted frame list for runs of consecutive indices.
    intervals = set()
    for tag, frames in frames_by_label.items():
        ordered = sorted(frames)
        run_start = prev = ordered[0]
        for f in ordered[1:]:
            if f != prev + 1:
                # Gap found: close the current run (end is exclusive).
                intervals.add((tag, run_start, prev + 1))
                run_start = f
            prev = f
        intervals.add((tag, run_start, prev + 1))
    return intervals
| 29.728155 | 111 | 0.633246 |
8219c809c801ccf965a889f8566b2f1bd144a7db | 403 | py | Python | django-app_poetry_base/django_app/django_snacks_base/django_snacks/wsgi.py | robertCodedIt/django-snacks | 99bd320a1f412f2d967479e8d22dfa52c31a0331 | [
"MIT"
] | null | null | null | django-app_poetry_base/django_app/django_snacks_base/django_snacks/wsgi.py | robertCodedIt/django-snacks | 99bd320a1f412f2d967479e8d22dfa52c31a0331 | [
"MIT"
] | null | null | null | django-app_poetry_base/django_app/django_snacks_base/django_snacks/wsgi.py | robertCodedIt/django-snacks | 99bd320a1f412f2d967479e8d22dfa52c31a0331 | [
"MIT"
] | null | null | null | """
WSGI config for django_snacks project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_snacks.settings')

# Module-level WSGI callable that WSGI servers import and call.
application = get_wsgi_application()
| 23.705882 | 78 | 0.791563 |
3957c2a340fab6846222dca93cfe8c43e09f8e89 | 10,183 | py | Python | squareconnect/models/batch_retrieve_inventory_changes_request.py | shaminmeerankutty/connect-python-sdk | 524c8fe344bc3c0340833984970a07d519c4f5be | [
"Apache-2.0"
] | 53 | 2016-08-06T17:12:16.000Z | 2020-08-02T19:43:58.000Z | squareconnect/models/batch_retrieve_inventory_changes_request.py | shaminmeerankutty/connect-python-sdk | 524c8fe344bc3c0340833984970a07d519c4f5be | [
"Apache-2.0"
] | 32 | 2016-08-19T16:32:30.000Z | 2020-01-14T18:01:37.000Z | squareconnect/models/batch_retrieve_inventory_changes_request.py | shaminmeerankutty/connect-python-sdk | 524c8fe344bc3c0340833984970a07d519c4f5be | [
"Apache-2.0"
] | 45 | 2016-09-05T11:58:09.000Z | 2020-11-15T16:26:41.000Z | # coding: utf-8
"""
Copyright 2017 Square, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class BatchRetrieveInventoryChangesRequest(object):
    """Request model for the Connect v2 ``BatchRetrieveInventoryChanges`` endpoint.

    NOTE: Originally produced by the swagger code generator.  Hand-edited to
    drop the ``six`` runtime dependency (plain ``dict`` iteration works on
    both Python 2 and 3) and to make ``__eq__`` safe when compared against
    objects of a different type (the generated version raised
    AttributeError on objects without ``__dict__``).
    """

    def __init__(self, catalog_object_ids=None, location_ids=None, types=None, states=None, updated_after=None, updated_before=None, cursor=None):
        """
        BatchRetrieveInventoryChangesRequest - a model defined in Swagger

        All parameters are optional filters/pagination controls; see the
        property docstrings below for the semantics of each.
        """
        # Maps attribute name -> declared swagger type (drives to_dict()).
        self.swagger_types = {
            'catalog_object_ids': 'list[str]',
            'location_ids': 'list[str]',
            'types': 'list[str]',
            'states': 'list[str]',
            'updated_after': 'str',
            'updated_before': 'str',
            'cursor': 'str'
        }

        # Maps attribute name -> JSON key in the wire format.
        self.attribute_map = {
            'catalog_object_ids': 'catalog_object_ids',
            'location_ids': 'location_ids',
            'types': 'types',
            'states': 'states',
            'updated_after': 'updated_after',
            'updated_before': 'updated_before',
            'cursor': 'cursor'
        }

        self._catalog_object_ids = catalog_object_ids
        self._location_ids = location_ids
        self._types = types
        self._states = states
        self._updated_after = updated_after
        self._updated_before = updated_before
        self._cursor = cursor

    @property
    def catalog_object_ids(self):
        """Filter results by CatalogObject ID. Only applied when set. Default: unset.

        :rtype: list[str]
        """
        return self._catalog_object_ids

    @catalog_object_ids.setter
    def catalog_object_ids(self, catalog_object_ids):
        """Set the CatalogObject ID filter (list[str])."""
        self._catalog_object_ids = catalog_object_ids

    @property
    def location_ids(self):
        """Filter results by Location ID. Only applied when set. Default: unset.

        :rtype: list[str]
        """
        return self._location_ids

    @location_ids.setter
    def location_ids(self, location_ids):
        """Set the Location ID filter (list[str])."""
        self._location_ids = location_ids

    @property
    def types(self):
        """Filter results by InventoryChangeType.

        Default: [`PHYSICAL_COUNT`, `ADJUSTMENT`]. `TRANSFER` is not
        supported as a filter.

        :rtype: list[str]
        """
        return self._types

    @types.setter
    def types(self, types):
        """Set the InventoryChangeType filter (list[str])."""
        self._types = types

    @property
    def states(self):
        """Filter `ADJUSTMENT` query results by InventoryState.

        Only applied when set. Default: unset.

        :rtype: list[str]
        """
        return self._states

    @states.setter
    def states(self, states):
        """Set the InventoryState filter for ADJUSTMENT results (list[str])."""
        self._states = states

    @property
    def updated_after(self):
        """RFC 3339 timestamp; return results whose `created_at` or
        `calculated_at` value is after the given time.

        Default: UNIX epoch (`1970-01-01T00:00:00Z`).

        :rtype: str
        """
        return self._updated_after

    @updated_after.setter
    def updated_after(self, updated_after):
        """Set the lower bound timestamp (RFC 3339 str)."""
        self._updated_after = updated_after

    @property
    def updated_before(self):
        """RFC 3339 timestamp; return results whose `created_at` or
        `calculated_at` value is strictly before the given time.

        Default: UNIX epoch (`1970-01-01T00:00:00Z`).

        :rtype: str
        """
        return self._updated_before

    @updated_before.setter
    def updated_before(self, updated_before):
        """Set the upper bound timestamp (RFC 3339 str)."""
        self._updated_before = updated_before

    @property
    def cursor(self):
        """Pagination cursor returned by a previous call to this endpoint.

        Provide this to retrieve the next set of results for the original
        query.

        :rtype: str
        """
        return self._cursor

    @cursor.setter
    def cursor(self, cursor):
        """Set the pagination cursor (str)."""
        self._cursor = cursor

    def to_dict(self):
        """Return the model properties as a dict, recursively converting
        nested models (anything exposing ``to_dict``) and containers.
        """
        result = {}

        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True when *other* is the same model type with equal state.

        BUGFIX: the generated version accessed ``other.__dict__`` directly,
        raising AttributeError for non-model objects (e.g. strings, ints).
        """
        if not isinstance(other, BatchRetrieveInventoryChangesRequest):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True if both objects are not equal."""
        return not self == other
| 36.761733 | 233 | 0.648826 |
498c333cb911e29dd3bbb2a51a4af2051c51c4cf | 42,648 | py | Python | networkx/classes/digraph.py | SultanOrazbayev/networkx | 5be9755636fa4da71da2e28f8467336d3c0164a7 | [
"BSD-3-Clause"
] | 2 | 2020-02-25T14:07:05.000Z | 2020-11-21T20:29:37.000Z | networkx/classes/digraph.py | SultanOrazbayev/networkx | 5be9755636fa4da71da2e28f8467336d3c0164a7 | [
"BSD-3-Clause"
] | 2 | 2020-10-16T04:18:16.000Z | 2021-11-19T20:00:15.000Z | networkx/classes/digraph.py | SultanOrazbayev/networkx | 5be9755636fa4da71da2e28f8467336d3c0164a7 | [
"BSD-3-Clause"
] | 1 | 2021-08-18T03:14:44.000Z | 2021-08-18T03:14:44.000Z | """Base class for directed graphs."""
from copy import deepcopy
import networkx as nx
from networkx.classes.graph import Graph
from networkx.classes.coreviews import AdjacencyView
from networkx.classes.reportviews import (
OutEdgeView,
InEdgeView,
DiDegreeView,
InDegreeView,
OutDegreeView,
)
from networkx.exception import NetworkXError
import networkx.convert as convert
__all__ = ["DiGraph"]
class DiGraph(Graph):
"""
Base class for directed graphs.
A DiGraph stores nodes and edges with optional data, or attributes.
DiGraphs hold directed edges. Self loops are allowed but multiple
(parallel) edges are not.
Nodes can be arbitrary (hashable) Python objects with optional
key/value attributes. By convention `None` is not used as a node.
Edges are represented as links between nodes with optional
key/value attributes.
Parameters
----------
incoming_graph_data : input graph (optional, default: None)
Data to initialize graph. If None (default) an empty
graph is created. The data can be any format that is supported
by the to_networkx_graph() function, currently including edge list,
dict of dicts, dict of lists, NetworkX graph, NumPy matrix
or 2d ndarray, SciPy sparse matrix, or PyGraphviz graph.
attr : keyword arguments, optional (default= no attributes)
Attributes to add to graph as key=value pairs.
See Also
--------
Graph
MultiGraph
MultiDiGraph
OrderedDiGraph
Examples
--------
Create an empty graph structure (a "null graph") with no nodes and
no edges.
>>> G = nx.DiGraph()
G can be grown in several ways.
**Nodes:**
Add one node at a time:
>>> G.add_node(1)
Add the nodes from any container (a list, dict, set or
even the lines from a file or the nodes from another graph).
>>> G.add_nodes_from([2, 3])
>>> G.add_nodes_from(range(100, 110))
>>> H = nx.path_graph(10)
>>> G.add_nodes_from(H)
In addition to strings and integers any hashable Python object
(except None) can represent a node, e.g. a customized node object,
or even another Graph.
>>> G.add_node(H)
**Edges:**
G can also be grown by adding edges.
Add one edge,
>>> G.add_edge(1, 2)
a list of edges,
>>> G.add_edges_from([(1, 2), (1, 3)])
or a collection of edges,
>>> G.add_edges_from(H.edges)
If some edges connect nodes not yet in the graph, the nodes
are added automatically. There are no errors when adding
nodes or edges that already exist.
**Attributes:**
Each graph, node, and edge can hold key/value attribute pairs
in an associated attribute dictionary (the keys must be hashable).
By default these are empty, but can be added or changed using
add_edge, add_node or direct manipulation of the attribute
dictionaries named graph, node and edge respectively.
>>> G = nx.DiGraph(day="Friday")
>>> G.graph
{'day': 'Friday'}
Add node attributes using add_node(), add_nodes_from() or G.nodes
>>> G.add_node(1, time="5pm")
>>> G.add_nodes_from([3], time="2pm")
>>> G.nodes[1]
{'time': '5pm'}
>>> G.nodes[1]["room"] = 714
>>> del G.nodes[1]["room"] # remove attribute
>>> list(G.nodes(data=True))
[(1, {'time': '5pm'}), (3, {'time': '2pm'})]
Add edge attributes using add_edge(), add_edges_from(), subscript
notation, or G.edges.
>>> G.add_edge(1, 2, weight=4.7)
>>> G.add_edges_from([(3, 4), (4, 5)], color="red")
>>> G.add_edges_from([(1, 2, {"color": "blue"}), (2, 3, {"weight": 8})])
>>> G[1][2]["weight"] = 4.7
>>> G.edges[1, 2]["weight"] = 4
Warning: we protect the graph data structure by making `G.edges[1, 2]` a
read-only dict-like structure. However, you can assign to attributes
in e.g. `G.edges[1, 2]`. Thus, use 2 sets of brackets to add/change
data attributes: `G.edges[1, 2]['weight'] = 4`
(For multigraphs: `MG.edges[u, v, key][name] = value`).
**Shortcuts:**
Many common graph features allow python syntax to speed reporting.
>>> 1 in G # check if node in graph
True
>>> [n for n in G if n < 3] # iterate through nodes
[1, 2]
>>> len(G) # number of nodes in graph
5
Often the best way to traverse all edges of a graph is via the neighbors.
The neighbors are reported as an adjacency-dict `G.adj` or `G.adjacency()`
>>> for n, nbrsdict in G.adjacency():
... for nbr, eattr in nbrsdict.items():
... if "weight" in eattr:
... # Do something useful with the edges
... pass
But the edges reporting object is often more convenient:
>>> for u, v, weight in G.edges(data="weight"):
... if weight is not None:
... # Do something useful with the edges
... pass
**Reporting:**
Simple graph information is obtained using object-attributes and methods.
Reporting usually provides views instead of containers to reduce memory
usage. The views update as the graph is updated similarly to dict-views.
The objects `nodes, `edges` and `adj` provide access to data attributes
via lookup (e.g. `nodes[n], `edges[u, v]`, `adj[u][v]`) and iteration
(e.g. `nodes.items()`, `nodes.data('color')`,
`nodes.data('color', default='blue')` and similarly for `edges`)
Views exist for `nodes`, `edges`, `neighbors()`/`adj` and `degree`.
For details on these and other miscellaneous methods, see below.
**Subclasses (Advanced):**
The Graph class uses a dict-of-dict-of-dict data structure.
The outer dict (node_dict) holds adjacency information keyed by node.
The next dict (adjlist_dict) represents the adjacency information and holds
edge data keyed by neighbor. The inner dict (edge_attr_dict) represents
the edge data and holds edge attribute values keyed by attribute names.
Each of these three dicts can be replaced in a subclass by a user defined
dict-like object. In general, the dict-like features should be
maintained but extra features can be added. To replace one of the
dicts create a new graph class by changing the class(!) variable
holding the factory for that dict-like structure. The variable names are
node_dict_factory, node_attr_dict_factory, adjlist_inner_dict_factory,
adjlist_outer_dict_factory, edge_attr_dict_factory and graph_attr_dict_factory.
node_dict_factory : function, (default: dict)
Factory function to be used to create the dict containing node
attributes, keyed by node id.
It should require no arguments and return a dict-like object
node_attr_dict_factory: function, (default: dict)
Factory function to be used to create the node attribute
dict which holds attribute values keyed by attribute name.
It should require no arguments and return a dict-like object
adjlist_outer_dict_factory : function, (default: dict)
Factory function to be used to create the outer-most dict
in the data structure that holds adjacency info keyed by node.
It should require no arguments and return a dict-like object.
adjlist_inner_dict_factory : function, optional (default: dict)
Factory function to be used to create the adjacency list
dict which holds edge data keyed by neighbor.
It should require no arguments and return a dict-like object
edge_attr_dict_factory : function, optional (default: dict)
Factory function to be used to create the edge attribute
dict which holds attribute values keyed by attribute name.
It should require no arguments and return a dict-like object.
graph_attr_dict_factory : function, (default: dict)
Factory function to be used to create the graph attribute
dict which holds attribute values keyed by attribute name.
It should require no arguments and return a dict-like object.
Typically, if your extension doesn't impact the data structure all
methods will inherited without issue except: `to_directed/to_undirected`.
By default these methods create a DiGraph/Graph class and you probably
want them to create your extension of a DiGraph/Graph. To facilitate
this we define two class variables that you can set in your subclass.
to_directed_class : callable, (default: DiGraph or MultiDiGraph)
Class to create a new graph structure in the `to_directed` method.
If `None`, a NetworkX class (DiGraph or MultiDiGraph) is used.
to_undirected_class : callable, (default: Graph or MultiGraph)
Class to create a new graph structure in the `to_undirected` method.
If `None`, a NetworkX class (Graph or MultiGraph) is used.
**Subclassing Example**
Create a low memory graph class that effectively disallows edge
attributes by using a single attribute dict for all edges.
This reduces the memory used, but you lose edge attributes.
>>> class ThinGraph(nx.Graph):
... all_edge_dict = {"weight": 1}
...
... def single_edge_dict(self):
... return self.all_edge_dict
...
... edge_attr_dict_factory = single_edge_dict
>>> G = ThinGraph()
>>> G.add_edge(2, 1)
>>> G[2][1]
{'weight': 1}
>>> G.add_edge(2, 2)
>>> G[2][1] is G[2][2]
True
Please see :mod:`~networkx.classes.ordered` for more examples of
creating graph subclasses by overwriting the base class `dict` with
a dictionary-like object.
"""
def __init__(self, incoming_graph_data=None, **attr):
    """Initialize a graph with edges, name, or graph attributes.

    Parameters
    ----------
    incoming_graph_data : input graph (optional, default: None)
        Data to initialize graph. If None (default) an empty
        graph is created. The data can be an edge list, or any
        NetworkX graph object. If the corresponding optional Python
        packages are installed the data can also be a NumPy matrix
        or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph.
    attr : keyword arguments, optional (default= no attributes)
        Attributes to add to graph as key=value pairs.

    Examples
    --------
    >>> G = nx.Graph()  # or DiGraph, MultiGraph, MultiDiGraph, etc
    >>> G = nx.Graph(name="my graph")
    >>> e = [(1, 2), (2, 3), (3, 4)]  # list of edges
    >>> G = nx.Graph(e)
    >>> G = nx.Graph(e, day="Friday")
    >>> G.graph
    {'day': 'Friday'}
    """
    # These self-assignments look like no-ops but are deliberate: they read
    # the factory from the class (or a subclass override) and pin it onto the
    # instance, so later per-instance replacement never mutates the class.
    self.graph_attr_dict_factory = self.graph_attr_dict_factory
    self.node_dict_factory = self.node_dict_factory
    self.node_attr_dict_factory = self.node_attr_dict_factory
    self.adjlist_outer_dict_factory = self.adjlist_outer_dict_factory
    self.adjlist_inner_dict_factory = self.adjlist_inner_dict_factory
    self.edge_attr_dict_factory = self.edge_attr_dict_factory
    self.graph = self.graph_attr_dict_factory()  # dictionary for graph attributes
    self._node = self.node_dict_factory()  # dictionary for node attr
    # We store two adjacency lists:
    # the predecessors of node n are stored in the dict self._pred
    # the successors of node n are stored in the dict self._succ=self._adj
    # (_adj and _succ are the SAME dict object, so base-class code that
    # uses _adj automatically sees the successor structure).
    self._adj = self.adjlist_outer_dict_factory()  # empty adjacency dict
    self._pred = self.adjlist_outer_dict_factory()  # predecessor
    self._succ = self._adj  # successor
    # attempt to load graph with data
    if incoming_graph_data is not None:
        convert.to_networkx_graph(incoming_graph_data, create_using=self)
    # load graph attributes (must be after convert, so keyword attributes
    # win over anything the conversion placed in self.graph)
    self.graph.update(attr)
@property
def adj(self):
    """Graph adjacency object holding the neighbors of each node.

    A read-only dict-of-dicts view: ``G.adj[u]`` maps each neighbor ``v``
    to the edge-data dict, so ``G.adj[3][2]['color'] = 'blue'`` sets the
    color of edge ``(3, 2)``.  Iterating behaves like a dict, e.g.
    ``for nbr, datadict in G.adj[n].items():``.  Subscripting the graph
    (``G[node]``) exposes the same information.  For directed graphs this
    reports outgoing (successor) adjacency.
    """
    # For a DiGraph, adjacency means the successor structure.
    return AdjacencyView(self._succ)
@property
def succ(self):
    """Graph adjacency object holding the successors of each node.

    A read-only dict-of-dicts view: ``G.succ[u]`` maps each successor ``v``
    to the edge-data dict, so ``G.succ[3][2]['color'] = 'blue'`` sets the
    color of edge ``(3, 2)``.  Iterating behaves like a dict, and a
    data-view is available via ``G.succ[node].data('foo', default=...)``.
    For directed graphs ``G.adj`` is identical to ``G.succ``.
    """
    # Identical to `adj` on a DiGraph: both wrap the successor dict.
    return AdjacencyView(self._succ)
@property
def pred(self):
    """Graph adjacency object holding the predecessors of each node.

    A read-only dict-of-dicts view keyed node -> predecessor -> edge-data
    dict, so ``G.pred[2][3]['color'] = 'blue'`` sets the color of edge
    ``(3, 2)``.  Iterating behaves like a dict; a data-view is available
    via ``G.pred[node].data('foo', default=...)``.
    """
    # Wraps the incoming-edge structure, mirror image of `succ`.
    return AdjacencyView(self._pred)
def add_node(self, node_for_adding, **attr):
    """Add a single node `node_for_adding` and update node attributes.

    Parameters
    ----------
    node_for_adding : node
        Any hashable Python object except None.
    attr : keyword arguments, optional
        Set or change node attributes using key=value.

    Notes
    -----
    Adding a node that already exists is not an error; its attribute
    dict is simply updated with ``attr``.

    Examples
    --------
    >>> G = nx.Graph()
    >>> G.add_node(1, size=10)
    >>> G.add_node(3, weight=0.4, UTM=("13S", 382871, 3972649))
    """
    if node_for_adding in self._succ:
        # Known node: merge the new attributes into the existing dict.
        self._node[node_for_adding].update(attr)
        return
    if node_for_adding is None:
        raise ValueError("None cannot be a node")
    # Fresh node: create empty successor/predecessor adjacency and an
    # attribute dict seeded from the keyword arguments.
    self._succ[node_for_adding] = self.adjlist_inner_dict_factory()
    self._pred[node_for_adding] = self.adjlist_inner_dict_factory()
    self._node[node_for_adding] = self.node_attr_dict_factory()
    self._node[node_for_adding].update(attr)
def add_nodes_from(self, nodes_for_adding, **attr):
    """Add multiple nodes.

    Parameters
    ----------
    nodes_for_adding : iterable container
        A container of nodes (list, dict, set, etc.), OR a container of
        (node, attribute-dict) tuples.  Tuple attributes update the node
        attribute dict.
    attr : keyword arguments, optional (default= no attributes)
        Update attributes for all nodes.  Per-node attributes given in
        tuples take precedence over these keyword arguments.

    Examples
    --------
    >>> G = nx.Graph()
    >>> G.add_nodes_from([1, 2], size=10)
    >>> G.add_nodes_from([(1, dict(size=11)), (2, {"color": "blue"})])
    >>> G.nodes[1]["size"]
    11
    """
    for item in nodes_for_adding:
        try:
            is_new = item not in self._node
            data = attr
        except TypeError:
            # Unhashable: must be a (node, attr-dict) pair.  Per-node
            # attributes override the shared keyword attributes.
            item, extra = item
            is_new = item not in self._node
            data = dict(attr)
            data.update(extra)
        if is_new:
            if item is None:
                raise ValueError("None cannot be a node")
            self._succ[item] = self.adjlist_inner_dict_factory()
            self._pred[item] = self.adjlist_inner_dict_factory()
            self._node[item] = self.node_attr_dict_factory()
        self._node[item].update(data)
def remove_node(self, n):
    """Remove node n.

    Removes the node n and all adjacent edges.
    Attempting to remove a non-existent node will raise an exception.

    Parameters
    ----------
    n : node
        A node in the graph

    Raises
    ------
    NetworkXError
        If n is not in the graph.

    Examples
    --------
    >>> G = nx.path_graph(3)  # or DiGraph, MultiGraph, MultiDiGraph, etc
    >>> list(G.edges)
    [(0, 1), (1, 2)]
    >>> G.remove_node(1)
    >>> list(G.edges)
    []
    """
    try:
        nbrs = self._succ[n]
        del self._node[n]
    except KeyError as err:  # NetworkXError if n not in self
        raise NetworkXError(f"The node {n} is not in the digraph.") from err
    # NOTE: the deletion order below matters when n has a self-loop.
    # The first pass removes n from every predecessor dict of its
    # successors (including self._pred[n][n] for a self-loop), so the
    # second pass over self._pred[n] never revisits the already-deleted
    # self._succ[n] entry.
    for u in nbrs:
        del self._pred[u][n]  # remove all edges n-u in digraph
    del self._succ[n]  # remove node from succ
    for u in self._pred[n]:
        del self._succ[u][n]  # remove all edges n-u in digraph
    del self._pred[n]  # remove node from pred
def remove_nodes_from(self, nodes):
    """Remove multiple nodes.

    Parameters
    ----------
    nodes : iterable container
        A container of nodes (list, dict, set, etc.). If a node
        in the container is not in the graph it is silently ignored.

    Examples
    --------
    >>> G = nx.path_graph(3)  # or DiGraph, MultiGraph, MultiDiGraph, etc
    >>> e = list(G.nodes)
    >>> e
    [0, 1, 2]
    >>> G.remove_nodes_from(e)
    >>> list(G.nodes)
    []
    """
    for n in nodes:
        try:
            succs = self._succ[n]
            del self._node[n]
            # Same self-loop-safe ordering as remove_node: clear the
            # predecessor entries of all successors first, then the
            # successor entries of all remaining predecessors.
            for u in succs:
                del self._pred[u][n]  # remove all edges n-u in digraph
            del self._succ[n]  # now remove node
            for u in self._pred[n]:
                del self._succ[u][n]  # remove all edges n-u in digraph
            del self._pred[n]  # now remove node
        except KeyError:
            # Silent failure on remove: the broad KeyError also swallows
            # any inconsistency hit mid-deletion, not just "n not found".
            pass  # silent failure on remove
def add_edge(self, u_of_edge, v_of_edge, **attr):
    """Add an edge between u and v.

    Both endpoints are added to the graph automatically if missing.
    Adding an edge that already exists updates its data dict.

    Parameters
    ----------
    u_of_edge, v_of_edge : nodes
        Hashable (and not None) Python objects, e.g. strings or numbers.
    attr : keyword arguments, optional
        Edge data (or labels or objects) assigned as key=value.

    See Also
    --------
    add_edges_from : add a collection of edges

    Examples
    --------
    >>> G = nx.Graph()
    >>> G.add_edge(1, 2)
    >>> G.add_edge(1, 3, weight=7, capacity=15, length=342.7)
    """
    u, v = u_of_edge, v_of_edge
    # Create any missing endpoint with empty adjacency and attributes.
    for endpoint in (u, v):
        if endpoint not in self._succ:
            if endpoint is None:
                raise ValueError("None cannot be a node")
            self._succ[endpoint] = self.adjlist_inner_dict_factory()
            self._pred[endpoint] = self.adjlist_inner_dict_factory()
            self._node[endpoint] = self.node_attr_dict_factory()
    # Reuse the existing edge-data dict if the edge is already present,
    # then store the SAME dict in both adjacency directions.
    edge_data = self._adj[u].get(v, self.edge_attr_dict_factory())
    edge_data.update(attr)
    self._succ[u][v] = edge_data
    self._pred[v][u] = edge_data
def add_edges_from(self, ebunch_to_add, **attr):
    """Add all the edges in ebunch_to_add.

    Parameters
    ----------
    ebunch_to_add : container of edges
        Edges given as 2-tuples (u, v) or 3-tuples (u, v, d) where d is
        a dictionary of edge data.
    attr : keyword arguments, optional
        Edge data applied to every edge; per-edge dicts in the ebunch
        take precedence over these keyword arguments.

    See Also
    --------
    add_edge : add a single edge

    Notes
    -----
    Adding the same edge twice merely updates the edge data.

    Examples
    --------
    >>> G = nx.Graph()
    >>> G.add_edges_from([(0, 1), (1, 2)])
    >>> G.add_edges_from([(1, 2), (2, 3)], weight=3)
    """
    for e in ebunch_to_add:
        if len(e) == 3:
            u, v, dd = e
        elif len(e) == 2:
            u, v = e
            dd = {}
        else:
            raise NetworkXError(f"Edge tuple {e} must be a 2-tuple or 3-tuple.")
        # Create any missing endpoint with empty adjacency and attributes.
        for endpoint in (u, v):
            if endpoint not in self._succ:
                if endpoint is None:
                    raise ValueError("None cannot be a node")
                self._succ[endpoint] = self.adjlist_inner_dict_factory()
                self._pred[endpoint] = self.adjlist_inner_dict_factory()
                self._node[endpoint] = self.node_attr_dict_factory()
        # Shared keyword attrs first, then the per-edge dict so it wins.
        edge_data = self._adj[u].get(v, self.edge_attr_dict_factory())
        edge_data.update(attr)
        edge_data.update(dd)
        self._succ[u][v] = edge_data
        self._pred[v][u] = edge_data
def remove_edge(self, u, v):
    """Remove the edge between u and v.

    Parameters
    ----------
    u, v : nodes
        Remove the edge between nodes u and v.

    Raises
    ------
    NetworkXError
        If there is not an edge between u and v.

    See Also
    --------
    remove_edges_from : remove a collection of edges

    Examples
    --------
    >>> G = nx.Graph()  # or DiGraph, etc
    >>> nx.add_path(G, [0, 1, 2, 3])
    >>> G.remove_edge(0, 1)
    >>> e = (1, 2)
    >>> G.remove_edge(*e)  # unpacks e from an edge tuple
    """
    try:
        # The edge-data dict is shared between both adjacency maps;
        # removing both references drops the edge completely.
        del self._succ[u][v]
        del self._pred[v][u]
    except KeyError as err:
        # Chain the KeyError so callers can inspect the original cause.
        raise NetworkXError(f"The edge {u}-{v} not in graph.") from err
def remove_edges_from(self, ebunch):
    """Remove all edges specified in ebunch.

    Parameters
    ----------
    ebunch : list or container of edge tuples
        Each edge can be a 2-tuple (u, v) or a 3-tuple (u, v, k)
        where k is ignored.

    See Also
    --------
    remove_edge : remove a single edge

    Notes
    -----
    Edges absent from the graph are silently skipped.

    Examples
    --------
    >>> G = nx.path_graph(4)
    >>> G.remove_edges_from([(1, 2), (2, 3)])
    """
    for edge in ebunch:
        u, v = edge[:2]  # any extra tuple entries (edge data/keys) are ignored
        nbrs = self._succ.get(u)
        if nbrs is not None and v in nbrs:
            del nbrs[v]
            del self._pred[v][u]
def has_successor(self, u, v):
    """Returns True if node u has successor v.

    This is true if the graph contains the directed edge u->v.
    """
    # Missing u yields an empty default, so the membership test is False.
    return v in self._succ.get(u, ())
def has_predecessor(self, u, v):
    """Returns True if node u has predecessor v.

    This is true if the graph contains the directed edge u<-v.
    """
    # Missing u yields an empty default, so the membership test is False.
    return v in self._pred.get(u, ())
def successors(self, n):
    """Returns an iterator over successor nodes of n.

    A successor of n is a node m such that the directed edge n->m exists.

    Parameters
    ----------
    n : node
        A node in the graph

    Raises
    ------
    NetworkXError
        If n is not in the graph.

    See Also
    --------
    predecessors

    Notes
    -----
    neighbors() and successors() are the same.
    """
    try:
        nbrs = self._succ[n]
    except KeyError as err:
        raise NetworkXError(f"The node {n} is not in the digraph.") from err
    return iter(nbrs)

# digraph definitions
neighbors = successors
def predecessors(self, n):
    """Returns an iterator over predecessor nodes of n.

    A predecessor of n is a node m such that the directed edge m->n exists.

    Parameters
    ----------
    n : node
        A node in the graph

    Raises
    ------
    NetworkXError
        If n is not in the graph.

    See Also
    --------
    successors
    """
    try:
        nbrs = self._pred[n]
    except KeyError as err:
        raise NetworkXError(f"The node {n} is not in the digraph.") from err
    return iter(nbrs)
@property
def edges(self):
    """An OutEdgeView of the DiGraph as G.edges or G.edges().

    edges(self, nbunch=None, data=False, default=None)

    Set-like view of the edge tuples with attribute lookup:
    ``G.edges[u, v]['color']`` returns the color of edge ``(u, v)``,
    while ``for u, v, c in G.edges.data('color', default='red'):``
    iterates all edges yielding the color (with a fallback).

    Parameters
    ----------
    nbunch : single node, container, or all nodes (default= all nodes)
        Only report edges from these nodes (quietly ignoring nodes
        not in the graph).
    data : string or bool, optional (default=False)
        False -> 2-tuples (u, v); True -> (u, v, ddict);
        a string name -> (u, v, ddict[data]).
    default : value, optional (default=None)
        Used for edges lacking the requested attribute.
        Only relevant if data is not True or False.

    Returns
    -------
    edges : OutEdgeView
        Iterates over (u, v) or (u, v, d) tuples and supports
        attribute lookup as ``edges[u, v]['foo']``.

    See Also
    --------
    in_edges, out_edges

    Notes
    -----
    For directed graphs this reports the out-edges.
    """
    return OutEdgeView(self)

# On a DiGraph, `edges` means out-edges, so the names are aliases.
out_edges = edges
@property
def in_edges(self):
    """An InEdgeView of the Graph as G.in_edges or G.in_edges().

    in_edges(self, nbunch=None, data=False, default=None)

    Parameters
    ----------
    nbunch : single node, container, or all nodes (default= all nodes)
        Only report edges incident to these nodes.
    data : string or bool, optional (default=False)
        False -> 2-tuples (u, v); True -> (u, v, ddict);
        a string name -> (u, v, ddict[data]).
    default : value, optional (default=None)
        Used for edges lacking the requested attribute.
        Only relevant if data is not True or False.

    Returns
    -------
    in_edges : InEdgeView
        Iterates over (u, v) or (u, v, d) tuples and supports
        attribute lookup as ``edges[u, v]['foo']``.

    See Also
    --------
    edges
    """
    return InEdgeView(self)
@property
def degree(self):
    """A DegreeView for the Graph as G.degree or G.degree().

    The node degree is the number of edges adjacent to the node; the
    weighted degree is the sum of the adjacent edge weights.  The view
    iterates (node, degree) pairs and supports single-node lookup.

    Parameters
    ----------
    nbunch : single node, container, or all nodes (default= all nodes)
        Only report edges incident to these nodes.
    weight : string or None, optional (default=None)
        Name of the edge attribute used as weight; if None every edge
        counts as 1.

    Returns
    -------
    deg : int (single node requested) -- degree of that node, OR
    nd_iter : iterator -- yields (node, degree) two-tuples.

    See Also
    --------
    in_degree, out_degree

    Examples
    --------
    >>> G = nx.DiGraph()  # or MultiDiGraph
    >>> nx.add_path(G, [0, 1, 2, 3])
    >>> G.degree(0)  # node 0 with degree 1
    1
    >>> list(G.degree([0, 1, 2]))
    [(0, 1), (1, 2), (2, 2)]
    """
    # Directed degree = in_degree + out_degree, handled by DiDegreeView.
    return DiDegreeView(self)
@property
def in_degree(self):
    """An InDegreeView for (node, in_degree) or in_degree for single node.

    The node in_degree is the number of edges pointing to the node; the
    weighted variant sums the incoming edge weights.  The view iterates
    (node, in_degree) pairs and supports single-node lookup.

    Parameters
    ----------
    nbunch : single node, container, or all nodes (default= all nodes)
        Only report edges incident to these nodes.
    weight : string or None, optional (default=None)
        Name of the edge attribute used as weight; if None every edge
        counts as 1.

    Returns
    -------
    deg : int (single node requested) -- in-degree of that node, OR
    nd_iter : iterator -- yields (node, in-degree) two-tuples.

    See Also
    --------
    degree, out_degree

    Examples
    --------
    >>> G = nx.DiGraph()
    >>> nx.add_path(G, [0, 1, 2, 3])
    >>> G.in_degree(0)  # node 0 with degree 0
    0
    >>> list(G.in_degree([0, 1, 2]))
    [(0, 0), (1, 1), (2, 1)]
    """
    return InDegreeView(self)
@property
def out_degree(self):
    """An OutDegreeView for (node, out_degree)

    The node out_degree is the number of edges pointing out of the node;
    the weighted variant sums the outgoing edge weights.  The view
    iterates (node, out_degree) pairs and supports single-node lookup.

    Parameters
    ----------
    nbunch : single node, container, or all nodes (default= all nodes)
        Only report edges incident to these nodes.
    weight : string or None, optional (default=None)
        Name of the edge attribute used as weight; if None every edge
        counts as 1.

    Returns
    -------
    deg : int (single node requested) -- out-degree of that node, OR
    nd_iter : iterator -- yields (node, out-degree) two-tuples.

    See Also
    --------
    degree, in_degree

    Examples
    --------
    >>> G = nx.DiGraph()
    >>> nx.add_path(G, [0, 1, 2, 3])
    >>> G.out_degree(0)  # node 0 with degree 1
    1
    >>> list(G.out_degree([0, 1, 2]))
    [(0, 1), (1, 1), (2, 1)]
    """
    return OutDegreeView(self)
def clear(self):
    """Remove all nodes and edges from the graph.

    This also removes the name, and all graph, node, and edge attributes.

    Examples
    --------
    >>> G = nx.path_graph(4)
    >>> G.clear()
    >>> list(G.nodes)
    []
    >>> list(G.edges)
    []
    """
    # Empty every backing store in place so existing views stay valid.
    for store in (self._succ, self._pred, self._node, self.graph):
        store.clear()
def clear_edges(self):
    """Remove all edges from the graph without altering nodes.

    Examples
    --------
    >>> G = nx.path_graph(4)
    >>> G.clear_edges()
    >>> list(G.nodes)
    [0, 1, 2, 3]
    >>> list(G.edges)
    []
    """
    # Empty each per-node adjacency dict in place; node entries remain.
    for nbr_dict in self._succ.values():
        nbr_dict.clear()
    for nbr_dict in self._pred.values():
        nbr_dict.clear()
def is_multigraph(self):
    """Returns True if graph is a multigraph, False otherwise."""
    # A plain DiGraph never holds parallel edges.
    return False
def is_directed(self):
    """Returns True if graph is directed, False otherwise."""
    # This class models directed graphs, so this is always True.
    return True
def to_undirected(self, reciprocal=False, as_view=False):
    """Returns an undirected representation of the digraph.

    Parameters
    ----------
    reciprocal : bool (optional)
        If True only keep edges that appear in both directions
        in the original digraph.
    as_view : bool (optional, default=False)
        If True return an undirected view of the original directed graph.

    Returns
    -------
    G : Graph
        An undirected graph with the same name and nodes, containing
        edge (u, v, data) if either (u, v, data) or (v, u, data) is in
        the digraph.  If both directions exist with different data, one
        is kept arbitrarily -- check and correct manually if needed.

    Notes
    -----
    When both (u, v) and (v, u) exist, the surviving attribute dict is
    whichever direction is encountered last.  Unless ``as_view`` is
    requested, the result is a deepcopy of graph/node/edge attributes
    (contrast with the shallow copy from ``Graph(D)``).

    Warning: subclasses of DiGraph using custom dict-like structures do
    not transfer those structures to the Graph created here.

    Examples
    --------
    >>> G = nx.path_graph(2)
    >>> H = G.to_directed()
    >>> list(H.edges)
    [(0, 1), (1, 0)]
    >>> G2 = H.to_undirected()
    >>> list(G2.edges)
    [(0, 1)]
    """
    graph_class = self.to_undirected_class()
    if as_view is True:
        return nx.graphviews.generic_graph_view(self, graph_class)
    # Not a view: build a fresh graph with deep-copied attributes.
    G = graph_class()
    G.graph.update(deepcopy(self.graph))
    G.add_nodes_from((n, deepcopy(d)) for n, d in self._node.items())
    # Identity check on `reciprocal` mirrors the original behavior:
    # only the literal True enables the both-directions filter.
    G.add_edges_from(
        (u, v, deepcopy(d))
        for u, nbrs in self._adj.items()
        for v, d in nbrs.items()
        if reciprocal is not True or v in self._pred[u]
    )
    return G
def reverse(self, copy=True):
    """Returns the reverse of the graph.

    The reverse is a graph with the same nodes and edges
    but with the directions of the edges reversed.

    Parameters
    ----------
    copy : bool optional (default=True)
        If True, return a new DiGraph holding the reversed edges.
        If False, the reverse graph is created using a view of
        the original graph.
    """
    if not copy:
        return nx.graphviews.reverse_view(self)
    # Build an independent graph of the same (sub)class with every
    # edge flipped and all attributes deep-copied.
    H = self.__class__()
    H.graph.update(deepcopy(self.graph))
    H.add_nodes_from((n, deepcopy(d)) for n, d in self.nodes.items())
    H.add_edges_from((v, u, deepcopy(d)) for u, v, d in self.edges(data=True))
    return H
| 34.900164 | 86 | 0.583732 |
a19f115944654f7bce5cae85b755d9ba96482200 | 4,824 | py | Python | gluon/contrib/pbkdf2.py | AustinKellar/web2py_installation | b6b8bb890762f875871d11a934b5ed7aea33563c | [
"BSD-3-Clause"
] | 1,573 | 2015-01-01T07:19:06.000Z | 2022-03-30T09:06:06.000Z | gluon/contrib/pbkdf2.py | AustinKellar/web2py_installation | b6b8bb890762f875871d11a934b5ed7aea33563c | [
"BSD-3-Clause"
] | 1,691 | 2015-01-03T11:03:23.000Z | 2022-03-30T07:27:28.000Z | gluon/contrib/pbkdf2.py | AustinKellar/web2py_installation | b6b8bb890762f875871d11a934b5ed7aea33563c | [
"BSD-3-Clause"
] | 895 | 2015-01-03T19:56:15.000Z | 2022-03-18T18:30:57.000Z | # -*- coding: utf-8 -*-
"""
pbkdf2
~~~~~~
This module implements pbkdf2 for Python. It also has some basic
tests that ensure that it works. The implementation is straightforward
and uses stdlib only stuff and can be easily be copy/pasted into
your favourite application.
Use this as replacement for bcrypt that does not need a c implementation
of a modified blowfish crypto algo.
Example usage:
>>> pbkdf2_hex('what i want to hash', 'the random salt')
'fa7cc8a2b0a932f8e6ea42f9787e9d36e592e0c222ada6a9'
How to use this:
1. Use a constant time string compare function to compare the stored hash
with the one you're generating::
def safe_str_cmp(a, b):
if len(a) != len(b):
return False
rv = 0
for x, y in izip(a, b):
rv |= ord(x) ^ ord(y)
return rv == 0
2. Use `os.urandom` to generate a proper salt of at least 8 byte.
Use a unique salt per hashed password.
3. Store ``algorithm$salt:costfactor$hash`` in the database so that
you can upgrade later easily to a different algorithm if you need
one. For instance ``PBKDF2-256$thesalt:10000$deadbeef...``.
:copyright: (c) Copyright 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import binascii
import hashlib
import hmac
from itertools import izip, starmap
from operator import xor
from struct import Struct
_pack_int = Struct('>I').pack
def pbkdf2_hex(data, salt, iterations=1000, keylen=24, hashfunc=None):
    """Like :func:`pbkdf2_bin` but returns a hex encoded string.

    :param data: the password/secret to derive the key from
    :param salt: the per-password random salt
    :param iterations: number of PBKDF2 rounds (default 1000)
    :param keylen: length of the derived key in bytes (default 24)
    :param hashfunc: hashlib constructor to use (default ``hashlib.sha1``)
    """
    # str.encode('hex') relies on the Python-2-only 'hex' codec;
    # binascii.hexlify behaves identically on Python 2 and also works on
    # Python 3 (where it returns bytes, hence the conditional decode).
    rv = binascii.hexlify(pbkdf2_bin(data, salt, iterations, keylen, hashfunc))
    if not isinstance(rv, str):
        rv = rv.decode('ascii')
    return rv
def pbkdf2_bin(data, salt, iterations=1000, keylen=24, hashfunc=None):
    """Returns a binary digest for the PBKDF2 hash algorithm of `data`
    with the given `salt`. It iterates `iterations` time and produces a
    key of `keylen` bytes. By default SHA-1 is used as hash function,
    a different hashlib `hashfunc` can be provided.

    :param data: the password/secret (a byte string; native str on Python 2)
    :param salt: the random salt (same type as `data`)
    :param iterations: number of rounds; each extra round multiplies an
        attacker's work factor
    :param keylen: number of bytes of key material to derive
    :param hashfunc: hashlib constructor, e.g. ``hashlib.sha256``
    """
    hashfunc = hashfunc or hashlib.sha1
    mac = hmac.new(data, None, hashfunc)
    pack_int = Struct('>I').pack  # big-endian 32-bit block counter (RFC 2898)

    def _pseudorandom(x, mac=mac):
        # One HMAC application; copying the keyed state avoids re-keying
        # the HMAC for every iteration.
        h = mac.copy()
        h.update(x)
        return bytearray(h.digest())

    buf = bytearray()
    # ceil(keylen / digest_size) blocks are needed to cover keylen bytes.
    for block in range(1, -(-keylen // mac.digest_size) + 1):
        rv = u = _pseudorandom(salt + pack_int(block))
        for _ in range(iterations - 1):
            u = _pseudorandom(bytes(u))
            # Accumulate U_1 xor U_2 xor ... bytewise.
            rv = bytearray(a ^ b for a, b in zip(rv, u))
        buf.extend(rv)
    return bytes(buf[:keylen])
def test():
    """Self-test against the RFC 6070 and Crypt-PBKDF2 test vectors.

    Exits the interpreter via SystemExit: status 1 if any vector fails,
    0 if all pass.
    """
    failed = []

    def check(data, salt, iterations, keylen, expected):
        rv = pbkdf2_hex(data, salt, iterations, keylen)
        if rv == expected:
            return
        print('Test failed:')
        print(' Expected: %s' % expected)
        print(' Got: %s' % rv)
        print(' Parameters:')
        print(' data=%s' % data)
        print(' salt=%s' % salt)
        print(' iterations=%d' % iterations)
        print()
        failed.append(1)

    vectors = [
        # From RFC 6070
        ('password', 'salt', 1, 20,
         '0c60c80f961f0e71f3a9b524af6012062fe037a6'),
        ('password', 'salt', 2, 20,
         'ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957'),
        ('password', 'salt', 4096, 20,
         '4b007901b765489abead49d926f721d065a429c1'),
        ('passwordPASSWORDpassword', 'saltSALTsaltSALTsaltSALTsaltSALTsalt',
         4096, 25, '3d2eec4fe41c849b80c8d83662c0e44a8b291a964cf2f07038'),
        ('pass\x00word', 'sa\x00lt', 4096, 16,
         '56fa6aa75548099dcc37d7f03425e0c3'),
        # The RFC's 16777216-iteration vector is deliberately skipped:
        # it takes far too long for a routine self-test.
        # From Crypt-PBKDF2
        ('password', 'ATHENA.MIT.EDUraeburn', 1, 16,
         'cdedb5281bb2f801565a1122b2563515'),
        ('password', 'ATHENA.MIT.EDUraeburn', 1, 32,
         'cdedb5281bb2f801565a1122b25635150ad1f7a04bb9f3a333ecc0e2e1f70837'),
        ('password', 'ATHENA.MIT.EDUraeburn', 2, 16,
         '01dbee7f4a9e243e988b62c73cda935d'),
        ('password', 'ATHENA.MIT.EDUraeburn', 2, 32,
         '01dbee7f4a9e243e988b62c73cda935da05378b93244ec8f48a99e61ad799d86'),
        ('password', 'ATHENA.MIT.EDUraeburn', 1200, 32,
         '5c08eb61fdf71e4e4ec3cf6ba1f5512ba7e52ddbc5e5142f708a31e2e62b1e13'),
        ('X' * 64, 'pass phrase equals block size', 1200, 32,
         '139c30c0966bc32ba55fdbf212530ac9c5ec59f1a452f5cc9ad940fea0598ed1'),
        ('X' * 65, 'pass phrase exceeds block size', 1200, 32,
         '9ccad6d468770cd51b10e6a68721be611a8b4d282601db3b36be9246915ec82a'),
    ]
    for data, salt, iterations, keylen, expected in vectors:
        check(data, salt, iterations, keylen, expected)
    raise SystemExit(bool(failed))
if __name__ == '__main__':
    # Running the module directly executes the self-test; test() raises
    # SystemExit with a truthy status when any vector failed.
    test()
| 36.824427 | 78 | 0.641169 |
1178a100b889180dbbd248234933ca474b0c3a6f | 2,003 | py | Python | backend/users/models.py | mnieber/django-graphql-registration | 20dc61e207f92dcbc88fb83707315e5b304238cd | [
"MIT"
] | null | null | null | backend/users/models.py | mnieber/django-graphql-registration | 20dc61e207f92dcbc88fb83707315e5b304238cd | [
"MIT"
] | null | null | null | backend/users/models.py | mnieber/django-graphql-registration | 20dc61e207f92dcbc88fb83707315e5b304238cd | [
"MIT"
] | null | null | null | import uuid
from django.conf import settings
from django.contrib.auth.models import (
AbstractBaseUser,
BaseUserManager,
PermissionsMixin,
)
from django.db import models
class UserManager(BaseUserManager):
    """Manager that knows how to create regular users and superusers."""

    def create_user(
        self, email, username, password, accepts_terms, terms_version_accepted
    ):
        """Create, save, and return a regular user.

        Raises ValueError when the email or username is missing, or when
        the terms of service were not accepted.
        """
        if not email:
            raise ValueError("Users must have an email address.")
        if not username:
            raise ValueError("Users must have a username.")
        if not accepts_terms:
            raise ValueError("Users must accept the terms.")
        new_user = self.model(
            email=self.normalize_email(email), username=username
        )
        new_user.accepts_terms = accepts_terms
        new_user.terms_version_accepted = terms_version_accepted
        new_user.set_password(password)
        new_user.save()  # using=self._db
        return new_user

    def create_superuser(self, email, password=None, **kwargs):
        """Create and return a superuser.

        The superuser is always named "super" and is recorded as having
        accepted the terms version currently configured in settings.
        """
        superuser = self.create_user(
            email,
            username="super",
            password=password,
            accepts_terms=True,
            terms_version_accepted=settings.TERMS_VERSION,
        )
        superuser.is_superuser = True
        superuser.is_staff = True
        superuser.save()  # using=self._db
        return superuser
class User(AbstractBaseUser, PermissionsMixin):
    # Custom user model: email is the login identifier, usernames are
    # unique display handles, and terms-of-service acceptance is recorded
    # per user.
    objects = UserManager()
    USERNAME_FIELD = "email"
    REQUIRED_FIELDS = []
    # Random UUID primary key instead of an auto-incrementing integer.
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    email = models.EmailField(max_length=255, unique=True)
    username = models.CharField(max_length=255, unique=True)
    is_staff = models.BooleanField(default=False)
    is_active = models.BooleanField("active", default=True)
    date_joined = models.DateTimeField("date joined", auto_now_add=True)
    # No default: callers must state explicitly whether the terms were
    # accepted (create_user enforces that this is truthy).
    accepts_terms = models.BooleanField()
    terms_version_accepted = models.CharField(max_length=10, default="1.0.0")

    def get_full_name(self):
        # The username doubles as the user's displayable full name.
        return self.username
| 32.836066 | 79 | 0.676985 |
aa52c24867ebe239b5837c7e9c7cee34c29db504 | 63,073 | py | Python | mi/instrument/teledyne/workhorse_monitor_150_khz/particles.py | rhan1498/marine-integrations | ad94c865e0e4cc7c8fd337870410c74b57d5c826 | [
"BSD-2-Clause"
] | null | null | null | mi/instrument/teledyne/workhorse_monitor_150_khz/particles.py | rhan1498/marine-integrations | ad94c865e0e4cc7c8fd337870410c74b57d5c826 | [
"BSD-2-Clause"
] | null | null | null | mi/instrument/teledyne/workhorse_monitor_150_khz/particles.py | rhan1498/marine-integrations | ad94c865e0e4cc7c8fd337870410c74b57d5c826 | [
"BSD-2-Clause"
] | null | null | null | """
@package mi.instrument.teledyne.workhorse_monitor_75_khz.particles
@file marine-integrations/mi/instrument/teledyne/workhorse_monitor_75_khz/driver.py
@author Roger Unwin
@brief Driver particle code for the teledyne 75_khz particles
Release notes:
"""
import re
from struct import *
import time as time
import datetime as dt
from mi.core.log import get_logger ; log = get_logger()
from mi.core.common import BaseEnum
from mi.instrument.teledyne.driver import NEWLINE
from mi.instrument.teledyne.driver import TIMEOUT
from mi.core.instrument.data_particle import DataParticle
from mi.core.instrument.data_particle import DataParticleKey
from mi.core.instrument.data_particle import CommonDataParticleType
from mi.core.exceptions import SampleException
#
# Particle Regex's'
#
ADCP_PD0_PARSED_REGEX = r'\x7f\x7f(..)' # .*
ADCP_PD0_PARSED_REGEX_MATCHER = re.compile(ADCP_PD0_PARSED_REGEX, re.DOTALL)
ADCP_SYSTEM_CONFIGURATION_REGEX = r'(Instrument S/N.*?)\>'
ADCP_SYSTEM_CONFIGURATION_REGEX_MATCHER = re.compile(ADCP_SYSTEM_CONFIGURATION_REGEX, re.DOTALL)
ADCP_COMPASS_CALIBRATION_REGEX = r'(ACTIVE FLUXGATE CALIBRATION MATRICES in NVRAM.*?)\>'
ADCP_COMPASS_CALIBRATION_REGEX_MATCHER = re.compile(ADCP_COMPASS_CALIBRATION_REGEX, re.DOTALL)
###############################################################################
# Data Particles
###############################################################################
class DataParticleType(BaseEnum):
    """Stream names for the data particles this driver can publish."""
    RAW = CommonDataParticleType.RAW
    # PD0 ensembles are published under one of two streams depending on
    # the coordinate transform the instrument was configured with.
    ADCP_PD0_PARSED_BEAM = 'adcp_pd0_beam_parsed'
    ADCP_PD0_PARSED_EARTH = 'adcp_pd0_earth_parsed'
    ADCP_SYSTEM_CONFIGURATION = 'adcp_system_configuration'
    ADCP_COMPASS_CALIBRATION = 'adcp_compass_calibration'
# ENGINEERING_PARAMETERS - NONE found.
class ADCP_PD0_PARSED_KEY(BaseEnum):
    """Particle value-IDs for every field decoded from a PD0 ensemble.

    The string values are the published wire names of the particle
    fields; they must never be changed without coordinating with the
    downstream data system.  Several values contain spelling mistakes
    ("adc_attitiude", "echo_intesity_*") which are preserved on purpose
    for exactly that reason.
    """
    # --- ensemble header ---
    HEADER_ID = "header_id"
    DATA_SOURCE_ID = "data_source_id"
    NUM_BYTES = "num_bytes"
    NUM_DATA_TYPES = "num_data_types"
    OFFSET_DATA_TYPES = "offset_data_types"
    # --- fixed leader ---
    FIXED_LEADER_ID = "fixed_leader_id"
    FIRMWARE_VERSION = "firmware_version"
    FIRMWARE_REVISION = "firmware_revision"
    SYSCONFIG_FREQUENCY = "sysconfig_frequency"
    SYSCONFIG_BEAM_PATTERN = "sysconfig_beam_pattern"
    SYSCONFIG_SENSOR_CONFIG = "sysconfig_sensor_config"
    SYSCONFIG_HEAD_ATTACHED = "sysconfig_head_attached"
    SYSCONFIG_VERTICAL_ORIENTATION = "sysconfig_vertical_orientation"
    DATA_FLAG = "data_flag"
    LAG_LENGTH = "lag_length"
    NUM_BEAMS = "num_beams"
    NUM_CELLS = "num_cells"
    PINGS_PER_ENSEMBLE = "pings_per_ensemble"
    DEPTH_CELL_LENGTH = "depth_cell_length"
    BLANK_AFTER_TRANSMIT = "blank_after_transmit"
    SIGNAL_PROCESSING_MODE = "signal_processing_mode"
    LOW_CORR_THRESHOLD = "low_corr_threshold"
    NUM_CODE_REPETITIONS = "num_code_repetitions"
    PERCENT_GOOD_MIN = "percent_good_min"
    ERROR_VEL_THRESHOLD = "error_vel_threshold"
    TIME_PER_PING_MINUTES = "time_per_ping_minutes"
    TIME_PER_PING_SECONDS = "time_per_ping_seconds"
    COORD_TRANSFORM_TYPE = "coord_transform_type"
    COORD_TRANSFORM_TILTS = "coord_transform_tilts"
    COORD_TRANSFORM_BEAMS = "coord_transform_beams"
    COORD_TRANSFORM_MAPPING = "coord_transform_mapping"
    HEADING_ALIGNMENT = "heading_alignment"
    HEADING_BIAS = "heading_bias"
    SENSOR_SOURCE_SPEED = "sensor_source_speed"
    SENSOR_SOURCE_DEPTH = "sensor_source_depth"
    SENSOR_SOURCE_HEADING = "sensor_source_heading"
    SENSOR_SOURCE_PITCH = "sensor_source_pitch"
    SENSOR_SOURCE_ROLL = "sensor_source_roll"
    SENSOR_SOURCE_CONDUCTIVITY = "sensor_source_conductivity"
    SENSOR_SOURCE_TEMPERATURE = "sensor_source_temperature"
    SENSOR_AVAILABLE_DEPTH = "sensor_available_depth"
    SENSOR_AVAILABLE_HEADING = "sensor_available_heading"
    SENSOR_AVAILABLE_PITCH = "sensor_available_pitch"
    SENSOR_AVAILABLE_ROLL = "sensor_available_roll"
    SENSOR_AVAILABLE_CONDUCTIVITY = "sensor_available_conductivity"
    SENSOR_AVAILABLE_TEMPERATURE = "sensor_available_temperature"
    BIN_1_DISTANCE = "bin_1_distance"
    TRANSMIT_PULSE_LENGTH = "transmit_pulse_length"
    REFERENCE_LAYER_START = "reference_layer_start"
    REFERENCE_LAYER_STOP = "reference_layer_stop"
    FALSE_TARGET_THRESHOLD = "false_target_threshold"
    LOW_LATENCY_TRIGGER = "low_latency_trigger"
    TRANSMIT_LAG_DISTANCE = "transmit_lag_distance"
    CPU_BOARD_SERIAL_NUMBER = "cpu_board_serial_number"
    SYSTEM_BANDWIDTH = "system_bandwidth"
    SYSTEM_POWER = "system_power"
    SERIAL_NUMBER = "serial_number"
    BEAM_ANGLE = "beam_angle"
    # --- variable leader ---
    VARIABLE_LEADER_ID = "variable_leader_id"
    ENSEMBLE_NUMBER = "ensemble_number"
    # NOTE(review): INTERNAL_TIMESTAMP is assigned again further below
    # (same value); one of the two duplicates should be removed.
    INTERNAL_TIMESTAMP = "internal_timestamp"
    ENSEMBLE_NUMBER_INCREMENT = "ensemble_number_increment"
    BIT_RESULT_DEMOD_1 = "bit_result_demod_1"
    BIT_RESULT_DEMOD_2 = "bit_result_demod_2"
    BIT_RESULT_TIMING = "bit_result_timing"
    SPEED_OF_SOUND = "speed_of_sound"
    TRANSDUCER_DEPTH = "transducer_depth"
    HEADING = "heading"
    PITCH = "pitch"
    ROLL = "roll"
    SALINITY = "salinity"
    TEMPERATURE = "temperature"
    MPT_MINUTES = "mpt_minutes"
    MPT_SECONDS = "mpt_seconds"
    HEADING_STDEV = "heading_stdev"
    PITCH_STDEV = "pitch_stdev"
    ROLL_STDEV = "roll_stdev"
    ADC_TRANSMIT_CURRENT = "adc_transmit_current"
    ADC_TRANSMIT_VOLTAGE = "adc_transmit_voltage"
    ADC_AMBIENT_TEMP = "adc_ambient_temp"
    ADC_PRESSURE_PLUS = "adc_pressure_plus"
    ADC_PRESSURE_MINUS = "adc_pressure_minus"
    ADC_ATTITUDE_TEMP = "adc_attitude_temp"
    ADC_ATTITUDE = "adc_attitiude"  # wire-name typo preserved (see class docstring)
    ADC_CONTAMINATION_SENSOR = "adc_contamination_sensor"
    # --- error status word bits ---
    BUS_ERROR_EXCEPTION = "bus_error_exception"
    ADDRESS_ERROR_EXCEPTION = "address_error_exception"
    ILLEGAL_INSTRUCTION_EXCEPTION = "illegal_instruction_exception"
    ZERO_DIVIDE_INSTRUCTION = "zero_divide_instruction"
    EMULATOR_EXCEPTION = "emulator_exception"
    UNASSIGNED_EXCEPTION = "unassigned_exception"
    WATCHDOG_RESTART_OCCURED = "watchdog_restart_occurred"
    BATTERY_SAVER_POWER = "battery_saver_power"
    PINGING = "pinging"
    COLD_WAKEUP_OCCURED = "cold_wakeup_occurred"
    UNKNOWN_WAKEUP_OCCURED = "unknown_wakeup_occurred"
    CLOCK_READ_ERROR = "clock_read_error"
    UNEXPECTED_ALARM = "unexpected_alarm"
    CLOCK_JUMP_FORWARD = "clock_jump_forward"
    CLOCK_JUMP_BACKWARD = "clock_jump_backward"
    POWER_FAIL = "power_fail"
    SPURIOUS_DSP_INTERRUPT = "spurious_dsp_interrupt"
    SPURIOUS_UART_INTERRUPT = "spurious_uart_interrupt"
    SPURIOUS_CLOCK_INTERRUPT = "spurious_clock_interrupt"
    LEVEL_7_INTERRUPT = "level_7_interrupt"
    ABSOLUTE_PRESSURE = "pressure"
    PRESSURE_VARIANCE = "pressure_variance"
    # NOTE(review): duplicate of the INTERNAL_TIMESTAMP assignment above.
    INTERNAL_TIMESTAMP = "internal_timestamp"
    # --- velocity data ---
    VELOCITY_DATA_ID = "velocity_data_id"
    BEAM_1_VELOCITY = "beam_1_velocity" # These may live in OOICORE driver as a extension
    BEAM_2_VELOCITY = "beam_2_velocity" # These may live in OOICORE driver as a extension
    BEAM_3_VELOCITY = "beam_3_velocity" # These may live in OOICORE driver as a extension
    BEAM_4_VELOCITY = "beam_4_velocity" # These may live in OOICORE driver as a extension
    WATER_VELOCITY_EAST = "water_velocity_east" # These may live in OOICORE driver as a extension
    WATER_VELOCITY_NORTH = "water_velocity_north" # These may live in OOICORE driver as a extension
    WATER_VELOCITY_UP = "water_velocity_up" # These may live in OOICORE driver as a extension
    ERROR_VELOCITY = "error_velocity" # These may live in OOICORE driver as a extension
    # --- correlation magnitude data ---
    CORRELATION_MAGNITUDE_ID = "correlation_magnitude_id"
    CORRELATION_MAGNITUDE_BEAM1 = "correlation_magnitude_beam1"
    CORRELATION_MAGNITUDE_BEAM2 = "correlation_magnitude_beam2"
    CORRELATION_MAGNITUDE_BEAM3 = "correlation_magnitude_beam3"
    CORRELATION_MAGNITUDE_BEAM4 = "correlation_magnitude_beam4"
    # --- echo intensity data ---
    ECHO_INTENSITY_ID = "echo_intensity_id"
    ECHO_INTENSITY_BEAM1 = "echo_intesity_beam1"  # wire-name typo preserved
    ECHO_INTENSITY_BEAM2 = "echo_intesity_beam2"  # wire-name typo preserved
    ECHO_INTENSITY_BEAM3 = "echo_intesity_beam3"  # wire-name typo preserved
    ECHO_INTENSITY_BEAM4 = "echo_intesity_beam4"  # wire-name typo preserved
    # --- percent good data ---
    PERCENT_GOOD_BEAM1 = "percent_good_beam1"# These may live in OOICORE driver as a extension
    PERCENT_GOOD_BEAM2 = "percent_good_beam2"# These may live in OOICORE driver as a extension
    PERCENT_GOOD_BEAM3 = "percent_good_beam3"# These may live in OOICORE driver as a extension
    PERCENT_GOOD_BEAM4 = "percent_good_beam4"# These may live in OOICORE driver as a extension
    PERCENT_GOOD_ID = "percent_good_id"
    PERCENT_GOOD_3BEAM = "percent_good_3beam"
    PERCENT_TRANSFORMS_REJECT = "percent_transforms_reject"
    PERCENT_BAD_BEAMS = "percent_bad_beams"
    PERCENT_GOOD_4BEAM = "percent_good_4beam"
    CHECKSUM = "checksum"
class ADCP_PD0_PARSED_DataParticle(DataParticle):
_data_particle_type = 'UNASSIGNED IN mi.instrument.teledyne.workhorse_monitor_75_khz.particles ADCP_PD0_PARSED_DataParticle' # DataParticleType.ADCP_PD0_PARSED_BEAM #
def _build_parsed_values(self):
"""
Parse the base portion of the particle
"""
log.debug("****** ADCP_PD0_PARSED_DataParticle._build_parsed_values ******")
if "[BREAK Wakeup A]" in self.raw_data:
raise SampleException("BREAK encountered, Seems someone is escaping autosample mode.")
self.final_result = []
length = unpack("H", self.raw_data[2:4])[0]
data = str(self.raw_data)
#
# Calculate Checksum
#
total = int(0)
for i in range(0, length):
total += int(ord(data[i]))
checksum = total & 65535 # bitwise and with 65535 or mod vs 65536
if checksum != unpack("H", self.raw_data[length: length+2])[0]:
log.debug("Checksum mismatch "+ str(checksum) + "!= " + str(unpack("H", self.raw_data[length: length+2])[0]))
raise SampleException("Checksum mismatch")
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.CHECKSUM,
DataParticleKey.VALUE: checksum})
(header_id, data_source_id, num_bytes, filler, num_data_types) = \
unpack('!BBHBB', self.raw_data[0:6])
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.HEADER_ID,
DataParticleKey.VALUE: header_id})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.DATA_SOURCE_ID,
DataParticleKey.VALUE: data_source_id})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.NUM_BYTES,
DataParticleKey.VALUE: num_bytes})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.NUM_DATA_TYPES,
DataParticleKey.VALUE: num_data_types})
offsets = []
for offset in range(0, num_data_types):
value = unpack('<H', self.raw_data[(2 * offset + 6):(2 * offset + 8)])[0]
offsets.append(value)
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.OFFSET_DATA_TYPES,
DataParticleKey.VALUE: offsets})
offsets.append(length - 2)
chunks = []
for offset in range(0, num_data_types):
chunks.append(self.raw_data[offsets[offset] : offsets[offset + 1] ])
variable_leader_id = unpack('!H', chunks[offset][0:2])[0]
if offset == 0:
self.parse_fixed_chunk(chunks[offset])
else:
if 32768 == variable_leader_id:
self.parse_variable_chunk(chunks[offset])
elif 1 == variable_leader_id:
self.parse_velocity_chunk(chunks[offset])
elif 2 == variable_leader_id:
self.parse_corelation_magnitude_chunk(chunks[offset])
elif 3 == variable_leader_id:
self.parse_echo_intensity_chunk(chunks[offset])
elif 4 == variable_leader_id:
self.parse_percent_good_chunk(chunks[offset])
return self.final_result
def parse_fixed_chunk(self, chunk):
"""
Parse the fixed portion of the particle
@throws SampleException If there is a problem with sample creation
"""
(fixed_leader_id, firmware_version, firmware_revision, sysconfig_frequency, data_flag, lag_length, num_beams, num_cells, pings_per_ensemble,
depth_cell_length, blank_after_transmit, signal_processing_mode, low_corr_threshold, num_code_repetitions, percent_good_min, error_vel_threshold,
time_per_ping_minutes, time_per_ping_seconds, time_per_ping_hundredths, coord_transform_type, heading_alignment, heading_bias, sensor_source,
sensor_available, bin_1_distance, transmit_pulse_length, reference_layer_start, reference_layer_stop, false_target_threshold,
low_latency_trigger, transmit_lag_distance, cpu_board_serial_number, system_bandwidth, system_power,
spare, serial_number, beam_angle) \
= unpack('!HBBHbBBBHHHBBBBHBBBBhhBBHHBBBBHQHBBIB', chunk[0:59])
if 0 != fixed_leader_id:
raise SampleException("fixed_leader_id was not equal to 0")
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.FIXED_LEADER_ID,
DataParticleKey.VALUE: fixed_leader_id})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.FIRMWARE_VERSION,
DataParticleKey.VALUE: firmware_version})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.FIRMWARE_REVISION,
DataParticleKey.VALUE: firmware_revision})
frequencies = [75, 150, 300, 600, 1200, 2400]
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.SYSCONFIG_FREQUENCY,
DataParticleKey.VALUE: frequencies[sysconfig_frequency & 0b00000111]})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.SYSCONFIG_BEAM_PATTERN,
DataParticleKey.VALUE: 1 if sysconfig_frequency & 0b00001000 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.SYSCONFIG_SENSOR_CONFIG,
DataParticleKey.VALUE: sysconfig_frequency & 0b00110000 >> 4})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.SYSCONFIG_HEAD_ATTACHED,
DataParticleKey.VALUE: 1 if sysconfig_frequency & 0b01000000 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.SYSCONFIG_VERTICAL_ORIENTATION,
DataParticleKey.VALUE: 1 if sysconfig_frequency & 0b10000000 else 0})
if 0 != data_flag:
raise SampleException("data_flag was not equal to 0")
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.DATA_FLAG,
DataParticleKey.VALUE: data_flag})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.LAG_LENGTH,
DataParticleKey.VALUE: lag_length})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.NUM_BEAMS,
DataParticleKey.VALUE: num_beams})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.NUM_CELLS,
DataParticleKey.VALUE: num_cells})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.PINGS_PER_ENSEMBLE,
DataParticleKey.VALUE: pings_per_ensemble})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.DEPTH_CELL_LENGTH,
DataParticleKey.VALUE: depth_cell_length})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.BLANK_AFTER_TRANSMIT,
DataParticleKey.VALUE: blank_after_transmit})
if 1 != signal_processing_mode:
raise SampleException("signal_processing_mode was not equal to 1")
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.SIGNAL_PROCESSING_MODE,
DataParticleKey.VALUE: signal_processing_mode})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.LOW_CORR_THRESHOLD,
DataParticleKey.VALUE: low_corr_threshold})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.NUM_CODE_REPETITIONS,
DataParticleKey.VALUE: num_code_repetitions})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.PERCENT_GOOD_MIN,
DataParticleKey.VALUE: percent_good_min})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.ERROR_VEL_THRESHOLD,
DataParticleKey.VALUE: error_vel_threshold})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.TIME_PER_PING_MINUTES,
DataParticleKey.VALUE: time_per_ping_minutes})
tpp_float_seconds = float(time_per_ping_seconds + (time_per_ping_hundredths/100))
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.TIME_PER_PING_SECONDS,
DataParticleKey.VALUE: tpp_float_seconds})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.COORD_TRANSFORM_TYPE,
DataParticleKey.VALUE: coord_transform_type & 0b00011000 >> 3})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.COORD_TRANSFORM_TILTS,
DataParticleKey.VALUE: 1 if coord_transform_type & 0b00000100 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.COORD_TRANSFORM_BEAMS,
DataParticleKey.VALUE: 1 if coord_transform_type & 0b0000000 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.COORD_TRANSFORM_MAPPING,
DataParticleKey.VALUE: 1 if coord_transform_type & 0b00000001 else 0})
# lame, but expedient - mask off un-needed bits
self.coord_transform_type = (coord_transform_type & 0b00011000) >> 3
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.HEADING_ALIGNMENT,
DataParticleKey.VALUE: heading_alignment})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.HEADING_BIAS,
DataParticleKey.VALUE: heading_bias})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.SENSOR_SOURCE_SPEED,
DataParticleKey.VALUE: 1 if sensor_source & 0b01000000 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.SENSOR_SOURCE_DEPTH,
DataParticleKey.VALUE: 1 if sensor_source & 0b00100000 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.SENSOR_SOURCE_HEADING,
DataParticleKey.VALUE: 1 if sensor_source & 0b00010000 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.SENSOR_SOURCE_PITCH,
DataParticleKey.VALUE: 1 if sensor_source & 0b00001000 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.SENSOR_SOURCE_ROLL,
DataParticleKey.VALUE: 1 if sensor_source & 0b00000100 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.SENSOR_SOURCE_CONDUCTIVITY,
DataParticleKey.VALUE: 1 if sensor_source & 0b00000010 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.SENSOR_SOURCE_TEMPERATURE,
DataParticleKey.VALUE: 1 if sensor_source & 0b00000001 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.SENSOR_AVAILABLE_DEPTH,
DataParticleKey.VALUE: 1 if sensor_available & 0b00100000 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.SENSOR_AVAILABLE_HEADING,
DataParticleKey.VALUE: 1 if sensor_available & 0b00010000 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.SENSOR_AVAILABLE_PITCH,
DataParticleKey.VALUE: 1 if sensor_available & 0b00001000 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.SENSOR_AVAILABLE_ROLL,
DataParticleKey.VALUE: 1 if sensor_available & 0b00000100 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.SENSOR_AVAILABLE_CONDUCTIVITY,
DataParticleKey.VALUE: 1 if sensor_available & 0b00000010 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.SENSOR_AVAILABLE_TEMPERATURE,
DataParticleKey.VALUE: 1 if sensor_available & 0b00000001 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.BIN_1_DISTANCE,
DataParticleKey.VALUE: bin_1_distance})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.TRANSMIT_PULSE_LENGTH,
DataParticleKey.VALUE: transmit_pulse_length})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.REFERENCE_LAYER_START,
DataParticleKey.VALUE: reference_layer_start})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.REFERENCE_LAYER_STOP,
DataParticleKey.VALUE: reference_layer_stop})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.FALSE_TARGET_THRESHOLD,
DataParticleKey.VALUE: false_target_threshold})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.LOW_LATENCY_TRIGGER,
DataParticleKey.VALUE: low_latency_trigger})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.TRANSMIT_LAG_DISTANCE,
DataParticleKey.VALUE: transmit_lag_distance})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.CPU_BOARD_SERIAL_NUMBER,
DataParticleKey.VALUE: cpu_board_serial_number})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.SYSTEM_BANDWIDTH,
DataParticleKey.VALUE: system_bandwidth})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.SYSTEM_POWER,
DataParticleKey.VALUE: system_power})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.SERIAL_NUMBER,
DataParticleKey.VALUE: serial_number})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.BEAM_ANGLE,
DataParticleKey.VALUE: beam_angle})
def parse_variable_chunk(self, chunk):
"""
Parse the variable portion of the particle
@throws SampleException If there is a problem with sample creation
"""
rtc = {}
rtc2k = {}
(variable_leader_id, ensemble_number,
rtc['year'], rtc['month'], rtc['day'], rtc['hour'], rtc['minute'], rtc['second'], rtc['hundredths'],
ensemble_number_increment,
error_bit_field, reserved_error_bit_field, speed_of_sound, transducer_depth,
heading, pitch, roll, salinity, temperature,
mpt_minutes, mpt_seconds_component, mpt_hundredths_component,
heading_stdev, pitch_stdev, roll_stdev,
adc_transmit_current, adc_transmit_voltage, adc_ambient_temp, adc_pressure_plus,
adc_pressure_minus, adc_attitude_temp, adc_attitiude, adc_contamination_sensor,
error_status_word_1, error_status_word_2, error_status_word_3, error_status_word_4,
RESERVED1, RESERVED2, pressure, RESERVED3, pressure_variance,
rtc2k['century'], rtc2k['year'], rtc2k['month'], rtc2k['day'], rtc2k['hour'], rtc2k['minute'], rtc2k['second'], rtc2k['hundredths']) \
= unpack('<HHBBBBBBBBBBHHHhhHhBBBBBBBBBBBBBBBBBBBBLBLBBBBBBBB', chunk[0:65])
if 128 != variable_leader_id:
raise SampleException("variable_leader_id was not equal to 128")
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.VARIABLE_LEADER_ID,
DataParticleKey.VALUE: variable_leader_id})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.ENSEMBLE_NUMBER,
DataParticleKey.VALUE: ensemble_number})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.ENSEMBLE_NUMBER_INCREMENT,
DataParticleKey.VALUE: ensemble_number_increment})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.BIT_RESULT_DEMOD_1,
DataParticleKey.VALUE: 1 if error_bit_field & 0b00001000 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.BIT_RESULT_DEMOD_2,
DataParticleKey.VALUE: 1 if error_bit_field & 0b00010000 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.BIT_RESULT_TIMING,
DataParticleKey.VALUE: 1 if error_bit_field & 0b00000010 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.SPEED_OF_SOUND,
DataParticleKey.VALUE: speed_of_sound})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.TRANSDUCER_DEPTH,
DataParticleKey.VALUE: transducer_depth})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.HEADING,
DataParticleKey.VALUE: heading})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.PITCH,
DataParticleKey.VALUE: pitch})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.ROLL,
DataParticleKey.VALUE: roll})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.SALINITY,
DataParticleKey.VALUE: salinity})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.TEMPERATURE,
DataParticleKey.VALUE: temperature})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.MPT_MINUTES,
DataParticleKey.VALUE: mpt_minutes})
mpt_seconds = float(mpt_seconds_component + (mpt_hundredths_component/100))
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.MPT_SECONDS,
DataParticleKey.VALUE: mpt_seconds})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.HEADING_STDEV,
DataParticleKey.VALUE: heading_stdev})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.PITCH_STDEV,
DataParticleKey.VALUE: pitch_stdev})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.ROLL_STDEV,
DataParticleKey.VALUE: roll_stdev})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.ADC_TRANSMIT_CURRENT,
DataParticleKey.VALUE: adc_transmit_current})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.ADC_TRANSMIT_VOLTAGE,
DataParticleKey.VALUE: adc_transmit_voltage})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.ADC_AMBIENT_TEMP,
DataParticleKey.VALUE: adc_ambient_temp})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.ADC_PRESSURE_PLUS,
DataParticleKey.VALUE: adc_pressure_plus})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.ADC_PRESSURE_MINUS,
DataParticleKey.VALUE: adc_pressure_minus})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.ADC_ATTITUDE_TEMP,
DataParticleKey.VALUE: adc_attitude_temp})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.ADC_ATTITUDE,
DataParticleKey.VALUE: adc_attitiude})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.ADC_CONTAMINATION_SENSOR,
DataParticleKey.VALUE: adc_contamination_sensor})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.BUS_ERROR_EXCEPTION,
DataParticleKey.VALUE: 1 if error_status_word_1 & 0b00000001 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.ADDRESS_ERROR_EXCEPTION,
DataParticleKey.VALUE: 1 if error_status_word_1 & 0b00000010 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.ILLEGAL_INSTRUCTION_EXCEPTION,
DataParticleKey.VALUE: 1 if error_status_word_1 & 0b00000100 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.ZERO_DIVIDE_INSTRUCTION,
DataParticleKey.VALUE: 1 if error_status_word_1 & 0b00001000 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.EMULATOR_EXCEPTION,
DataParticleKey.VALUE: 1 if error_status_word_1 & 0b00010000 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.UNASSIGNED_EXCEPTION,
DataParticleKey.VALUE: 1 if error_status_word_1 & 0b00100000 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.WATCHDOG_RESTART_OCCURED,
DataParticleKey.VALUE: 1 if error_status_word_1 & 0b01000000 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.BATTERY_SAVER_POWER,
DataParticleKey.VALUE: 1 if error_status_word_1 & 0b10000000 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.PINGING,
DataParticleKey.VALUE: 1 if error_status_word_1 & 0b00000001 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.COLD_WAKEUP_OCCURED,
DataParticleKey.VALUE: 1 if error_status_word_1 & 0b01000000 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.UNKNOWN_WAKEUP_OCCURED,
DataParticleKey.VALUE: 1 if error_status_word_1 & 0b10000000 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.CLOCK_READ_ERROR,
DataParticleKey.VALUE: 1 if error_status_word_3 & 0b00000001 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.UNEXPECTED_ALARM,
DataParticleKey.VALUE: 1 if error_status_word_3 & 0b00000010 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.CLOCK_JUMP_FORWARD,
DataParticleKey.VALUE: 1 if error_status_word_3 & 0b00000100 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.CLOCK_JUMP_BACKWARD,
DataParticleKey.VALUE: 1 if error_status_word_3 & 0b00001000 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.POWER_FAIL,
DataParticleKey.VALUE: 1 if error_status_word_4 & 0b00001000 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.SPURIOUS_DSP_INTERRUPT,
DataParticleKey.VALUE: 1 if error_status_word_4 & 0b00010000 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.SPURIOUS_UART_INTERRUPT,
DataParticleKey.VALUE: 1 if error_status_word_4 & 0b00100000 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.SPURIOUS_CLOCK_INTERRUPT,
DataParticleKey.VALUE: 1 if error_status_word_4 & 0b01000000 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.LEVEL_7_INTERRUPT,
DataParticleKey.VALUE: 1 if error_status_word_4 & 0b10000000 else 0})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.ABSOLUTE_PRESSURE,
DataParticleKey.VALUE: pressure})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.PRESSURE_VARIANCE,
DataParticleKey.VALUE: pressure_variance})
dts = dt.datetime(rtc2k['century'] * 100 + rtc2k['year'],
rtc2k['month'],
rtc2k['day'],
rtc2k['hour'],
rtc2k['minute'],
rtc2k['second'])
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.INTERNAL_TIMESTAMP,
DataParticleKey.VALUE: time.mktime(dts.timetuple()) + (rtc2k['second'] / 100.0)})
def parse_velocity_chunk(self, chunk):
"""
Parse the velocity portion of the particle
@throws SampleException If there is a problem with sample creation
"""
N = (len(chunk) - 2) / 2 /4
offset = 0
velocity_data_id = unpack("!H", chunk[0:2])[0]
if 1 != velocity_data_id:
raise SampleException("velocity_data_id was not equal to 1")
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.VELOCITY_DATA_ID,
DataParticleKey.VALUE: velocity_data_id})
if 0 == self.coord_transform_type: # BEAM Coordinates
self._data_particle_type = DataParticleType.ADCP_PD0_PARSED_BEAM
beam_1_velocity = []
beam_2_velocity = []
beam_3_velocity = []
beam_4_velocity = []
for row in range (1, N):
(a,b,c,d) = unpack('!HHHH', chunk[offset + 2: offset + 10])
beam_1_velocity.append(a)
beam_2_velocity.append(b)
beam_3_velocity.append(c)
beam_4_velocity.append(d)
offset += 4 * 2
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.BEAM_1_VELOCITY,
DataParticleKey.VALUE: beam_1_velocity})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.BEAM_2_VELOCITY,
DataParticleKey.VALUE: beam_2_velocity})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.BEAM_3_VELOCITY,
DataParticleKey.VALUE: beam_3_velocity})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.BEAM_4_VELOCITY,
DataParticleKey.VALUE: beam_4_velocity})
elif 3 == self.coord_transform_type: # Earth Coordinates
self._data_particle_type = DataParticleType.ADCP_PD0_PARSED_EARTH
water_velocity_east = []
water_velocity_north = []
water_velocity_up = []
error_velocity = []
for row in range (1, N):
(a,b,c,d) = unpack('!HHHH', chunk[offset + 2: offset + 10])
water_velocity_east.append(a)
water_velocity_north.append(b)
water_velocity_up.append(c)
error_velocity.append(d)
offset += 4 * 2
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.WATER_VELOCITY_EAST,
DataParticleKey.VALUE: water_velocity_east})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.WATER_VELOCITY_NORTH,
DataParticleKey.VALUE: water_velocity_north})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.WATER_VELOCITY_UP,
DataParticleKey.VALUE: water_velocity_up})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.ERROR_VELOCITY,
DataParticleKey.VALUE: error_velocity})
else:
raise SampleException("coord_transform_type not coded for. " + str(self.coord_transform_type))
def parse_corelation_magnitude_chunk(self, chunk):
"""
Parse the corelation magnitude portion of the particle
@throws SampleException If there is a problem with sample creation
"""
N = (len(chunk) - 2) / 2 /4
offset = 0
correlation_magnitude_id = unpack("!H", chunk[0:2])[0]
if 2 != correlation_magnitude_id:
raise SampleException("correlation_magnitude_id was not equal to 2")
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.CORRELATION_MAGNITUDE_ID,
DataParticleKey.VALUE: correlation_magnitude_id})
correlation_magnitude_beam1 = []
correlation_magnitude_beam2 = []
correlation_magnitude_beam3 = []
correlation_magnitude_beam4 = []
for row in range (1, N):
(a, b, c, d) = unpack('!HHHH', chunk[offset + 2: offset + 10])
correlation_magnitude_beam1.append(a)
correlation_magnitude_beam2.append(b)
correlation_magnitude_beam3.append(c)
correlation_magnitude_beam4.append(d)
offset += 4 * 2
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.CORRELATION_MAGNITUDE_BEAM1,
DataParticleKey.VALUE: correlation_magnitude_beam1})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.CORRELATION_MAGNITUDE_BEAM2,
DataParticleKey.VALUE: correlation_magnitude_beam2})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.CORRELATION_MAGNITUDE_BEAM3,
DataParticleKey.VALUE: correlation_magnitude_beam3})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.CORRELATION_MAGNITUDE_BEAM4,
DataParticleKey.VALUE: correlation_magnitude_beam4})
def parse_echo_intensity_chunk(self, chunk):
"""
Parse the echo intensity portion of the particle
@throws SampleException If there is a problem with sample creation
"""
N = (len(chunk) - 2) / 2 /4
offset = 0
echo_intensity_id = unpack("!H", chunk[0:2])[0]
if 3 != echo_intensity_id:
raise SampleException("echo_intensity_id was not equal to 3")
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.ECHO_INTENSITY_ID,
DataParticleKey.VALUE: echo_intensity_id})
echo_intesity_beam1 = []
echo_intesity_beam2 = []
echo_intesity_beam3 = []
echo_intesity_beam4 = []
for row in range (1, N):
(a, b, c, d) = unpack('!HHHH', chunk[offset + 2: offset + 10])
echo_intesity_beam1.append(a)
echo_intesity_beam2.append(b)
echo_intesity_beam3.append(c)
echo_intesity_beam4.append(d)
offset += 4 * 2
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.ECHO_INTENSITY_BEAM1,
DataParticleKey.VALUE: echo_intesity_beam1})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.ECHO_INTENSITY_BEAM2,
DataParticleKey.VALUE: echo_intesity_beam2})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.ECHO_INTENSITY_BEAM3,
DataParticleKey.VALUE: echo_intesity_beam3})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.ECHO_INTENSITY_BEAM4,
DataParticleKey.VALUE: echo_intesity_beam4})
def parse_percent_good_chunk(self, chunk):
"""
Parse the percent good portion of the particle
@throws SampleException If there is a problem with sample creation
"""
N = (len(chunk) - 2) / 2 /4
offset = 0
# coord_transform_type
# Coordinate Transformation type:
# 0 = None (Beam), 1 = Instrument, 2 = Ship, 3 = Earth.
percent_good_id = unpack("!H", chunk[0:2])[0]
if 4 != percent_good_id:
raise SampleException("percent_good_id was not equal to 4")
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.PERCENT_GOOD_ID,
DataParticleKey.VALUE: percent_good_id})
if 0 == self.coord_transform_type: # BEAM Coordinates
self._data_particle_type = DataParticleType.ADCP_PD0_PARSED_BEAM
percent_good_beam1 = []
percent_good_beam2 = []
percent_good_beam3 = []
percent_good_beam4 = []
for row in range (1, N):
(a,b,c,d) = unpack('!HHHH', chunk[offset + 2: offset + 10])
percent_good_beam1.append(a)
percent_good_beam2.append(b)
percent_good_beam3.append(c)
percent_good_beam4.append(d)
offset += 4 * 2
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.PERCENT_GOOD_BEAM1,
DataParticleKey.VALUE: percent_good_beam1})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.PERCENT_GOOD_BEAM2,
DataParticleKey.VALUE: percent_good_beam2})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.PERCENT_GOOD_BEAM3,
DataParticleKey.VALUE: percent_good_beam3})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.PERCENT_GOOD_BEAM4,
DataParticleKey.VALUE: percent_good_beam4})
elif 3 == self.coord_transform_type: # Earth Coordinates
self._data_particle_type = DataParticleType.ADCP_PD0_PARSED_EARTH
percent_good_3beam = []
percent_transforms_reject = []
percent_bad_beams = []
percent_good_4beam = []
for row in range (1, N):
(a,b,c,d) = unpack('!HHHH', chunk[offset + 2: offset + 10])
percent_good_3beam.append(a)
percent_transforms_reject.append(b)
percent_bad_beams.append(c)
percent_good_4beam.append(d)
offset += 4 * 2
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.PERCENT_GOOD_3BEAM,
DataParticleKey.VALUE: percent_good_3beam})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.PERCENT_TRANSFORMS_REJECT,
DataParticleKey.VALUE: percent_transforms_reject})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.PERCENT_BAD_BEAMS,
DataParticleKey.VALUE: percent_bad_beams})
self.final_result.append({DataParticleKey.VALUE_ID: ADCP_PD0_PARSED_KEY.PERCENT_GOOD_4BEAM,
DataParticleKey.VALUE: percent_good_4beam})
else:
raise SampleException("1 coord_transform_type not coded for." + str(self.coord_transform_type))
class ADCP_SYSTEM_CONFIGURATION_KEY(BaseEnum):
    """Field names for the particle built from the PS0 (system
    configuration) instrument response."""
    # https://confluence.oceanobservatories.org/display/instruments/ADCP+Driver
    # from PS0
    SERIAL_NUMBER = "serial_number"
    TRANSDUCER_FREQUENCY = "transducer_frequency"
    CONFIGURATION = "configuration"
    MATCH_LAYER = "match_layer"
    BEAM_ANGLE = "beam_angle"
    BEAM_PATTERN = "beam_pattern"
    ORIENTATION = "orientation"
    SENSORS = "sensors"
    # Pressure coefficients are intentionally disabled; note that
    # BROKE_ADCP_SYSTEM_CONFIGURATION_DataParticle still references them.
    #PRESSURE_COEFF_c3 = "pressure_coeff_c3"
    #PRESSURE_COEFF_c2 = "pressure_coeff_c2"
    #PRESSURE_COEFF_c1 = "pressure_coeff_c1"
    #PRESSURE_COEFF_OFFSET = "pressure_coeff_offset"
    TEMPERATURE_SENSOR_OFFSET = "temperature_sensor_offset"
    CPU_FIRMWARE = "cpu_firmware"
    BOOT_CODE_REQUIRED = "boot_code_required"
    BOOT_CODE_ACTUAL = "boot_code_actual"
    DEMOD_1_VERSION = "demod_1_version"
    DEMOD_1_TYPE = "demod_1_type"
    DEMOD_2_VERSION = "demod_2_version"
    DEMOD_2_TYPE = "demod_2_type"
    POWER_TIMING_VERSION = "power_timing_version"
    POWER_TIMING_TYPE = "power_timing_type"
    BOARD_SERIAL_NUMBERS = "board_serial_numbers"
class ADCP_SYSTEM_CONFIGURATION_DataParticle(DataParticle):
    """Particle built by scraping the fixed-layout PS0 (system
    configuration) text response, one regex per expected line."""
    _data_particle_type = DataParticleType.ADCP_SYSTEM_CONFIGURATION

    # One pre-compiled pattern per PS0 response line; the numeric suffix
    # is the expected (0-based) line index within the response.
    RE00 = re.compile(r'Instrument S/N: +(\d+)')
    RE01 = re.compile(r' Frequency: +(\d+) HZ')
    RE02 = re.compile(r' Configuration: +([a-zA-Z0-9, ]+)')
    RE03 = re.compile(r' Match Layer: +(\d+)')
    RE04 = re.compile(r' Beam Angle: ([0-9.]+) DEGREES')
    RE05 = re.compile(r' Beam Pattern: ([a-zA-Z]+)')
    RE06 = re.compile(r' Orientation: ([a-zA-Z]+)')
    RE07 = re.compile(r' Sensor\(s\): ([a-zA-Z0-9 ]+)')
    RE14 = re.compile(r'Temp Sens Offset: +([\+\-0-9.]+) degrees C')
    RE16 = re.compile(r' CPU Firmware: ([0-9.\[\] ]+)')
    RE17 = re.compile(r' Boot Code Ver: Required: +([0-9.]+) +Actual: +([0-9.]+)')
    RE18 = re.compile(r' DEMOD #1 Ver: +([a-zA-Z0-9]+), Type: +([a-zA-Z0-9]+)')
    RE19 = re.compile(r' DEMOD #2 Ver: +([a-zA-Z0-9]+), Type: +([a-zA-Z0-9]+)')
    RE20 = re.compile(r' PWRTIMG Ver: +([a-zA-Z0-9]+), Type: +([a-zA-Z0-9]+)')
    RE23 = re.compile(r' +([0-9a-zA-Z\- ]+)')
    RE24 = re.compile(r' +([0-9a-zA-Z\- ]+)')
    RE25 = re.compile(r' +([0-9a-zA-Z\- ]+)')
    RE26 = re.compile(r' +([0-9a-zA-Z\- ]+)')

    def _build_parsed_values(self):
        """Split self.raw_data on NEWLINE, match each expected line with
        its regex, and return the matches as a list of
        {VALUE_ID, VALUE} dicts.

        Best effort: any parse failure (e.g. a regex not matching, so
        match.group raises AttributeError) is logged and the fields
        collected so far are returned.
        """
        # Initialize
        matches = {}
        try:
            lines = self.raw_data.split(NEWLINE)

            match = self.RE00.match(lines[0])
            matches[ADCP_SYSTEM_CONFIGURATION_KEY.SERIAL_NUMBER] = match.group(1)
            match = self.RE01.match(lines[1])
            matches[ADCP_SYSTEM_CONFIGURATION_KEY.TRANSDUCER_FREQUENCY] = int(match.group(1))
            match = self.RE02.match(lines[2])
            matches[ADCP_SYSTEM_CONFIGURATION_KEY.CONFIGURATION] = match.group(1)
            match = self.RE03.match(lines[3])
            matches[ADCP_SYSTEM_CONFIGURATION_KEY.MATCH_LAYER] = match.group(1)
            match = self.RE04.match(lines[4])
            matches[ADCP_SYSTEM_CONFIGURATION_KEY.BEAM_ANGLE] = int(match.group(1))
            match = self.RE05.match(lines[5])
            matches[ADCP_SYSTEM_CONFIGURATION_KEY.BEAM_PATTERN] = match.group(1)
            match = self.RE06.match(lines[6])
            matches[ADCP_SYSTEM_CONFIGURATION_KEY.ORIENTATION] = match.group(1)
            match = self.RE07.match(lines[7])
            matches[ADCP_SYSTEM_CONFIGURATION_KEY.SENSORS] = match.group(1)
            match = self.RE14.match(lines[8])
            matches[ADCP_SYSTEM_CONFIGURATION_KEY.TEMPERATURE_SENSOR_OFFSET] = float(match.group(1))
            # line 9 is skipped (blank/unused in the PS0 layout).
            match = self.RE16.match(lines[10])
            matches[ADCP_SYSTEM_CONFIGURATION_KEY.CPU_FIRMWARE] = match.group(1)
            match = self.RE17.match(lines[11])
            matches[ADCP_SYSTEM_CONFIGURATION_KEY.BOOT_CODE_REQUIRED] = match.group(1)
            matches[ADCP_SYSTEM_CONFIGURATION_KEY.BOOT_CODE_ACTUAL] = match.group(2)
            match = self.RE18.match(lines[12])
            matches[ADCP_SYSTEM_CONFIGURATION_KEY.DEMOD_1_VERSION] = match.group(1)
            matches[ADCP_SYSTEM_CONFIGURATION_KEY.DEMOD_1_TYPE] = match.group(2)
            match = self.RE19.match(lines[13])
            matches[ADCP_SYSTEM_CONFIGURATION_KEY.DEMOD_2_VERSION] = match.group(1)
            matches[ADCP_SYSTEM_CONFIGURATION_KEY.DEMOD_2_TYPE] = match.group(2)
            match = self.RE20.match(lines[14])
            matches[ADCP_SYSTEM_CONFIGURATION_KEY.POWER_TIMING_VERSION] = match.group(1)
            matches[ADCP_SYSTEM_CONFIGURATION_KEY.POWER_TIMING_TYPE] = match.group(2)
            # Lines 17-20 hold the four board serial numbers; the first
            # three are newline-joined, the last is appended bare.
            match = self.RE23.match(lines[17])
            matches[ADCP_SYSTEM_CONFIGURATION_KEY.BOARD_SERIAL_NUMBERS] = str(match.group(1)) + "\n"
            match = self.RE24.match(lines[18])
            matches[ADCP_SYSTEM_CONFIGURATION_KEY.BOARD_SERIAL_NUMBERS] += str(match.group(1)) + "\n"
            match = self.RE25.match(lines[19])
            matches[ADCP_SYSTEM_CONFIGURATION_KEY.BOARD_SERIAL_NUMBERS] += str(match.group(1)) + "\n"
            match = self.RE26.match(lines[20])
            matches[ADCP_SYSTEM_CONFIGURATION_KEY.BOARD_SERIAL_NUMBERS] += str(match.group(1))
        except Exception as e:
            log.error("EXCEPTION WAS !!!! " + str(e))

        result = []
        # .items() works on both Python 2 and Python 3; the original
        # .iteritems() is Python-2-only.
        for (key, value) in matches.items():
            result.append({DataParticleKey.VALUE_ID: key,
                           DataParticleKey.VALUE: value})
        return result
class BROKE_ADCP_SYSTEM_CONFIGURATION_DataParticle(DataParticle):
    """Abandoned ("BROKE") variant of ADCP_SYSTEM_CONFIGURATION_DataParticle
    that also tries to scrape the pressure coefficients (c1/c2/c3/offset).

    NOTE(review): the PRESSURE_COEFF_* attributes it stores under are
    commented out on ADCP_SYSTEM_CONFIGURATION_KEY, so those assignments
    raise AttributeError at runtime (swallowed by the blanket except
    below).  Kept for reference; prefer the non-BROKE class.
    """
    _data_particle_type = DataParticleType.ADCP_SYSTEM_CONFIGURATION

    # One pre-compiled pattern per PS0 response line.
    RE00 = re.compile(r'Instrument S/N: +(\d+)')
    RE01 = re.compile(r' Frequency: +(\d+) HZ')
    RE02 = re.compile(r' Configuration: +([a-zA-Z0-9, ]+)')
    RE03 = re.compile(r' Match Layer: +(\d+)')
    RE04 = re.compile(r' Beam Angle: ([0-9.]+) DEGREES')
    RE05 = re.compile(r' Beam Pattern: ([a-zA-Z]+)')
    RE06 = re.compile(r' Orientation: ([a-zA-Z]+)')
    RE07 = re.compile(r' Sensor\(s\): ([a-zA-Z0-9 ]+)')
    RE09 = re.compile(r' c3 = ([\+\-0-9.E]+)')
    RE10 = re.compile(r' c2 = ([\+\-0-9.E]+)')
    RE11 = re.compile(r' c1 = ([\+\-0-9.E]+)')
    RE12 = re.compile(r' Offset = ([\+\-0-9.E]+)')
    RE14 = re.compile(r'Temp Sens Offset: +([\+\-0-9.]+) degrees C')
    RE16 = re.compile(r' CPU Firmware: ([0-9.\[\] ]+)')
    RE17 = re.compile(r' +Boot Code Ver: +Required: +([0-9.]+) +Actual: +([0-9.]+)')
    RE18 = re.compile(r' DEMOD #1 Ver: +([a-zA-Z0-9]+), Type: +([a-zA-Z0-9]+)')
    RE19 = re.compile(r' DEMOD #2 Ver: +([a-zA-Z0-9]+), Type: +([a-zA-Z0-9]+)')
    RE20 = re.compile(r' PWRTIMG Ver: +([a-zA-Z0-9]+), Type: +([a-zA-Z0-9]+)')
    RE23 = re.compile(r' +([0-9a-zA-Z\- ]+)')
    RE24 = re.compile(r' +([0-9a-zA-Z\- ]+)')
    RE25 = re.compile(r' +([0-9a-zA-Z\- ]+)')
    RE26 = re.compile(r' +([0-9a-zA-Z\- ]+)')

    def _build_parsed_values(self):
        """Walk self.raw_data line by line, matching each expected line and
        collecting fields; failures are logged and a partial result is
        returned (best effort)."""
        # Initialize
        log.error("in ADCP_SYSTEM_CONFIGURATION_DataParticle _build_parsed_values")
        matches = {}
        try:
            lines = self.raw_data.split(NEWLINE)
            line_num = 0
            log.error("LINE = " + repr(lines[0]))
            match = self.RE00.match(lines[line_num])
            if match:
                matches[ADCP_SYSTEM_CONFIGURATION_KEY.SERIAL_NUMBER] = match.group(1)

            line_num += 1
            match = self.RE01.match(lines[line_num])
            if match:
                matches[ADCP_SYSTEM_CONFIGURATION_KEY.TRANSDUCER_FREQUENCY] = int(match.group(1))

            line_num += 1
            match = self.RE02.match(lines[line_num])
            if match:
                matches[ADCP_SYSTEM_CONFIGURATION_KEY.CONFIGURATION] = match.group(1)

            line_num += 1
            match = self.RE03.match(lines[line_num])
            if match:
                matches[ADCP_SYSTEM_CONFIGURATION_KEY.MATCH_LAYER] = match.group(1)

            line_num += 1
            match = self.RE04.match(lines[line_num])
            if match:
                matches[ADCP_SYSTEM_CONFIGURATION_KEY.BEAM_ANGLE] = int(match.group(1))

            line_num += 1
            match = self.RE05.match(lines[line_num])
            if match:
                matches[ADCP_SYSTEM_CONFIGURATION_KEY.BEAM_PATTERN] = match.group(1)

            line_num += 1
            match = self.RE06.match(lines[line_num])
            if match:
                matches[ADCP_SYSTEM_CONFIGURATION_KEY.ORIENTATION] = match.group(1)

            line_num += 1
            match = self.RE07.match(lines[line_num])
            if match:
                matches[ADCP_SYSTEM_CONFIGURATION_KEY.SENSORS] = match.group(1)

            # BUG FIX: the original used match.group(1) here without a
            # None guard, crashing whenever the c3 line did not match.
            line_num += 1
            match = self.RE09.match(lines[line_num])
            if match:
                matches[ADCP_SYSTEM_CONFIGURATION_KEY.PRESSURE_COEFF_c3] = float(match.group(1))

            line_num += 1
            match = self.RE10.match(lines[line_num])
            if match:
                matches[ADCP_SYSTEM_CONFIGURATION_KEY.PRESSURE_COEFF_c2] = float(match.group(1))

            line_num += 1
            match = self.RE11.match(lines[line_num])
            if match:
                matches[ADCP_SYSTEM_CONFIGURATION_KEY.PRESSURE_COEFF_c1] = float(match.group(1))

            line_num += 1
            match = self.RE12.match(lines[line_num])
            if match:
                matches[ADCP_SYSTEM_CONFIGURATION_KEY.PRESSURE_COEFF_OFFSET] = float(match.group(1))

            line_num += 2  # skip one blank/unused line
            match = self.RE14.match(lines[line_num])
            if match:
                matches[ADCP_SYSTEM_CONFIGURATION_KEY.TEMPERATURE_SENSOR_OFFSET] = float(match.group(1))

            line_num += 2  # skip one blank/unused line
            match = self.RE16.match(lines[line_num])
            if match:
                matches[ADCP_SYSTEM_CONFIGURATION_KEY.CPU_FIRMWARE] = match.group(1)

            line_num += 1
            match = self.RE17.match(lines[line_num])
            if match:
                matches[ADCP_SYSTEM_CONFIGURATION_KEY.BOOT_CODE_REQUIRED] = match.group(1)
                matches[ADCP_SYSTEM_CONFIGURATION_KEY.BOOT_CODE_ACTUAL] = match.group(2)
            else:
                # BUG FIX: the original called match.group(1) here with
                # match == None, raising AttributeError while trying to
                # report the failure.  Log the offending line instead.
                log.error(line_num)
                log.error(lines[line_num])

            line_num += 1
            match = self.RE18.match(lines[line_num])
            if match:
                matches[ADCP_SYSTEM_CONFIGURATION_KEY.DEMOD_1_VERSION] = match.group(1)
                matches[ADCP_SYSTEM_CONFIGURATION_KEY.DEMOD_1_TYPE] = match.group(2)

            line_num += 1
            match = self.RE19.match(lines[line_num])
            if match:
                matches[ADCP_SYSTEM_CONFIGURATION_KEY.DEMOD_2_VERSION] = match.group(1)
                matches[ADCP_SYSTEM_CONFIGURATION_KEY.DEMOD_2_TYPE] = match.group(2)

            line_num += 1
            match = self.RE20.match(lines[line_num])
            if match:
                matches[ADCP_SYSTEM_CONFIGURATION_KEY.POWER_TIMING_VERSION] = match.group(1)
                matches[ADCP_SYSTEM_CONFIGURATION_KEY.POWER_TIMING_TYPE] = match.group(2)

            line_num += 3  # skip two blank/header lines
            match = self.RE23.match(lines[line_num])
            if match:
                matches[ADCP_SYSTEM_CONFIGURATION_KEY.BOARD_SERIAL_NUMBERS] = str(match.group(1)) + "\n"

            line_num += 1
            match = self.RE24.match(lines[line_num])
            if match:
                matches[ADCP_SYSTEM_CONFIGURATION_KEY.BOARD_SERIAL_NUMBERS] += str(match.group(1)) + "\n"

            line_num += 1
            match = self.RE25.match(lines[line_num])
            if match:
                matches[ADCP_SYSTEM_CONFIGURATION_KEY.BOARD_SERIAL_NUMBERS] += str(match.group(1)) + "\n"

            line_num += 1
            match = self.RE26.match(lines[line_num])
            if match:
                matches[ADCP_SYSTEM_CONFIGURATION_KEY.BOARD_SERIAL_NUMBERS] += str(match.group(1)) + "\n"
        except Exception as e:
            log.error("GOT AN EXCEPTION" + str(e))

        result = []
        # .items() works on both Python 2 and Python 3; the original
        # .iteritems() is Python-2-only.
        for (key, value) in matches.items():
            result.append({DataParticleKey.VALUE_ID: key,
                           DataParticleKey.VALUE: value})
        log.error("RETURNING result = " + repr(result))
        return result
class ADCP_COMPASS_CALIBRATION_KEY(BaseEnum):
    """Field names for the particle built from the AC command response
    (CALIBRATION_RAW_DATA): fluxgate compass and tilt calibration data."""
    # from AC command / CALIBRATION_RAW_DATA
    FLUXGATE_CALIBRATION_TIMESTAMP = "fluxgate_calibration_timestamp"
    S_INVERSE_BX = "s_inverse_bx"
    S_INVERSE_BY = "s_inverse_by"
    S_INVERSE_BZ = "s_inverse_bz"
    S_INVERSE_ERR = "s_inverse_err"
    COIL_OFFSET = "coil_offset"
    ELECTRICAL_NULL = "electrical_null"
    TILT_CALIBRATION_TIMESTAMP = "tilt_calibration_timestamp"
    CALIBRATION_TEMP = "calibration_temp"
    ROLL_UP_DOWN = "roll_up_down"
    PITCH_UP_DOWN = "pitch_up_down"
    OFFSET_UP_DOWN = "offset_up_down"
    TILT_NULL = "tilt_null"
class ADCP_COMPASS_CALIBRATION_DataParticle(DataParticle):
    """Particle built by scraping the fixed-layout AC (compass calibration)
    text response; each REnn pattern targets response line nn."""
    _data_particle_type = DataParticleType.ADCP_COMPASS_CALIBRATION

    RE01 = re.compile(r' +Calibration date and time: ([/0-9: ]+)')
    RE04 = re.compile(r' +Bx +. +([0-9e+-.]+) +([0-9e+-.]+) +([0-9e+-.]+) +([0-9e+-.]+) .')
    RE05 = re.compile(r' +By +. +([0-9e+-.]+) +([0-9e+-.]+) +([0-9e+-.]+) +([0-9e+-.]+) .')
    RE06 = re.compile(r' +Bz +. +([0-9e+-.]+) +([0-9e+-.]+) +([0-9e+-.]+) +([0-9e+-.]+) .')
    RE07 = re.compile(r' +Err +. +([0-9e+-.]+) +([0-9e+-.]+) +([0-9e+-.]+) +([0-9e+-.]+) .')
    RE11 = re.compile(r' +. +([0-9e+-.]+) +.')
    RE12 = re.compile(r' +. +([0-9e+-.]+) +.')
    RE13 = re.compile(r' +. +([0-9e+-.]+) +.')
    RE14 = re.compile(r' +. +([0-9e+-.]+) +.')
    RE18 = re.compile(r' +. ([0-9.]+) .')
    RE21 = re.compile(r' +Calibration date and time: ([/0-9: ]+)')
    RE22 = re.compile(r' +Average Temperature During Calibration was +([0-9.]+) .')
    RE27 = re.compile(r' Roll +. +([0-9e+-.]+) +([0-9e+-.]+) +. +. +([0-9e+-.]+) +([0-9e+-.]+) +.')
    RE28 = re.compile(r' Pitch +. +([0-9e+-.]+) +([0-9e+-.]+) +. +. +([0-9e+-.]+) +([0-9e+-.]+) +.')
    RE32 = re.compile(r' Offset . +([0-9e+-.]+) +([0-9e+-.]+) +. +. +([0-9e+-.]+) +([0-9e+-.]+) +.')
    RE36 = re.compile(r' +Null +. (\d+) +.')

    def _build_parsed_values(self):
        """Split self.raw_data on NEWLINE, match each expected line, and
        return the fields as a list of {VALUE_ID, VALUE} dicts.

        Timestamps are converted to epoch seconds via time.mktime
        (interpreted in the local timezone).  Best effort: any failure is
        logged and the fields collected so far are returned.
        """
        # Initialize
        log.error("in ADCP_COMPASS_CALIBRATION_DataParticle _build_parsed_values")
        matches = {}
        try:
            lines = self.raw_data.split(NEWLINE)

            match = self.RE01.match(lines[1])
            timestamp = match.group(1)
            matches[ADCP_COMPASS_CALIBRATION_KEY.FLUXGATE_CALIBRATION_TIMESTAMP] = time.mktime(time.strptime(timestamp, "%m/%d/%Y %H:%M:%S"))

            # S-inverse matrix rows (4 values each).
            match = self.RE04.match(lines[4])
            matches[ADCP_COMPASS_CALIBRATION_KEY.S_INVERSE_BX] = [float(match.group(1)), float(match.group(2)), float(match.group(3)), float(match.group(4))]
            match = self.RE05.match(lines[5])
            matches[ADCP_COMPASS_CALIBRATION_KEY.S_INVERSE_BY] = [float(match.group(1)), float(match.group(2)), float(match.group(3)), float(match.group(4))]
            match = self.RE06.match(lines[6])
            matches[ADCP_COMPASS_CALIBRATION_KEY.S_INVERSE_BZ] = [float(match.group(1)), float(match.group(2)), float(match.group(3)), float(match.group(4))]
            match = self.RE07.match(lines[7])
            matches[ADCP_COMPASS_CALIBRATION_KEY.S_INVERSE_ERR] = [float(match.group(1)), float(match.group(2)), float(match.group(3)), float(match.group(4))]

            # Coil offset vector: one value per line, lines 11-14.
            match = self.RE11.match(lines[11])
            matches[ADCP_COMPASS_CALIBRATION_KEY.COIL_OFFSET] = [float(match.group(1))]
            match = self.RE12.match(lines[12])
            matches[ADCP_COMPASS_CALIBRATION_KEY.COIL_OFFSET].append(float(match.group(1)))
            match = self.RE13.match(lines[13])
            matches[ADCP_COMPASS_CALIBRATION_KEY.COIL_OFFSET].append(float(match.group(1)))
            match = self.RE14.match(lines[14])
            matches[ADCP_COMPASS_CALIBRATION_KEY.COIL_OFFSET].append(float(match.group(1)))

            match = self.RE18.match(lines[18])
            matches[ADCP_COMPASS_CALIBRATION_KEY.ELECTRICAL_NULL] = float(match.group(1))

            # Tilt calibration section.
            match = self.RE21.match(lines[21])
            timestamp = match.group(1)
            matches[ADCP_COMPASS_CALIBRATION_KEY.TILT_CALIBRATION_TIMESTAMP] = time.mktime(time.strptime(timestamp, "%m/%d/%Y %H:%M:%S"))
            match = self.RE22.match(lines[22])
            matches[ADCP_COMPASS_CALIBRATION_KEY.CALIBRATION_TEMP] = float(match.group(1))
            match = self.RE27.match(lines[27])
            matches[ADCP_COMPASS_CALIBRATION_KEY.ROLL_UP_DOWN] = [float(match.group(1)), float(match.group(2)), float(match.group(3)), float(match.group(4))]
            match = self.RE28.match(lines[28])
            matches[ADCP_COMPASS_CALIBRATION_KEY.PITCH_UP_DOWN] = [float(match.group(1)), float(match.group(2)), float(match.group(3)), float(match.group(4))]
            match = self.RE32.match(lines[32])
            matches[ADCP_COMPASS_CALIBRATION_KEY.OFFSET_UP_DOWN] = [float(match.group(1)), float(match.group(2)), float(match.group(3)), float(match.group(4))]
            match = self.RE36.match(lines[36])
            matches[ADCP_COMPASS_CALIBRATION_KEY.TILT_NULL] = float(match.group(1))
        except Exception as e:
            log.error("EXCEPTION WAS !!!! " + str(e))

        result = []
        # .items() works on both Python 2 and Python 3; the original
        # .iteritems() is Python-2-only.
        for (key, value) in matches.items():
            result.append({DataParticleKey.VALUE_ID: key,
                           DataParticleKey.VALUE: value})
        return result
| 56.976513 | 170 | 0.651594 |
f91f489f1aac45ceddb8d33ae1983cd9c6dc8909 | 129 | py | Python | xero_python/__init__.py | GraceYBR/xero-python | 3682b1e61273ccbceb3617bf4fdb1cd1a36187c2 | [
"MIT"
] | null | null | null | xero_python/__init__.py | GraceYBR/xero-python | 3682b1e61273ccbceb3617bf4fdb1cd1a36187c2 | [
"MIT"
] | null | null | null | xero_python/__init__.py | GraceYBR/xero-python | 3682b1e61273ccbceb3617bf4fdb1cd1a36187c2 | [
"MIT"
] | null | null | null | """Top-level package for xero-python."""
__author__ = """Xero Developer API"""
__email__ = "api@xero.com"
__version__ = "1.5.2"
| 21.5 | 40 | 0.674419 |
234ebcbb67394f4ea28290e3dee9868668ac28fd | 12,198 | py | Python | leg/software/lab_experiments/gui example/__init__.py | sburden-group/pareto_leg_hardware | 39283d67be67bed464580db8b2487edd33c30100 | [
"MIT"
] | null | null | null | leg/software/lab_experiments/gui example/__init__.py | sburden-group/pareto_leg_hardware | 39283d67be67bed464580db8b2487edd33c30100 | [
"MIT"
] | 5 | 2022-02-18T22:49:26.000Z | 2022-03-11T22:09:42.000Z | leg/software/lab_experiments/gui example/__init__.py | sburden-group/pareto_leg_hardware | 39283d67be67bed464580db8b2487edd33c30100 | [
"MIT"
] | null | null | null | import tkinter as tk
import tkinter.messagebox as tkmsgbox
import tkinter.simpledialog as tksimpledialog
import tkinter.filedialog as tkfiledialog
from pareto_leg.odrive_driver import odrive, OdriveDriver
import numpy as np
import yaml
from math import remainder
class PIController():
    """Discrete-time PI controller with integrator anti-windup.

    Output is Kp*error + Ki*integral(error), with the integral clamped to
    [-imax, imax] so it cannot wind up while the actuator saturates.
    """

    def __init__(self, Kp, Ki, imax):
        """
        @param Kp: proportional gain
        @param Ki: integral gain
        @param imax: magnitude limit applied to the integrator state
        """
        self.Kp = Kp
        self.Ki = Ki
        self.integrator = 0.
        self.imax = imax

    def update(self, error, dt):
        """Advance the controller one step of dt seconds and return the
        control output for the given error.

        BUG FIX: the original called np.clip(-imax, imax, value), i.e.
        with the array and bound arguments in the wrong order
        (np.clip signature is clip(a, a_min, a_max)), which left the
        integrator effectively unclamped on the negative side.
        """
        self.integrator = np.clip(self.integrator + dt * error, -self.imax, self.imax)
        return self.Kp * error + self.Ki * self.integrator

    def reset(self):
        """Zero the integrator state (e.g. when re-enabling control)."""
        self.integrator = 0.
from tkinter.commondialog import Dialog
from tkinter.ttk import Combobox
class ComboboxDialog(Dialog):
    """
    Pop-up window (used by App) with a drop-down menu of discovered
    odrives.  It stays on screen until OK is clicked, then hands the
    selected device back through a callback.
    """

    def __init__(self, callback):
        """callback is a function handle of the form fun(device); it
        receives the odrive object the user picked."""
        self.master = tk.Toplevel()
        super().__init__(self.master)

        self.label = tk.Label(master=self.master, text="Select an odrive")
        self.label.pack()

        # Discover odrives by polling find_any() until it returns a
        # serial number we have already recorded.
        self.choices = {}
        device = odrive.find_any()
        while device.serial_number not in self.choices:
            self.choices[device.serial_number] = device
            device = odrive.find_any()

        self.cbox = Combobox(master=self.master, values=list(self.choices.keys()))
        self.cbox.pack()

        self.ok_button = tk.Button(master=self.master, command=self.ok, text="OK")
        self.ok_button.pack()
        self.callback = callback

    def ok(self):
        """OK handler: resolve the selected serial number to its device,
        invoke the callback with it, and close the pop-up."""
        chosen_serial = int(self.cbox.get())
        self.callback(self.choices[chosen_serial])
        self.master.destroy()
class App(tk.Frame):
"""
This is the main class which implements the bulk of the GUI. It has buttons, text entries, and a slider bar.
"""
LOOP_DELAY_MS = 10
MAX_I = 10.
def __init__(self, master):
self.master = master
super().__init__(master)
self.pack()
self.paused = True
self.setpoint = np.NaN
self.pi = PIController(0.,0.,App.MAX_I)
self.init_theta = np.NaN
""" A TK Frame for holding several buttons, on the bottom of the window. """
self.button_frame = tk.Frame(master=self)
self.connect_button = tk.Button(master=self.button_frame,command=self.connect,text="CONNECT")
self.connected = False
self.connect_button.grid(row=0,column=0,sticky='s')
self.update_button = tk.Button(master=self.button_frame,command=self.update,text="UPDATE")
self.update_button.grid(row=0,column=1,sticky='s')
self.start_button = tk.Button(master=self.button_frame,command=self.start,text="START")
self.start_button["state"]="disabled"
self.start_button.grid(row=0,column=2,sticky='s')
self.stop_button = tk.Button(master=self.button_frame,command=self.stop,text="STOP")
self.stop_button["state"]="disabled"
self.stop_button.grid(row=0,column=3,sticky='s')
self.save_button = tk.Button(master=self.button_frame,command=self.save,text="SAVE")
self.save_button["state"]="disabled"
self.save_button.grid(row=0,column=4,sticky='s')
self.button_frame.pack(side="bottom")
""" A horizontal scale used to adjust the motor controller setpoint """
self.m0_setpoint_scale = tk.Scale(master = self,label="M0 Setpoint",orient="horizontal")
self.m0_setpoint_scale["from"] = -90
self.m0_setpoint_scale["to"]= 90
self.m0_setpoint_scale["resolution"] = 0.5
self.m0_setpoint_scale.pack(side="bottom",fill="x")
self.m1_setpoint_scale = tk.Scale(master = self,label="M1 Setpoint",orient="horizontal")
self.m1_setpoint_scale["from"] = -90
self.m1_setpoint_scale["to"]= 90
self.m1_setpoint_scale["resolution"] = 0.5
self.m1_setpoint_scale.pack(side="bottom",fill="x")
""" A TK frame for holding several text labels and text entry fields, allowing the user
to modify several different data.
"""
self.conf_frame = tk.Frame(master=self)
self.conf_frame.pack(side="left",fill="both")
self.kp_label = tk.Label(master=self.conf_frame,text="Kp: ")
self.kp_label.grid(row=0,column=0)
self.kp_ent = tk.Entry(master=self.conf_frame)
self.kp_ent.grid(row=0,column=1,columnspan=2)
self.ki_label = tk.Label(master=self.conf_frame,text="Ki: ")
self.ki_label.grid(row=1,column=0)
self.ki_ent = tk.Entry(master=self.conf_frame)
self.ki_ent.grid(row=1,column=1,columnspan=2)
self.m0_sign_label = tk.Label(master=self.conf_frame,text="M0 sign:")
self.m0_sign_label.grid(row=2,column=0)
self.m0_sign_var = tk.IntVar()
m0_btn1 = tk.Radiobutton(master=self.conf_frame,text="1",variable=self.m0_sign_var,value=1)
m0_btn1.grid(row=2,column=1)
m0_btn2 = tk.Radiobutton(master=self.conf_frame,text="-1",variable=self.m0_sign_var,value=-1)
m0_btn2.grid(row=2,column=2)
self.m1_sign_label = tk.Label(master=self.conf_frame,text="M1 sign:")
self.m1_sign_label.grid(row=3,column=0)
self.m1_sign_var = tk.IntVar()
m1_btn1 = tk.Radiobutton(master=self.conf_frame,text="1",variable=self.m1_sign_var,value=1)
m1_btn1.grid(row=3,column=1)
m1_btn2 = tk.Radiobutton(master=self.conf_frame,text="-1",variable=self.m1_sign_var,value=-1)
m1_btn2.grid(row=3,column=2)
self.m0_target_label = tk.Label(master=self.conf_frame,text="M0 target: ")
self.m0_target_label.grid(row=4,column=0)
self.m0_target_ent = tk.Entry(master=self.conf_frame)
self.m0_target_ent.grid(row=4,column=1,columnspan=2)
self.m1_target_label = tk.Label(master=self.conf_frame,text="M1 target: ")
self.m1_target_label.grid(row=5,column=0)
self.m1_target_ent = tk.Entry(master=self.conf_frame)
self.m1_target_ent.grid(row=5,column=1,columnspan=2)
""" A TK Frame for holding several text labels that display motor telemetry for the user. """
self.telemetry_frame= tk.Frame(master=self)
self.telemetry_frame.pack(side="left",fill="both")
self.m0_text = tk.Label(master=self.telemetry_frame,text="M0 angle: %.1f"%(np.NaN),anchor="w",width=16)
self.m0_text.grid(row=0,column=0)
self.I0_text = tk.Label(master=self.telemetry_frame,text="I0: %.1f"%(np.NaN),anchor="w",width=16)
self.I0_text.grid(row=0,column=1)
self.m1_text = tk.Label(master=self.telemetry_frame,text="M1 angle: %.1f"%(np.NaN),anchor="w",width=16)
self.m1_text.grid(row=1,column=0)
self.I1_text = tk.Label(master=self.telemetry_frame,text="I1: %.1f"%(np.NaN),anchor="w",width=16)
self.I1_text.grid(row=1,column=1)
self.avg_angle_text = tk.Label(master=self.telemetry_frame,text="(M0+M1)/2: %.1f"%(np.NaN),anchor="w",width=32)
self.avg_angle_text.grid(row=2,column=0,columnspan=2)
""" The following line schedules the function App.apploop to run in App.LOOP_DELAY_MS milliseconds in the future """
self.after(App.LOOP_DELAY_MS,self.apploop)
def update(self):
""" This function is called when the update button is clicked, and takes
data from the entry fields in the config frame, and converts them into numerical data
"""
try:
self.pi.Kp = float(self.kp_ent.get())
self.pi.Ki = float(self.ki_ent.get())
except BaseException as e:
# a neat way to handle exceptions is to make a popup window which prints the error
return tkmsgbox.showerror(title="Error in entry", message=str(e))
def connect(self):
""" This function is called when the connect button is pressed, it triggers the opening of a ComboboxDialog,
(see above for the class definition of ComboboxDialog. """
self.connected = False
self.odrive = None
self.start_button["state"]="disabled"
self.stop_button["state"]="disabled"
self.save_button["state"]="disabled"
dialog = ComboboxDialog(self.connect_callback)
def connect_callback(self,device):
""" This function is called by the ComboboxDialog after the user makes their selection."""
try:
self.odrive = OdriveDriver(device)
self.odrive.set_torque_control_mode()
self.odrive.arm()
self.init_theta= np.array([self.m0_sign_var.get(),self.m1_sign_var.get()])*self.odrive.get_motor_angles()
self.connected = True
self.start_button["state"]="normal"
self.stop_button["state"]="normal"
self.save_button["state"]="normal"
except BaseException as e:
return tkmsgbox.showerror(title="Error in connection", message=str(e))
def start(self):
""" This is called when the start button is clicked. """
self.paused = False
def stop(self):
""" This is called then the stop button is clicked. """
self.paused = True
def save(self):
    """Save the current calibration to a YAML file chosen via a file dialog.

    Writes the axis signs, the sign-adjusted measured angles, and the
    target position (degrees converted to radians). Cancelling the dialog
    is treated as a no-op; real I/O or device errors produce an error popup.
    """
    file = tkfiledialog.asksaveasfile(mode="w")
    if file is None:
        # User cancelled the save dialog; previously this fell through to
        # `with None`, raising and showing a spurious error popup.
        return
    try:
        with file:
            thetas = np.array([self.m0_sign_var.get(), self.m1_sign_var.get()]) * self.odrive.get_motor_angles()
            m0_target = float(self.m0_target_ent.get()) * float(np.pi / 180)
            m1_target = float(self.m1_target_ent.get()) * float(np.pi / 180)
            # NOTE(review): "calibreation" is a typo but also an on-disk key;
            # left unchanged so existing readers of saved files keep working.
            data = {"motor axis sign": [self.m0_sign_var.get(), self.m1_sign_var.get()],
                    "calibreation measurement": [float(thetas[0]), float(thetas[1])],
                    "calibration position": [m0_target, m1_target]}
            file.write(yaml.dump(data))
    except Exception as e:
        # Exception (not BaseException) so Ctrl-C / SystemExit still propagate.
        return tkmsgbox.showerror(title="Error when saving file", message=str(e))
def apploop(self):
    """Periodic control loop, re-scheduled every App.LOOP_DELAY_MS.

    When not paused: reads the sign-adjusted motor angles and currents,
    forms the wrapped setpoint error, runs the PI controller, commands
    motor currents, and refreshes the telemetry labels.
    """
    # Re-schedule first so the loop keeps running even if the body raises.
    self.after(App.LOOP_DELAY_MS,self.apploop)
    dt = App.LOOP_DELAY_MS/1000.
    if not self.paused:
        thetas = np.array([self.m0_sign_var.get(),self.m1_sign_var.get()])*self.odrive.get_motor_angles()
        I = self.odrive.get_torques()
        # NOTE(review): 6.283/180 is (2*pi)/180, i.e. twice the usual
        # degrees->radians factor, while the labels below use 180/np.pi.
        # Confirm whether the setpoint sliders are intentionally scaled
        # differently.
        m0_setpoint = self.m0_setpoint_scale.get()*(6.283/180)+self.init_theta[0]
        m0_setpoint = remainder(m0_setpoint,2*np.pi)
        m1_setpoint = self.m1_setpoint_scale.get()*(6.283/180)+self.init_theta[1]
        m1_setpoint = remainder(m1_setpoint,2*np.pi)
        # Wrap each error into (-pi, pi] before feeding the controller.
        error = np.array([remainder(m0_setpoint-thetas[0],2*np.pi),remainder(m1_setpoint-thetas[1],2*np.pi)])
        current_command = self.pi.update(error*180/6.283,dt)*np.array([self.m0_sign_var.get(),self.m1_sign_var.get()])
        print(current_command)
        self.odrive.set_torques(*current_command)
        # Telemetry labels (angles shown in degrees).
        self.m0_text["text"]="M0 angle: %.1f" % (thetas[0]*180/np.pi)
        self.m1_text["text"]="M1 angle: %.1f" % (thetas[1]*180/np.pi)
        self.I0_text["text"]="I0: %.1f" % (I[0])
        self.I1_text["text"]="I1: %.1f" % (I[1])
        self.avg_angle_text["text"]="(M0+M1)/2: %.1f" % ((.5*thetas[0]+.5*thetas[1])*180/np.pi)
if __name__ == "__main__":
    # Program entry point: build the Tk root window, attach the App frame,
    # and hand control to the Tk event loop (blocks until the window closes).
    root = tk.Tk()
    app = App(root)
    app.mainloop()
| 49.185484 | 124 | 0.646581 |
34bc3ea7f361963efaf78218b6c73be51339222a | 21,525 | py | Python | glearn/nerualnetworks/cnn.py | ggutierrez545/neuralnetworks | 891fc622515af765b8529091b99e377215e195ca | [
"MIT"
] | null | null | null | glearn/nerualnetworks/cnn.py | ggutierrez545/neuralnetworks | 891fc622515af765b8529091b99e377215e195ca | [
"MIT"
] | 7 | 2020-07-17T23:58:40.000Z | 2020-08-17T21:28:21.000Z | glearn/nerualnetworks/cnn.py | ggutierrez545/neuralnetworks | 891fc622515af765b8529091b99e377215e195ca | [
"MIT"
] | null | null | null | import numpy as np
from glearn.utils.activation import activation, loss
from glearn.nerualnetworks.nn import NeuralNetwork, InputLayer, ConnectedLayer, ConnectedSegment
class ConvolutionalNeuralNet(NeuralNetwork):
    """Convolutional extension of ``NeuralNetwork``.

    Adds convolution and pooling layers (with their connecting segments)
    in front of the fully connected machinery inherited from the parent.
    """

    def __init__(self, seed=10, l_rate=0.01, m_factor=0.9, loss_func=''):
        super().__init__(seed=seed, l_rate=l_rate, m_factor=m_factor)
        self.loss_func = loss_func

    def add_connected_layer(self, size, activation_func='relu'):
        """Append a fully connected layer of ``size`` neurons.

        Overrides ``NeuralNetwork.add_connected_layer``.

        Parameters
        ----------
        size : int
            Number of neurons in the connected layer.
        activation_func : str
            Keyword indicating the activation function to use in the layer.

        Raises
        ------
        AssertionError
            If the network does not already contain an ``InputLayer``.
        """
        if [type(i) for i in self.layers].__contains__(InputLayer):
            self.layers.append(ConnectedLayer(size, activation=activation_func, parent=self.__class__))
            # Build the ConnectedSegment from the last two layers so the chain
            # of layer references stays intact.
            self.segments.append(ConnectedSegment(*self.layers[-2:]))
        else:
            raise AssertionError("NeuralNetwork instance must contain an InputLayer before adding a ConnectedLayer")

    def add_convolution_layer(self, num_kernals, kernal_size=3, activation_func='relu', padding='valid'):
        """Append a ConvolutionLayer with ``num_kernals`` filters of size
        ``kernal_size`` x ``kernal_size``."""
        if [type(i) for i in self.layers].__contains__(InputLayer):
            self.layers.append(ConvolutionLayer(num_kernals, kernal_size=kernal_size, padding=padding, activation_func=activation_func))
            # Output shape/size depend on the preceding layer's shape.
            self.layers[-1].calc_output_chars(self.layers[-2])
            self.segments.append(ConvolutionSegment(*self.layers[-2:]))
        else:
            raise AssertionError("ConvolutionlNeuralNet instance must contain"
                                 " an InputLayer before adding a ConvolutionLayer")

    def add_pooling_layer(self, agg_type, pool_size=2):
        """Append a PoolingLayer; must directly follow a ConvolutionLayer."""
        if type(self.layers[-1]) is not ConvolutionLayer:
            raise AssertionError(f"PoolingLayer must come after ConvolutionLayer, not {type(self.layers[-1])}")
        else:
            self.layers.append(PoolingLayer(agg_type=agg_type, pool_size=pool_size))
            # Output shape/size depend on the preceding layer's shape.
            self.layers[-1].calc_output_chars(self.layers[-2])
            self.segments.append(PoolingSegment(*self.layers[-2:]))

    def feedforward(self, x):
        """Run input ``x`` through every segment in order."""
        self.input_layer.act_vals = x
        for segment in self.segments:
            segment.forward_pass()

    def backpropagate(self, truth, updater='sgd', batch_size=50, momentum=False):
        """Backpropagate the loss against ``truth`` and update the weights.

        Bug fix: the caller-supplied ``momentum`` flag is now forwarded to
        ``update_weights``; it was previously hard-coded to ``False`` so the
        parameter had no effect.
        """
        cost = loss(self.layers[-1].act_vals, truth, loss_type=self.loss_func)
        delta = None
        for segment in reversed(self.segments):
            if delta is None:
                # Output layer: combine the loss gradient with the activation
                # derivative to seed the backward pass.
                activated = activation(segment.back.raw_vals, func=segment.back.a_func, derivative=True)
                if self.loss_func == 'cross-entropy':
                    delta = (cost.T @ activated).reshape(-1, 1)
                else:
                    delta = cost * activated
            segment.back_propagate(delta)
            delta = segment.setup_next_delta(delta)
            if type(segment.back) is not PoolingLayer:
                segment.update_weights(self.l_rate, self.m_factor, updater=updater,
                                       batch_size=batch_size, momentum=momentum)
class SegmentationLayer(object):
    """Base class providing square-window segmentation over images.

    ``segment_size`` is the side length of the window yielded by
    ``segment_image`` (the kernal size for convolution layers, the pool
    size for pooling layers).
    """

    def __init__(self, segment_size):
        self.segment_size = segment_size

    def segment_image(self, image, new_image_shape):
        """Generator to segment `image` into kernal-sized chunks.

        Parameters
        ----------
        image : :obj:`ndarray`
            2d array (an image) or 3d array (array of images).
        new_image_shape : tuple
            Shape of the filtered image.

        Yields
        ------
        tuple
            ``(row_index, col_index, window)`` where ``window`` is 2d or 3d
            depending on `image` dimensions.
        """
        rows_post_filter, cols_post_filter = new_image_shape
        if self.__class__ is PoolingLayer:
            # Segmenting an image for pooling does not overlap pixels, so we
            # must make our iterables reflect that
            row_segs = [i*self.segment_size for i in range(rows_post_filter)]
            col_segs = [i*self.segment_size for i in range(cols_post_filter)]
        else:
            # Segmenting an image for convolution does overlap pixels, so
            # iterable is just the range of rows / columns
            row_segs = range(rows_post_filter)
            col_segs = range(cols_post_filter)
        for i, row in enumerate(row_segs):
            lst_row = row + self.segment_size
            for j, col in enumerate(col_segs):
                lst_col = col + self.segment_size
                try:
                    # 3d input: keep the depth axis intact, slice rows/cols.
                    yield i, j, image[:, row:lst_row, col:lst_col]
                except IndexError:
                    # 2d single-image fallback.
                    yield i, j, image[row:lst_row, col:lst_col]

    @staticmethod
    def array_crawl(array):
        """Yield every (row, col) index pair over the last two axes of a 2d
        or 3d array."""
        try:
            _, h, w = array.shape
        except ValueError:
            h, w = array.shape
        for i in range(h):
            for j in range(w):
                yield i, j
class PoolingLayer(SegmentationLayer):
    """Max/average pooling layer.

    Downsamples each feature map by a factor of ``pool_size`` using the
    aggregation named by ``agg_type`` ('max' or 'avg').
    """

    def __init__(self, agg_type='max', pool_size=2):
        super().__init__(pool_size)
        self.shape = None           # output shape, set by calc_output_chars
        self.agg_type = agg_type
        self.pool_size = pool_size
        self.output_image = None    # pooled feature maps, set by process_image
        self.output_size = None     # flattened output length

    @property
    def agg_type(self):
        return self.__agg_type

    @agg_type.setter
    def agg_type(self, agg_type):
        # Only max- and average-pooling are implemented.
        assert agg_type in ['max', 'avg'], f"Unsupported agg_type, {agg_type}"
        self.__agg_type = agg_type

    @property
    def output_image(self):
        return self.__output_image

    @output_image.setter
    def output_image(self, output_image):
        self.__output_image = output_image

    @property
    def raveled_output(self):
        """Pooled output flattened to a column vector, or None before any pass."""
        try:
            return self.output_image.ravel().reshape(-1, 1)
        except AttributeError:
            return None

    def process_image(self, image):
        """Pool ``image`` (2d, or 3d stack of maps) into ``self.output_image``.

        NOTE(review): output dimensions are computed from the *pre-pad*
        height/width, so an odd trailing row/column is effectively truncated;
        kept as-is to stay consistent with ``calc_output_chars``.
        """
        try:
            d, h, w = image.shape
        except ValueError:
            d = 1
            h, w = image.shape
        if (h % 2 != 0) or (w % 2 != 0):
            image = self._pad_image(image)
        new_rows = h // self.pool_size
        new_cols = w // self.pool_size
        filtered_image = np.zeros((d, new_rows, new_cols))
        for row, col, img_seg in self.segment_image(image, (new_rows, new_cols)):
            try:
                filtered_image[:, row, col] = self._pool_segment(img_seg)
            except IndexError:
                filtered_image[row, col] = self._pool_segment(img_seg)
        self.output_image = filtered_image

    def _pool_segment(self, image_segment):
        """Aggregate one pooling window per depth slice."""
        if self.agg_type == 'max':
            return np.amax(np.amax(image_segment, axis=1), axis=1)
        else:
            return np.average(np.average(image_segment, axis=1), axis=1)

    def calc_output_chars(self, prev_layer):
        """Derive this layer's output shape and flattened size from the
        preceding layer."""
        if len(prev_layer.shape) == 3:
            self.shape = [prev_layer.shape[0], *[x // self.pool_size for x in prev_layer.shape[-2:]]]
        else:
            self.shape = [prev_layer.num_kernals, *[x // self.pool_size for x in prev_layer.shape[-2:]]]
        self.output_size = np.prod(self.shape)

    @staticmethod
    def _pad_image(image):
        """Zero-pad an odd height/width up to the next even number.

        Bug fix: the parity tests were inverted (``% 2 == 0``), which padded
        an already-even dimension (making it odd) and left the odd dimension
        untouched.
        """
        if image.shape[-2] % 2 != 0:
            try:
                image = np.pad(image, ((0, 0), (0, 1), (0, 0)), 'constant', constant_values=0)
            except ValueError:
                # 2d image: only two pad tuples are accepted.
                image = np.pad(image, ((0, 1), (0, 0)), 'constant', constant_values=0)
        if image.shape[-1] % 2 != 0:
            try:
                image = np.pad(image, ((0, 0), (0, 0), (0, 1)), 'constant', constant_values=0)
            except ValueError:
                image = np.pad(image, ((0, 0), (0, 1)), 'constant', constant_values=0)
        return image
class ConvolutionLayer(SegmentationLayer):
    """Convolution layer for use in a CNN.

    Child class of `SegmentationLayer`; contains kernal (filter) arrays along with
    logic to apply them to an image.

    Parameters
    ----------
    num_kernals : int
        Number of kernals in layer.
    kernal_size : int
        Size of each kernal, i.e. 3 -> 3x3, 5 -> 5x5, etc.
    padding : str
        'valid' (no padding) or 'same' (zero-pad so output keeps input size).
    activation_func : str
        Keyword naming the activation applied to the raw convolution output.

    Attributes
    ----------
    kernal_size
    kernals : :obj:`ndarray`
        Array of convolution kernals.
    """

    def __init__(self, num_kernals, kernal_size=3, padding='valid', activation_func='relu'):
        super().__init__(kernal_size)
        self.num_kernals = num_kernals
        self.kernals = None
        self.kernal_size = kernal_size
        # Initialize random kernals
        self._create_kernals()
        self.raw_output = None      # pre-activation feature maps
        self.output_image = None    # activated feature maps (set via raw_output)
        self.shape = None           # output shape, set by calc_output_chars
        self.output_size = None     # flattened output length
        self.padding = padding
        self.a_func = activation_func
        self.kernal_hist = []       # max backprop delta per update (debug trace)

    @property
    def kernals(self):
        return self.__kernals

    @kernals.setter
    def kernals(self, kernals):
        self.__kernals = kernals

    @property
    def raw_output(self):
        return self.__raw_output

    @raw_output.setter
    def raw_output(self, raw_output):
        # Setting the raw output also refreshes the activated output image.
        self.__raw_output = raw_output
        if raw_output is not None:
            self.output_image = activation(self.raw_output, func=self.a_func)

    @property
    def output_image(self):
        return self.__output_image

    @output_image.setter
    def output_image(self, output_image):
        self.__output_image = output_image

    @property
    def raveled_output(self):
        """Activated output flattened to a column vector, or None before any pass."""
        try:
            return self.output_image.ravel().reshape(-1, 1)
        except AttributeError:
            return None

    def _create_kernals(self):
        """Private method to initialize random (standard-normal) kernals.
        """
        args = [self.num_kernals, self.kernal_size, self.kernal_size]
        self.kernals = np.random.randn(*args)

    def process_image(self, image):
        """Method to apply kernals to an image or array of images.

        Results are stored in ``raw_output`` (and, via its setter, the
        activated result in ``output_image``).

        Parameters
        ----------
        image : :obj:`ndarray`
            2d array (an image) or 3d array (array of images)
        """
        if self.padding == 'same':
            image = self._pad_image(image)
        try:
            d, h, w = image.shape
        except ValueError:
            d = 1
            h, w = image.shape
        # "Valid" output dimensions for a stride-1 convolution.
        new_rows = (h - self.kernal_size) + 1
        new_cols = (w - self.kernal_size) + 1
        filtered_image = np.zeros((d * self.num_kernals, new_rows, new_cols))
        for row, col, img_seg in self.segment_image(image, (new_rows, new_cols)):
            filtered_image[:, row, col] = self._apply_kernals(img_seg)
        self.raw_output = filtered_image

    def _apply_kernals(self, image_segment):
        """Apply kernals (filters) to an image segment.

        Parameters
        ----------
        image_segment : :obj:`ndarray`
            2d or 3d array representing segment of image/s.

        Returns
        -------
        :obj:`ndarray`
            One summed response per (input map, kernal) pair.
        """
        if len(image_segment.shape) == 2:
            image_segment = [image_segment]
        sums = np.zeros(self.num_kernals * len(image_segment))
        begin_idx = 0
        for seg in image_segment:
            end_idx = begin_idx + self.num_kernals
            # Elementwise product then two axis-sums == per-kernal dot product.
            first_row_sum = np.sum(self.kernals * seg, axis=1)
            second_row_sum = np.sum(first_row_sum, axis=1)
            sums[begin_idx:end_idx] = second_row_sum
            begin_idx = end_idx
        return sums

    def _pad_image(self, image):
        """Apply padding to image or array of images.

        Used when user selects to do same padding when filtering an image.

        Parameters
        ----------
        image : :obj:`ndarray`
            2d array (an image) or 3d array (array of images).

        Returns
        -------
        :obj:`ndarray`
            Image or array of images with paddding of 0s.
        """
        pad = (self.kernal_size - 1) // 2
        try:
            return np.pad(image, ((0, 0), (pad, pad), (pad, pad)), 'constant', constant_values=0)
        except ValueError:
            # 2d image fallback.
            return np.pad(image, (pad, pad), 'constant', constant_values=0)

    def calc_output_chars(self, prev_layer):
        """Method to calculate this layer's output shape and flattened length.

        Parameters
        ----------
        prev_layer : :obj:`InputLayer`, :obj:`PoolingLayer`, or :obj:`ConvoluionLayer`
            The previous layer in the CNN.
        """
        if self.padding == 'valid':
            new_shape = [self.num_kernals, *[x - self.kernal_size + 1 for x in prev_layer.shape[-2:]]]
        else:
            new_shape = [self.num_kernals, *prev_layer.shape[-2:]]
        flat_len = np.prod(new_shape)
        if len(prev_layer.shape) == 3:
            # Multi-map input: each input map produces num_kernals output maps.
            flat_len = flat_len * prev_layer.shape[0]
            new_shape[0] = new_shape[0] * prev_layer.shape[0]
        self.shape = new_shape
        self.output_size = flat_len
class ConvolutionSegment(object):
    """Links a ConvolutionLayer to the layer feeding it; implements the
    forward pass, gradient accumulation, and weight update for that link."""

    def __init__(self, input_layer, output_layer):
        self.front = input_layer
        self.back = output_layer
        # Gradient accumulators consumed by update_weights.
        self.kernal_updates = np.zeros(self.back.kernals.shape)
        self.bias_updates = np.zeros(self.back.num_kernals).reshape(-1, 1, 1)

    @property
    def front(self):
        """Front layer of `ConvolutionSegment`.

        Supported classes for front layer are `InputLayer`, `ConvolutionLayer`, or `PoolingLayer`.
        Setter method verifies above class restrictions.
        """
        return self.__front

    @front.setter
    def front(self, front):
        assert type(front) in [InputLayer, PoolingLayer, ConvolutionLayer], f"" \
            f"ConvolutionSegment input_layer cannot be {type(front)}; must be " \
            "InputLayer, ConvolutionLayer, or PoolingLayer instance."
        self.__front = front

    @property
    def back(self):
        """Back layer of `ConvolutionSegment`

        Back layer must be `ConvolutionLayer`; setter method verifies this.
        """
        return self.__back

    @back.setter
    def back(self, back):
        assert type(back) is ConvolutionLayer, f"ConvolutionSegment output_layer" \
            f"cannot be {type(back)}; must be ConvolutionLayer instance."
        self.__back = back

    def forward_pass(self):
        """Pass an image through a `ConvolutionLayer` instance.
        """
        if type(self.front) is InputLayer:
            self.back.process_image(self.front.act_vals)
        else:
            self.back.process_image(self.front.output_image)

    def back_propagate(self, delta):
        """Accumulate kernal gradients for this layer from ``delta``."""
        # Track the max delta per update (debug/telemetry trace).
        self.back.kernal_hist.append(delta.max())
        if type(self.front) is InputLayer:
            image = self.front.act_vals
        else:
            image = self.front.output_image
        for row, col, img_seg in self.back.segment_image(image, self.back.shape[-2:]):
            if type(self.front) is not InputLayer:
                # Multi-map input: pair each group of num_kernals output maps
                # with its corresponding input-map group.
                for k in np.arange(0, len(img_seg), self.back.num_kernals):
                    lst_k = k + self.back.num_kernals
                    self.kernal_updates += delta[k:lst_k, row, col].reshape(-1, 1, 1) * img_seg[k:lst_k]
            else:
                self.kernal_updates += delta[:, row, col].reshape(-1, 1, 1) * img_seg
        self.kernal_updates /= self.back.num_kernals

    def setup_next_delta(self, delta):
        """Compute the delta to hand to the front layer.

        Correlates the 180-degree-rotated kernals with the rotated delta,
        clipping the overlap window at the four borders. Returns None when
        the front layer is the InputLayer (nothing further to propagate).
        """
        if type(self.front) is InputLayer:
            return None
        else:
            next_delta = np.zeros(self.front.shape)
            rot_kern = np.rot90(self.back.kernals, 2, (1, 2))
            rot_del = np.rot90(delta, 2, (1, 2))
            for row, col in self.back.array_crawl(next_delta):
                for fst in np.arange(0, len(delta), self.back.num_kernals):
                    lst = fst + self.back.num_kernals
                    # Each branch below handles one edge condition where the
                    # kernal window only partially overlaps the delta array.
                    if col < self.back.kernal_size:
                        if row < self.back.kernal_size:
                            hold = rot_del[fst:lst, :row + 1, :col + 1] * rot_kern[:, :row + 1, :col + 1]
                        elif row + self.back.kernal_size > rot_del.shape[1]:
                            # In this case, e_row will be negative so we can utilize the reverse indices in rot_kern
                            e_row = rot_del.shape[1] - (row + self.back.kernal_size)
                            hold = rot_del[fst:lst, row:, :col + 1] * rot_kern[:, :e_row, :col + 1]
                        else:
                            e_row = row + self.back.kernal_size
                            hold = rot_del[fst:lst, row:e_row, :col + 1] * rot_kern[:, :, :col + 1]
                    elif col + self.back.kernal_size > rot_del.shape[2]:
                        # e_col is negative here, again exploiting reverse slicing.
                        e_col = rot_del.shape[2] - (col + self.back.kernal_size)
                        if row < self.back.kernal_size:
                            hold = rot_del[fst:lst, :row + 1, col:] * rot_kern[:, :row + 1, :e_col]
                        elif row + self.back.kernal_size > rot_del.shape[1]:
                            e_row = rot_del.shape[1] - (row + self.back.kernal_size)
                            hold = rot_del[fst:lst, row:, col:] * rot_kern[:, :e_row, :e_col]
                        else:
                            e_row = row + self.back.kernal_size
                            hold = rot_del[fst:lst, row:e_row, col:] * rot_kern[:, :, :e_col]
                    else:
                        # Interior columns: full kernal width overlaps.
                        e_col = col + self.back.kernal_size
                        if row < self.back.kernal_size:
                            hold = rot_del[fst:lst, :row + 1, col:e_col] * rot_kern[:, :row + 1, :]
                        elif row + self.back.kernal_size > rot_del.shape[1]:
                            e_row = rot_del.shape[1] - (row + self.back.kernal_size)
                            hold = rot_del[fst:lst, row:, col:e_col] * rot_kern[:, :e_row, :]
                        else:
                            e_row = row + self.back.kernal_size
                            hold = rot_del[fst:lst, row:e_row, col:e_col] * rot_kern
                    next_delta[:, row, col] += np.sum(np.sum(hold, axis=1), axis=1)
            next_delta = next_delta / self.back.num_kernals
            return np.rot90(next_delta, 2, (1, 2))

    def update_weights(self, l_rate, m_factor, updater='', batch_size='', momentum=True):
        """Apply the accumulated kernal gradients.

        Only plain SGD is implemented here; ``m_factor``, ``batch_size`` and
        ``momentum`` are currently unused by this segment type.
        """
        if updater == 'sgd':
            self.back.kernals -= (l_rate * self.kernal_updates)
class PoolingSegment(object):
    """Links a PoolingLayer to the ConvolutionLayer feeding it."""

    def __init__(self, input_layer, output_layer):
        self.front = input_layer
        self.back = output_layer
        # Scratch array marking which inputs "won" the pooling; filled by
        # back_propagate and consumed by setup_next_delta.
        self.unpool = np.zeros(self.front.shape)

    @property
    def front(self):
        return self.__front

    @front.setter
    def front(self, front):
        assert type(front) is ConvolutionLayer, f"PoolingSegment front must be ConvolutionLayer, not {type(front)}"
        self.__front = front

    @property
    def back(self):
        return self.__back

    @back.setter
    def back(self, back):
        assert type(back) is PoolingLayer, f"PoolingSegment back must be PoolingLayer, not {type(back)}"
        self.__back = back

    def forward_pass(self):
        """Pool the front layer's activated output."""
        self.back.process_image(self.front.output_image)

    def back_propagate(self, delta):
        """Record the max-pool winners into ``self.unpool``.

        NOTE(review): ``delta`` is accepted but never used here, and the
        window offsets use the pooled-grid indices ``row``/``col`` directly
        rather than scaling them by pool_size as setup_next_delta does --
        both look suspicious; confirm against the intended unpooling scheme.
        """
        for row, col, img_seg in self.back.segment_image(self.front.output_image, self.back.shape[-2:]):
            lst_row = row + self.back.pool_size
            lst_col = col + self.back.pool_size
            self.unpool[:, row:lst_row, col:lst_col][img_seg == img_seg.max()] = img_seg.max()

    def setup_next_delta(self, delta):
        """Upsample ``delta`` to the front layer's shape, routing each pooled
        gradient to the positions marked in ``self.unpool``."""
        if delta.shape[0] == self.front.shape[0]:
            summed_delta = delta
        else:
            # Collapse extra kernal groups down to the front layer's depth.
            prev_num_kernals = delta.shape[0] // self.front.shape[0]
            summed_delta = np.zeros((self.front.shape[0], *delta.shape[-2:]))
            for fst in np.arange(0, delta.shape[0], prev_num_kernals):
                lst = fst + prev_num_kernals
                summed_delta += delta[fst:lst]
        new_delta = np.zeros(self.unpool.shape)
        for row, col, img_seg in self.back.segment_image(self.unpool, self.back.shape[-2:]):
            # Map the pooled-grid index back to full-resolution coordinates.
            new_row = row * self.back.pool_size
            lst_row = new_row + self.back.pool_size
            new_col = col * self.back.pool_size
            lst_col = new_col + self.back.pool_size
            for i, seg in enumerate(img_seg):
                bool_map = seg > 0
                if bool_map.any():
                    new_delta[i, new_row:lst_row, new_col:lst_col][bool_map] = summed_delta[i, row, col]
        return new_delta
| 36.055276 | 136 | 0.586481 |
83069a70003b5005dba143faf3cc3344c3175927 | 61 | py | Python | careplus/search/models.py | stemado/satb | 7ef5698db8125072e3609da3766d3e80feff6e40 | [
"MIT"
] | null | null | null | careplus/search/models.py | stemado/satb | 7ef5698db8125072e3609da3766d3e80feff6e40 | [
"MIT"
] | 8 | 2020-03-24T15:42:15.000Z | 2022-01-13T00:44:54.000Z | careplus/search/models.py | stemado/satb | 7ef5698db8125072e3609da3766d3e80feff6e40 | [
"MIT"
] | null | null | null |
from django.db import models
from django.db import models
| 10.166667 | 28 | 0.786885 |
712978fddf7a6bbb366e6a200ad607eb9aae31b3 | 5,958 | py | Python | sortUi.py | rtli/AccountBook | 55fc783f71edb4931057d81157de82abc897fb07 | [
"MIT"
] | null | null | null | sortUi.py | rtli/AccountBook | 55fc783f71edb4931057d81157de82abc897fb07 | [
"MIT"
] | null | null | null | sortUi.py | rtli/AccountBook | 55fc783f71edb4931057d81157de82abc897fb07 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'sort.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
import csvIssue

# Classifier name lists loaded once at import time; they populate the list
# widgets and combo boxes built in sortUi.setupUi below.
second_classifier_list=csvIssue.init_second_classifier()
first_classifier_list=csvIssue.init_first_classifier()
class sortUi(object):
    """Qt form for editing the two-level expense classification lists.

    Originally generated by pyuic5 from sort.ui; widget names, creation
    order, geometry and all user-visible strings are preserved so existing
    signal hook-ups and translations keep working. The repeated QFont
    setup blocks are factored into ``_make_font``.
    """

    @staticmethod
    def _make_font(family, point_size, bold=None, weight=None):
        """Build a QFont; bold/weight are applied only when given, matching
        the generated code which skipped them for some widgets."""
        font = QtGui.QFont()
        font.setFamily(family)
        font.setPointSize(point_size)
        if bold is not None:
            font.setBold(bold)
        if weight is not None:
            font.setWeight(weight)
        return font

    def setupUi(self, Form):
        """Create and position every widget on the fixed-size 440x420 Form."""
        Form.setObjectName("Form")
        Form.resize(440, 420)
        Form.setMinimumSize(QtCore.QSize(440, 420))
        Form.setMaximumSize(QtCore.QSize(440, 420))
        # Left column: second-level (sub-category) list.
        self.second_classifier = QtWidgets.QListWidget(Form)
        self.second_classifier.setGeometry(QtCore.QRect(30, 70, 151, 131))
        self.second_classifier.setFont(self._make_font("思源黑体 CN Medium", 14))
        self.second_classifier.setObjectName("second_classifier")
        self.second_classifier.addItems(second_classifier_list)
        # Right column: first-level (top category) list.
        self.first_classifier = QtWidgets.QListWidget(Form)
        self.first_classifier.setGeometry(QtCore.QRect(250, 70, 151, 131))
        self.first_classifier.setFont(self._make_font("思源黑体 CN Medium", 14))
        self.first_classifier.setObjectName("first_classifier")
        self.first_classifier.addItems(first_classifier_list)
        self.second_line = QtWidgets.QLineEdit(Form)
        self.second_line.setGeometry(QtCore.QRect(30, 220, 151, 31))
        self.second_line.setFont(self._make_font("思源黑体 CN Heavy", 14, bold=True, weight=75))
        self.second_line.setObjectName("second_line")
        self.second_line.setPlaceholderText("二级分类")
        self.first_line = QtWidgets.QLineEdit(Form)
        self.first_line.setGeometry(QtCore.QRect(250, 220, 151, 31))
        self.first_line.setFont(self._make_font("思源黑体 CN Heavy", 14, bold=True, weight=75))
        self.first_line.setObjectName("first_line")
        self.first_line.setPlaceholderText("一级分类")
        self.first_add = QtWidgets.QPushButton(Form)
        self.first_add.setGeometry(QtCore.QRect(250, 320, 71, 61))
        self.first_add.setFont(self._make_font("思源黑体 CN Heavy", 24, bold=True, weight=75))
        self.first_add.setObjectName("first_add")
        self.second_add = QtWidgets.QPushButton(Form)
        self.second_add.setGeometry(QtCore.QRect(30, 320, 71, 61))
        self.second_add.setFont(self._make_font("思源黑体 CN Heavy", 24, bold=True, weight=75))
        self.second_add.setObjectName("second_add")
        # The sub-category combo box lists *first-level* names (its parent
        # category chooser), mirroring the original generated code.
        self.second_Box = QtWidgets.QComboBox(Form)
        self.second_Box.setGeometry(QtCore.QRect(30, 270, 151, 31))
        self.second_Box.setObjectName("second_Box")
        self.second_Box.addItems(first_classifier_list)
        # bold=False with weight=75 reproduces the generated code exactly.
        self.second_Box.setFont(self._make_font("思源黑体 CN Light", 14, bold=False, weight=75))
        self.first_Box = QtWidgets.QComboBox(Form)
        self.first_Box.setGeometry(QtCore.QRect(250, 270, 151, 31))
        self.first_Box.setObjectName("first_Box")
        self.first_Box.addItem("总支出")
        self.first_Box.setFont(self._make_font("思源黑体 CN Light", 14, bold=False, weight=75))
        self.second_delete = QtWidgets.QPushButton(Form)
        self.second_delete.setGeometry(QtCore.QRect(110, 320, 71, 61))
        self.second_delete.setFont(self._make_font("思源黑体 CN Heavy", 24, bold=True, weight=75))
        self.second_delete.setObjectName("second_delete")
        self.first_delete = QtWidgets.QPushButton(Form)
        self.first_delete.setGeometry(QtCore.QRect(330, 320, 71, 61))
        self.first_delete.setFont(self._make_font("思源黑体 CN Heavy", 24, bold=True, weight=75))
        self.first_delete.setObjectName("first_delete")
        self.second_label = QtWidgets.QLabel(Form)
        self.second_label.setGeometry(QtCore.QRect(50, 20, 111, 31))
        self.second_label.setFont(self._make_font("思源黑体 CN Light", 20, bold=False, weight=50))
        self.second_label.setObjectName("second_label")
        self.first_label = QtWidgets.QLabel(Form)
        self.first_label.setGeometry(QtCore.QRect(270, 20, 111, 31))
        self.first_label.setFont(self._make_font("思源黑体 CN Normal", 20, bold=False, weight=50))
        self.first_label.setObjectName("first_label")
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        """Set all user-visible (translatable) strings."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "编辑分类"))
        self.first_add.setText(_translate("Form", "添加"))
        self.second_add.setText(_translate("Form", "添加"))
        self.second_delete.setText(_translate("Form", "删除"))
        self.first_delete.setText(_translate("Form", "删除"))
        self.second_label.setText(_translate("Form", "二级分类"))
        self.first_label.setText(_translate("Form", "一级分类"))
| 40.530612 | 75 | 0.662135 |
2b7bf2d70b5d176a8d4b78d179d33daeae3c4a12 | 560 | py | Python | main.py | Nedarb111/01-Introduction | 7183092096c344ccc5627408d0f6d15ef92e59bc | [
"MIT"
] | null | null | null | main.py | Nedarb111/01-Introduction | 7183092096c344ccc5627408d0f6d15ef92e59bc | [
"MIT"
] | null | null | null | main.py | Nedarb111/01-Introduction | 7183092096c344ccc5627408d0f6d15ef92e59bc | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import utils
utils.check_version((3,7))
utils.clear()
print('My name is Braden Marksberry')
print('I love "A Link to the Past" and a large majority of other "Zelda" titles.')
print('I am not the most expirienced programer...I have not really done any code- I usually just write music for games.')
print('I am excited to develop the base level of programming so that I can get my feet wet in the game community.')
print('\nMy stackoverflow.com user number is user:12026390.')
print('the link to my profie https://github.com/Nedarb111') | 43.076923 | 121 | 0.75 |
16bfeac9ffeb39cd4e5882dcd8b75b3280fdd625 | 44 | py | Python | scale/port/__init__.py | stevevarner/scale | 9623b261db4ddcf770f00df16afc91176142bb7c | [
"Apache-2.0"
] | null | null | null | scale/port/__init__.py | stevevarner/scale | 9623b261db4ddcf770f00df16afc91176142bb7c | [
"Apache-2.0"
] | null | null | null | scale/port/__init__.py | stevevarner/scale | 9623b261db4ddcf770f00df16afc91176142bb7c | [
"Apache-2.0"
] | null | null | null | default_app_config = 'port.apps.PortConfig'
| 22 | 43 | 0.818182 |
fd10b4f6fd5443377be246b7c3d9e592b72e204c | 1,442 | py | Python | multitier/__init__.py | knivets/djaodjin-multitier | faef56e9424ab493c9e0fca0b6fd56231a648070 | [
"BSD-2-Clause"
] | null | null | null | multitier/__init__.py | knivets/djaodjin-multitier | faef56e9424ab493c9e0fca0b6fd56231a648070 | [
"BSD-2-Clause"
] | null | null | null | multitier/__init__.py | knivets/djaodjin-multitier | faef56e9424ab493c9e0fca0b6fd56231a648070 | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2018, Djaodjin Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
PEP 386-compliant version number for the multitier django app.
"""
__version__ = '0.1.8-dev'
| 48.066667 | 78 | 0.778779 |
a3df1196344896ab8ec388ad5653b48f79bf3d18 | 1,355 | py | Python | Lib/lib2to3/pygram.py | gerph/cpython | 98813cb03c2371789669c3d8debf8fca2a344de9 | [
"CNRI-Python-GPL-Compatible"
] | 5 | 2020-01-25T19:30:31.000Z | 2021-03-05T20:34:57.000Z | Lib/lib2to3/pygram.py | gerph/cpython | 98813cb03c2371789669c3d8debf8fca2a344de9 | [
"CNRI-Python-GPL-Compatible"
] | 18 | 2019-12-09T17:05:24.000Z | 2021-06-09T15:19:49.000Z | Lib/lib2to3/pygram.py | gerph/cpython | 98813cb03c2371789669c3d8debf8fca2a344de9 | [
"CNRI-Python-GPL-Compatible"
] | 3 | 2020-05-15T22:25:58.000Z | 2021-03-05T20:35:00.000Z | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Export the Python grammar and symbols."""
# Python imports
import os
# Local imports
from .pgen2 import token
from .pgen2 import driver
from . import pytree
# The grammar file
# On-disk grammar definitions shipped alongside this module.
_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__),
                             f"Grammar{os.extsep}txt")
_PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__),
                                     f"PatternGrammar{os.extsep}txt")
class Symbols(object):

    def __init__(self, grammar):
        """Expose every grammar nonterminal as an attribute.

        Each attribute's value is the symbol's numeric type (an int >= 256),
        taken directly from ``grammar.symbol2number``.
        """
        self.__dict__.update(grammar.symbol2number)
# Full Python grammar (treats "print" and "exec" as keywords).
python_grammar = driver.load_packaged_grammar("lib2to3", _GRAMMAR_FILE)

python_symbols = Symbols(python_grammar)

# Variant grammars for source where print/exec are ordinary names.
python_grammar_no_print_statement = python_grammar.copy()
del python_grammar_no_print_statement.keywords["print"]

python_grammar_no_print_and_exec_statement = python_grammar_no_print_statement.copy()
del python_grammar_no_print_and_exec_statement.keywords["exec"]

# Grammar for 2to3's pattern-matching mini-language.
pattern_grammar = driver.load_packaged_grammar("lib2to3", _PATTERN_GRAMMAR_FILE)
pattern_symbols = Symbols(pattern_grammar)
abe9ca2331937bc311afd175243c3b7b68472e52 | 24,625 | py | Python | apex/transformer/tensor_parallel/layers.py | anirudhprabhakaran3/apex | 3c88451afbbf5b835ce0ff21165b48787feeeab4 | [
"BSD-3-Clause"
] | null | null | null | apex/transformer/tensor_parallel/layers.py | anirudhprabhakaran3/apex | 3c88451afbbf5b835ce0ff21165b48787feeeab4 | [
"BSD-3-Clause"
] | null | null | null | apex/transformer/tensor_parallel/layers.py | anirudhprabhakaran3/apex | 3c88451afbbf5b835ce0ff21165b48787feeeab4 | [
"BSD-3-Clause"
] | 1 | 2021-12-20T00:49:01.000Z | 2021-12-20T00:49:01.000Z | # coding=utf-8
# Copyright (c) 2021-22, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Parts of the code here are adapted from PyTorch
# repo: https://github.com/pytorch/pytorch
import torch
import torch.nn.functional as F
import torch.nn.init as init
from torch.nn.parameter import Parameter
from apex._autocast_utils import _cast_if_autocast_enabled
from apex.transformer.parallel_state import get_tensor_model_parallel_group
from apex.transformer.parallel_state import get_tensor_model_parallel_rank
from apex.transformer.parallel_state import get_tensor_model_parallel_world_size
from apex.transformer.utils import divide
from apex.transformer.tensor_parallel.mappings import (
copy_to_tensor_model_parallel_region,
)
from apex.transformer.tensor_parallel.mappings import (
gather_from_tensor_model_parallel_region,
)
from apex.transformer.tensor_parallel.mappings import (
reduce_from_tensor_model_parallel_region,
)
from apex.transformer.tensor_parallel.mappings import (
scatter_to_tensor_model_parallel_region,
)
from apex.transformer.tensor_parallel.random import get_cuda_rng_tracker
from apex.transformer.tensor_parallel.utils import VocabUtility
from apex.transformer.log_util import get_transformer_logger
# Module-level logger for the apex.transformer package.
_logger = get_transformer_logger(__name__)
# The fused weight-gradient kernel is an optional APEX CUDA extension
# (built with --cpp_ext/--cuda_ext); record whether it is importable so
# layers can gracefully fall back to the unfused path.
_grad_accum_fusion_available = True
try:
    import fused_weight_gradient_mlp_cuda
except ImportError:
    _grad_accum_fusion_available = False
# Default tensor-parallel metadata attached to parameters:
# not parallel, no partition dimension, unit partition stride.
_MODEL_PARALLEL_ATTRIBUTE_DEFAULTS = {
    "tensor_model_parallel": False,
    "partition_dim": -1,
    "partition_stride": 1,
}
def param_is_not_tensor_parallel_duplicate(param):
    """Return truthy when *param* is not a duplicated (replicated) parameter.

    A parameter is considered non-duplicate when it is explicitly tagged as
    tensor-model-parallel, or — for replicated parameters — when this process
    is tensor-parallel rank 0, so exactly one rank owns each replica.
    """
    marked_parallel = (
        hasattr(param, "tensor_model_parallel") and param.tensor_model_parallel
    )
    return marked_parallel or (get_tensor_model_parallel_rank() == 0)
def set_tensor_model_parallel_attributes(tensor, is_parallel, dim, stride):
    """Tag *tensor* with tensor-model-parallel metadata.

    Asserts that none of the metadata attributes are already present, then
    records whether the tensor is parallel, which dimension it is
    partitioned along, and the partition stride.
    """
    # Refuse to overwrite metadata that was already attached elsewhere.
    for attr_name in _MODEL_PARALLEL_ATTRIBUTE_DEFAULTS:
        assert not hasattr(tensor, attr_name)
    # Attach the three metadata attributes.
    tensor.tensor_model_parallel = is_parallel
    tensor.partition_dim = dim
    tensor.partition_stride = stride
def set_defaults_if_not_set_tensor_model_parallel_attributes(tensor):
    """Fill in default tensor-parallel metadata on *tensor* where absent.

    Attributes that already exist keep their current values; only missing
    ones are populated from _MODEL_PARALLEL_ATTRIBUTE_DEFAULTS.
    """
    for attr_name, default_value in _MODEL_PARALLEL_ATTRIBUTE_DEFAULTS.items():
        if not hasattr(tensor, attr_name):
            setattr(tensor, attr_name, default_value)
def copy_tensor_model_parallel_attributes(destination_tensor, source_tensor):
    """Copy tensor-parallel metadata from *source_tensor* onto *destination_tensor*.

    Only attributes actually present on the source are copied; missing ones
    are skipped so the destination keeps whatever it already has.
    """
    for attr_name in _MODEL_PARALLEL_ATTRIBUTE_DEFAULTS:
        if hasattr(source_tensor, attr_name):
            setattr(destination_tensor, attr_name, getattr(source_tensor, attr_name))
def _initialize_affine_weight_gpu(weight, init_method, partition_dim, stride=1):
    """Initialize an affine weight for model parallelism directly on GPU.

    Tags the weight with tensor-parallel metadata and then runs the
    initializer under the model-parallel CUDA RNG tracker so each rank
    draws from its own reproducible random stream.
    """
    set_tensor_model_parallel_attributes(
        tensor=weight, is_parallel=True, dim=partition_dim, stride=stride
    )
    rng_tracker = get_cuda_rng_tracker()
    with rng_tracker.fork():
        init_method(weight)
# TODO (mkozuki): Re-consider removing params_dtype from arguments to make this
# more parallel with _initialize_affine_weight_gpu
def _initialize_affine_weight_cpu(
    weight,
    output_size,
    input_size,
    per_partition_size,
    partition_dim,
    init_method,
    stride=1,
    return_master_weight=False,
    *,
    params_dtype=torch.float32,
):
    """Initialize an affine weight for model parallelism on CPU.

    The full ("master") weight is materialized and initialized identically on
    every process, then the chunks belonging to this tensor-parallel rank are
    copied into *weight*.

    Returns the master weight when ``return_master_weight`` is True (used by
    tests), otherwise ``None``.
    """
    set_tensor_model_parallel_attributes(
        tensor=weight, is_parallel=True, dim=partition_dim, stride=stride
    )
    # Build and initialize the full weight in fp32, then cast to the
    # requested parameter dtype.
    master_weight = torch.empty(
        output_size, input_size, dtype=torch.float, requires_grad=False
    )
    init_method(master_weight)
    master_weight = master_weight.to(dtype=params_dtype)
    # Split the master weight into per-stride chunks along the partition
    # dimension and pick every world_size-th chunk starting at our rank.
    chunk_size = divide(per_partition_size, stride)
    chunks = torch.split(master_weight, chunk_size, dim=partition_dim)
    rank = get_tensor_model_parallel_rank()
    world_size = get_tensor_model_parallel_world_size()
    local_chunks = chunks[rank::world_size]
    with torch.no_grad():
        torch.cat(local_chunks, dim=partition_dim, out=weight)
    return master_weight if return_master_weight else None
class VocabParallelEmbedding(torch.nn.Module):
    """Embedding parallelized in the vocabulary dimension.

    Each tensor-parallel rank holds a contiguous slice of the vocabulary;
    the forward pass embeds in-range tokens locally, zeroes out-of-range
    rows, and all-reduces so every rank sees the full embedding output.
    This is mainly adapted from torch.nn.Embedding and all the default
    values are kept.
    Arguments:
        num_embeddings: vocabulary size.
        embedding_dim: size of hidden state.
        init_method: method to initialize weights.
    """
    def __init__(
        self,
        num_embeddings,
        embedding_dim,
        init_method=init.xavier_normal_,
        *,
        params_dtype=torch.float32,
        use_cpu_initialization=False,
    ):
        super(VocabParallelEmbedding, self).__init__()
        # Keep the input dimensions.
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        # Set the defaults for compatibility with torch.nn.Embedding's
        # F.embedding call signature (none of these features are used).
        self.padding_idx = None
        self.max_norm = None
        self.norm_type = 2.0
        self.scale_grad_by_freq = False
        self.sparse = False
        self._weight = None
        self.tensor_model_parallel_size = get_tensor_model_parallel_world_size()
        # Divide the weight matrix along the vocabulary dimension:
        # [vocab_start_index, vocab_end_index) is this rank's slice.
        (
            self.vocab_start_index,
            self.vocab_end_index,
        ) = VocabUtility.vocab_range_from_global_vocab_size(
            self.num_embeddings,
            get_tensor_model_parallel_rank(),
            self.tensor_model_parallel_size,
        )
        self.num_embeddings_per_partition = (
            self.vocab_end_index - self.vocab_start_index
        )
        # Allocate weights and initialize.  CPU initialization builds the
        # full master weight on every rank for reproducibility; GPU
        # initialization draws from the rank-local RNG stream.
        if use_cpu_initialization:
            self.weight = Parameter(
                torch.empty(
                    self.num_embeddings_per_partition,
                    self.embedding_dim,
                    dtype=params_dtype,
                )
            )
            _initialize_affine_weight_cpu(
                self.weight,
                self.num_embeddings,
                self.embedding_dim,
                self.num_embeddings_per_partition,
                0,
                init_method,
                params_dtype=params_dtype,
            )
        else:
            self.weight = Parameter(
                torch.empty(
                    self.num_embeddings_per_partition,
                    self.embedding_dim,
                    device=torch.cuda.current_device(),
                    dtype=params_dtype,
                )
            )
            _initialize_affine_weight_gpu(
                self.weight, init_method, partition_dim=0, stride=1
            )
    def forward(self, input_):
        # input_: integer token ids (any shape); returns embeddings with an
        # extra trailing embedding_dim axis.
        if self.tensor_model_parallel_size > 1:
            # Build the mask of tokens that fall outside this rank's slice.
            input_mask = (input_ < self.vocab_start_index) | (
                input_ >= self.vocab_end_index
            )
            # Shift ids into local index space; clamp out-of-range ids to 0
            # (their rows are zeroed below, so the lookup value is irrelevant).
            masked_input = input_.clone() - self.vocab_start_index
            masked_input[input_mask] = 0
        else:
            masked_input = input_
        # Get the embeddings from the local partition.
        output_parallel = F.embedding(
            masked_input,
            self.weight,
            self.padding_idx,
            self.max_norm,
            self.norm_type,
            self.scale_grad_by_freq,
            self.sparse,
        )
        # Zero rows for tokens owned by other ranks so the all-reduce
        # sums exactly one real embedding per token.
        if self.tensor_model_parallel_size > 1:
            output_parallel[input_mask, :] = 0.0
        # Reduce across all the model parallel GPUs.
        output = reduce_from_tensor_model_parallel_region(output_parallel)
        return output
class LinearWithGradAccumulationAndAsyncAllreduce(torch.autograd.Function):
    """Linear layer execution with asynchronous all-reduce and gradient accumulation fusion in backprop.

    Forward computes input @ weight.T (+ bias).  Backward optionally
    (a) launches the grad_input all-reduce asynchronously so it overlaps
    with the weight-gradient GEMM, and (b) fuses the weight-gradient GEMM
    with fp32 accumulation into `weight.main_grad` via the APEX CUDA
    extension (in which case grad_weight is returned as None).
    """
    @staticmethod
    def forward(
        ctx, input, weight, bias, gradient_accumulation_fusion, async_grad_allreduce
    ):
        # Stash tensors and flags needed by backward.
        ctx.save_for_backward(input, weight)
        ctx.use_bias = bias is not None
        ctx.gradient_accumulation_fusion = gradient_accumulation_fusion
        ctx.async_grad_allreduce = async_grad_allreduce
        output = torch.matmul(input, weight.t())
        if bias is not None:
            output = output + bias
        return output
    @staticmethod
    def backward(ctx, grad_output):
        input, weight = ctx.saved_tensors
        use_bias = ctx.use_bias
        grad_input = grad_output.matmul(weight)
        # Convert the tensor shapes to 2D for execution compatibility
        # (flatten the first two dims; assumes 3-D [s, b, h] activations).
        grad_output = grad_output.view(
            grad_output.shape[0] * grad_output.shape[1], grad_output.shape[2]
        )
        input = input.view(input.shape[0] * input.shape[1], input.shape[2])
        if ctx.async_grad_allreduce:
            # Asynchronous all-reduce of grad_input across the tensor-parallel
            # group; completion is awaited just before returning.
            handle = torch.distributed.all_reduce(
                grad_input, group=get_tensor_model_parallel_group(), async_op=True
            )
            # Delay the start of weight gradient computation shortly (3us) to have
            # all-reduce scheduled first and have GPU resources allocated
            _ = torch.empty(1, device=grad_output.device) + 1
        if ctx.gradient_accumulation_fusion:
            # Fused kernel accumulates the weight gradient in fp32 directly
            # into weight.main_grad, so no grad_weight tensor is returned.
            fused_weight_gradient_mlp_cuda.wgrad_gemm_accum_fp32(
                input, grad_output, weight.main_grad
            )
            grad_weight = None
        else:
            grad_weight = grad_output.t().matmul(input)
        grad_bias = grad_output.sum(dim=0) if use_bias else None
        if ctx.async_grad_allreduce:
            handle.wait()
        # None grads for the two boolean flag arguments of forward().
        return grad_input, grad_weight, grad_bias, None, None
def linear_with_grad_accumulation_and_async_allreduce(
    input, weight, bias, gradient_accumulation_fusion, async_grad_allreduce,
):
    """Autocast-safe entry point for LinearWithGradAccumulationAndAsyncAllreduce.

    Casts the arguments according to the ambient autocast state up front,
    then invokes the custom autograd function with autocast disabled so the
    function body sees the already-cast tensors.
    """
    cast_args = _cast_if_autocast_enabled(
        input, weight, bias, gradient_accumulation_fusion, async_grad_allreduce
    )
    with torch.cuda.amp.autocast(enabled=False):
        return LinearWithGradAccumulationAndAsyncAllreduce.apply(*cast_args)
class LinearWithGradAccumulationAndAsyncAllreduceIn16Bit(torch.autograd.Function):
    """Linear layer execution with asynchronous all-reduce and gradient accumulation fusion in backprop.

    NOTE: intentionally mirrors LinearWithGradAccumulationAndAsyncAllreduce;
    the only difference is that the fused weight-gradient kernel accumulates
    in fp16 (wgrad_gemm_accum_fp16) instead of fp32.
    """
    @staticmethod
    def forward(
        ctx, input, weight, bias, gradient_accumulation_fusion, async_grad_allreduce
    ):
        # Stash tensors and flags needed by backward.
        ctx.save_for_backward(input, weight)
        ctx.use_bias = bias is not None
        ctx.gradient_accumulation_fusion = gradient_accumulation_fusion
        ctx.async_grad_allreduce = async_grad_allreduce
        output = torch.matmul(input, weight.t())
        if bias is not None:
            output = output + bias
        return output
    @staticmethod
    def backward(ctx, grad_output):
        input, weight = ctx.saved_tensors
        use_bias = ctx.use_bias
        grad_input = grad_output.matmul(weight)
        # Convert the tensor shapes to 2D for execution compatibility
        # (flatten the first two dims; assumes 3-D [s, b, h] activations).
        grad_output = grad_output.view(
            grad_output.shape[0] * grad_output.shape[1], grad_output.shape[2]
        )
        input = input.view(input.shape[0] * input.shape[1], input.shape[2])
        if ctx.async_grad_allreduce:
            # Asynchronous all-reduce of grad_input across the tensor-parallel
            # group; completion is awaited just before returning.
            handle = torch.distributed.all_reduce(
                grad_input, group=get_tensor_model_parallel_group(), async_op=True
            )
            # Delay the start of weight gradient computation shortly (3us) to have
            # all-reduce scheduled first and have GPU resources allocated
            _ = torch.empty(1, device=grad_output.device) + 1
        if ctx.gradient_accumulation_fusion:
            # fp16-accumulating fused kernel writes into weight.main_grad.
            fused_weight_gradient_mlp_cuda.wgrad_gemm_accum_fp16(
                input, grad_output, weight.main_grad
            )
            grad_weight = None
        else:
            grad_weight = grad_output.t().matmul(input)
        grad_bias = grad_output.sum(dim=0) if use_bias else None
        if ctx.async_grad_allreduce:
            handle.wait()
        # None grads for the two boolean flag arguments of forward().
        return grad_input, grad_weight, grad_bias, None, None
def linear_with_grad_accumulation_and_async_allreduce_in16bit(
    input, weight, bias, gradient_accumulation_fusion, async_grad_allreduce,
):
    """Autocast-safe entry point for the fp16-accumulating linear function.

    Casts the arguments according to the ambient autocast state up front,
    then invokes the custom autograd function with autocast disabled so the
    function body sees the already-cast tensors.
    """
    cast_args = _cast_if_autocast_enabled(
        input, weight, bias, gradient_accumulation_fusion, async_grad_allreduce
    )
    with torch.cuda.amp.autocast(enabled=False):
        return LinearWithGradAccumulationAndAsyncAllreduceIn16Bit.apply(*cast_args)
class ColumnParallelLinear(torch.nn.Module):
    """Linear layer with column parallelism.
    The linear layer is defined as Y = XA + b. A is parallelized along
    its second dimension as A = [A_1, ..., A_p].
    Arguments:
        input_size: first dimension of matrix A.
        output_size: second dimension of matrix A.
        bias: If true, add bias
        gather_output: If true, call all-gather on output and make Y available
                       to all GPUs, otherwise, every GPU will have its output
                       which is Y_i = XA_i
        init_method: method to initialize weights. Note that bias is always set
                     to zero.
        stride: For the strided linear layers.
        keep_master_weight_for_test: This was added for testing and should be
                                     set to False. It returns the master weights
                                     used for initialization.
        skip_bias_add: This was added to enable performance optimizations where bias
                       can be fused with other elementwise operations. we skip
                       adding bias but instead return it.
    Keyword Arguments:
        no_async_tensor_model_parallel_allreduce: disable overlapping the
            backward grad-input all-reduce with the weight-gradient GEMM.
        params_dtype: dtype used for the weight and bias parameters.
        use_cpu_initialization: build/initialize weights on CPU (reproducible
            master weight) instead of directly on the current CUDA device.
        gradient_accumulation_fusion: use the fused APEX kernel to accumulate
            weight gradients into `weight.main_grad` (requires the
            fused_weight_gradient_mlp_cuda extension).
        accumulation_in_fp16: accumulate fused weight gradients in fp16
            instead of fp32.
    """
    def __init__(
        self,
        input_size,
        output_size,
        bias=True,
        gather_output=True,
        init_method=init.xavier_normal_,
        stride=1,
        keep_master_weight_for_test=False,
        skip_bias_add=False,
        *,
        no_async_tensor_model_parallel_allreduce=False,
        params_dtype=torch.float32,
        use_cpu_initialization=False,
        gradient_accumulation_fusion=False,
        accumulation_in_fp16: bool = False,
    ):
        super(ColumnParallelLinear, self).__init__()
        # Keep input parameters
        self.input_size = input_size
        self.output_size = output_size
        self.gather_output = gather_output
        # Divide the weight matrix along the last dimension.
        world_size = get_tensor_model_parallel_world_size()
        self.output_size_per_partition = divide(output_size, world_size)
        self.skip_bias_add = skip_bias_add
        # Parameters.
        # Note: torch.nn.functional.linear performs XA^T + b and as a result
        # we allocate the transpose.
        # Initialize weight.
        if use_cpu_initialization:
            self.weight = Parameter(
                torch.empty(
                    self.output_size_per_partition, self.input_size, dtype=params_dtype
                )
            )
            # Master weight kept only when requested (test support).
            self.master_weight = _initialize_affine_weight_cpu(
                self.weight,
                self.output_size,
                self.input_size,
                self.output_size_per_partition,
                0,
                init_method,
                stride=stride,
                return_master_weight=keep_master_weight_for_test,
                params_dtype=params_dtype,
            )
        else:
            self.weight = Parameter(
                torch.empty(
                    self.output_size_per_partition,
                    self.input_size,
                    device=torch.cuda.current_device(),
                    dtype=params_dtype,
                )
            )
            _initialize_affine_weight_gpu(
                self.weight, init_method, partition_dim=0, stride=stride
            )
        if bias:
            # Bias is partitioned the same way as the output dimension.
            if use_cpu_initialization:
                self.bias = Parameter(
                    torch.empty(self.output_size_per_partition, dtype=params_dtype)
                )
            else:
                self.bias = Parameter(
                    torch.empty(
                        self.output_size_per_partition,
                        device=torch.cuda.current_device(),
                        dtype=params_dtype,
                    )
                )
            set_tensor_model_parallel_attributes(self.bias, True, 0, stride)
            # Always initialize bias to zero.
            with torch.no_grad():
                self.bias.zero_()
        else:
            self.register_parameter("bias", None)
        # Async all-reduce only makes sense with more than one rank.
        self.async_tensor_model_parallel_allreduce = (
            not no_async_tensor_model_parallel_allreduce and world_size > 1
        )
        if gradient_accumulation_fusion:
            if not _grad_accum_fusion_available:
                # Basically, apex.transformer module users are expected to install APEX's
                # `--cpp_ext` and `--cuda_ext`. The example installation command is as follows:
                # `pip install --global-option="--cpp_ext" --global-option="--cuda_ext ."
                # at the root of APEX repository.
                import warnings
                warnings.warn(
                    "`gradient_accumulation_fusion` is set to `True` but "
                    "the custom CUDA extension of `fused_weight_gradient_mlp_cuda` module not "
                    "found. Thus `gradient_accumulation_fusion` set to `False`. "
                    "Note that the extension requires CUDA>=11."
                )
                gradient_accumulation_fusion = False
        self.gradient_accumulation_fusion = gradient_accumulation_fusion
        # Select the autograd entry point once, at construction time.
        self._forward_impl = (
            linear_with_grad_accumulation_and_async_allreduce_in16bit
            if accumulation_in_fp16
            else linear_with_grad_accumulation_and_async_allreduce
        )
    def forward(self, input_):
        # When skip_bias_add is set the bias is returned instead of added,
        # so callers can fuse it with a following elementwise op.
        bias = self.bias if not self.skip_bias_add else None
        if not self.async_tensor_model_parallel_allreduce:
            # Set up backprop all-reduce.
            input_parallel = copy_to_tensor_model_parallel_region(input_)
        else:
            # The async path performs the backward all-reduce inside the
            # autograd function instead of via the copy region.
            input_parallel = input_
        # Matrix multiply.
        output_parallel = self._forward_impl(
            input_parallel,
            self.weight,
            bias,
            self.gradient_accumulation_fusion,
            self.async_tensor_model_parallel_allreduce,
        )
        if self.gather_output:
            # All-gather across the partitions.
            output = gather_from_tensor_model_parallel_region(output_parallel)
        else:
            output = output_parallel
        output_bias = self.bias if self.skip_bias_add else None
        return output, output_bias
class RowParallelLinear(torch.nn.Module):
    """Linear layer with row parallelism.
    The linear layer is defined as Y = XA + b. A is parallelized along
    its first dimension and X along its second dimension as:
               -   -
              | A_1 |
              | .   |
          A = | .   |        X = [X_1, ..., X_p]
              | .   |
              | A_p |
               -   -
    Arguments:
        input_size: first dimension of matrix A.
        output_size: second dimension of matrix A.
        bias: If true, add bias. Note that bias is not parallelized.
        input_is_parallel: If true, we assume that the input is already
                           split across the GPUs and we do not split
                           again.
        init_method: method to initialize weights. Note that bias is always set
                     to zero.
        stride: For the strided linear layers.
        keep_master_weight_for_test: This was added for testing and should be
                                     set to False. It returns the master weights
                                     used for initialization.
        skip_bias_add: This was added to enable performance optimization where bias
                       can be fused with other elementwise operations. We skip
                       adding bias but instead return it.
    """
    def __init__(
        self,
        input_size,
        output_size,
        bias=True,
        input_is_parallel=False,
        init_method=init.xavier_normal_,
        stride=1,
        keep_master_weight_for_test=False,
        skip_bias_add=False,
        *,
        params_dtype=torch.float32,
        use_cpu_initialization=False,
    ):
        super(RowParallelLinear, self).__init__()
        # Keep input parameters
        self.input_size = input_size
        self.output_size = output_size
        self.input_is_parallel = input_is_parallel
        # Divide the weight matrix along the last dimension.
        world_size = get_tensor_model_parallel_world_size()
        self.input_size_per_partition = divide(input_size, world_size)
        self.skip_bias_add = skip_bias_add
        # NOTE(review): orphaned comment fragment below — looks like a
        # truncated TODO about passing params_dtype; original intent unclear.
        # as an argument to this function?
        # Parameters.
        # Note: torch.nn.functional.linear performs XA^T + b and as a result
        # we allocate the transpose.
        # Initialize weight.
        if use_cpu_initialization:
            self.weight = Parameter(
                torch.empty(
                    self.output_size, self.input_size_per_partition, dtype=params_dtype
                )
            )
            # Master weight kept only when requested (test support).
            self.master_weight = _initialize_affine_weight_cpu(
                self.weight,
                self.output_size,
                self.input_size,
                self.input_size_per_partition,
                1,
                init_method,
                stride=stride,
                return_master_weight=keep_master_weight_for_test,
                params_dtype=params_dtype,
            )
        else:
            self.weight = Parameter(
                torch.empty(
                    self.output_size,
                    self.input_size_per_partition,
                    device=torch.cuda.current_device(),
                    dtype=params_dtype,
                )
            )
            _initialize_affine_weight_gpu(
                self.weight, init_method, partition_dim=1, stride=stride
            )
        if bias:
            # Bias is not partitioned (added after the all-reduce).
            if use_cpu_initialization:
                self.bias = Parameter(torch.empty(self.output_size, dtype=params_dtype))
            else:
                self.bias = Parameter(
                    torch.empty(
                        self.output_size,
                        device=torch.cuda.current_device(),
                        dtype=params_dtype,
                    )
                )
            # Always initialize bias to zero.
            with torch.no_grad():
                self.bias.zero_()
        else:
            self.register_parameter("bias", None)
    def forward(self, input_):
        # Set up backprop all-reduce.
        if self.input_is_parallel:
            input_parallel = input_
        else:
            input_parallel = scatter_to_tensor_model_parallel_region(input_)
        # Matrix multiply.
        output_parallel = F.linear(input_parallel, self.weight)
        # All-reduce across all the partitions.
        output_ = reduce_from_tensor_model_parallel_region(output_parallel)
        if not self.skip_bias_add:
            output = output_ + self.bias if self.bias is not None else output_
            output_bias = None
        else:
            # Return the bias separately so callers can fuse it downstream.
            output = output_
            output_bias = self.bias
        return output, output_bias
| 37.085843 | 107 | 0.635655 |
4c2599c3bc59ac5686166b6befa8490f4ddec0b7 | 1,458 | py | Python | neptune/generated/analytics/test/test_analyticscontroller_api.py | jiji-online/neptune-cli | 50cf680a80d141497f9331ab7cdaee49fcb90b0c | [
"Apache-2.0"
] | null | null | null | neptune/generated/analytics/test/test_analyticscontroller_api.py | jiji-online/neptune-cli | 50cf680a80d141497f9331ab7cdaee49fcb90b0c | [
"Apache-2.0"
] | null | null | null | neptune/generated/analytics/test/test_analyticscontroller_api.py | jiji-online/neptune-cli | 50cf680a80d141497f9331ab7cdaee49fcb90b0c | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
ref: https://github.com/swagger-api/swagger-codegen
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.apis.analyticscontroller_api import AnalyticscontrollerApi
class TestAnalyticscontrollerApi(unittest.TestCase):
    """ AnalyticscontrollerApi unit test stubs.

    Auto-generated by swagger-codegen; the test bodies are placeholders
    (`pass`) awaiting real assertions.
    """
    def setUp(self):
        # Fresh API client instance per test.
        self.api = swagger_client.apis.analyticscontroller_api.AnalyticscontrollerApi()
    def tearDown(self):
        # No per-test cleanup required for the stub client.
        pass
    def test_feed_using_post(self):
        """
        Test case for feed_using_post
        feed
        """
        # TODO: implement assertions against the feed endpoint.
        pass
    def test_sync_using_post(self):
        """
        Test case for sync_using_post
        sync
        """
        # TODO: implement assertions against the sync endpoint.
        pass
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
701ef1692d06a302334436e9f5a8400802fdbb5b | 2,326 | py | Python | portfolios/migrations/0002_auto_20170809_1352.py | tkanemoto/django-portfolios | 328990d030b3509cf5aeef09c3ae605104a18564 | [
"Apache-2.0"
] | null | null | null | portfolios/migrations/0002_auto_20170809_1352.py | tkanemoto/django-portfolios | 328990d030b3509cf5aeef09c3ae605104a18564 | [
"Apache-2.0"
] | null | null | null | portfolios/migrations/0002_auto_20170809_1352.py | tkanemoto/django-portfolios | 328990d030b3509cf5aeef09c3ae605104a18564 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-08-09 13:52
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration (Django 1.11): creates the Client,
    # Page, Project and Role models and links Project to Role.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('portfolios', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Client',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.CharField(max_length=200, verbose_name='description')),
                # NOTE(review): ForeignKey(unique=True) is deprecated in
                # favor of OneToOneField; kept as generated since migrations
                # are historical records.
                ('person', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='Page',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): upload_to=b'' (bytes) is unusual — presumably
                # auto-generated; confirm uploads land where intended.
                ('showreel', models.FileField(upload_to=b'')),
                ('clients', models.ManyToManyField(to='portfolios.Client')),
                # Testimonial is expected to exist from 0001_initial.
                ('testimonials', models.ManyToManyField(to='portfolios.Testimonial')),
            ],
        ),
        migrations.CreateModel(
            name='Project',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, verbose_name='name')),
                ('client', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='portfolios.Client')),
            ],
        ),
        migrations.CreateModel(
            name='Role',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20, verbose_name='name')),
                ('product', models.CharField(max_length=20, verbose_name='product')),
            ],
        ),
        # Role is added to Project after both models exist.
        migrations.AddField(
            model_name='project',
            name='role',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='portfolios.Role'),
        ),
    ]
| 40.807018 | 133 | 0.595873 |
a881d9ffc8a2da348ebea1e482fd5303678f3206 | 57,208 | py | Python | project/app/api/utils.py | Lambda-School-Labs/Labs25-SaverLife-TeamC-ds | f7d5f663abca860d055ad0b6a47508c50e689b08 | [
"MIT"
] | 1 | 2020-08-04T20:15:57.000Z | 2020-08-04T20:15:57.000Z | project/app/api/utils.py | Lambda-School-Labs/Labs25-SaverLife-TeamC-ds | f7d5f663abca860d055ad0b6a47508c50e689b08 | [
"MIT"
] | 3 | 2020-08-10T20:19:10.000Z | 2020-09-25T20:19:36.000Z | project/app/api/utils.py | Lambda-School-Labs/Labs25-SaverLife-TeamC-ds | f7d5f663abca860d055ad0b6a47508c50e689b08 | [
"MIT"
] | null | null | null | from fastapi import APIRouter, HTTPException
import pandas as pd
import numpy as np
import psycopg2
import plotly.express as px
from pydantic import BaseModel, Field, validator
from fastapi.templating import Jinja2Templates
from typing import Optional
import json
import random
import sys
import traceback
from sktime.forecasting.model_selection import temporal_train_test_split
from sktime.performance_metrics.forecasting import smape_loss
from sktime.utils.plotting.forecasting import plot_ys
from sktime.forecasting.naive import NaiveForecaster
from sktime.forecasting.compose import ReducedRegressionForecaster
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor
import plotly.graph_objects as go
from dotenv import load_dotenv
from os.path import join, dirname
import os
from app.api.basemodels import User, GraphRequest
# Load environment variables (DB credentials) from a .env file placed
# alongside this module.
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
# FastAPI router and Jinja2 template loader shared by this module's routes.
router = APIRouter()
templates = Jinja2Templates(directory="./app/static/dist")
class SaverlifeUtility(object):
"""General utility class to handle database cursor objects and other miscellaneous functions."""
    def __init__(self):
        # NOTE(review): _handle_cursor() returns a (connection, cursor)
        # tuple, so despite its name this attribute holds both — confirm
        # how downstream code unpacks it.
        self._cursor = self._handle_cursor()
def _handle_connection(self):
"""Connect to a database."""
return psycopg2.connect(
host=os.getenv('POSTGRES_ADDRESS_EXTERNAL'),
dbname=os.getenv('POSTGRES_DBNAME_EXTERNAL'),
user=os.getenv('POSTGRES_USER_EXTERNAL'),
password=os.getenv('POSTGRES_PASSWORD_EXTERNAL'),
port=os.getenv('POSTGRES_PORT_EXTERNAL')
)
def _handle_cursor(self):
"""Create a cursor to perform database operations."""
conn = self._handle_connection()
cur = conn.cursor()
return conn, cur
def handle_query(self, query: str, fetchone: bool = False):
"""Handle simple query operations."""
try:
conn, cur = self._handle_cursor()
cur.execute(query)
except BaseException:
traceback.print_exc()
finally:
if cur:
if fetchone is True:
try:
result = cur.fetchone()
except ProgrammingError:
result = None
else:
result = cur.fetchall()
cur.close()
conn.close()
return result
def _generate_dataframe(self, table: str, bank_account_id: str = None, sample_size: int = 1):
"""Support utility function to handle database manipulation and other miscellaneous functions."""
df = None
if table is 'transactions':
df = self._configure_transactions_dataframe(bank_account_id=bank_account_id, sample_size=sample_size)
if table is 'accounts':
df = self._configure_accounts_dataframe(bank_account_id=bank_account_id, sample_size=sample_size)
if table is 'requests':
df = self._configure_requests_dataframe()
return df
def _configure_transactions_dataframe(self, bank_account_id: str, sample_size: int = 1):
df = self._fetch_transactions_dataframe(bank_account_id=bank_account_id, sample_size=sample_size)
df = self._wrangle_transactions(df)
return df
def _configure_accounts_dataframe(self, bank_account_id: str, sample_size: int = 1):
df = self._fetch_accounts_dataframe(bank_account_id=bank_account_id, sample_size=sample_size)
df = self._wrangle_accounts(df)
return df
def _configure_requests_dataframe(self):
df = self._fetch_requests_dataframe()
df = self._wrangle_requests(df)
return df
    def _handle_category_features(self, debug: bool = False):
        """Build the static category lookup table as a dataframe.

        The four parallel lists below are a hard-coded snapshot of the
        transaction-category taxonomy: index i of each list describes the
        same category (id, leaf name, parent name, grandparent name).
        NOTE(review): this looks like the Plaid category taxonomy — confirm
        the snapshot is current before relying on it.

        Args:
            debug (bool): debug mode. Prints the result if TRUE else returns the result.

        Returns:
            list: nested list of features.
            [
                [category_id], [category_name], [parent_category_name], [grandparent_category_name]
            ]
            NOTE(review): the actual return is a pandas.DataFrame with those
            four columns (or None when debug is True) — the docstring above
            predates the implementation; kept for reference.
        """
        # Column 1: category id strings, positionally aligned with the three
        # name lists below.
        category_id = [
            '18001001', '18001002', '18001003', '18001004', '18001005', '18001006', '18001007', '18001008', '18001009', '18001010', '18073001', '18073002', '18073003', '18073004', '22001000', '22002000', '17001001', '17001002', '17001003', '17001004', '17001005', '17001006', '17001007', '17001008', '17001009', '17001010', '17001011', '17001012', '17001013', '17001014', '17001015', '17001016', '17001017', '17001018', '17001019', '17001000', '18020013', '21012002', '21007001', '21007002', '10002000', '22013000', '22017000', '22009000', '18006001', '18006002', '18006003', '18006004', '18006005', '18006006', '18006007', '18006008', '18006009', '19005001', '19005002', '19005003', '19005004', '19005005', '19005006', '19005007', '18006000', '10001000', '10003000', '10004000', '10005000', '10007000', '10008000', '10009000', '18008001', '22006001', '22006000', '22011000', '22016000', '21012001', '19012001', '19012002', '19012003', '19012004', '19012005', '19012006', '19012007', '19012008', '12002001', '12002002', '12001000', '12002000', '12003000', '12005000', '12006000', '12007000', '12015000', '12018000', '12019000', '12015001', '12015002', '12015003', '12019001', '18012001', '18012002', '16001000', '12008000', '12008001', '12008002', '12008003', '12008004', '12008005', '12008006', '12008007', '12008008', '12008009', '12008010', '12008011', '19013001', '19013002', '19013003', '18018001', '18020003', '18020005', '18020006', '18020007', '18020008', '18020009', '18020010', '18020011', '18020012', '18020014', '18021000', '18021001', '18021002', '19025000', '19025001', '19025002', '19025003', '19025004', '19047000', '12004000', '12010000', '12011000', '12012000', '12013000', '12014000', '12016000', '12017000', '12012001', '12012002', '12012003', '12009000', '21009001', '14001000', '14002000', '14001001', '14001002', '14001003', '14001004', '14001005', '14001006', '14001007', '14001008', '14001009', '14001010', '14001011', '14001012', '14001013', '14001014', '14001015', '14001016', 
            '14001017', '14002001', '14002002', '14002003', '14002004', '14002005', '14002006', '14002007', '14002008', '14002009', '14002010', '14002011', '14002012', '14002013', '14002014', '14002015', '14002016', '14002017', '14002018', '14002019', '14002020', '18013001', '18013002', '18013003', '18013004', '18013005', '18013006', '18013007', '18013008', '18013009', '18013010', '18024001', '18024002', '18024003', '18024004', '18024005', '18024006', '18024007', '18024008', '18024009', '18024010', '18024011', '18024012', '18024013', '18024014', '18024015', '18024016', '18024017', '18024018', '18024019', '18024020', '18024021', '18024022', '18024023', '18024024', '18024025', '18024026', '18024027', '15001000', '15002000', '18020004', '16003000', '22012001', '22012002', '22012003', '22012004', '22012005', '22012006', '22012000', '18037001', '18037002', '18037003', '18037004', '18037005', '18037006', '18037007', '18037008', '18037009', '18037010', '18037011', '18037012', '18037013', '18037014', '18037015', '18037016', '18037017', '18037018', '18037019', '18037020', '18040001', '18040002', '18040003', '13001001', '13001002', '13001003', '13001000', '13002000', '13003000', '13004000', '13004001', '13004002', '13004003', '13004004', '13004005', '13004006', '22003000', '22004000', '22005000', '22007000', '22008000', '22010000', '22015000', '19040001', '19040002', '19040003', '19040004', '19040005', '19040006', '19040007', '19040008', '17023001', '17023002', '17023003', '17023004', '17025001', '17025002', '17025003', '17025004', '17025005', '17027001', '17027002', '17027003', '17027000', '21009000', '18045001', '18045002', '18045003', '18045004', '18045005', '18045006', '18045007', '18045008', '18045009', '18045010', '22014000', '22018000', '18050001', '18050002', '18050003', '18050004', '18050005', '18050006', '18050007', '18050008', '18050009', '18050010', '17002000', '17003000', '17004000', '17005000', '17006000', '17007000', '17008000', '17009000', '17010000', '17011000', 
            '17012000', '17013000', '17014000', '17015000', '17016000', '17017000', '17018000', '17019000', '17020000', '17021000', '17022000', '17023000', '17024000', '17025000', '17026000', '17028000', '17029000', '17030000', '17031000', '17032000', '17033000', '17034000', '17035000', '17036000', '17037000', '17038000', '17039000', '17040000', '17041000', '17042000', '17043000', '17044000', '17045000', '17046000', '17047000', '17048000', '12018001', '12018002', '12018003', '12018004', '16002000', '13005000', '13005001', '13005002', '13005003', '13005004', '13005005', '13005006', '13005007', '13005008', '13005009', '13005010', '13005011', '13005012', '13005013', '13005014', '13005015', '13005016', '13005017', '13005018', '13005019', '13005020', '13005021', '13005022', '13005023', '13005024', '13005025', '13005026', '13005027', '13005028', '13005029', '13005030', '13005031', '13005032', '13005033', '13005034', '13005035', '13005036', '13005037', '13005038', '13005039', '13005040', '13005041', '13005042', '13005043', '13005044', '13005045', '13005046', '13005047', '13005048', '13005049', '13005050', '13005051', '13005052', '13005053', '13005054', '13005055', '13005056', '13005057', '13005058', '13005059', '18001000', '18003000', '18004000', '18005000', '18007000', '18008000', '18009000', '18010000', '18011000', '18012000', '18013000', '18014000', '18015000', '18016000', '18017000', '18018000', '18019000', '18020000', '18022000', '18023000', '18024000', '18025000', '18026000', '18027000', '18028000', '18029000', '18030000', '18031000', '18032000', '18033000', '18034000', '18035000', '18036000', '18037000', '18038000', '18039000', '18040000', '18041000', '18042000', '18043000', '18044000', '18045000', '18046000', '18047000', '18048000', '18049000', '18050000', '18051000', '18052000', '18053000', '18054000', '18055000', '18056000', '18057000', '18058000', '18059000', '18060000', '18061000', '18062000', '18063000', '18064000', '18065000', '18066000', '18067000', '18068000', 
            '18069000', '18070000', '18071000', '18072000', '18073000', '18074000', '19001000', '19002000', '19003000', '19004000', '19005000', '19006000', '19007000', '19008000', '19009000', '19010000', '19011000', '19012000', '19013000', '19014000', '19015000', '19016000', '19017000', '19018000', '19019000', '19020000', '19021000', '19022000', '19023000', '19024000', '19026000', '19027000', '19028000', '19029000', '19030000', '19031000', '19032000', '19033000', '19034000', '19035000', '19036000', '19037000', '19038000', '19039000', '19040000', '19041000', '19042000', '19043000', '19044000', '19045000', '19046000', '19048000', '19049000', '19050000', '19051000', '19052000', '19053000', '19054000', '18020002', '18020001', '20001000', '20002000', '10006000', '21010001', '21010002', '21010003', '21010004', '21010005', '21010006', '21010007', '21010008', '21010009', '21010010', '21010011', '21001000', '21002000', '21003000', '21004000', '21005000', '21006000', '21007000', '21008000', '21010000', '21011000', '21012000', '21013000', '18068001', '18068002', '18068003', '18068004', '18068005'
        ]
        # Column 2: leaf category display name for each id above.
        category_name = [
            'Writing, Copywriting and Technical Writing', 'Search Engine Marketing and Optimization', 'Public Relations', 'Promotional Items', 'Print, TV, Radio and Outdoor Advertising', 'Online Advertising', 'Market Research and Consulting', 'Direct Mail and Email Marketing Services', 'Creative Services', 'Advertising Agencies and Media Buyers', 'Crop Production', 'Forestry', 'Livestock and Animals', 'Services', 'Airlines and Aviation Services', 'Airports', 'Theatrical Productions', 'Symphony and Opera', 'Sports Venues', 'Social Clubs', 'Psychics and Astrologers', 'Party Centers', 'Music and Show Venues', 'Museums', 'Movie Theatres', 'Fairgrounds and Rodeos', 'Entertainment', 'Dance Halls and Saloons', 'Circuses and Carnivals', 'Casinos and Gaming', 'Bowling', 'Billiards and Pool', 'Art Dealers and Galleries', 'Arcades and Amusement Parks', 'Aquarium', 'Arts and Entertainment', 'ATMs', 'ATM', 'Check', 'ATM', 'ATM', 'Parking', 'Tolls and Fees', 'Gas Stations', 'Towing', 'Motorcycle, Moped and Scooter Repair', 'Maintenance and Repair', 'Car Wash and Detail', 'Car Appraisers', 'Auto Transmission', 'Auto Tires', 'Auto Smog Check', 'Auto Oil and Lube', 'Used Car Dealers', 'Salvage Yards', 'RVs and Motor Homes', 'Motorcycles, Mopeds and Scooters', 'Classic and Antique Car', 'Car Parts and Accessories', 'Car Dealers and Leasing', 'Automotive', 'Overdraft', 'Late Payment', 'Fraud Dispute', 'Foreign Transaction', 'Insufficient Funds', 'Cash Advance', 'Excess Activity', 'Printing and Publishing', 'Ride Share', 'Car Service', 'Limos and Chauffeurs', 'Taxi', 'Check', "Women's Store", 'Swimwear', 'Shoe Store', "Men's Store", 'Lingerie Store', "Kids' Store", 'Boutique', 'Accessories Store', 'Facilities and Nursing Homes', 'Caretakers', 'Animal Shelter', 'Assisted Living Services', 'Cemetery', 'Day Care and Preschools', 'Disabled Persons Services', 'Drug and Alcohol Services', 'Organizations and Associations', 'Religious', 'Senior Citizen Services', 'Youth Organizations', 'Environmental', 
            'Charities and Non-Profits', 'Retirement', 'Maintenance and Repair', 'Software Development', 'Credit Card', 'Education', 'Vocational Schools', 'Tutoring and Educational Services', 'Primary and Secondary Schools', 'Fraternities and Sororities', 'Driving Schools', 'Dance Schools', 'Culinary Lessons and Schools', 'Computer Training', 'Colleges and Universities', 'Art School', 'Adult Education', 'Video Games', 'Mobile Phones', 'Cameras', 'Media', 'Stock Brokers', 'Holding and Investment Offices', 'Fund Raising', 'Financial Planning and Investments', 'Credit Reporting', 'Collections', 'Check Cashing', 'Business Brokers and Franchises', 'Banking and Finance', 'Accounting and Bookkeeping', 'Food and Beverage', 'Distribution', 'Catering', 'Food and Beverage Store', 'Specialty', 'Health Food', 'Farmers Markets', 'Beer, Wine and Spirits', 'Supermarkets and Groceries', 'Courts', 'Government Lobbyists', 'Housing Assistance and Shelters', 'Law Enforcement', 'Libraries', 'Military', 'Post Offices', 'Public and Social Services', 'Police Stations', 'Fire Stations', 'Correctional Institutions', 'Government Departments and Agencies', 'Benefits', 'Healthcare Services', 'Physicians', 'Psychologists', 'Pregnancy and Sexual Health', 'Podiatrists', 'Physical Therapy', 'Optometrists', 'Nutritionists', 'Nurses', 'Mental Health', 'Medical Supplies and Labs', 'Hospitals, Clinics and Medical Centers', 'Emergency Services', 'Dentists', 'Counseling and Therapy', 'Chiropractors', 'Blood Banks and Centers', 'Alternative Medicine', 'Acupuncture', 'Urologists', 'Respiratory', 'Radiologists', 'Psychiatrists', 'Plastic Surgeons', 'Pediatricians', 'Pathologists', 'Orthopedic Surgeons', 'Ophthalmologists', 'Oncologists', 'Obstetricians and Gynecologists', 'Neurologists', 'Internal Medicine', 'General Surgery', 'Gastroenterologists', 'Family Medicine', 'Ear, Nose and Throat', 'Dermatologists', 'Cardiologists', 'Anesthesiologists', 'Specialty', 'Roofers', 'Painting', 'Masonry', 'Infrastructure', 
            'Heating, Ventilating and Air Conditioning', 'Electricians', 'Contractors', 'Carpet and Flooring', 'Carpenters', 'Upholstery', 'Tree Service', 'Swimming Pool Maintenance and Services', 'Storage', 'Roofers', 'Pools and Spas', 'Plumbing', 'Pest Control', 'Painting', 'Movers', 'Mobile Homes', 'Lighting Fixtures', 'Landscaping and Gardeners', 'Kitchens', 'Interior Design', 'Housewares', 'Home Inspection Services', 'Home Appliances', 'Heating, Ventilation and Air Conditioning', 'Hardware and Services', 'Fences, Fireplaces and Garage Doors', 'Electricians', 'Doors and Windows', 'Contractors', 'Carpet and Flooring', 'Carpenters', 'Architects', 'Interest Earned', 'Interest Charged', 'Loans and Mortgages', 'Loan', 'Resorts', 'Lodges and Vacation Rentals', 'Hotels and Motels', 'Hostels', 'Cottages and Cabins', 'Bed and Breakfasts', 'Lodging', 'Apparel and Fabric Products', 'Chemicals and Gasses', 'Computers and Office Machines', 'Electrical Equipment and Components', 'Food and Beverage', 'Furniture and Fixtures', 'Glass Products', 'Industrial Machinery and Equipment', 'Leather Goods', 'Metal Products', 'Nonmetallic Mineral Products', 'Paper Products', 'Petroleum', 'Plastic Products', 'Rubber Products', 'Service Instruments', 'Textiles', 'Tobacco', 'Transportation Equipment', 'Wood Products', 'Coal', 'Metal', 'Non-Metallic Minerals', 'Wine Bar', 'Sports Bar', 'Hotel Lounge', 'Bar', 'Breweries', 'Internet Cafes', 'Nightlife', 'Strip Club', 'Night Clubs', 'Karaoke', 'Jazz and Blues Cafe', 'Hookah Lounges', 'Adult Entertainment', 'Boat', 'Bus Stations', 'Car and Truck Rentals', 'Charter Buses', 'Cruises', 'Heliports', 'Rail', "Women's Store", 'Swimwear', 'Shoe Store', "Men's Store", 'Lingerie Store', "Kids' Store", 'Boutique', 'Accessories Store', 'Monuments and Memorials', 'Historic Sites', 'Gardens', 'Buildings and Structures', 'Rivers', 'Mountains', 'Lakes', 'Forests', 'Beaches', 'Playgrounds', 'Picnic Areas', 'Natural Parks', 'Parks', 'Payroll', 'Tattooing', 'Tanning 
Salons', 'Spas', 'Skin Care', 'Piercing', 'Massage Clinics and Therapists', 'Manicures and Pedicures', 'Laundry and Garment Services', 'Hair Salons and Barbers', 'Hair Removal', 'Public Transportation Services', 'Transportation Centers', 'Real Estate Development and Title Companies', 'Real Estate Appraiser', 'Real Estate Agents', 'Property Management', 'Corporate Housing', 'Commercial Real Estate', 'Building and Land Surveyors', 'Boarding Houses', 'Apartments, Condos and Houses', 'Rent', 'Athletic Fields', 'Baseball', 'Basketball', 'Batting Cages', 'Boating', 'Campgrounds and RV Parks', 'Canoes and Kayaks', 'Combat Sports', 'Cycling', 'Dance', 'Equestrian', 'Football', 'Go Carts', 'Golf', 'Gun Ranges', 'Gymnastics', 'Gyms and Fitness Centers', 'Hiking', 'Hockey', 'Hot Air Balloons', 'Hunting and Fishing', 'Landmarks', 'Miniature Golf', 'Outdoors', 'Paintball', 'Personal Trainers', 'Race Tracks', 'Racquet Sports', 'Racquetball', 'Rafting', 'Recreation Centers', 'Rock Climbing', 'Running', 'Scuba Diving', 'Skating', 'Skydiving', 'Snow Sports', 'Soccer', 'Sports and Recreation Camps', 'Sports Clubs', 'Stadiums and Arenas', 'Swimming', 'Tennis', 'Water Sports', 'Yoga and Pilates', 'Zoo', 'Temple', 'Synagogues', 'Mosques', 'Churches', 'Rent', 'Restaurants', 'Winery', 'Vegan and Vegetarian', 'Turkish', 'Thai', 'Swiss', 'Sushi', 'Steakhouses', 'Spanish', 'Seafood', 'Scandinavian', 'Portuguese', 'Pizza', 'Moroccan', 'Middle Eastern', 'Mexican', 'Mediterranean', 'Latin American', 'Korean', 'Juice Bar', 'Japanese', 'Italian', 'Indonesian', 'Indian', 'Ice Cream', 'Greek', 'German', 'Gastropub', 'French', 'Food Truck', 'Fish and Chips', 'Filipino', 'Fast Food', 'Falafel', 'Ethiopian', 'Eastern European', 'Donuts', 'Distillery', 'Diners', 'Dessert', 'Delis', 'Cupcake Shop', 'Cuban', 'Coffee Shop', 'Chinese', 'Caribbean', 'Cajun', 'Cafe', 'Burrito', 'Burgers', 'Breakfast Spot', 'Brazilian', 'Barbecue', 'Bakery', 'Bagel Shop', 'Australian', 'Asian', 'American', 'African', 
            'Afghan', 'Advertising and Marketing', 'Art Restoration', 'Audiovisual', 'Automation and Control Systems', 'Business and Strategy Consulting', 'Business Services', 'Cable', 'Chemicals and Gasses', 'Cleaning', 'Computers', 'Construction', 'Credit Counseling and Bankruptcy Services', 'Dating and Escort', 'Employment Agencies', 'Engineering', 'Entertainment', 'Events and Event Planning', 'Financial', 'Funeral Services', 'Geological', 'Home Improvement', 'Household', 'Human Resources', 'Immigration', 'Import and Export', 'Industrial Machinery and Vehicles', 'Insurance', 'Internet Services', 'Leather', 'Legal', 'Logging and Sawmills', 'Machine Shops', 'Management', 'Manufacturing', 'Media Production', 'Metals', 'Mining', 'News Reporting', 'Oil and Gas', 'Packaging', 'Paper', 'Personal Care', 'Petroleum', 'Photography', 'Plastics', 'Rail', 'Real Estate', 'Refrigeration and Ice', 'Renewable Energy', 'Repair Services', 'Research', 'Rubber', 'Scientific', 'Security and Safety', 'Shipping and Freight', 'Software Development', 'Storage', 'Subscription', 'Tailors', 'Telecommunication Services', 'Textiles', 'Tourist Information and Services', 'Transportation', 'Travel Agents and Tour Operators', 'Utilities', 'Veterinarians', 'Water and Waste Management', 'Web Design and Development', 'Welding', 'Agriculture and Forestry', 'Art and Graphic Design', 'Adult', 'Antiques', 'Arts and Crafts', 'Auctions', 'Automotive', 'Beauty Products', 'Bicycles', 'Boat Dealers', 'Bookstores', 'Cards and Stationery', 'Children', 'Clothing and Accessories', 'Computers and Electronics', 'Construction Supplies', 'Convenience Stores', 'Costumes', 'Dance and Music', 'Department Stores', 'Digital Purchase', 'Discount Stores', 'Electrical Equipment', 'Equipment Rental', 'Flea Markets', 'Florists', 'Fuel Dealer', 'Furniture and Home Decor', 'Gift and Novelty', 'Glasses and Optometrist', 'Hardware Store', 'Hobby and Collectibles', 'Industrial Supplies', 'Jewelry and Watches', 'Luggage', 'Marine Supplies', 
            'Music, Video and DVD', 'Musical Instruments', 'Newsstands', 'Office Supplies', 'Outlet', 'Pawn Shops', 'Pets', 'Pharmacies', 'Photos and Frames', 'Shopping Centers and Malls', 'Sporting Goods', 'Tobacco', 'Toys', 'Vintage and Thrift', 'Warehouses and Wholesale Stores', 'Wedding and Bridal', 'Wholesale', 'Lawn and Garden', 'Student Aid and Grants', 'Taxes', 'Refund', 'Payment', 'Wire Transfer', 'Venmo', 'Square Cash', 'Square', 'PayPal', 'Dwolla', 'Coinbase', 'Chase QuickPay', 'Acorns', 'Digit', 'Betterment', 'Plaid', 'Internal Account Transfer', 'ACH', 'Billpay', 'Check', 'Credit', 'Debit', 'Deposit', 'Keep the Change Savings Program', 'Third Party', 'Wire', 'Withdrawal', 'Save As You Go', 'Water', 'Sanitary and Waste Management', 'Heating, Ventilating, and Air Conditioning', 'Gas', 'Electric'
        ]
        # Column 3: immediate parent category name for each id above.
        parent_category_name = [
            'Advertising and Marketing', 'Advertising and Marketing', 'Advertising and Marketing', 'Advertising and Marketing', 'Advertising and Marketing', 'Advertising and Marketing', 'Advertising and Marketing', 'Advertising and Marketing', 'Advertising and Marketing', 'Advertising and Marketing', 'Agriculture and Forestry', 'Agriculture and Forestry', 'Agriculture and Forestry', 'Agriculture and Forestry', 'Air Travel', 'Air Travel', 'Arts and Entertainment', 'Arts and Entertainment', 'Arts and Entertainment', 'Arts and Entertainment', 'Arts and Entertainment', 'Arts and Entertainment', 'Arts and Entertainment', 'Arts and Entertainment', 'Arts and Entertainment', 'Arts and Entertainment', 'Arts and Entertainment', 'Arts and Entertainment', 'Arts and Entertainment', 'Arts and Entertainment', 'Arts and Entertainment', 'Arts and Entertainment', 'Arts and Entertainment', 'Arts and Entertainment', 'Arts and Entertainment', 'Arts and Entertainment', 'ATM', 'ATM', 'ATM', 'ATM', 'ATM', 'Auto Transportation', 'Auto Transportation', 'Auto Transportation', 'Automotive', 'Automotive', 'Automotive', 'Automotive', 'Automotive', 'Automotive', 'Automotive', 'Automotive', 'Automotive', 'Automotive', 'Automotive', 'Automotive', 'Automotive', 'Automotive', 'Automotive', 'Automotive', 'Automotive', 'Bank Fees', 'Bank Fees', 'Bank Fees', 'Bank Fees', 'Bank Fees', 'Bank Fees', 'Bank Fees', 'Business Services', 'Car Service', 'Car Service', 'Car Service', 'Car Service', 'Check', 'Clothing and Accessories', 'Clothing and Accessories', 'Clothing and Accessories', 'Clothing and Accessories', 'Clothing and Accessories', 'Clothing and Accessories', 'Clothing and Accessories', 'Clothing and Accessories', 'Community Services', 'Community Services', 'Community Services', 'Community Services', 'Community Services', 'Community Services', 'Community Services', 'Community Services', 'Community Services', 'Community Services', 'Community Services', 'Community Services', 'Community Services', 'Community 
Services', 'Community Services', 'Computers', 'Computers', 'Credit Card', 'Education', 'Education', 'Education', 'Education', 'Education', 'Education', 'Education', 'Education', 'Education', 'Education', 'Education', 'Education', 'Electronics', 'Electronics', 'Electronics', 'Entertainment', 'Financial', 'Financial', 'Financial', 'Financial', 'Financial', 'Financial', 'Financial', 'Financial', 'Financial', 'Financial', 'Food Delivery Services', 'Food and Beverage Store', 'Food and Beverage Store', 'Food and Beverage Store', 'Food and Beverage Store', 'Food and Beverage Store', 'Food and Beverage Store', 'Food and Beverage Store', 'Food and Beverage Store', 'Government Departments and Agencies', 'Government Departments and Agencies', 'Government Departments and Agencies', 'Government Departments and Agencies', 'Government Departments and Agencies', 'Government Departments and Agencies', 'Government Departments and Agencies', 'Government Departments and Agencies', 'Government Departments and Agencies', 'Government Departments and Agencies', 'Government Departments and Agencies', 'Government Support', 'Government Support', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Home Improvement', 'Home Improvement', 'Home Improvement', 'Home Improvement', 'Home Improvement', 'Home Improvement', 'Home Improvement', 'Home Improvement', 'Home Improvement', 'Home Improvement', 'Home Improvement', 'Home Improvement', 'Home Improvement', 'Home Improvement', 'Home Improvement', 'Home 
Improvement', 'Home Improvement', 'Home Improvement', 'Home Improvement', 'Home Improvement', 'Home Improvement', 'Home Improvement', 'Home Improvement', 'Home Improvement', 'Home Improvement', 'Home Improvement', 'Home Improvement', 'Home Improvement', 'Home Improvement', 'Home Improvement', 'Home Improvement', 'Home Improvement', 'Home Improvement', 'Home Improvement', 'Home Improvement', 'Home Improvement', 'Home Improvement', 'Interest', 'Interest', 'Loans and Mortgages', 'Loans and Mortgages', 'Lodging', 'Lodging', 'Lodging', 'Lodging', 'Lodging', 'Lodging', 'Lodging', 'Manufacturing', 'Manufacturing', 'Manufacturing', 'Manufacturing', 'Manufacturing', 'Manufacturing', 'Manufacturing', 'Manufacturing', 'Manufacturing', 'Manufacturing', 'Manufacturing', 'Manufacturing', 'Manufacturing', 'Manufacturing', 'Manufacturing', 'Manufacturing', 'Manufacturing', 'Manufacturing', 'Manufacturing', 'Manufacturing', 'Mining', 'Mining', 'Mining', 'Nightlife', 'Nightlife', 'Nightlife', 'Nightlife', 'Nightlife', 'Nightlife', 'Nightlife', 'Nightlife', 'Nightlife', 'Nightlife', 'Nightlife', 'Nightlife', 'Nightlife', 'Other Travel', 'Other Travel', 'Other Travel', 'Other Travel', 'Other Travel', 'Other Travel', 'Other Travel', 'Outlet', 'Outlet', 'Outlet', 'Outlet', 'Outlet', 'Outlet', 'Outlet', 'Outlet', 'Parks', 'Parks', 'Parks', 'Parks', 'Parks', 'Parks', 'Parks', 'Parks', 'Parks', 'Parks', 'Parks', 'Parks', 'Parks', 'Payroll', 'Personal Care', 'Personal Care', 'Personal Care', 'Personal Care', 'Personal Care', 'Personal Care', 'Personal Care', 'Personal Care', 'Personal Care', 'Personal Care', 'Public Transit', 'Public Transit', 'Real Estate', 'Real Estate', 'Real Estate', 'Real Estate', 'Real Estate', 'Real Estate', 'Real Estate', 'Real Estate', 'Real Estate', 'Real Estate', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 
            'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Religious', 'Religious', 'Religious', 'Religious', 'Rent', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Restaurants', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 
            'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Service', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Shops', 'Student Aid and Grants', 'Taxes', 'Taxes', 'Taxes', 'Third Party', 'Third Party', 'Third Party', 'Third Party', 'Third Party', 'Third Party', 'Third Party', 'Third Party', 'Savings Apps', 'Savings Apps', 'Savings Apps', 'Third Party', 'Transfer', 'Transfer', 'Transfer', 'Transfer', 'Transfer', 'Transfer', 'Transfer', 'Transfer', 'Transfer', 'Transfer', 'Transfer', 'Transfer', 'Utilities', 'Utilities', 'Utilities', 'Utilities', 'Utilities'
        ]
        # Column 4: top-level (grandparent) grouping for each id above.
        grandparent_category_name = [
            'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Travel', 'Travel', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Financial', 'Financial', 'Financial', 'Financial', 'Financial', 'Transportation', 'Transportation', 'Transportation', 'Auto', 'Auto', 'Auto', 'Auto', 'Auto', 'Auto', 'Auto', 'Auto', 'Auto', 'Auto', 'Auto', 'Auto', 'Auto', 'Auto', 'Auto', 'Auto', 'Auto', 'Financial', 'Financial', 'Financial', 'Financial', 'Financial', 'Financial', 'Financial', 'Other', 'Transportation', 'Transportation', 'Transportation', 'Transportation', 'Financial', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Financial', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Shopping', 'Shopping', 'Shopping', 'Recreation', 'Financial', 'Financial', 'Financial', 'Financial', 'Financial', 'Financial', 'Financial', 'Financial', 'Financial', 'Financial', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Govt Agencies', 'Govt Agencies', 'Govt Agencies', 'Govt Agencies', 'Govt Agencies', 'Govt Agencies', 'Govt Agencies', 'Govt Agencies', 'Govt Agencies', 'Govt Agencies', 'Govt Agencies', 'Govt Agencies', 'Govt Agencies', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 
            'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Healthcare', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Financial', 'Financial', 'Financial', 'Financial', 'Travel', 'Travel', 'Travel', 'Travel', 'Travel', 'Travel', 'Travel', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Travel', 'Travel', 'Travel', 'Travel', 'Travel', 'Travel', 'Travel', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Payroll', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Transportation', 'Transportation', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 
            'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Recreation', 'Other', 'Other', 'Other', 'Other', 'Financial', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Food', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Other', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 
            'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Shopping', 'Financial', 'Financial', 'Financial', 'Financial', 'Financial', 'Financial', 'Financial', 'Financial', 'Financial', 'Financial', 'Financial', 'Financial', 'Financial', 'Financial', 'Financial', 'Financial', 'Transfers', 'Transfers', 'Transfers', 'Transfers', 'Transfers', 'Transfers', 'Transfers', 'Transfers', 'Transfers', 'Transfers', 'Transfers', 'Transfers', 'Utilities', 'Utilities', 'Utilities', 'Utilities', 'Utilities'
        ]
        # Assemble the four parallel lists into named columns; pandas will
        # raise if the lists have drifted to different lengths.
        cache = {
            'category_id': category_id,
            'category_name': category_name,
            'parent_category_name': parent_category_name,
            'grandparent_category_name': grandparent_category_name
        }
        df = pd.DataFrame(cache)
        # Debug mode prints the table and implicitly returns None.
        if debug is True:
            print(df)
        else:
            return df
    def _fetch_transactions_dataframe(self, bank_account_id: str = None, sample_size: int = 1):
        """Pull raw transaction rows from `plaid_main_transactions` into a DataFrame.

        If *bank_account_id* is given, fetch that account's transactions.
        Otherwise draw *sample_size* distinct random account ids that actually
        have at least one transaction, then fetch all rows for those accounts.

        NOTE(review): the id values are interpolated directly into the SQL
        text — this assumes they come from trusted callers; confirm user
        input cannot reach this method, or switch to parameterized queries.
        """
        random_list = []
        feature_list = []
        # Columns expected to always be populated ...
        primary_features = [
            'bank_account_id',
            'id',
            'date',
            'amount_cents',
            'category_id',
            'created_at',
        ]
        # ... and columns that may be sparse / nullable.
        secondary_features = [
            'plaid_transaction_id',
            'merchant_city',
            'merchant_state',
            'lat',
            'lon',
            'purpose'
        ]
        feature_list = primary_features + secondary_features
        feature_query = ", ".join(feature_list)
        if bank_account_id:
            query_operation = f"""
            SELECT {feature_query}
            FROM plaid_main_transactions
            WHERE bank_account_id = {bank_account_id}
            """
        else:
            # Keep drawing random ids until `sample_size` distinct accounts
            # with at least one transaction row have been collected.
            while len(random_list) != sample_size:
                random_number = random.randrange(1, 257603)  # 257602: apparent max account id — TODO confirm
                query_operation = f"""
                SELECT {feature_query}
                FROM plaid_main_transactions
                WHERE bank_account_id = {random_number}
                """
                query_fetch = self.handle_query(query_operation, fetchone=True)
                if query_fetch is None:
                    pass  # id has no transactions; draw again
                else:
                    if random_number in random_list:
                        pass  # already sampled; draw again
                    else:
                        random_list.append(random_number)
            random_query = ", ".join(repr(i) for i in random_list)
            query_operation = f"""
            SELECT {feature_query}
            FROM plaid_main_transactions
            WHERE bank_account_id IN ({random_query})
            """
        query_fetch = self.handle_query(query_operation)
        df = pd.DataFrame(query_fetch, columns=feature_list)
        return df
def _wrangle_transactions(self, x):
"""Wrangle incoming transaction data."""
# Prevent SettingWithCopyWarning
X = x.copy()
# remove empty or 'None' values
X.replace('', np.nan, inplace=True)
X = X.fillna(value=np.nan)
# test datetime features
datetime_features = ['date', 'created_at']
for i in datetime_features:
X[i] = pd.to_datetime(X[i],
format="%m/%d/%Y, %H:%M:%S",
errors='raise')
# remove duplicate entries !WARNING (may affect resulting table)
X = X.drop_duplicates(subset='plaid_transaction_id').reset_index(drop=True)
X.rename(columns={'amount_cents':'amount'}, inplace=True)
X['amount'] = (X['amount'] / 100).round(2)
# insert category data
df = self._handle_category_features()
X = pd.merge(X, df, on='category_id')
return X
    def _fetch_accounts_dataframe(self, bank_account_id: str = None, sample_size: int = 1):
        """Pull rows from `bank_accounts` into a DataFrame.

        If *bank_account_id* is given, fetch that single account; otherwise
        fetch *sample_size* randomly drawn account ids.  Unlike
        `_fetch_transactions_dataframe`, the random ids are NOT checked for
        existence or uniqueness, so missing ids simply yield no rows.

        NOTE(review): ids are interpolated straight into the SQL text —
        confirm callers are trusted, or use parameterized queries.
        """
        random_list = []
        feature_list = []
        primary_features = [
            'id',
            'current_balance_cents',
            'created_at',
            'updated_at',
            'name',
            'account_type',
            'available_balance_cents',
            'last_balance_update_at',
            'plaid_state',
            'initial_balance_cents',
            'main_saving'
        ]
        secondary_features = [
            'account_subtype'
        ]
        feature_list = primary_features + secondary_features
        feature_query = ", ".join(feature_list)
        if bank_account_id:
            query_operation = f"""
            SELECT {feature_query}
            FROM bank_accounts
            WHERE id = {bank_account_id}
            """
        else:
            # Draw sample_size ids (duplicates possible, unlike transactions).
            for _ in range(sample_size):
                random_list.append(random.randrange(1, 257603))
            random_query = ", ".join(repr(i) for i in random_list)
            query_operation = f"""
            SELECT {feature_query}
            FROM bank_accounts
            WHERE id IN ({random_query})
            """
        query_fetch = self.handle_query(query_operation)
        df = pd.DataFrame(query_fetch, columns=feature_list)
        return df
def _wrangle_accounts(self, x):
X = x.copy()
return X
    def _fetch_requests_dataframe(self):
        """Fetch every emergency-fund request's description and state as a DataFrame."""
        feature_list = []
        primary_features = [
            'description',
            'state'
        ]
        feature_list = primary_features
        feature_query = ", ".join(primary_features)
        # No WHERE clause: the whole table is pulled.
        query_operation = f"""
        SELECT {feature_query}
        FROM emergency_fund_requests
        """
        query_fetch = self.handle_query(query_operation)
        df = pd.DataFrame(query_fetch, columns=feature_list)
        return df
def _wrangle_requests(self, x):
X = x.copy()
return X
# NOTE(review): rebinds the class name to a singleton instance — the class
# object itself is unreachable after this line (apparent singleton pattern;
# confirm no other module needs the class).
SaverlifeUtility = SaverlifeUtility()
class Visualize():
    """
    Visualize different aspects of user data
    for SaverLife C Lambda School Labs project
    """
    def __init__(self, user_id: str):
        """Load and pre-process all transaction data for *user_id* up front."""
        # user_id doubles as the bank_account_id used for DB lookups.
        self.user_id = user_id
        # Raw per-transaction rows for this user.
        self.user_transactions_df = self.handle_user_transaction_data()
        # Cleaned, date-sorted copy used by the plotting helpers.
        self.transaction_time_series_df = self.handle_transaction_timeseries_data()
    def handle_user_transaction_data(self):
        """
        Helper method to filter user data from SaverLife DB
        """
        df = SaverlifeUtility._generate_dataframe(bank_account_id=self.user_id, table='transactions')
        return df
    def handle_transaction_timeseries_data(self):
        """
        Helper method to clean transaction time series data
        """
        # NOTE(review): this attribute is `transactions_...` (plural) while
        # __init__ stores the return value as `transaction_...`; both names
        # end up on the instance and refer to the same frame.
        self.transactions_time_series_df = self.user_transactions_df.sort_values("date")
        self.transactions_time_series_df["amount"] = self.transactions_time_series_df["amount"].astype(int)
        self.transactions_time_series_df["formatted_date"] = self.transactions_time_series_df.date.dt.strftime('%Y-%m-%d')
        self.transactions_time_series_df.sort_values("formatted_date", ascending=False, inplace=True)
        return self.transactions_time_series_df
    def handle_resampling_transaction_timeseries_df(self, offset_string):
        """
        Helper method to resample transaction timeseries data
        to a user-specified time frequency
        Args:
            frequency: a pandas DateOffset, Timedelta or str
            See https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects
            for more on dateoffset strings
        Returns:
            resampled_transaction_timeseries
        Usage:
            # Resample to weekly sum
            >>> resampled_data = self.handle_resampling_transaction_timeseries_df(offset_string="W")
        """
        # Copy so the cached time-series frame is not mutated.
        self.resampled_transaction_timeseries = self.transactions_time_series_df.copy()
        self.resampled_transaction_timeseries["date"] = pd.to_datetime(self.resampled_transaction_timeseries["date"])
        self.resampled_transaction_timeseries.set_index("date", inplace=True)
        # Sum per leaf category per resampling bucket.
        return self.resampled_transaction_timeseries.groupby("category_name").resample(offset_string).sum().reset_index()
    def handle_resampling_transaction_timeseries_df_parent_categories(self, offset_string):
        """
        Helper method to resample transaction timeseries data
        to a user-specified time frequency
        Args:
            frequency: a pandas DateOffset, Timedelta or str
            See https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects
            for more on dateoffset strings
        Returns:
            resampled_transaction_timeseries
        Usage:
            # Resample to weekly sum
            >>> resampled_data = self.handle_resampling_transaction_timeseries_df(offset_string="W")
        """
        # Same as above, but aggregated at the parent-category level and
        # trimmed to the three columns the forecaster needs.
        self.resampled_transaction_timeseries = self.transactions_time_series_df.copy()
        self.resampled_transaction_timeseries["date"] = pd.to_datetime(self.resampled_transaction_timeseries["date"])
        self.resampled_transaction_timeseries.set_index("date", inplace=True)
        return self.resampled_transaction_timeseries.groupby("parent_category_name").resample(offset_string).sum().reset_index()[["parent_category_name","date","amount"]]
    def return_all_transactions_for_user(self):
        """
        Plotly Table Object of all transactions for a user
        Usage:
            # Instantiate the class
            >>> visualize = Visualize(user_id=4923847023975)
            # Plotly table of all transactions for a single user
            >>> visualize.return_all_transactions_for_user()
        """
        fig = go.Figure(data=[go.Table(header=dict(values=["Date",
                                                           "Amount",
                                                           "Category",
                                                           "Parent Category",
                                                           "Grandparent Category"],
                                                   fill_color='lightgray',
                                                   align='left'),
                                       cells=dict(values=[self.transaction_time_series_df.formatted_date,
                                                          self.transaction_time_series_df.amount,
                                                          self.transaction_time_series_df.category_name,
                                                          self.transaction_time_series_df.parent_category_name,
                                                          self.transaction_time_series_df.grandparent_category_name],
                                                  fill_color='whitesmoke',
                                                  align='left'))])
        fig.update_layout(title_text="Transactions: User {}".format(self.user_id),
                          title_font_size=30)
        # Serialize for the API response rather than returning the figure.
        return fig.to_json()
    def categorized_bar_chart_per_month(self):
        """
        Plotly Bar Chart Object of monthly sum transactions for a user
        Usage:
            # Instantiate the class
            >>> visualize = Visualize(user_id=4923847023975)
            # Plotly bar chart of monthly sum transactions
            >>> visualize.categorized_bar_chart_per_month()
        """
        def helper_function_for_trace_visibility(len_array, i):
            # One-hot visibility mask: only trace i is shown.
            intermediate_array = [False] * len_array
            intermediate_array[i] = True
            return intermediate_array
        self.monthly_sum_transactions_time_series_df = self.handle_resampling_transaction_timeseries_df("M").sort_values("date")
        self.monthly_sum_transactions_time_series_df.drop(columns=["bank_account_id","id","lat","lon"], inplace=True)
        # Drop zero-sum month/category buckets so they don't clutter the chart.
        self.monthly_sum_transactions_time_series_df = self.monthly_sum_transactions_time_series_df.loc[self.monthly_sum_transactions_time_series_df['amount'] != 0]
        months_of_interest = self.monthly_sum_transactions_time_series_df.date.dt.strftime('%Y-%m').unique().tolist()
        # Positive amounts are spending ("outflow"), negative are income ("inflow").
        self.monthly_sum_transactions_time_series_df['label'] = self.monthly_sum_transactions_time_series_df['amount'].apply(lambda x: 'outflow' if x >= 0 else 'inflow')
        self.monthly_sum_transactions_time_series_df = self.monthly_sum_transactions_time_series_df.sort_values(['amount'], ascending=True)
        colorsIdx = {'inflow': '#C01089', 'outflow': '#4066B0'}
        length_of_interest = len(months_of_interest)
        list_of_monthly_dfs = []
        for month in months_of_interest:
            list_of_monthly_dfs.append(self.monthly_sum_transactions_time_series_df[self.monthly_sum_transactions_time_series_df.date.dt.strftime('%Y-%m') == month])
        fig = go.Figure()
        # All months are added as hidden traces except the most recent one,
        # which is the dropdown's default selection.
        for i in range(len(list_of_monthly_dfs)-1):
            cols = list_of_monthly_dfs[i]['label'].map(colorsIdx)
            fig.add_trace(go.Bar(y=list(list_of_monthly_dfs[i].category_name),
                                 x=list(list_of_monthly_dfs[i].amount),
                                 name=str(list_of_monthly_dfs[i].date.dt.strftime('%Y-%m').iloc[0]),
                                 visible=False,
                                 orientation='h',
                                 marker=dict(color=cols)))
        cols = list_of_monthly_dfs[-1]['label'].map(colorsIdx)
        fig.add_trace(go.Bar(y=list(list_of_monthly_dfs[-1].category_name),
                             x=list(list_of_monthly_dfs[-1].amount),
                             name=str(list_of_monthly_dfs[-1].date.dt.strftime('%Y-%m').iloc[0]),
                             visible=True,
                             orientation='h',
                             marker=dict(color=cols)))
        fig.update_layout(
            font_family='Arial',
            template='simple_white',
            height=800)
        # Dropdown menu: one button per month, toggling trace visibility.
        fig.update_layout(
            updatemenus=[
                dict(active=length_of_interest-1, buttons=list([
                    dict(label=months_of_interest[i],
                         method="update",
                         args=[{"visible": helper_function_for_trace_visibility(length_of_interest, i)},
                               {"annotations": []}]) for i in range(length_of_interest)]))])
        return fig.to_json()
    def next_month_forecast(self, model="kNeighbors"):
        """
        Forecast next month's transactions based on historical transactions
        Caveats:
            Only forecasts for parent_categories for which
            there are at least 12 months of observations available
        Returns:
            Dictionary of forecasts, with parent_category_name
            as key and forecasted amount_cents as value
        Usage:
            # Instantiate the class
            >>> visualize = Visualize(user_id=45153)
            # Forecast transactiosn for next month
            >>> visualize.next_month_forecast()
        """
        # Resample to monthly sum per parent_category_name
        self.monthly_parent_category_total = self.handle_resampling_transaction_timeseries_df_parent_categories("M")
        # Filter for parent_categories with at least 12 months of data
        self.df12 = self.monthly_parent_category_total[self.monthly_parent_category_total['parent_category_name'].map(self.monthly_parent_category_total['parent_category_name'].value_counts()) > 12]
        # Container to store forecasting results
        self.forecasting_results = {}
        # Loop through each parent category and forecast month ahead with Naive Baseline
        for parent_cat in self.df12.parent_category_name.unique().tolist():
            # Select relevant transaction data for training the model
            y = self.df12[self.df12.parent_category_name == parent_cat]["amount"]
            # Set forecasting horizon
            fh = np.arange(len(y)) + 1
            # Initialize a forecaster, seasonal periodicity of 12 (months per year)
            if model == "Naive":
                forecaster = NaiveForecaster(strategy="seasonal_last", sp=12)
            else:
                # Default: kNN (k=1) wrapped as a recursive reduced-regression forecaster.
                regressor = KNeighborsRegressor(n_neighbors=1)
                forecaster = ReducedRegressionForecaster(regressor=regressor, window_length=12, strategy="recursive")
            # Fit forecaster to training data
            forecaster.fit(y)
            # Forecast prediction to match size of forecasting horizon
            y_pred = forecaster.predict(fh)
            # Store results in a dictionary
            self.forecasting_results[parent_cat] = y_pred.values[0]
        # Return the results for use in other parts of app
        return self.forecasting_results
@router.post('/dev/requesttesting', tags=["Graph"])
async def read_user(payload: GraphRequest):
    """
    Echo the request parameters back in a 200 response.

    Testing endpoint: mirrors the fields of the incoming GraphRequest so a
    client can confirm its payload was parsed correctly.
    """
    # BUG FIX: the original read from an undefined name ``request``;
    # the parsed request body is bound to the ``payload`` parameter.
    user_id = f"{payload.user_id}"
    graph_type = f"{payload.graph_type}"
    start_month = f"{payload.start_month}"
    end_month = f"{payload.end_month}"
    return {
        'message': 'The payload sent in a 200 response.',
        'payload': {
            'user_id': user_id,
            'graph_type': graph_type,
            'optional[start_month]': start_month,
            'optional[end_month]': end_month
        }
    }
@router.post('/dev/requestvisual', tags=["Graph"])
async def read_user(payload: GraphRequest):
    """
    Returns a visual table or graph according to input parameters.

    Supported graph_type values: 'TransactionTable', 'CategoryBarMonth'.
    Returns an error payload when the user has no transactions or the
    graph_type is unknown.
    """
    SaverlifeVisual = Visualize(user_id=payload.user_id)
    if SaverlifeVisual.user_transactions_df.size > 0:
        pass
    else:
        return {
            'details': [
                {
                    'loc': [
                        'internal',
                        'dataframe'
                    ],
                    'msg': 'dataframe size 0, possible invalid user_id',
                    'type': 'internal'
                }
            ]
        }
    def _parse_graph(graph_type=payload.graph_type):
        # Dispatch on the requested graph type.
        if graph_type == 'TransactionTable':
            return SaverlifeVisual.return_all_transactions_for_user()
        elif graph_type == 'CategoryBarMonth':
            return SaverlifeVisual.categorized_bar_chart_per_month()
        # BUG FIX: the original left ``fig`` unbound for unknown graph
        # types, raising UnboundLocalError; return an error payload instead.
        return {
            'details': [
                {
                    'loc': [
                        'request',
                        'graph_type'
                    ],
                    'msg': f'unknown graph_type: {graph_type}',
                    'type': 'request'
                }
            ]
        }
    return _parse_graph()
@router.get('/dev/forecast/', tags=['Forecast'])
async def return_forecast(payload: Optional[User] = None, user_id: Optional[str] = None):
    """
    Returns a dictionary forecast.
    """
    # Accept the user id either from a request body or a query parameter.
    if payload:
        SaverlifeVisual = Visualize(user_id=payload.user_id)
    else:
        SaverlifeVisual = Visualize(user_id=user_id)
    forecast = SaverlifeVisual.next_month_forecast()
    # Build a JSON-friendly copy: stringify keys, truncate values to ints.
    cache = {}
    for key, value in forecast.items():
        cache[str(key)] = int(value)
    if forecast:
        return cache
    else:
        # Empty forecast usually means the user lacks 12 months of history.
        return {
            'details': [
                {
                    'loc': [
                        'internal',
                        'model'
                    ],
                    'msg': 'dictionary size 0, possible too few model observations.',
                    'doc': {
                        'description': "Forecast next month's transactions based on historical transactions.",
                        'caveats': "Only forecasts for parent_categories for which there are at least 12 months of observations available",
                        'returns': "Dictionary of forecasts, with parent_category_name as key and forecasted amount_cents as value"
                    },
                    'type': 'internal'
                }
            ]
        }
cb9cfc174df1533750d164fb94209f0ed3e90fc0 | 13,170 | py | Python | Quantum q-learning/neural_net.py | NimishMishra/research | 2448fc9da643429117b851cb582d490cfe46d422 | [
"MIT"
] | 4 | 2020-06-21T07:22:27.000Z | 2021-07-12T16:51:20.000Z | Quantum q-learning/neural_net.py | NimishMishra/research | 2448fc9da643429117b851cb582d490cfe46d422 | [
"MIT"
] | 2 | 2020-12-30T18:06:26.000Z | 2020-12-30T18:06:26.000Z | Quantum q-learning/neural_net.py | NimishMishra/research | 2448fc9da643429117b851cb582d490cfe46d422 | [
"MIT"
] | null | null | null | import numpy as np
import keras.backend.tensorflow_backend as backend
from keras.models import Sequential
from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten
from keras.optimizers import Adam
from keras.callbacks import TensorBoard
import tensorflow as tf
from collections import deque
import time
import random
from tqdm import tqdm
import os
from PIL import Image
import cv2
# DQN training hyperparameters.
DISCOUNT = 0.99  # future-reward discount factor (gamma)
REPLAY_MEMORY_SIZE = 50_000 # How many last steps to keep for model training
MIN_REPLAY_MEMORY_SIZE = 1_000 # Minimum number of steps in a memory to start training
MINIBATCH_SIZE = 64 # How many steps (samples) to use for training
UPDATE_TARGET_EVERY = 5 # Terminal states (end of episodes)
MODEL_NAME = '2x256'  # used in TensorBoard log dir and saved model names
MIN_REWARD = -200 # For model save
MEMORY_FRACTION = 0.20  # GPU memory fraction (only used if the commented GPU setup below is enabled)
# Environment settings
EPISODES = 20_000
# Exploration settings
epsilon = 1 # not a constant, going to be decayed
EPSILON_DECAY = 0.99975
MIN_EPSILON = 0.001
# Stats settings
AGGREGATE_STATS_EVERY = 50 # episodes
SHOW_PREVIEW = False
class Blob:
    """A single agent on the square grid: the player, the food, or the enemy.

    Positions are integer coordinates in ``[0, size)``; all movement is
    clamped to the grid edges.
    """
    def __init__(self, size):
        self.size = size
        # Spawn at a uniformly random cell.
        self.x = np.random.randint(0, size)
        self.y = np.random.randint(0, size)
    def __str__(self):
        return f"Blob ({self.x}, {self.y})"
    def __sub__(self, other):
        # Relative offset to another blob (used as the non-image observation).
        return (self.x-other.x, self.y-other.y)
    def __eq__(self, other):
        return self.x == other.x and self.y == other.y
    def action(self, choice):
        '''
        Gives us 9 total movement options. (0,1,2,3,4,5,6,7,8)
        '''
        if choice == 0:
            self.move(x=1, y=1)
        elif choice == 1:
            self.move(x=-1, y=-1)
        elif choice == 2:
            self.move(x=-1, y=1)
        elif choice == 3:
            self.move(x=1, y=-1)
        elif choice == 4:
            self.move(x=1, y=0)
        elif choice == 5:
            self.move(x=-1, y=0)
        elif choice == 6:
            self.move(x=0, y=1)
        elif choice == 7:
            self.move(x=0, y=-1)
        elif choice == 8:
            self.move(x=0, y=0)
    def move(self, x=False, y=False):
        """Shift position by (x, y); an omitted axis moves randomly in {-1, 0, 1}."""
        # BUG FIX: the original tested ``if not x``, which treats a
        # deliberate 0 delta the same as "no argument" (0 is falsy), so
        # actions 6, 7 and 8 moved randomly instead of holding an axis
        # still.  Compare identity against the ``False`` sentinel instead.
        if x is False:
            self.x += np.random.randint(-1, 2)
        else:
            self.x += x
        if y is False:
            self.y += np.random.randint(-1, 2)
        else:
            self.y += y
        # If we are out of bounds, fix!
        if self.x < 0:
            self.x = 0
        elif self.x > self.size-1:
            self.x = self.size-1
        if self.y < 0:
            self.y = 0
        elif self.y > self.size-1:
            self.y = self.size-1
class BlobEnv:
    """Grid world: the player blob seeks the food blob while avoiding the enemy."""
    SIZE = 10
    RETURN_IMAGES = True  # observations as RGB arrays for the CNN (else coordinate deltas)
    MOVE_PENALTY = 1
    ENEMY_PENALTY = 300
    FOOD_REWARD = 25
    OBSERVATION_SPACE_VALUES = (SIZE, SIZE, 3)  # (height, width, RGB channels)
    ACTION_SPACE_SIZE = 9
    PLAYER_N = 1  # player key in dict
    FOOD_N = 2  # food key in dict
    ENEMY_N = 3  # enemy key in dict
    # the dict! (colors)
    d = {1: (255, 175, 0),
         2: (0, 255, 0),
         3: (0, 0, 255)}
    def reset(self):
        """Place player, food and enemy at distinct random cells; return the first observation."""
        self.player = Blob(self.SIZE)
        self.food = Blob(self.SIZE)
        while self.food == self.player:
            self.food = Blob(self.SIZE)
        self.enemy = Blob(self.SIZE)
        while self.enemy == self.player or self.enemy == self.food:
            self.enemy = Blob(self.SIZE)
        self.episode_step = 0
        if self.RETURN_IMAGES:
            observation = np.array(self.get_image())
        else:
            observation = (self.player-self.food) + (self.player-self.enemy)
        return observation
    def step(self, action):
        """Advance one step: move the player, compute the reward, report episode end."""
        self.episode_step += 1
        self.player.action(action)
        #### MAYBE ###
        #enemy.move()
        #food.move()
        ##############
        if self.RETURN_IMAGES:
            new_observation = np.array(self.get_image())
        else:
            new_observation = (self.player-self.food) + (self.player-self.enemy)
        if self.player == self.enemy:
            reward = -self.ENEMY_PENALTY
        elif self.player == self.food:
            reward = self.FOOD_REWARD
        else:
            reward = -self.MOVE_PENALTY
        done = False
        # Episode ends on reaching food, hitting the enemy, or after 200 steps.
        if reward == self.FOOD_REWARD or reward == -self.ENEMY_PENALTY or self.episode_step >= 200:
            done = True
        return new_observation, reward, done
    def render(self):
        """Display the current grid in an OpenCV window (needs a display)."""
        img = self.get_image()
        img = img.resize((300, 300))  # resizing so we can see our agent in all its glory.
        cv2.imshow("image", np.array(img))  # show it!
        cv2.waitKey(1)
    # FOR CNN #
    def get_image(self):
        """Render the grid as a SIZE x SIZE RGB PIL image, one colored pixel per blob."""
        env = np.zeros((self.SIZE, self.SIZE, 3), dtype=np.uint8)  # starts an rbg of our size
        env[self.food.x][self.food.y] = self.d[self.FOOD_N]  # sets the food location tile to green color
        env[self.enemy.x][self.enemy.y] = self.d[self.ENEMY_N]  # sets the enemy location to red
        env[self.player.x][self.player.y] = self.d[self.PLAYER_N]  # sets the player tile to blue
        img = Image.fromarray(env, 'RGB')  # reading to rgb. Apparently. Even tho color definitions are bgr. ???
        return img
env = BlobEnv()
# For stats
ep_rewards = [-200]
# For more repetitive results
random.seed(1)
np.random.seed(1)
tf.set_random_seed(1)  # NOTE(review): TF1-style API — in TF2 this is tf.random.set_seed
# Memory fraction, used mostly when training multiple agents
#gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=MEMORY_FRACTION)
#backend.set_session(tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)))
# Create models folder
if not os.path.isdir('models'):
    os.makedirs('models')
class ModifiedTensorBoard(TensorBoard):
    """Keras TensorBoard callback that reuses one log writer across .fit() calls.

    NOTE(review): relies on the private Keras/TF1 `_write_logs` helper and
    `tf.summary.FileWriter`; confirm the installed Keras/TF version still
    provides both before upgrading.
    """
    # Overriding init to set initial step and writer (we want one log file for all .fit() calls)
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.step = 1
        self.writer = tf.summary.FileWriter(self.log_dir)
    # Overriding this method to stop creating default log writer
    def set_model(self, model):
        pass
    # Overrided, saves logs with our step number
    # (otherwise every .fit() will start writing from 0th step)
    def on_epoch_end(self, epoch, logs=None):
        self.update_stats(**logs)
    # Overrided
    # We train for one batch only, no need to save anything at epoch end
    def on_batch_end(self, batch, logs=None):
        pass
    # Overrided, so won't close writer
    def on_train_end(self, _):
        pass
    # Custom method for saving own metrics
    # Creates writer, writes custom metrics and closes writer
    def update_stats(self, **stats):
        self._write_logs(stats, self.step)
# Agent class
class DQNAgent:
    """Deep Q-Network agent with separate online and target networks."""
    def __init__(self):
        # Main model
        self.model = self.create_model()
        # Target network
        self.target_model = self.create_model()
        self.target_model.set_weights(self.model.get_weights())
        # An array with last n steps for training
        self.replay_memory = deque(maxlen=REPLAY_MEMORY_SIZE)
        # Custom tensorboard object
        self.tensorboard = ModifiedTensorBoard(log_dir="logs/{}-{}".format(MODEL_NAME, int(time.time())))
        # Used to count when to update target network with main network's weights
        self.target_update_counter = 0
    def create_model(self):
        """Build the 2x256 conv net mapping an RGB observation to one Q-value per action."""
        model = Sequential()
        model.add(Conv2D(256, (3, 3), input_shape=env.OBSERVATION_SPACE_VALUES))  # OBSERVATION_SPACE_VALUES = (10, 10, 3) a 10x10 RGB image.
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.2))
        model.add(Conv2D(256, (3, 3)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.2))
        model.add(Flatten())  # this converts our 3D feature maps to 1D feature vectors
        model.add(Dense(64))
        model.add(Dense(env.ACTION_SPACE_SIZE, activation='linear'))  # ACTION_SPACE_SIZE = how many choices (9)
        model.compile(loss="mse", optimizer=Adam(lr=0.001), metrics=['accuracy'])
        return model
    # Adds step's data to a memory replay array
    # (observation space, action, reward, new observation space, done)
    def update_replay_memory(self, transition):
        self.replay_memory.append(transition)
    # Trains main network every step during episode
    def train(self, terminal_state, step):
        """One gradient step on a random replay minibatch; syncs the target net periodically."""
        # Start training only if certain number of samples is already saved
        if len(self.replay_memory) < MIN_REPLAY_MEMORY_SIZE:
            return
        # Get a minibatch of random samples from memory replay table
        minibatch = random.sample(self.replay_memory, MINIBATCH_SIZE)
        # Get current states from minibatch, then query NN model for Q values
        # (division by 255 normalizes the uint8 RGB observations)
        current_states = np.array([transition[0] for transition in minibatch])/255
        current_qs_list = self.model.predict(current_states)
        # Get future states from minibatch, then query NN model for Q values
        # When using target network, query it, otherwise main network should be queried
        new_current_states = np.array([transition[3] for transition in minibatch])/255
        future_qs_list = self.target_model.predict(new_current_states)
        X = []
        y = []
        # Now we need to enumerate our batches
        for index, (current_state, action, reward, new_current_state, done) in enumerate(minibatch):
            # If not a terminal state, get new q from future states, otherwise set it to 0
            # almost like with Q Learning, but we use just part of equation here
            if not done:
                max_future_q = np.max(future_qs_list[index])
                new_q = reward + DISCOUNT * max_future_q
            else:
                new_q = reward
            # Update Q value for given state
            current_qs = current_qs_list[index]
            current_qs[action] = new_q
            # And append to our training data
            X.append(current_state)
            y.append(current_qs)
        # Fit on all samples as one batch, log only on terminal state
        self.model.fit(np.array(X)/255, np.array(y), batch_size=MINIBATCH_SIZE, verbose=0, shuffle=False, callbacks=[self.tensorboard] if terminal_state else None)
        # Update target network counter every episode
        if terminal_state:
            self.target_update_counter += 1
        # If counter reaches set value, update target network with weights of main network
        if self.target_update_counter > UPDATE_TARGET_EVERY:
            self.target_model.set_weights(self.model.get_weights())
            self.target_update_counter = 0
    # Queries main network for Q values given current observation space (environment state)
    def get_qs(self, state):
        return self.model.predict(np.array(state).reshape(-1, *state.shape)/255)[0]
agent = DQNAgent()
# Iterate over episodes
for episode in tqdm(range(1, EPISODES + 1), ascii=True, unit='episodes'):
    # Update tensorboard step every episode
    agent.tensorboard.step = episode
    # Restarting episode - reset episode reward and step number
    episode_reward = 0
    step = 1
    # Reset environment and get initial state
    current_state = env.reset()
    # Reset flag and start iterating until episode ends
    done = False
    while not done:
        # This part stays mostly the same, the change is to query a model for Q values
        # (epsilon-greedy action selection)
        if np.random.random() > epsilon:
            # Get action from Q table
            action = np.argmax(agent.get_qs(current_state))
        else:
            # Get random action
            action = np.random.randint(0, env.ACTION_SPACE_SIZE)
        new_state, reward, done = env.step(action)
        # Transform new continous state to new discrete state and count reward
        episode_reward += reward
        if SHOW_PREVIEW and not episode % AGGREGATE_STATS_EVERY:
            env.render()
        # Every step we update replay memory and train main network
        agent.update_replay_memory((current_state, action, reward, new_state, done))
        agent.train(done, step)
        current_state = new_state
        step += 1
    # Append episode reward to a list and log stats (every given number of episodes)
    ep_rewards.append(episode_reward)
    if not episode % AGGREGATE_STATS_EVERY or episode == 1:
        average_reward = sum(ep_rewards[-AGGREGATE_STATS_EVERY:])/len(ep_rewards[-AGGREGATE_STATS_EVERY:])
        min_reward = min(ep_rewards[-AGGREGATE_STATS_EVERY:])
        max_reward = max(ep_rewards[-AGGREGATE_STATS_EVERY:])
        agent.tensorboard.update_stats(reward_avg=average_reward, reward_min=min_reward, reward_max=max_reward, epsilon=epsilon)
        # Save model, but only when min reward is greater or equal a set value
        if min_reward >= MIN_REWARD:
            agent.model.save(f'models/{MODEL_NAME}__{max_reward:_>7.2f}max_{average_reward:_>7.2f}avg_{min_reward:_>7.2f}min__{int(time.time())}.model')
    # Decay epsilon
    if epsilon > MIN_EPSILON:
        epsilon *= EPSILON_DECAY
        epsilon = max(MIN_EPSILON, epsilon)
550596433c2080d2210de68e60461c51953707b9 | 341 | py | Python | tableprint/metadata.py | sumanthratna/tableprint | f91c39652d2c500add3373597af2df0b54472652 | [
"MIT"
] | 1 | 2020-05-18T21:31:30.000Z | 2020-05-18T21:31:30.000Z | tableprint/metadata.py | sumanthratna/tableprint | f91c39652d2c500add3373597af2df0b54472652 | [
"MIT"
] | 1 | 2020-07-07T23:46:34.000Z | 2020-07-07T23:46:34.000Z | tableprint/metadata.py | sumanthratna/tableprint | f91c39652d2c500add3373597af2df0b54472652 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Version info
__version__ = '0.9.0'
__license__ = 'MIT'
# Project description(s)
__description__ = 'Pretty console printing of tabular data'
# The project's main homepage.
__url__ = 'https://github.com/nirum/tableprint'
# Author details
__author__ = 'Niru Maheswaranathan'
__author_email__ = 'niru@fastmail.com'
| 21.3125 | 59 | 0.730205 |
8212ba70a3f255bdb354f97ae585643ef387f804 | 6,416 | py | Python | main.py | helish88/giveaway | 1513ff8ba3f6384ba3c37d158de1ef51b9a07511 | [
"Apache-2.0"
] | 12 | 2020-09-17T04:27:13.000Z | 2022-03-08T08:55:47.000Z | main.py | helish88/giveaway | 1513ff8ba3f6384ba3c37d158de1ef51b9a07511 | [
"Apache-2.0"
] | 3 | 2021-01-25T09:22:53.000Z | 2021-01-28T09:17:52.000Z | main.py | helish88/giveaway | 1513ff8ba3f6384ba3c37d158de1ef51b9a07511 | [
"Apache-2.0"
] | 5 | 2020-08-05T14:47:26.000Z | 2021-07-17T19:50:46.000Z | # Imports
import discord
import asyncio
import random
import re
# From imports
from discord.ext import commands
from datetime import datetime, timedelta
from colorama import Fore, Style
token = '' # Input your token here.
intents = discord.Intents.default()
# Commands use the "!" prefix and are matched case-insensitively.
bot = commands.Bot(command_prefix='!', case_insensitive=True, intents=intents)
# Remove the built-in help command (no custom replacement is defined here).
bot.remove_command('help')
# Commands
# Giveaway Command
@bot.command(aliases=['start', 'g'])
@commands.has_permissions(manage_guild=True)
async def giveaway(ctx):
    """Interactively set up and run a giveaway.

    Walks the invoking moderator through four prompts (channel, winner
    count, duration, prize), posts the giveaway embed, waits for the timer
    to expire and then announces the winner(s).
    """
    await ctx.send("Select the channel, you would like the giveaway to be in.")
    def check(m):
        # Only accept answers from the command author, in the same channel.
        return m.author == ctx.author and m.channel == ctx.channel
    try:
        msg1 = await bot.wait_for('message', check=check, timeout=30.0)
        channel_converter = discord.ext.commands.TextChannelConverter()
        try:
            giveawaychannel = await channel_converter.convert(ctx, msg1.content)
        except commands.BadArgument:
            return await ctx.send("This channel doesn't exist, please try again.")
    except asyncio.TimeoutError:
        # BUG FIX: the original fell through after the timeout message and
        # crashed on the undefined answer variable; abort the command instead.
        return await ctx.send("You took to long, please try again!")
    if not giveawaychannel.permissions_for(ctx.guild.me).send_messages or not giveawaychannel.permissions_for(
            ctx.guild.me).add_reactions:
        return await ctx.send(
            f"Bot does not have correct permissions to send in: {giveawaychannel}\n **Permissions needed:** ``Add reactions | Send messages.``")
    await ctx.send("How many winners to the giveaway would you like?")
    try:
        msg2 = await bot.wait_for('message', check=check, timeout=30.0)
        try:
            winerscount = int(msg2.content)
        except ValueError:
            return await ctx.send("You didn't specify a number of winners, please try again.")
    except asyncio.TimeoutError:
        # BUG FIX: abort on timeout (see above).
        return await ctx.send("You took to long, please try again!")
    await ctx.send("Select an amount of time for the giveaway.")
    try:
        since = await bot.wait_for('message', check=check, timeout=30.0)
    except asyncio.TimeoutError:
        # BUG FIX: abort on timeout (see above).
        return await ctx.send("You took to long, please try again!")
    # Recognised unit suffixes for the "<number><unit>" duration answer.
    seconds = ("s", "sec", "secs", 'second', "seconds")
    minutes = ("m", "min", "mins", "minute", "minutes")
    hours = ("h", "hour", "hours")
    days = ("d", "day", "days")
    weeks = ("w", "week", "weeks")
    try:
        temp = re.compile("([0-9]+)([a-zA-Z]+)")
        match = temp.match(since.content)  # match once and reuse
        if not match:
            return await ctx.send("You did not specify a unit of time, please try again.")
        res = match.groups()
        time = int(res[0])
        since = res[1]
    except ValueError:
        return await ctx.send("You did not specify a unit of time, please try again.")
    # Convert the parsed amount into a wait time in seconds.
    if since.lower() in seconds:
        timewait = time
    elif since.lower() in minutes:
        timewait = time * 60
    elif since.lower() in hours:
        timewait = time * 3600
    elif since.lower() in days:
        timewait = time * 86400
    elif since.lower() in weeks:
        timewait = time * 604800
    else:
        return await ctx.send("You did not specify a unit of time, please try again.")
    await ctx.send("What would you like the prize to be?")
    try:
        msg4 = await bot.wait_for('message', check=check, timeout=30.0)
    except asyncio.TimeoutError:
        # BUG FIX: abort on timeout (see above).
        return await ctx.send("You took to long, please try again.")
    # Log the giveaway details to the staff log channel.
    logembed = discord.Embed(title="Giveaway Logged",
                             description=f"**Prize:** ``{msg4.content}``\n**Winners:** ``{winerscount}``\n**Channel:** {giveawaychannel.mention}\n**Host:** {ctx.author.mention}",
                             color=discord.Color.red())
    logembed.set_thumbnail(url=ctx.author.avatar_url)
    logchannel = ctx.guild.get_channel(609431364445405194) # Put your channel, you would like to send giveaway logs to.
    await logchannel.send(embed=logembed)
    # Post the public giveaway embed with an end timestamp.
    futuredate = datetime.utcnow() + timedelta(seconds=timewait)
    embed1 = discord.Embed(color=discord.Color(random.randint(0x000000, 0xFFFFFF)),
                           title=f"🎉GIVEAWAY🎉\n`{msg4.content}`", timestamp=futuredate,
                           description=f'React with 🎉 to enter!\nHosted by: {ctx.author.mention}')
    embed1.set_footer(text=f"Giveaway will end")
    msg = await giveawaychannel.send(embed=embed1)
    await msg.add_reaction("🎉")
    await asyncio.sleep(timewait)
    # Re-fetch the message so the reaction list is current.
    message = await giveawaychannel.fetch_message(msg.id)
    for reaction in message.reactions:
        if str(reaction.emoji) == "🎉":
            users = await reaction.users().flatten()
    # Only the bot's own reaction present means nobody entered.
    if len(users) == 1:
        return await msg.edit(embed=discord.Embed(title="Nobody has won the giveaway."))
    try:
        winners = random.sample([user for user in users if not user.bot], k=winerscount)
    except ValueError:
        return await giveawaychannel.send("not enough participants")
    winnerstosend = "\n".join([winner.mention for winner in winners])
    await msg.edit(embed=discord.Embed(title="WINNER",
                                       description=f"Congratulations {winnerstosend}, you have won **{msg4.content}**!",
                                       color=discord.Color.blue()))
# Reroll command, used for chosing a new random winner in the giveaway
@bot.command()
@commands.has_permissions(manage_guild=True)
async def reroll(ctx):
    """Pick a fresh random winner from the most recent giveaway in this channel.

    Scans the last 100 messages for the newest bot-authored embed (assumed
    to be the giveaway message) and redraws from its first reaction.
    NOTE(review): assumes that embed has at least one reaction and that the
    bot reacted to it — a bot embed without reactions raises IndexError;
    verify before relying on this in busy channels.
    """
    async for message in ctx.channel.history(limit=100, oldest_first=False):
        if message.author.id == bot.user.id and message.embeds:
            reroll = await ctx.fetch_message(message.id)
            users = await reroll.reactions[0].users().flatten()
            users.pop(users.index(bot.user))  # exclude the bot's own reaction
            winner = random.choice(users)
            await ctx.send(f"The new winner is {winner.mention}")
            break
    else:
        # for/else: runs only if no bot embed was found in the history.
        await ctx.send("No giveaways going on in this channel.")
@bot.command()
async def ping(ctx):
    """Report the bot's websocket latency, rounded to whole milliseconds."""
    latency_ms = round(bot.latency * 1000)
    await ctx.send(f"The bot's ping is: ``{latency_ms}ms``!")
# Events

@bot.event
async def on_ready():
    # Fired once the gateway connection is ready; print a colorized login banner.
    print(Fore.RED + 'Logged in as')
    print(Fore.GREEN + bot.user.name)
    print(Style.RESET_ALL)  # reset colorama styling so later console output is plain

bot.run(token)  # blocking: starts the event loop with the configured token
| 37.964497 | 179 | 0.624065 |
02ab2d6983c2944fb7e0991456e8eda8bf2e0762 | 227 | py | Python | 2_UNIXCommands/Exercise10.py | takeyoshinitta/NLP-100-Exercise | e77fb385fbbf50c8a8bdc47442db1421739ea5b6 | [
"MIT"
] | 3 | 2022-01-04T19:02:22.000Z | 2022-02-21T08:52:18.000Z | 2_UNIXCommands/Exercise10.py | takeyoshinitta/NLP-100-Exercise | e77fb385fbbf50c8a8bdc47442db1421739ea5b6 | [
"MIT"
] | null | null | null | 2_UNIXCommands/Exercise10.py | takeyoshinitta/NLP-100-Exercise | e77fb385fbbf50c8a8bdc47442db1421739ea5b6 | [
"MIT"
] | null | null | null | # 10. Line count
# Count the number of lines of the file. Confirm the result by using wc command.
with open('popular-names.txt') as f:
    # Summing over a generator counts lines without keeping them in memory
    # (replaces the manual counter loop).
    count = sum(1 for _ in f)
print('Number of lines is ' + str(count))
| 25.222222 | 80 | 0.656388 |
ce7d7bae9b30678b7b253e0527fb4244bfd899e7 | 35 | py | Python | HelloWorld.py | StevenLearningHub/PythonLeaning | 3e7260f3965ef9966e76ef0f9a5c8a2649188c47 | [
"Apache-2.0"
] | null | null | null | HelloWorld.py | StevenLearningHub/PythonLeaning | 3e7260f3965ef9966e76ef0f9a5c8a2649188c47 | [
"Apache-2.0"
] | null | null | null | HelloWorld.py | StevenLearningHub/PythonLeaning | 3e7260f3965ef9966e76ef0f9a5c8a2649188c47 | [
"Apache-2.0"
] | null | null | null | print "This line will be printed."
| 17.5 | 34 | 0.742857 |
0c3042e50697d5005b0439948f18005440053a80 | 5,215 | py | Python | svm/master.py | EnsekiTT/ml_review | acedd27d877519b413605a5db5ddd6fb4ffff765 | [
"MIT"
] | null | null | null | svm/master.py | EnsekiTT/ml_review | acedd27d877519b413605a5db5ddd6fb4ffff765 | [
"MIT"
] | null | null | null | svm/master.py | EnsekiTT/ml_review | acedd27d877519b413605a5db5ddd6fb4ffff765 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import random
from scipy import *
from scipy.linalg import norm
import pandas as pd
import matplotlib.pyplot as plt
class svm(object):
    """Binary Support Vector Machine trained with the SMO algorithm.

    Reference: J. Platt, "Sequential Minimal Optimization: A Fast
    Algorithm for Training Support Vector Machines" (1998).
    """

    def __init__(self,
                 kernel=lambda x, y: dot(x, y),
                 c=10000,
                 tol=1e-2,
                 eps=1e-2,
                 loop=float('inf')):
        """
        Arguments:
        - `kernel`: kernel function k(x, y); defaults to a linear kernel
        - `c`: soft-margin penalty parameter C
        - `tol`: tolerance allowed on KKT-condition violations
        - `eps`: numerical tolerance used when comparing alphas
        - `loop`: upper bound on the number of outer-loop passes
        """
        self._kernel = kernel
        self._c = c
        self._tol = tol
        self._eps = eps
        self._loop = loop

    def _takeStep(self, i1, i2):
        """Jointly optimize the multiplier pair (alpha[i1], alpha[i2]).

        Returns True when the pair was actually updated.
        """
        if i1 == i2:
            return False
        alph1 = self._alpha[i1]
        alph2 = self._alpha[i2]
        y1 = self._target[i1]
        y2 = self._target[i2]
        e1 = self._e[i1]
        e2 = self._e[i2]
        s = y1 * y2
        # Feasible segment [L, H] for alpha2 given the box constraint
        # 0 <= alpha <= C and the linear equality constraint.
        if y1 != y2:
            L = max(0, alph2 - alph1)
            H = min(self._c, self._c - alph1 + alph2)
        else:
            L = max(0, alph2 + alph1 - self._c)
            H = min(self._c, alph1 + alph2)
        if L == H:
            return False
        k11 = self._kernel(self._point[i1], self._point[i1])
        k12 = self._kernel(self._point[i1], self._point[i2])
        k22 = self._kernel(self._point[i2], self._point[i2])
        # eta is the second derivative of the objective along the constraint.
        eta = 2 * k12 - k11 - k22
        if eta > 0:
            # Non-negative curvature: skip the pair (Platt's endpoint
            # fallback is not implemented here).
            return False
        a2 = alph2 - y2 * (e1 - e2) / eta
        a2 = min(H, max(a2, L))
        if abs(a2 - alph2) < self._eps * (a2 + alph2 + self._eps):
            return False  # change too small to be worth applying
        a1 = alph1 + s * (alph2 - a2)
        # Apply the update and refresh the cached errors E_i for all samples.
        da1 = a1 - alph1
        da2 = a2 - alph2
        self._e += array([(da1 * self._target[i1] * self._kernel(self._point[i1], p) +
                           da2 * self._target[i2] * self._kernel(self._point[i2], p))
                          for p in self._point])
        self._alpha[i1] = a1
        self._alpha[i2] = a2
        return True

    def _search(self, i, lst):
        """Second-choice heuristic: pick j in `lst` maximizing |E_i - E_j|.

        BUGFIX: the original reduce() returned `j` in BOTH branches
        (`j if ... else j`), so it always yielded the first candidate.
        min()/max() with a key also removes the Py2-only bare `reduce`.
        """
        if self._e[i] >= 0:
            return min(lst, key=lambda j: self._e[j])
        else:
            return max(lst, key=lambda j: self._e[j])

    def _examinEx(self, i2):
        """Check sample i2 against the KKT conditions and try to optimize it.

        Returns True if any alpha pair was changed.
        """
        y2 = self._target[i2]
        alph2 = self._alpha[i2]
        e2 = self._e[i2]
        r2 = e2 * y2
        if ((r2 < -self._tol and alph2 < self._c - self._eps) or
                (r2 > self._tol and alph2 > self._eps)):
            # 1) second-choice heuristic over the non-bound alphas...
            alst1 = [i for i in range(len(self._alpha))
                     if self._eps < self._alpha[i] < self._c - self._eps]
            if alst1:
                i1 = self._search(i2, alst1)
                if self._takeStep(i1, i2):
                    return True
                # 2) ...then every non-bound alpha in random order...
                random.shuffle(alst1)
                for i1 in alst1:
                    if self._takeStep(i1, i2):
                        return True
            # 3) ...finally the bound alphas in random order.
            alst2 = [i for i in range(len(self._alpha))
                     if (self._alpha[i] <= self._eps or
                         self._alpha[i] >= self._c - self._eps)]
            random.shuffle(alst2)
            for i1 in alst2:
                if self._takeStep(i1, i2):
                    return True
        return False

    def _calc_b(self):
        """Compute the bias b, averaged over the margin support vectors."""
        self._b = 0.0
        for i in self._m:
            self._b += self._target[i]
            for j in self._s:
                self._b -= (self._alpha[j] * self._target[j] *
                            self._kernel(self._point[i], self._point[j]))
        # Guard against ZeroDivisionError when no alpha ended up strictly
        # inside (0, C); the original crashed in that case.
        if self._m:
            self._b /= len(self._m)

    def calc(self, x):
        """Decision function f(x); its sign is the predicted class."""
        ret = self._b
        for i in self._s:
            ret += (self._alpha[i] * self._target[i] *
                    self._kernel(x, self._point[i]))
        return ret

    def learn(self, point, target):
        """Fit the SVM on samples `point` with labels `target` (+1/-1)."""
        self._target = target
        self._point = point
        self._alpha = zeros(len(target), dtype=float)
        self._b = 0
        # Error cache E_i = f(x_i) - y_i; with all alphas zero, f == 0.
        self._e = -1 * array(target, dtype=float)
        changed = False
        examine_all = True
        count = 0
        while changed or examine_all:
            count += 1
            if count > self._loop:
                break
            changed = False
            if examine_all:
                # Full pass over every sample.
                for i in range(len(self._target)):
                    changed |= self._examinEx(i)
            else:
                # Pass over the non-bound samples only (0 < alpha < C).
                for i in (j for j in range(len(self._target))
                          if self._eps < self._alpha[j] < self._c - self._eps):
                    changed |= self._examinEx(i)
            if examine_all:
                examine_all = False
            elif not changed:
                examine_all = True
        # Support vectors, and the margin (non-bound) support vectors.
        self._s = [i for i in range(len(self._target))
                   if self._eps < self._alpha[i]]
        self._m = [i for i in range(len(self._target))
                   if self._eps < self._alpha[i] < self._c - self._eps]
        self._calc_b()

    @property
    def s(self):
        """Indices of the support vectors (alpha > eps)."""
        return self._s

    @property
    def m(self):
        """Indices of the margin support vectors (eps < alpha < C - eps)."""
        return self._m

    @property
    def alpha(self):
        """Learned Lagrange multipliers."""
        return self._alpha
if __name__ == '__main__':
    # print() calls replace the Py2-only print statements.
    print('hello')
    # psyco (a Py2-only JIT) is strictly optional.
    try:
        import psyco
        psyco.full()
    except ImportError:
        pass
    # RBF-kernel SVM over the first two iris classes / first two features.
    s = svm(c=2, kernel=lambda x, y: exp(-norm(x - y) / 0.45))
    datas = pd.read_csv('../datasets/iris.txt', delim_whitespace=True)
    Label = []
    for i, d in enumerate(datas['Species']):
        if d == 'setosa':
            Label.append(0)
        elif d == 'versicolor':
            Label.append(1)
        elif d == 'virginica':
            Label.append(2)
    datas['Label'] = Label
    # Map the {0, 1} labels of the first 100 rows (setosa/versicolor) to {-1, +1}.
    t = list(datas['Label'][0:100] * 2.0 - 1.0)
    # .values replaces DataFrame.as_matrix(), which was removed in pandas 1.0.
    p = datas[['Sepal.Length', 'Sepal.Width']].values[0:100]
    s.learn(p, t)
    # Scatter the training points, colored by class.
    for i in range(len(p)):
        c = 'r' if t[i] > 0 else 'b'
        plt.scatter([p[i][0]], [p[i][1]], color=c)
    # Evaluate the decision function on a grid and draw the zero contour.
    X, Y = meshgrid(arange(-2.5, 2.5, 0.1), arange(-2.5, 2.5, 0.1))
    w, h = X.shape
    X.resize(X.size)
    Y.resize(Y.size)
    Z = array([s.calc([x, y]) for (x, y) in zip(X, Y)])
    X.resize((w, h))
    Y.resize((w, h))
    Z.resize((w, h))
    CS = plt.contour(X, Y, Z, [0.0], colors=('k'), linewidths=(3,), origin='lower')
    plt.xlim(-2.5, 2.5)
    plt.ylim(-2.5, 2.5)
    plt.show()
    print(s.alpha)
beb369b1d8a9b2769833833b06524f2bc86080db | 1,962 | py | Python | lib/bullet/src/examples/pybullet/vrEvent.py | mtesseracttech/CustomEngine | 1a9ed564408ae29fe49681a810b851403d71f486 | [
"Apache-2.0"
] | 12 | 2017-08-24T05:58:53.000Z | 2021-07-15T17:32:26.000Z | lib/bullet/src/examples/pybullet/vrEvent.py | mtesseracttech/CustomEngine | 1a9ed564408ae29fe49681a810b851403d71f486 | [
"Apache-2.0"
] | 1 | 2017-06-05T13:38:29.000Z | 2017-06-05T13:38:29.000Z | lib/bullet/src/examples/pybullet/vrEvent.py | mtesseracttech/CustomEngine | 1a9ed564408ae29fe49681a810b851403d71f486 | [
"Apache-2.0"
] | 6 | 2017-06-04T22:43:37.000Z | 2019-07-15T05:36:11.000Z | # See pybullet quickstart guide here:
# https://docs.google.com/document/d/10sXEhzFRSnvFcl3XxNGhnD4N2SedqwdAvK3dsihxVUA/edit#
# Create a Tiltbrush-like app, drawing lines using any controller
# Line width can be changed
import pybullet as p
CONTROLLER_ID = 0
POSITION=1
ORIENTATION=2
BUTTONS=6
#assume that the VR physics server is already started before
c = p.connect(p.SHARED_MEMORY)
print(c)
if (c<0):
p.connect(p.GUI)
p.setInternalSimFlags(0)#don't load default robot assets etc
p.resetSimulation()
p.loadURDF("plane.urdf")
prevPosition=[None]*p.VR_MAX_CONTROLLERS
colors=[0.,0.5,0.5]*p.VR_MAX_CONTROLLERS
widths = [3]*p.VR_MAX_CONTROLLERS
#use a few default colors
colors[0] = [0,0,0]
colors[1] = [0.5,0,0]
colors[2] = [0,0.5,0]
colors[3] = [0,0,0.5]
colors[4] = [0.5,0.5,0.]
colors[5] = [.5,.5,.5]
while True:
events = p.getVREvents()
for e in (events):
if (e[BUTTONS][33]&p.VR_BUTTON_WAS_TRIGGERED):
prevPosition[e[CONTROLLER_ID]] = e[POSITION]
if (e[BUTTONS][32]&p.VR_BUTTON_WAS_TRIGGERED):
widths[e[CONTROLLER_ID]]=widths[e[0]]+1
if (widths[e[CONTROLLER_ID]]>20):
widths[e[CONTROLLER_ID]] = 1
if (e[BUTTONS][1]&p.VR_BUTTON_WAS_TRIGGERED):
p.resetSimulation()
#p.setGravity(0,0,-10)
p.removeAllUserDebugItems()
p.loadURDF("plane.urdf")
if (e[BUTTONS][33]==p.VR_BUTTON_IS_DOWN):
pt = prevPosition[e[CONTROLLER_ID]]
#print(prevPosition[e[0]])
#print(e[1])
diff = [pt[0]-e[POSITION][0],pt[1]-e[POSITION][1],pt[2]-e[POSITION][2]]
lenSqr = diff[0]*diff[0]+diff[1]*diff[1]+diff[2]*diff[2]
ptDistThreshold = 0.01
if (lenSqr>(ptDistThreshold*ptDistThreshold)):
p.addUserDebugLine(e[POSITION],prevPosition[e[CONTROLLER_ID]],colors[e[CONTROLLER_ID]],widths[e[CONTROLLER_ID]])
#p.loadURDF("cube_small.urdf",e[1])
colors[e[CONTROLLER_ID]] = [1-colors[e[CONTROLLER_ID]][0],1-colors[e[CONTROLLER_ID]][1],1-colors[e[CONTROLLER_ID]][2]]
prevPosition[e[CONTROLLER_ID]] = e[POSITION] | 31.645161 | 122 | 0.702854 |
01a0263760ebf637cf7d9aa3c8d1a06d1daa3dba | 17,177 | py | Python | venv/Lib/site-packages/selenium/webdriver/common/devtools/v85/profiler.py | gilbertekalea/booking.com_crawler | 71e52c87cd72a77f80a3e5fc0af0e1a68a5712ae | [
"MIT"
] | 2 | 2022-02-25T09:12:07.000Z | 2022-03-22T19:45:43.000Z | venv/Lib/site-packages/selenium/webdriver/common/devtools/v85/profiler.py | gilbertekalea/booking.com_crawler | 71e52c87cd72a77f80a3e5fc0af0e1a68a5712ae | [
"MIT"
] | null | null | null | venv/Lib/site-packages/selenium/webdriver/common/devtools/v85/profiler.py | gilbertekalea/booking.com_crawler | 71e52c87cd72a77f80a3e5fc0af0e1a68a5712ae | [
"MIT"
] | 1 | 2022-03-28T09:19:34.000Z | 2022-03-28T09:19:34.000Z | # DO NOT EDIT THIS FILE!
#
# This file is generated from the CDP specification. If you need to make
# changes, edit the generator and regenerate all of the modules.
#
# CDP domain: Profiler
from __future__ import annotations
from .util import event_class, T_JSON_DICT
from dataclasses import dataclass
import enum
import typing
from . import debugger
from . import runtime
@dataclass
class ProfileNode:
'''
Profile node. Holds callsite information, execution statistics and child nodes.
'''
#: Unique id of the node.
id_: int
#: Function location.
call_frame: runtime.CallFrame
#: Number of samples where this node was on top of the call stack.
hit_count: typing.Optional[int] = None
#: Child node ids.
children: typing.Optional[typing.List[int]] = None
#: The reason of being not optimized. The function may be deoptimized or marked as don't
#: optimize.
deopt_reason: typing.Optional[str] = None
#: An array of source position ticks.
position_ticks: typing.Optional[typing.List[PositionTickInfo]] = None
def to_json(self):
json = dict()
json['id'] = self.id_
json['callFrame'] = self.call_frame.to_json()
if self.hit_count is not None:
json['hitCount'] = self.hit_count
if self.children is not None:
json['children'] = [i for i in self.children]
if self.deopt_reason is not None:
json['deoptReason'] = self.deopt_reason
if self.position_ticks is not None:
json['positionTicks'] = [i.to_json() for i in self.position_ticks]
return json
@classmethod
def from_json(cls, json):
return cls(
id_=int(json['id']),
call_frame=runtime.CallFrame.from_json(json['callFrame']),
hit_count=int(json['hitCount']) if 'hitCount' in json else None,
children=[int(i) for i in json['children']] if 'children' in json else None,
deopt_reason=str(json['deoptReason']) if 'deoptReason' in json else None,
position_ticks=[PositionTickInfo.from_json(i) for i in json['positionTicks']] if 'positionTicks' in json else None,
)
@dataclass
class Profile:
'''
Profile.
'''
#: The list of profile nodes. First item is the root node.
nodes: typing.List[ProfileNode]
#: Profiling start timestamp in microseconds.
start_time: float
#: Profiling end timestamp in microseconds.
end_time: float
#: Ids of samples top nodes.
samples: typing.Optional[typing.List[int]] = None
#: Time intervals between adjacent samples in microseconds. The first delta is relative to the
#: profile startTime.
time_deltas: typing.Optional[typing.List[int]] = None
def to_json(self):
json = dict()
json['nodes'] = [i.to_json() for i in self.nodes]
json['startTime'] = self.start_time
json['endTime'] = self.end_time
if self.samples is not None:
json['samples'] = [i for i in self.samples]
if self.time_deltas is not None:
json['timeDeltas'] = [i for i in self.time_deltas]
return json
@classmethod
def from_json(cls, json):
return cls(
nodes=[ProfileNode.from_json(i) for i in json['nodes']],
start_time=float(json['startTime']),
end_time=float(json['endTime']),
samples=[int(i) for i in json['samples']] if 'samples' in json else None,
time_deltas=[int(i) for i in json['timeDeltas']] if 'timeDeltas' in json else None,
)
@dataclass
class PositionTickInfo:
'''
Specifies a number of samples attributed to a certain source position.
'''
#: Source line number (1-based).
line: int
#: Number of samples attributed to the source line.
ticks: int
def to_json(self):
json = dict()
json['line'] = self.line
json['ticks'] = self.ticks
return json
@classmethod
def from_json(cls, json):
return cls(
line=int(json['line']),
ticks=int(json['ticks']),
)
@dataclass
class CoverageRange:
'''
Coverage data for a source range.
'''
#: JavaScript script source offset for the range start.
start_offset: int
#: JavaScript script source offset for the range end.
end_offset: int
#: Collected execution count of the source range.
count: int
def to_json(self):
json = dict()
json['startOffset'] = self.start_offset
json['endOffset'] = self.end_offset
json['count'] = self.count
return json
@classmethod
def from_json(cls, json):
return cls(
start_offset=int(json['startOffset']),
end_offset=int(json['endOffset']),
count=int(json['count']),
)
@dataclass
class FunctionCoverage:
'''
Coverage data for a JavaScript function.
'''
#: JavaScript function name.
function_name: str
#: Source ranges inside the function with coverage data.
ranges: typing.List[CoverageRange]
#: Whether coverage data for this function has block granularity.
is_block_coverage: bool
def to_json(self):
json = dict()
json['functionName'] = self.function_name
json['ranges'] = [i.to_json() for i in self.ranges]
json['isBlockCoverage'] = self.is_block_coverage
return json
@classmethod
def from_json(cls, json):
return cls(
function_name=str(json['functionName']),
ranges=[CoverageRange.from_json(i) for i in json['ranges']],
is_block_coverage=bool(json['isBlockCoverage']),
)
@dataclass
class ScriptCoverage:
'''
Coverage data for a JavaScript script.
'''
#: JavaScript script id.
script_id: runtime.ScriptId
#: JavaScript script name or url.
url: str
#: Functions contained in the script that has coverage data.
functions: typing.List[FunctionCoverage]
def to_json(self):
json = dict()
json['scriptId'] = self.script_id.to_json()
json['url'] = self.url
json['functions'] = [i.to_json() for i in self.functions]
return json
@classmethod
def from_json(cls, json):
return cls(
script_id=runtime.ScriptId.from_json(json['scriptId']),
url=str(json['url']),
functions=[FunctionCoverage.from_json(i) for i in json['functions']],
)
@dataclass
class TypeObject:
'''
Describes a type collected during runtime.
'''
#: Name of a type collected with type profiling.
name: str
def to_json(self):
json = dict()
json['name'] = self.name
return json
@classmethod
def from_json(cls, json):
return cls(
name=str(json['name']),
)
@dataclass
class TypeProfileEntry:
'''
Source offset and types for a parameter or return value.
'''
#: Source offset of the parameter or end of function for return values.
offset: int
#: The types for this parameter or return value.
types: typing.List[TypeObject]
def to_json(self):
json = dict()
json['offset'] = self.offset
json['types'] = [i.to_json() for i in self.types]
return json
@classmethod
def from_json(cls, json):
return cls(
offset=int(json['offset']),
types=[TypeObject.from_json(i) for i in json['types']],
)
@dataclass
class ScriptTypeProfile:
'''
Type profile data collected during runtime for a JavaScript script.
'''
#: JavaScript script id.
script_id: runtime.ScriptId
#: JavaScript script name or url.
url: str
#: Type profile entries for parameters and return values of the functions in the script.
entries: typing.List[TypeProfileEntry]
def to_json(self):
json = dict()
json['scriptId'] = self.script_id.to_json()
json['url'] = self.url
json['entries'] = [i.to_json() for i in self.entries]
return json
@classmethod
def from_json(cls, json):
return cls(
script_id=runtime.ScriptId.from_json(json['scriptId']),
url=str(json['url']),
entries=[TypeProfileEntry.from_json(i) for i in json['entries']],
)
@dataclass
class CounterInfo:
'''
Collected counter information.
'''
#: Counter name.
name: str
#: Counter value.
value: int
def to_json(self):
json = dict()
json['name'] = self.name
json['value'] = self.value
return json
@classmethod
def from_json(cls, json):
return cls(
name=str(json['name']),
value=int(json['value']),
)
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
cmd_dict: T_JSON_DICT = {
'method': 'Profiler.disable',
}
json = yield cmd_dict
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
cmd_dict: T_JSON_DICT = {
'method': 'Profiler.enable',
}
json = yield cmd_dict
def get_best_effort_coverage() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[ScriptCoverage]]:
'''
Collect coverage data for the current isolate. The coverage data may be incomplete due to
garbage collection.
:returns: Coverage data for the current isolate.
'''
cmd_dict: T_JSON_DICT = {
'method': 'Profiler.getBestEffortCoverage',
}
json = yield cmd_dict
return [ScriptCoverage.from_json(i) for i in json['result']]
def set_sampling_interval(
interval: int
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
'''
Changes CPU profiler sampling interval. Must be called before CPU profiles recording started.
:param interval: New sampling interval in microseconds.
'''
params: T_JSON_DICT = dict()
params['interval'] = interval
cmd_dict: T_JSON_DICT = {
'method': 'Profiler.setSamplingInterval',
'params': params,
}
json = yield cmd_dict
def start() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
cmd_dict: T_JSON_DICT = {
'method': 'Profiler.start',
}
json = yield cmd_dict
def start_precise_coverage(
call_count: typing.Optional[bool] = None,
detailed: typing.Optional[bool] = None,
allow_triggered_updates: typing.Optional[bool] = None
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,float]:
'''
Enable precise code coverage. Coverage data for JavaScript executed before enabling precise code
coverage may be incomplete. Enabling prevents running optimized code and resets execution
counters.
:param call_count: *(Optional)* Collect accurate call counts beyond simple 'covered' or 'not covered'.
:param detailed: *(Optional)* Collect block-based coverage.
:param allow_triggered_updates: *(Optional)* Allow the backend to send updates on its own initiative
:returns: Monotonically increasing time (in seconds) when the coverage update was taken in the backend.
'''
params: T_JSON_DICT = dict()
if call_count is not None:
params['callCount'] = call_count
if detailed is not None:
params['detailed'] = detailed
if allow_triggered_updates is not None:
params['allowTriggeredUpdates'] = allow_triggered_updates
cmd_dict: T_JSON_DICT = {
'method': 'Profiler.startPreciseCoverage',
'params': params,
}
json = yield cmd_dict
return float(json['timestamp'])
def start_type_profile() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
'''
Enable type profile.
**EXPERIMENTAL**
'''
cmd_dict: T_JSON_DICT = {
'method': 'Profiler.startTypeProfile',
}
json = yield cmd_dict
def stop() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,Profile]:
'''
:returns: Recorded profile.
'''
cmd_dict: T_JSON_DICT = {
'method': 'Profiler.stop',
}
json = yield cmd_dict
return Profile.from_json(json['profile'])
def stop_precise_coverage() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
'''
Disable precise code coverage. Disabling releases unnecessary execution count records and allows
executing optimized code.
'''
cmd_dict: T_JSON_DICT = {
'method': 'Profiler.stopPreciseCoverage',
}
json = yield cmd_dict
def stop_type_profile() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
'''
Disable type profile. Disabling releases type profile data collected so far.
**EXPERIMENTAL**
'''
cmd_dict: T_JSON_DICT = {
'method': 'Profiler.stopTypeProfile',
}
json = yield cmd_dict
def take_precise_coverage() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.Tuple[typing.List[ScriptCoverage], float]]:
'''
Collect coverage data for the current isolate, and resets execution counters. Precise code
coverage needs to have started.
:returns: A tuple with the following items:
0. **result** - Coverage data for the current isolate.
1. **timestamp** - Monotonically increasing time (in seconds) when the coverage update was taken in the backend.
'''
cmd_dict: T_JSON_DICT = {
'method': 'Profiler.takePreciseCoverage',
}
json = yield cmd_dict
return (
[ScriptCoverage.from_json(i) for i in json['result']],
float(json['timestamp'])
)
def take_type_profile() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[ScriptTypeProfile]]:
'''
Collect type profile.
**EXPERIMENTAL**
:returns: Type profile for all scripts since startTypeProfile() was turned on.
'''
cmd_dict: T_JSON_DICT = {
'method': 'Profiler.takeTypeProfile',
}
json = yield cmd_dict
return [ScriptTypeProfile.from_json(i) for i in json['result']]
def enable_runtime_call_stats() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
'''
Enable run time call stats collection.
**EXPERIMENTAL**
'''
cmd_dict: T_JSON_DICT = {
'method': 'Profiler.enableRuntimeCallStats',
}
json = yield cmd_dict
def disable_runtime_call_stats() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
'''
Disable run time call stats collection.
**EXPERIMENTAL**
'''
cmd_dict: T_JSON_DICT = {
'method': 'Profiler.disableRuntimeCallStats',
}
json = yield cmd_dict
def get_runtime_call_stats() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[CounterInfo]]:
'''
Retrieve run time call stats.
**EXPERIMENTAL**
:returns: Collected counter information.
'''
cmd_dict: T_JSON_DICT = {
'method': 'Profiler.getRuntimeCallStats',
}
json = yield cmd_dict
return [CounterInfo.from_json(i) for i in json['result']]
@event_class('Profiler.consoleProfileFinished')
@dataclass
class ConsoleProfileFinished:
id_: str
#: Location of console.profileEnd().
location: debugger.Location
profile: Profile
#: Profile title passed as an argument to console.profile().
title: typing.Optional[str]
@classmethod
def from_json(cls, json: T_JSON_DICT) -> ConsoleProfileFinished:
return cls(
id_=str(json['id']),
location=debugger.Location.from_json(json['location']),
profile=Profile.from_json(json['profile']),
title=str(json['title']) if 'title' in json else None
)
@event_class('Profiler.consoleProfileStarted')
@dataclass
class ConsoleProfileStarted:
'''
Sent when new profile recording is started using console.profile() call.
'''
id_: str
#: Location of console.profile().
location: debugger.Location
#: Profile title passed as an argument to console.profile().
title: typing.Optional[str]
@classmethod
def from_json(cls, json: T_JSON_DICT) -> ConsoleProfileStarted:
return cls(
id_=str(json['id']),
location=debugger.Location.from_json(json['location']),
title=str(json['title']) if 'title' in json else None
)
@event_class('Profiler.preciseCoverageDeltaUpdate')
@dataclass
class PreciseCoverageDeltaUpdate:
'''
**EXPERIMENTAL**
Reports coverage delta since the last poll (either from an event like this, or from
``takePreciseCoverage`` for the current isolate. May only be sent if precise code
coverage has been started. This event can be trigged by the embedder to, for example,
trigger collection of coverage data immediatelly at a certain point in time.
'''
#: Monotonically increasing time (in seconds) when the coverage update was taken in the backend.
timestamp: float
#: Identifier for distinguishing coverage events.
occassion: str
#: Coverage data for the current isolate.
result: typing.List[ScriptCoverage]
@classmethod
def from_json(cls, json: T_JSON_DICT) -> PreciseCoverageDeltaUpdate:
return cls(
timestamp=float(json['timestamp']),
occassion=str(json['occassion']),
result=[ScriptCoverage.from_json(i) for i in json['result']]
)
| 28.772194 | 127 | 0.645864 |
b76a22b63971ec2c0610d28f6dd9ef621e29f4bd | 2,278 | py | Python | wasm/compat.py | bigdot123456/wasmDecode | 063abc4a951cbc721bd6c42718de87bef9a574a0 | [
"MIT"
] | 269 | 2016-11-09T19:07:10.000Z | 2022-02-20T00:55:07.000Z | wasm/compat.py | bigdot123456/wasmDecode | 063abc4a951cbc721bd6c42718de87bef9a574a0 | [
"MIT"
] | 8 | 2017-01-16T20:28:48.000Z | 2021-07-20T18:31:07.000Z | wasm/compat.py | bigdot123456/wasmDecode | 063abc4a951cbc721bd6c42718de87bef9a574a0 | [
"MIT"
] | 42 | 2016-11-09T19:13:58.000Z | 2022-03-08T08:51:15.000Z | """Defines compatibility quirks for Python 2.7."""
from __future__ import print_function, absolute_import, division, unicode_literals
import sys
import functools
import logging
import warnings
def add_metaclass(metaclass):
    """
    Class decorator for creating a class with a metaclass.
    Borrowed from `six` module.
    """
    @functools.wraps(metaclass)
    def wrapper(cls):
        # Re-create the class body, dropping descriptors that the new
        # metaclass call will regenerate itself.
        body = dict(cls.__dict__)
        slot_names = body.get('__slots__')
        if slot_names is not None:
            if isinstance(slot_names, str):
                slot_names = [slot_names]
            for name in slot_names:
                body.pop(name)
        body.pop('__dict__', None)
        body.pop('__weakref__', None)
        return metaclass(cls.__name__, cls.__bases__, body)
    return wrapper
def indent(text, prefix, predicate=None):
    """Adds 'prefix' to the beginning of selected lines in 'text'.

    If 'predicate' is provided, 'prefix' will only be added to the lines
    where 'predicate(line)' is True. By default, 'prefix' is added to all
    lines that contain at least one non-whitespace character.

    Borrowed from Py3 `textwrap` module.
    """
    if predicate is None:
        # Default: prefix only lines that are not blank/whitespace-only.
        predicate = lambda line: line.strip()
    return ''.join(
        prefix + line if predicate(line) else line
        for line in text.splitlines(True)
    )
def deprecated_func(func):
    """Deprecates a function, printing a warning on the first usage."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # A wrapper attribute (works on Py2, which lacks `nonlocal`)
        # records whether the warning was already emitted.
        if not wrapper._warned:
            warnings.warn(
                "Call to deprecated function {}.".format(func.__name__),
                DeprecationWarning,
            )
            wrapper._warned = True
        return func(*args, **kwargs)
    wrapper._warned = False
    return wrapper
# Uniform byte -> int accessor across Python versions: indexing Py3
# ``bytes`` already yields ints, while indexing a Py2 ``str`` yields
# one-character strings that must go through ord().
if sys.version_info[0] >= 3:
    def byte2int(x):
        # Py3: already an int.
        return x
elif sys.version_info[0] == 2:
    def byte2int(x):
        # Py2: convert a 1-char string to its code point; pass ints through.
        return ord(x) if type(x) == str else x
else:
    raise Exception("Unsupported Python version")
| 27.780488 | 82 | 0.636084 |
f47d9b6db371b96886e9dc310616b8808e6c7b76 | 3,439 | py | Python | sparkcommonfunc/data_transformation/PivotRowToColumnAndBack.py | pengfei99/PySparkCommonFunc | 8238949f52a8e0d2c30c42d9f4002941f43db466 | [
"MIT"
] | null | null | null | sparkcommonfunc/data_transformation/PivotRowToColumnAndBack.py | pengfei99/PySparkCommonFunc | 8238949f52a8e0d2c30c42d9f4002941f43db466 | [
"MIT"
] | null | null | null | sparkcommonfunc/data_transformation/PivotRowToColumnAndBack.py | pengfei99/PySparkCommonFunc | 8238949f52a8e0d2c30c42d9f4002941f43db466 | [
"MIT"
] | null | null | null | from pyspark.sql import SparkSession, DataFrame
from pyspark.sql import functions as f
"""Exp1 : Simple pivot example, understand what pivot will do"""
def exp1(df):
    """Demonstrate a basic pivot: each Country row becomes an Amount column."""
    print("Exp1: show the country list groupby Product")
    df.groupBy("Product").agg(f.collect_list("Country")).show(truncate=False)

    print("Exp1: show the Amount by Product and country")
    df.groupBy("Product", "Country").sum("Amount").show(truncate=False)

    # pivot() rotates every distinct Country value into its own column.
    by_country = df.groupBy("Product").pivot("Country").sum("Amount")
    by_country.printSchema()
    by_country.show(truncate=False)
    return by_country
""" Exp2 : Improve pivot performance
Pivot is a very expensive operation, you may want to optimize it by using following solutions.
Solution 1: We can provide a list of the column name(row value) that we want to pivot.
Solution 2: Two phrase groupby
"""
def exp2(df):
    """Show two ways to make pivot cheaper.

    Pivot first has to discover the distinct values of the pivot column;
    Solution 1 supplies them up front, Solution 2 pre-aggregates so less
    data reaches the pivot stage.
    """
    # Solution 1: We can provide a list of the column name(row value) that we want to pivot.
    # You don't have to have all distinct value in the list, try to remove one country and re-run the example
    country_list = ["USA", "China", "Canada", "Mexico"]
    pivot_country = df.groupBy("Product").pivot("Country", country_list).sum("Amount")
    print("Exp2: Optimize pivot with country list")
    pivot_country.show(truncate=False)

    # Solution 2 with custom col name:
    # The first groupBy shrinks the data; alias() controls the column fed to pivot.
    pivot_2 = df.groupBy("Product", "Country").agg(f.sum("Amount").alias("sum_amount")) \
        .groupBy("Product").pivot("Country").sum("sum_amount")
    print("Exp2: Optimize pivot with two groupBy and custom column name")
    pivot_2.show(truncate=False)

    # solution 2 with auto col name:
    # Without alias() Spark auto-names the aggregate column "sum(Amount)".
    pivot_3 = df.groupBy("Product", "Country").sum("Amount") \
        .groupBy("Product").pivot("Country").sum("sum(Amount)")
    print("Exp2: Optimize pivot with two groupBy and auto gen column name")
    pivot_3.show(truncate=False)
""" Exp3 : Unpivot
Unpivot is a reverse operation, we can achieve by rotating column values into rows values.
PySpark SQL doesn’t have unpivot function hence will use the stack() function. Below code converts
column countries to row.
"""
def exp3(pivoted_df: DataFrame):
    """Unpivot the country columns back into (Country, Total) rows.

    Spark SQL has no unpivot() function, so stack() is used instead.
    BUGFIX: the original stack(3, ...) listed only 3 of the 4 pivoted
    country columns, silently dropping every USA amount from the
    round trip.
    """
    unpivot_expr = ("stack(4, 'Canada', Canada, 'China', China, "
                    "'Mexico', Mexico, 'USA', USA) as (Country,Total)")
    # Rows whose pivoted cell was null (product never sold there) are dropped.
    unpivot_df = pivoted_df.select("Product", f.expr(unpivot_expr)).where("Total is not null")
    unpivot_df.show(truncate=False)
    unpivot_df.printSchema()
def main():
    """Build a demo sales DataFrame and run the pivot/unpivot examples."""
    # Small local session; 4g executor memory is generous for this toy data.
    spark = SparkSession.builder \
        .master("local[2]") \
        .appName("Pivot and UnPivot") \
        .config("spark.executor.memory", "4g") \
        .getOrCreate()

    # (Product, Amount, Country) sales facts shared by every example.
    data = [("Banana", 1000, "USA"), ("Carrots", 1500, "USA"), ("Beans", 1600, "USA"),
            ("Orange", 2000, "USA"), ("Orange", 2000, "USA"), ("Banana", 400, "China"),
            ("Carrots", 1200, "China"), ("Beans", 1500, "China"), ("Orange", 4000, "China"),
            ("Banana", 2000, "Canada"), ("Carrots", 2000, "Canada"), ("Beans", 2000, "Mexico")]
    columns = ["Product", "Amount", "Country"]
    df = spark.createDataFrame(data=data, schema=columns)
    print("main output: source data schema")
    df.printSchema()
    print("main output: source data")
    df.show(truncate=False)
    # run exp1 (exp3 reuses its pivoted result)
    pivot_df = exp1(df)
    # run exp2
    # exp2(df)
    # run exp3
    exp3(pivot_df)


if __name__ == "__main__":
    main()
| 37.791209 | 109 | 0.668218 |
e3830c06f89d6f2d74a584e9e1bc699ca3e50f84 | 1,954 | py | Python | course_schedule/solution.py | mahimadubey/leetcode-python | 38acc65fa4315f86acb62874ca488620c5d77e17 | [
"BSD-2-Clause"
] | 528 | 2015-01-08T21:27:06.000Z | 2022-03-17T09:23:44.000Z | course_schedule/solution.py | durgaharish1993/leetcode-python | 6c523ef4759a57433e10271b584eece16f9f05f3 | [
"BSD-2-Clause"
] | null | null | null | course_schedule/solution.py | durgaharish1993/leetcode-python | 6c523ef4759a57433e10271b584eece16f9f05f3 | [
"BSD-2-Clause"
] | 278 | 2015-01-12T06:45:17.000Z | 2022-02-20T08:09:22.000Z | """
There are a total of n courses you have to take, labeled from 0 to n - 1.
Some courses may have prerequisites, for example to take course 0 you have to
first take course 1, which is expressed as a pair: [0,1]
Given the total number of courses and a list of prerequisite pairs, is it
possible for you to finish all courses?
For example:
2, [[1,0]]
There are a total of 2 courses to take. To take course 1 you should have
finished course 0. So it is possible.
2, [[1,0],[0,1]]
There are a total of 2 courses to take. To take course 1 you should have
finished course 0, and to take course 0 you should also have finished course
1. So it is impossible.
"""
class Solution(object):
    def canFinish(self, numCourses, prerequisites):
        """
        :type numCourses: int
        :type prerequisites: List[List[int]]
        :rtype: bool

        Kahn's algorithm (topological sort).

        The original implementation rescanned every course's prerequisite
        set each time a node was dequeued, which is O(V * E).  Building a
        reverse adjacency list (prerequisite -> dependent courses) plus an
        indegree counter makes the whole pass O(V + E).
        """
        # dependents[p] lists every course that directly requires p;
        # indegree[c] counts c's not-yet-satisfied prerequisites.
        # Duplicate pairs are counted symmetrically, so they are harmless.
        dependents = [[] for _ in range(numCourses)]
        indegree = [0] * numCourses
        for course, prereq in prerequisites:
            dependents[prereq].append(course)
            indegree[course] += 1
        # Seed with every course that has no prerequisites at all.
        ready = [c for c in range(numCourses) if indegree[c] == 0]
        finished = 0
        while ready:
            prereq = ready.pop()
            finished += 1
            # Satisfying `prereq` may unlock some of its dependents.
            for course in dependents[prereq]:
                indegree[course] -= 1
                if indegree[course] == 0:
                    ready.append(course)
        # Every course was scheduled iff the prerequisite graph is acyclic.
        return finished == numCourses
# Quick smoke test: one trivially schedulable case, one cyclic case.
solver = Solution()
print(solver.canFinish(1, []))
print(solver.canFinish(3, [[1, 0], [0, 1]]))
| 31.015873 | 77 | 0.608495 |
083f66a03dec913991616bc78c957c015717c27f | 1,151 | py | Python | build/make_functions.py | gigamonkey/sheets | a89e76360ad9a35e44e5e352346eeccbe6952b1f | [
"BSD-3-Clause"
] | null | null | null | build/make_functions.py | gigamonkey/sheets | a89e76360ad9a35e44e5e352346eeccbe6952b1f | [
"BSD-3-Clause"
] | 1 | 2021-04-03T23:07:35.000Z | 2021-04-03T23:07:35.000Z | build/make_functions.py | gigamonkey/sheets | a89e76360ad9a35e44e5e352346eeccbe6952b1f | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
from urllib.parse import urlparse
from build.make_spreadsheets import emit_docs
from bs4 import BeautifulSoup
import sys
# Usage: make_functions.py <saved-html-file> <original-url>
with open(sys.argv[1]) as f:
    html_doc = f.read()

url = sys.argv[2]
p = urlparse(url)
# Bug fix: the scheme must be joined with "://".  The original
# f"{p.scheme}//{p.netloc}" produced e.g. "https//example.com", so every
# "Learn more" link emitted below was broken.
base_url = f"{p.scheme}://{p.netloc}"

soup = BeautifulSoup(html_doc, 'html.parser')
def description(cells):
    """Return the plain text of a row's description cell (``cells[3]``).

    Any anchor whose text starts with "Learn more" is rewritten in place so
    that its target URL (absolutized with the module-level ``base_url``)
    survives the conversion to plain text.

    NOTE(review): assumes ``cells`` is a list of bs4 Tag objects and that the
    description always sits in the fourth column — confirm against the table
    layout of the scraped page.
    """
    td = cells[3]
    for a in td.find_all("a"):
        if a.string.startswith("Learn more"):
            # Replace the anchor text so get_text() keeps the destination.
            a.string.replace_with(f"\n\nLearn more: {base_url}{a['href']}")
    return td.get_text()
def emit_function(name, description):
    """Print a Python stub definition for the spreadsheet function *name*.

    ``emit_docs(description, 4)`` renders the docstring at a 4-space indent,
    so the ``return`` statement must use the same indent.  The original code
    emitted it with a single leading space, which makes the generated module
    fail to parse (inconsistent dedent after the docstring).
    """
    print()
    print()
    print(f"def {name}(*args) -> Function:")
    emit_docs(description, 4)
    # 4-space indent to match the body emitted by emit_docs(description, 4).
    print(f'    return Function("{name}", args)')
# Emit the generated module: provenance header, the one import it needs,
# then one stub per table row.
print(f"# Generated from names extracted from {url}")
print()
print(f"from gigamonkeys.formulas import Function")

for table in soup.find_all("table"):
    # Header row; not used below, but accessing thead validates table shape.
    cols = [header.get_text().lower() for header in table.thead.tr.find_all("th")]
    for row in table.tbody.find_all("tr"):
        cells = list(row.children)
        func_name = cells[1].get_text().replace(".", "_")
        emit_function(func_name, description(cells))
14ae8e06c21a15ef7f3ed6fe68fe29bc6130f0cf | 26,828 | py | Python | game/OTPInternalRepository.py | AnythingTechPro/toontown-otp-original | 40749161f02c6f75844b1d072bf1498b42c2800d | [
"BSD-3-Clause"
] | 2 | 2019-12-05T01:07:38.000Z | 2021-02-25T06:00:47.000Z | game/OTPInternalRepository.py | rasheelprogrammer/toontown-otp-original | 40749161f02c6f75844b1d072bf1498b42c2800d | [
"BSD-3-Clause"
] | null | null | null | game/OTPInternalRepository.py | rasheelprogrammer/toontown-otp-original | 40749161f02c6f75844b1d072bf1498b42c2800d | [
"BSD-3-Clause"
] | 1 | 2021-02-25T06:00:48.000Z | 2021-02-25T06:00:48.000Z | import collections
from panda3d.core import *
from direct.distributed.MsgTypes import *
from realtime.types import *
from direct.showbase import ShowBase # __builtin__.config
from direct.task.TaskManagerGlobal import * # taskMgr
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.ConnectionRepository import ConnectionRepository
from direct.distributed.PyDatagram import PyDatagram
from direct.distributed.PyDatagramIterator import PyDatagramIterator
# Helper functions for logging output:
def msgpack_length(dg, length, fix, maxfix, tag8, tag16, tag32):
    """Append a MsgPack length header for a container/string to *dg*.

    *fix*/*maxfix* describe the single-byte "fix" encoding (length folded
    into the type byte); *tag8*/*tag16*/*tag32* are the explicit-length type
    tags, any of which may be None when that width is not available for the
    type being encoded.  Raises ValueError when the length fits none of the
    provided encodings.
    """
    if length < maxfix:
        # Short lengths are folded straight into the type byte.
        dg.addUint8(fix + length)
        return
    if tag8 is not None and length < (1 << 8):
        dg.addUint8(tag8)
        dg.addUint8(length)
        return
    if tag16 is not None and length < (1 << 16):
        dg.addUint8(tag16)
        dg.addBeUint16(length)
        return
    if tag32 is not None and length < (1 << 32):
        dg.addUint8(tag32)
        dg.addBeUint32(length)
        return
    raise ValueError('Value too big for MessagePack')
def msgpack_encode(dg, element):
    """Serialize *element* into datagram *dg* using the MessagePack format.

    Handles None, bool, int/long, dict, list, str and float; containers are
    encoded recursively.  Raises ValueError for integers outside the 64-bit
    MsgPack range and TypeError for unsupported types.

    NOTE: this code targets Python 2 — ``long`` and ``basestring`` do not
    exist on Python 3.
    """
    if element == None:
        dg.addUint8(0xc0)  # nil
    # bool must be tested before the int branch: bool is a subclass of int.
    elif element is False:
        dg.addUint8(0xc2)
    elif element is True:
        dg.addUint8(0xc3)
    elif isinstance(element, (int, long)):
        # Choose the narrowest MsgPack integer encoding that fits the value.
        if -32 <= element < 128:
            dg.addInt8(element)  # positive/negative fixint
        elif 128 <= element < 256:
            dg.addUint8(0xcc)  # uint 8
            dg.addUint8(element)
        elif 256 <= element < 65536:
            dg.addUint8(0xcd)  # uint 16
            dg.addBeUint16(element)
        elif 65536 <= element < (1<<32):
            dg.addUint8(0xce)  # uint 32
            dg.addBeUint32(element)
        elif (1<<32) <= element < (1<<64):
            dg.addUint8(0xcf)  # uint 64
            dg.addBeUint64(element)
        elif -128 <= element < -32:
            dg.addUint8(0xd0)  # int 8
            dg.addInt8(element)
        elif -32768 <= element < -128:
            dg.addUint8(0xd1)  # int 16
            dg.addBeInt16(element)
        elif -1<<31 <= element < -32768:
            dg.addUint8(0xd2)  # int 32
            dg.addBeInt32(element)
        elif -1<<63 <= element < -1<<31:
            dg.addUint8(0xd3)  # int 64
            dg.addBeInt64(element)
        else:
            raise ValueError('int out of range for msgpack: %d' % element)
    elif isinstance(element, dict):
        # Map header, then alternating key/value encodings.
        msgpack_length(dg, len(element), 0x80, 0x10, None, 0xde, 0xdf)
        for k,v in element.items():
            msgpack_encode(dg, k)
            msgpack_encode(dg, v)
    elif isinstance(element, list):
        # Array header followed by each element in order.
        msgpack_length(dg, len(element), 0x90, 0x10, None, 0xdc, 0xdd)
        for v in element:
            msgpack_encode(dg, v)
    elif isinstance(element, basestring):
        # 0xd9 is str 8 in all recent versions of the MsgPack spec, but somehow
        # Logstash bundles a MsgPack implementation SO OLD that this isn't
        # handled correctly so this function avoids it too
        msgpack_length(dg, len(element), 0xa0, 0x20, None, 0xda, 0xdb)
        dg.appendData(element)
    elif isinstance(element, float):
        # Python does not distinguish between floats and doubles, so we send
        # everything as a double in MsgPack:
        dg.addUint8(0xcb)
        dg.addBeFloat64(element)
    else:
        raise TypeError('Encountered non-MsgPack-packable value: %r' % element)
class OTPInternalRepository(ConnectionRepository):
notify = DirectNotifyGlobal.directNotify.newCategory("OTPInternalRepository")
def __init__(self, baseChannel, serverId=None, dcFileNames = None,
dcSuffix = 'AI', connectMethod = None, threadedNet = None):
if connectMethod is None:
connectMethod = self.CM_NATIVE
ConnectionRepository.__init__(self, connectMethod, config, hasOwnerView = False, threadedNet = threadedNet)
self.setClientDatagram(False)
self.dcSuffix = dcSuffix
if hasattr(self, 'setVerbose'):
if self.config.GetBool('verbose-internalrepository'):
self.setVerbose(1)
# The State Server we are configured to use for creating objects.
#If this is None, generating objects is not possible.
self.serverId = self.config.GetInt('air-stateserver', 0) or None
if serverId is not None:
self.serverId = serverId
maxChannels = self.config.GetInt('air-channel-allocation', 1000000)
self.channelAllocator = UniqueIdAllocator(baseChannel, baseChannel+maxChannels-1)
self._registeredChannels = set()
self.__contextCounter = 0
#self.netMessenger = NetMessenger(self)
#self.dbInterface = AstronDatabaseInterface(self)
self.__callbacks = {}
self.ourChannel = self.allocateChannel()
self.eventLogId = self.config.GetString('eventlog-id', 'AIR:%d' % self.ourChannel)
self.eventSocket = None
eventLogHost = self.config.GetString('eventlog-host', '')
if eventLogHost:
if ':' in eventLogHost:
host, port = eventLogHost.split(':', 1)
self.setEventLogHost(host, int(port))
else:
self.setEventLogHost(eventLogHost)
self.readDCFile(dcFileNames)
def getContext(self):
    """Return the next request-context ID for correlating replies.

    The counter wraps at 0xFFFFFFFF so the value always fits in the
    unsigned 32-bit context field used on the wire.
    """
    self.__contextCounter = (self.__contextCounter + 1) & 0xFFFFFFFF
    return self.__contextCounter
def allocateChannel(self):
"""
Allocate an unused channel out of this AIR's configured channel space.
This is also used to allocate IDs for DistributedObjects, since those
occupy a channel.
"""
return self.channelAllocator.allocate()
def deallocateChannel(self, channel):
"""
Return the previously-allocated channel back to the allocation pool.
"""
self.channelAllocator.free(channel)
def registerForChannel(self, channel):
"""
Register for messages on a specific Message Director channel.
If the channel is already open by this AIR, nothing will happen.
"""
if channel in self._registeredChannels:
return
self._registeredChannels.add(channel)
dg = PyDatagram()
dg.addServerControlHeader(CONTROL_SET_CHANNEL)
dg.addChannel(channel)
self.send(dg)
def unregisterForChannel(self, channel):
"""
Unregister a channel subscription on the Message Director. The Message
Director will cease to relay messages to this AIR sent on the channel.
"""
if channel not in self._registeredChannels:
return
self._registeredChannels.remove(channel)
dg = PyDatagram()
dg.addServerControlHeader(CONTROL_REMOVE_CHANNEL)
dg.addChannel(channel)
self.send(dg)
def addPostRemove(self, dg):
"""
Register a datagram with the Message Director that gets sent out if the
connection is ever lost.
This is useful for registering cleanup messages: If the Panda3D process
ever crashes unexpectedly, the Message Director will detect the socket
close and automatically process any post-remove datagrams.
"""
dg2 = PyDatagram()
dg2.addServerControlHeader(CONTROL_ADD_POST_REMOVE)
dg2.addUint64(self.ourChannel)
dg2.appendData(dg.getMessage())
self.send(dg2)
def clearPostRemove(self):
"""
Clear all datagrams registered with addPostRemove.
This is useful if the Panda3D process is performing a clean exit. It may
clear the "emergency clean-up" post-remove messages and perform a normal
exit-time clean-up instead, depending on the specific design of the game.
"""
dg = PyDatagram()
dg.addServerControlHeader(CONTROL_CLEAR_POST_REMOVE)
dg.addUint64(self.ourChannel)
self.send(dg)
def handleDatagram(self, di):
msgType = self.getMsgType()
if msgType in (STATESERVER_OBJECT_ENTER_AI_WITH_REQUIRED,
STATESERVER_OBJECT_ENTER_AI_WITH_REQUIRED_OTHER):
self.handleObjEntry(di, msgType == STATESERVER_OBJECT_ENTER_AI_WITH_REQUIRED_OTHER)
elif msgType in (STATESERVER_OBJECT_CHANGING_AI,
STATESERVER_OBJECT_DELETE_RAM):
self.handleObjExit(di)
elif msgType == STATESERVER_OBJECT_CHANGING_LOCATION:
self.handleObjLocation(di)
#elif msgType in (DBSERVER_CREATE_OBJECT_RESP,
# DBSERVER_OBJECT_GET_ALL_RESP,
# DBSERVER_OBJECT_GET_FIELDS_RESP,
# DBSERVER_OBJECT_GET_FIELD_RESP,
# DBSERVER_OBJECT_SET_FIELD_IF_EQUALS_RESP,
# DBSERVER_OBJECT_SET_FIELDS_IF_EQUALS_RESP):
# self.dbInterface.handleDatagram(msgType, di)
#elif msgType == DBSS_OBJECT_GET_ACTIVATED_RESP:
# self.handleGetActivatedResp(di)
#elif msgType == STATESERVER_OBJECT_GET_LOCATION_RESP:
# self.handleGetLocationResp(di)
#elif msgType == STATESERVER_OBJECT_GET_ALL_RESP:
# self.handleGetObjectResp(di)
#elif msgType == CLIENTAGENT_GET_NETWORK_ADDRESS_RESP:
# self.handleGetNetworkAddressResp(di)
#elif msgType >= 20000:
# # These messages belong to the NetMessenger:
# self.netMessenger.handle(msgType, di)
else:
self.notify.warning('Received message with unknown MsgType=%d' % msgType)
def handleObjLocation(self, di):
doId = di.getUint32()
parentId = di.getUint32()
zoneId = di.getUint32()
do = self.doId2do.get(doId)
if not do:
self.notify.warning('Received location for unknown doId=%d!' % (doId))
return
do.setLocation(parentId, zoneId)
def handleObjEntry(self, di, other):
doId = di.getUint32()
parentId = di.getUint32()
zoneId = di.getUint32()
classId = di.getUint16()
if classId not in self.dclassesByNumber:
self.notify.warning('Received entry for unknown dclass=%d! (Object %d)' % (classId, doId))
return
if doId in self.doId2do:
return # We already know about this object; ignore the entry.
dclass = self.dclassesByNumber[classId]
do = dclass.getClassDef()(self)
do.dclass = dclass
do.doId = doId
# The DO came in off the server, so we do not unregister the channel when
# it dies:
do.doNotDeallocateChannel = True
self.addDOToTables(do, location=(parentId, zoneId))
# Now for generation:
do.generate()
if other:
do.updateAllRequiredOtherFields(dclass, di)
else:
do.updateAllRequiredFields(dclass, di)
def handleObjExit(self, di):
doId = di.getUint32()
if doId not in self.doId2do:
self.notify.warning('Received AI exit for unknown object %d' % (doId))
return
do = self.doId2do[doId]
self.removeDOFromTables(do)
do.delete()
do.sendDeleteEvent()
def handleGetActivatedResp(self, di):
ctx = di.getUint32()
doId = di.getUint32()
activated = di.getUint8()
if ctx not in self.__callbacks:
self.notify.warning('Received unexpected DBSS_OBJECT_GET_ACTIVATED_RESP (ctx: %d)' %ctx)
return
try:
self.__callbacks[ctx](doId, activated)
finally:
del self.__callbacks[ctx]
def getActivated(self, doId, callback):
ctx = self.getContext()
self.__callbacks[ctx] = callback
dg = PyDatagram()
dg.addServerHeader(doId, self.ourChannel, DBSS_OBJECT_GET_ACTIVATED)
dg.addUint32(ctx)
dg.addUint32(doId)
self.send(dg)
def getLocation(self, doId, callback):
"""
Ask a DistributedObject where it is.
You should already be sure the object actually exists, otherwise the
callback will never be called.
Callback is called as: callback(doId, parentId, zoneId)
"""
ctx = self.getContext()
self.__callbacks[ctx] = callback
dg = PyDatagram()
dg.addServerHeader(doId, self.ourChannel, STATESERVER_OBJECT_GET_LOCATION)
dg.addUint32(ctx)
self.send(dg)
def handleGetLocationResp(self, di):
ctx = di.getUint32()
doId = di.getUint32()
parentId = di.getUint32()
zoneId = di.getUint32()
if ctx not in self.__callbacks:
self.notify.warning('Received unexpected STATESERVER_OBJECT_GET_LOCATION_RESP (ctx: %d)' % ctx)
return
try:
self.__callbacks[ctx](doId, parentId, zoneId)
finally:
del self.__callbacks[ctx]
def getObject(self, doId, callback):
"""
Get the entire state of an object.
You should already be sure the object actually exists, otherwise the
callback will never be called.
Callback is called as: callback(doId, parentId, zoneId, dclass, fields)
"""
ctx = self.getContext()
self.__callbacks[ctx] = callback
dg = PyDatagram()
dg.addServerHeader(doId, self.ourChannel, STATESERVER_OBJECT_GET_ALL)
dg.addUint32(ctx)
dg.addUint32(doId)
self.send(dg)
def handleGetObjectResp(self, di):
ctx = di.getUint32()
doId = di.getUint32()
parentId = di.getUint32()
zoneId = di.getUint32()
classId = di.getUint16()
if ctx not in self.__callbacks:
self.notify.warning('Received unexpected STATESERVER_OBJECT_GET_ALL_RESP (ctx: %d)' % ctx)
return
if classId not in self.dclassesByNumber:
self.notify.warning('Received STATESERVER_OBJECT_GET_ALL_RESP for unknown dclass=%d! (Object %d)' % (classId, doId))
return
dclass = self.dclassesByNumber[classId]
fields = {}
unpacker = DCPacker()
unpacker.setUnpackData(di.getRemainingBytes())
# Required:
for i in xrange(dclass.getNumInheritedFields()):
field = dclass.getInheritedField(i)
if not field.isRequired() or field.asMolecularField():
continue
unpacker.beginUnpack(field)
fields[field.getName()] = field.unpackArgs(unpacker)
unpacker.endUnpack()
# Other:
other = unpacker.rawUnpackUint16()
for i in xrange(other):
field = dclass.getFieldByIndex(unpacker.rawUnpackUint16())
unpacker.beginUnpack(field)
fields[field.getName()] = field.unpackArgs(unpacker)
unpacker.endUnpack()
try:
self.__callbacks[ctx](doId, parentId, zoneId, dclass, fields)
finally:
del self.__callbacks[ctx]
def getNetworkAddress(self, clientId, callback):
"""
Get the endpoints of a client connection.
You should already be sure the client actually exists, otherwise the
callback will never be called.
Callback is called as: callback(remoteIp, remotePort, localIp, localPort)
"""
ctx = self.getContext()
self.__callbacks[ctx] = callback
dg = PyDatagram()
dg.addServerHeader(clientId, self.ourChannel, CLIENTAGENT_GET_NETWORK_ADDRESS)
dg.addUint32(ctx)
self.send(dg)
def handleGetNetworkAddressResp(self, di):
ctx = di.getUint32()
remoteIp = di.getString()
remotePort = di.getUint16()
localIp = di.getString()
localPort = di.getUint16()
if ctx not in self.__callbacks:
self.notify.warning('Received unexpected CLIENTAGENT_GET_NETWORK_ADDRESS_RESP (ctx: %d)' % ctx)
return
try:
self.__callbacks[ctx](remoteIp, remotePort, localIp, localPort)
finally:
del self.__callbacks[ctx]
def sendUpdate(self, do, fieldName, args):
"""
Send a field update for the given object.
You should use do.sendUpdate(...) instead. This is not meant to be
called directly unless you really know what you are doing.
"""
self.sendUpdateToChannel(do, do.doId, fieldName, args)
def sendUpdateToChannel(self, do, channelId, fieldName, args):
"""
Send an object field update to a specific channel.
This is useful for directing the update to a specific client or node,
rather than at the State Server managing the object.
You should use do.sendUpdateToChannel(...) instead. This is not meant
to be called directly unless you really know what you are doing.
"""
dclass = do.dclass
field = dclass.getFieldByName(fieldName)
dg = field.aiFormatUpdate(do.doId, channelId, self.ourChannel, args)
self.send(dg)
def sendActivate(self, doId, parentId, zoneId, dclass=None, fields=None):
"""
Activate a DBSS object, given its doId, into the specified parentId/zoneId.
If both dclass and fields are specified, an ACTIVATE_WITH_DEFAULTS_OTHER
will be sent instead. In other words, the specified fields will be
auto-applied during the activation.
"""
fieldPacker = DCPacker()
fieldCount = 0
if dclass and fields:
for k,v in fields.items():
field = dclass.getFieldByName(k)
if not field:
self.notify.error('Activation request for %s object contains '
'invalid field named %s' % (dclass.getName(), k))
fieldPacker.rawPackUint16(field.getNumber())
fieldPacker.beginPack(field)
field.packArgs(fieldPacker, v)
fieldPacker.endPack()
fieldCount += 1
dg = PyDatagram()
dg.addServerHeader(doId, self.ourChannel, DBSS_OBJECT_ACTIVATE_WITH_DEFAULTS)
dg.addUint32(doId)
dg.addUint32(0)
dg.addUint32(0)
self.send(dg)
# DEFAULTS_OTHER isn't implemented yet, so we chase it with a SET_FIELDS
dg = PyDatagram()
dg.addServerHeader(doId, self.ourChannel, STATESERVER_OBJECT_SET_FIELDS)
dg.addUint32(doId)
dg.addUint16(fieldCount)
dg.appendData(fieldPacker.getString())
self.send(dg)
# Now slide it into the zone we expect to see it in (so it
# generates onto us with all of the fields in place)
dg = PyDatagram()
dg.addServerHeader(doId, self.ourChannel, STATESERVER_OBJECT_SET_LOCATION)
dg.addUint32(parentId)
dg.addUint32(zoneId)
self.send(dg)
else:
dg = PyDatagram()
dg.addServerHeader(doId, self.ourChannel, DBSS_OBJECT_ACTIVATE_WITH_DEFAULTS)
dg.addUint32(doId)
dg.addUint32(parentId)
dg.addUint32(zoneId)
self.send(dg)
def sendSetLocation(self, do, parentId, zoneId):
dg = PyDatagram()
dg.addServerHeader(do.doId, self.ourChannel, STATESERVER_OBJECT_SET_LOCATION)
dg.addUint32(parentId)
dg.addUint32(zoneId)
self.send(dg)
def generateWithRequired(self, do, parentId, zoneId, optionalFields=[]):
"""
Generate an object onto the State Server, choosing an ID from the pool.
You should use do.generateWithRequired(...) instead. This is not meant
to be called directly unless you really know what you are doing.
"""
doId = self.allocateChannel()
self.generateWithRequiredAndId(do, doId, parentId, zoneId, optionalFields)
def generateWithRequiredAndId(self, do, doId, parentId, zoneId, optionalFields=[]):
"""
Generate an object onto the State Server, specifying its ID and location.
You should use do.generateWithRequiredAndId(...) instead. This is not
meant to be called directly unless you really know what you are doing.
"""
do.doId = doId
self.addDOToTables(do, location=(parentId, zoneId))
do.sendGenerateWithRequired(self, parentId, zoneId, optionalFields)
def requestDelete(self, do):
"""
Request the deletion of an object that already exists on the State Server.
You should use do.requestDelete() instead. This is not meant to be
called directly unless you really know what you are doing.
"""
dg = PyDatagram()
dg.addServerHeader(do.doId, self.ourChannel, STATESERVER_OBJECT_DELETE_RAM)
dg.addUint32(do.doId)
self.send(dg)
def connect(self, host, port=7199):
"""
Connect to a Message Director. The airConnected message is sent upon
success.
N.B. This overrides the base class's connect(). You cannot use the
ConnectionRepository connect() parameters.
"""
url = URLSpec()
url.setServer(host)
url.setPort(port)
self.notify.info('Now connecting to %s:%s...' % (host, port))
ConnectionRepository.connect(self, [url],
successCallback=self.__connected,
failureCallback=self.__connectFailed,
failureArgs=[host, port])
def __connected(self):
self.notify.info('Connected successfully.')
# Listen to our channel...
self.registerForChannel(self.ourChannel)
# If we're configured with a State Server, register a post-remove to
# clean up whatever objects we own on this server should we unexpectedly
# fall over and die.
if self.serverId:
dg = PyDatagram()
dg.addServerHeader(self.serverId, self.ourChannel, STATESERVER_DELETE_AI_OBJECTS)
dg.addChannel(self.ourChannel)
self.addPostRemove(dg)
messenger.send('airConnected')
self.handleConnected()
def __connectFailed(self, code, explanation, host, port):
self.notify.warning('Failed to connect! (code=%s; %r)' % (code, explanation))
# Try again...
retryInterval = config.GetFloat('air-reconnect-delay', 5.0)
taskMgr.doMethodLater(retryInterval, self.connect, 'Reconnect delay', extraArgs=[host, port])
def handleConnected(self):
"""
Subclasses should override this if they wish to handle the connection
event.
"""
def lostConnection(self):
# This should be overridden by a subclass if unexpectedly losing connection
# is okay.
self.notify.error('Lost connection to gameserver!')
def setEventLogHost(self, host, port=7197):
"""
Set the target host for Event Logger messaging. This should be pointed
at the UDP IP:port that hosts the cluster's running Event Logger.
Providing a value of None or an empty string for 'host' will disable
event logging.
"""
if not host:
self.eventSocket = None
return
address = SocketAddress()
if not address.setHost(host, port):
self.notify.warning('Invalid Event Log host specified: %s:%s' % (host, port))
self.eventSocket = None
else:
self.eventSocket = SocketUDPOutgoing()
self.eventSocket.InitToAddress(address)
def writeServerEvent(self, logtype, *args, **kwargs):
"""
Write an event to the central Event Logger, if one is configured.
The purpose of the Event Logger is to keep a game-wide record of all
interesting in-game events that take place. Therefore, this function
should be used whenever such an interesting in-game event occurs.
"""
if self.eventSocket is None:
return # No event logger configured!
log = collections.OrderedDict()
log['type'] = logtype
log['sender'] = self.eventLogId
for i,v in enumerate(args):
# +1 because the logtype was _0, so we start at _1
log['_%d' % (i+1)] = v
log.update(kwargs)
dg = PyDatagram()
msgpack_encode(dg, log)
self.eventSocket.Send(dg.getMessage())
def setAI(self, doId, aiChannel):
    """
    Sets the AI of the specified DistributedObjectAI to be the specified channel.
    Generally, you should not call this method, and instead call DistributedObjectAI.setAI.

    :param doId: ID of the object whose AI channel is being set.
    :param aiChannel: channel of the new AI server (also used as the sender).
    """
    dg = PyDatagram()
    dg.addServerHeader(doId, aiChannel, STATESERVER_OBJECT_SET_AI)
    # Consistency fix: use the camelCase addUint64 like the rest of this
    # file (the snake_case add_uint64 alias only exists on newer Panda3D).
    dg.addUint64(aiChannel)
    self.send(dg)
def eject(self, clientChannel, reasonCode, reason):
    """
    Kicks the client residing at the specified clientChannel, using the
    specified reason code and human-readable reason string.
    """
    dg = PyDatagram()
    dg.addServerHeader(clientChannel, self.ourChannel, CLIENTAGENT_EJECT)
    # Consistency fix: camelCase addUint16 matches the rest of this file.
    dg.addUint16(reasonCode)
    dg.addString(reason)
    self.send(dg)
def setClientState(self, clientChannel, state):
    """
    Sets the state of the client on the CA.
    Useful for logging in and logging out, and for little else.

    :param clientChannel: channel of the client connection on the CA.
    :param state: the new client-agent state value.
    """
    dg = PyDatagram()
    dg.addServerHeader(clientChannel, self.ourChannel, CLIENTAGENT_SET_STATE)
    # Consistency fix: camelCase addUint16 matches the rest of this file.
    dg.addUint16(state)
    self.send(dg)
def clientAddSessionObject(self, clientChannel, doId):
    """
    Declares the specified DistributedObject to be a "session object",
    meaning that it is destroyed when the client disconnects.
    Generally used for avatars owned by the client.
    """
    dg = PyDatagram()
    dg.addServerHeader(clientChannel, self.ourChannel, CLIENTAGENT_ADD_SESSION_OBJECT)
    # Consistency fix: camelCase addUint32 matches the rest of this file.
    dg.addUint32(doId)
    self.send(dg)
def clientAddInterest(self, clientChannel, interestId, parentId, zoneId):
    """
    Opens an interest on the behalf of the client. This, used in conjunction
    with add_interest: visible (or preferably, disabled altogether), will mitigate
    possible security risks.

    :param clientChannel: channel of the client connection on the CA.
    :param interestId: the client-scoped interest handle being opened.
    :param parentId: parent of the interested zone.
    :param zoneId: zone the client should see.
    """
    dg = PyDatagram()
    dg.addServerHeader(clientChannel, self.ourChannel, CLIENTAGENT_ADD_INTEREST)
    # Consistency fix: camelCase addUint* matches the rest of this file.
    dg.addUint16(interestId)
    dg.addUint32(parentId)
    dg.addUint32(zoneId)
    self.send(dg)
def setOwner(self, doId, newOwner):
    """
    Sets the owner of a DistributedObject. This will enable the new owner to send "ownsend" fields,
    and will generate an OwnerView.

    :param doId: ID of the object whose owner is changing.
    :param newOwner: channel of the new owner.
    """
    dg = PyDatagram()
    dg.addServerHeader(doId, self.ourChannel, STATESERVER_OBJECT_SET_OWNER)
    # Consistency fix: camelCase addUint64 matches the rest of this file.
    dg.addUint64(newOwner)
    self.send(dg)
| 35.962466 | 128 | 0.625578 |
9477e57b7c5d2cf35e4a875781835dc50c722f9f | 6,403 | py | Python | amqpstorm/management/api.py | ZygusPatryk/amqpstorm | 0f3ad84a529f12769d34638a88c38f3055cb05cd | [
"MIT"
] | 140 | 2016-06-07T18:53:57.000Z | 2022-03-23T01:50:15.000Z | amqpstorm/management/api.py | ZygusPatryk/amqpstorm | 0f3ad84a529f12769d34638a88c38f3055cb05cd | [
"MIT"
] | 85 | 2016-04-11T23:32:32.000Z | 2022-03-19T07:21:21.000Z | amqpstorm/management/api.py | ZygusPatryk/amqpstorm | 0f3ad84a529f12769d34638a88c38f3055cb05cd | [
"MIT"
] | 38 | 2016-04-20T20:21:13.000Z | 2022-03-23T05:31:58.000Z | from amqpstorm.compatibility import quote
from amqpstorm.management.basic import Basic
from amqpstorm.management.channel import Channel
from amqpstorm.management.connection import Connection
from amqpstorm.management.exchange import Exchange
from amqpstorm.management.healthchecks import HealthChecks
from amqpstorm.management.http_client import HTTPClient
from amqpstorm.management.queue import Queue
from amqpstorm.management.user import User
from amqpstorm.management.virtual_host import VirtualHost
API_ALIVENESS_TEST = 'aliveness-test/%s'
API_NODES = 'nodes'
API_OVERVIEW = 'overview'
API_WHOAMI = 'whoami'
API_TOP = 'top/%s'
class ManagementApi(object):
"""RabbitMQ Management Api
e.g.
::
from amqpstorm.management import ManagementApi
client = ManagementApi('https://localhost:15671', 'guest', 'guest', verify=True)
client.user.create('my_user', 'password', tags='administrator')
client.user.set_permission(
'my_user',
virtual_host='/',
configure_regex='.*',
write_regex='.*',
read_regex='.*'
)
:param str api_url: RabbitMQ Management url (e.g. https://rmq.amqpstorm.io:15671)
:param str username: Username (e.g. guest)
:param str password: Password (e.g. guest)
:param int timeout: TCP Timeout
:param None,str,bool verify: Requests session verify (e.g. True, False or path to CA bundle)
:param None,str,tuple cert: Requests session cert
"""
def __init__(self, api_url, username, password, timeout=10,
verify=None, cert=None):
self.http_client = HTTPClient(
api_url, username, password,
timeout=timeout, verify=verify, cert=cert
)
self._basic = Basic(self.http_client)
self._channel = Channel(self.http_client)
self._connection = Connection(self.http_client)
self._exchange = Exchange(self.http_client)
self._healthchecks = HealthChecks(self.http_client)
self._queue = Queue(self.http_client)
self._user = User(self.http_client)
self._virtual_host = VirtualHost(self.http_client)
def __enter__(self):
return self
def __exit__(self, *_):
pass
def __del__(self):
self.http_client.session.close()
@property
def basic(self):
"""RabbitMQ Basic Operations.
e.g.
::
client.basic.publish('Hello RabbitMQ', routing_key='my_queue')
:rtype: amqpstorm.management.basic.Basic
"""
return self._basic
@property
def channel(self):
"""RabbitMQ Channel Operations.
e.g.
::
client.channel.list()
:rtype: amqpstorm.management.channel.Channel
"""
return self._channel
@property
def connection(self):
"""RabbitMQ Connection Operations.
e.g.
::
client.connection.list()
:rtype: amqpstorm.management.connection.Connection
"""
return self._connection
@property
def exchange(self):
"""RabbitMQ Exchange Operations.
e.g.
::
client.exchange.declare('my_exchange')
:rtype: amqpstorm.management.exchange.Exchange
"""
return self._exchange
@property
def healthchecks(self):
"""RabbitMQ Healthchecks.
e.g.
::
client.healthchecks.get()
:rtype: amqpstorm.management.healthchecks.Healthchecks
"""
return self._healthchecks
@property
def queue(self):
"""RabbitMQ Queue Operations.
e.g.
::
client.queue.declare('my_queue', virtual_host='/')
:rtype: amqpstorm.management.queue.Queue
"""
return self._queue
@property
def user(self):
"""RabbitMQ User Operations.
e.g.
::
client.user.create('my_user', 'password')
:rtype: amqpstorm.management.user.User
"""
return self._user
@property
def virtual_host(self):
"""RabbitMQ VirtualHost Operations.
:rtype: amqpstorm.management.virtual_host.VirtualHost
"""
return self._virtual_host
def aliveness_test(self, virtual_host='/'):
"""Aliveness Test.
e.g.
::
from amqpstorm.management import ManagementApi
client = ManagementApi('http://localhost:15672', 'guest', 'guest')
result = client.aliveness_test('/')
if result['status'] == 'ok':
print("RabbitMQ is alive!")
else:
print("RabbitMQ is not alive! :(")
:param str virtual_host: Virtual host name
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
"""
virtual_host = quote(virtual_host, '')
return self.http_client.get(API_ALIVENESS_TEST %
virtual_host)
def overview(self):
"""Get Overview.
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
"""
return self.http_client.get(API_OVERVIEW)
def nodes(self):
"""Get Nodes.
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
"""
return self.http_client.get(API_NODES)
def top(self):
"""Top Processes.
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: list
"""
nodes = []
for node in self.nodes():
nodes.append(self.http_client.get(API_TOP % node['name']))
return nodes
def whoami(self):
"""Who am I?
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
"""
return self.http_client.get(API_WHOAMI)
| 27.363248 | 96 | 0.606122 |
0d6ddff95ea03e809a867e8f308299ca6ab5a1e5 | 1,681 | py | Python | igsr_archive/object.py | olaaustine/igsr_archive | 7cca14a2ed58845e2101968098859dddeab627fc | [
"Apache-2.0"
] | 1 | 2020-04-16T12:44:24.000Z | 2020-04-16T12:44:24.000Z | igsr_archive/object.py | olaaustine/igsr_archive | 7cca14a2ed58845e2101968098859dddeab627fc | [
"Apache-2.0"
] | null | null | null | igsr_archive/object.py | olaaustine/igsr_archive | 7cca14a2ed58845e2101968098859dddeab627fc | [
"Apache-2.0"
] | 2 | 2022-01-11T12:26:52.000Z | 2022-02-22T16:22:34.000Z | import logging
# create logger
object_logger = logging.getLogger(__name__)
class fObject(object):
    """Representation of a FIRE object: the encapsulation of a file that
    was successfully archived.

    Attributes
    ----------
    objectId : int
        Numerical unique identifier of the object (eases paging queries).
    fireOid : str
        Unique string-form identifier of the object.
    md5 : str
        md5sum of the file.
    size : int
        Size of the object in bytes.
    createTime : str
        Datetime string, e.g. '2020-02-17 16:44:55'.
    path : str
        FIRE path of the object.
    published : bool
        True if the object is exposed by the FUSE layer.
    """

    def __init__(self, **kwargs):
        """Build a fObject, keeping only recognised keyword arguments.

        Parameters
        ----------
        **kwargs : dict, optional
            Any of the documented attributes (plus objectMd5/objectSize);
            unrecognised keys are silently dropped.
        """
        object_logger.debug('Creating a FIRE Object')
        permitted = ('objectId', 'fireOid', 'md5', 'size',
                     'createTime', 'path', 'published', 'objectMd5',
                     'objectSize')
        for key, value in kwargs.items():
            if key in permitted:
                self.__dict__[key] = value

    def __str__(self):
        # Render every stored attribute as key='value', comma separated.
        parts = [f"{key}='{value}'" for key, value in self.__dict__.items()]
        return ', '.join(parts)

    def __repr__(self):
        return self.__str__()
a478bf4e50a326bffefeb30af518dbb51a8fe18f | 64,693 | py | Python | speedtest.py | AndradeClaudio/speedtest-cli | cccfa97515e8c673bf99f2ca0ffcfe37045a6e2a | [
"Apache-2.0"
] | null | null | null | speedtest.py | AndradeClaudio/speedtest-cli | cccfa97515e8c673bf99f2ca0ffcfe37045a6e2a | [
"Apache-2.0"
] | null | null | null | speedtest.py | AndradeClaudio/speedtest-cli | cccfa97515e8c673bf99f2ca0ffcfe37045a6e2a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2012 Matt Martz
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import csv
import sys
import math
import errno
import signal
import socket
import timeit
import datetime
import platform
import threading
import xml.parsers.expat
try:
import gzip
GZIP_BASE = gzip.GzipFile
except ImportError:
gzip = None
GZIP_BASE = object
__version__ = '2.1.2'
class FakeShutdownEvent(object):
    """Class to fake a threading.Event.isSet so that users of this module
    are not required to register their own threading.Event()
    """

    @staticmethod
    def isSet():
        """Dummy method to always return false"""
        # Original had a typo'd docstring ("..." followed by a stray "")
        # that parsed as implicit string concatenation, not a docstring.
        return False

    # Modern threading.Event spells the accessor is_set(); provide the
    # alias so the fake stays a drop-in replacement under both names.
    is_set = isSet
# Some global variables we use
DEBUG = False
_GLOBAL_DEFAULT_TIMEOUT = object()
PY25PLUS = sys.version_info[:2] >= (2, 5)
PY26PLUS = sys.version_info[:2] >= (2, 6)
PY32PLUS = sys.version_info[:2] >= (3, 2)
# Begin import game to handle Python 2 and Python 3
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
json = None
try:
import xml.etree.ElementTree as ET
try:
from xml.etree.ElementTree import _Element as ET_Element
except ImportError:
pass
except ImportError:
from xml.dom import minidom as DOM
from xml.parsers.expat import ExpatError
ET = None
try:
from urllib2 import (urlopen, Request, HTTPError, URLError,
AbstractHTTPHandler, ProxyHandler,
HTTPDefaultErrorHandler, HTTPRedirectHandler,
HTTPErrorProcessor, OpenerDirector)
except ImportError:
from urllib.request import (urlopen, Request, HTTPError, URLError,
AbstractHTTPHandler, ProxyHandler,
HTTPDefaultErrorHandler, HTTPRedirectHandler,
HTTPErrorProcessor, OpenerDirector)
try:
from httplib import HTTPConnection, BadStatusLine
except ImportError:
from http.client import HTTPConnection, BadStatusLine
try:
from httplib import HTTPSConnection
except ImportError:
try:
from http.client import HTTPSConnection
except ImportError:
HTTPSConnection = None
try:
from httplib import FakeSocket
except ImportError:
FakeSocket = None
try:
from Queue import Queue
except ImportError:
from queue import Queue
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
try:
from urlparse import parse_qs
except ImportError:
try:
from urllib.parse import parse_qs
except ImportError:
from cgi import parse_qs
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
from argparse import ArgumentParser as ArgParser
from argparse import SUPPRESS as ARG_SUPPRESS
PARSER_TYPE_INT = int
PARSER_TYPE_STR = str
PARSER_TYPE_FLOAT = float
except ImportError:
from optparse import OptionParser as ArgParser
from optparse import SUPPRESS_HELP as ARG_SUPPRESS
PARSER_TYPE_INT = 'int'
PARSER_TYPE_STR = 'string'
PARSER_TYPE_FLOAT = 'float'
try:
from cStringIO import StringIO
BytesIO = None
except ImportError:
try:
from StringIO import StringIO
BytesIO = None
except ImportError:
from io import StringIO, BytesIO
try:
import __builtin__
except ImportError:
import builtins
from io import TextIOWrapper, FileIO
class _Py3Utf8Output(TextIOWrapper):
"""UTF-8 encoded wrapper around stdout for py3, to override
ASCII stdout
"""
def __init__(self, f, **kwargs):
buf = FileIO(f.fileno(), 'w')
super(_Py3Utf8Output, self).__init__(
buf,
encoding='utf8',
errors='strict'
)
def write(self, s):
super(_Py3Utf8Output, self).write(s)
self.flush()
_py3_print = getattr(builtins, 'print')
try:
_py3_utf8_stdout = _Py3Utf8Output(sys.stdout)
_py3_utf8_stderr = _Py3Utf8Output(sys.stderr)
except OSError:
# sys.stdout/sys.stderr is not a compatible stdout/stderr object
# just use it and hope things go ok
_py3_utf8_stdout = sys.stdout
_py3_utf8_stderr = sys.stderr
def to_utf8(v):
"""No-op encode to utf-8 for py3"""
return v
def print_(*args, **kwargs):
"""Wrapper function for py3 to print, with a utf-8 encoded stdout"""
if kwargs.get('file') == sys.stderr:
kwargs['file'] = _py3_utf8_stderr
else:
kwargs['file'] = kwargs.get('file', _py3_utf8_stdout)
_py3_print(*args, **kwargs)
else:
del __builtin__
def to_utf8(v):
"""Encode value to utf-8 if possible for py2"""
try:
return v.encode('utf8', 'strict')
except AttributeError:
return v
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5.
Taken from https://pypi.python.org/pypi/six/
Modified to set encoding to UTF-8 always, and to flush after write
"""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
encoding = 'utf8' # Always trust UTF-8 for output
if (isinstance(fp, file) and
isinstance(data, unicode) and
encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(encoding, errors)
fp.write(data)
fp.flush()
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if PY32PLUS:
etree_iter = ET.Element.iter
elif PY25PLUS:
etree_iter = ET_Element.getiterator
if PY26PLUS:
thread_is_alive = threading.Thread.is_alive
else:
thread_is_alive = threading.Thread.isAlive
# Exception "constants" to support Python 2 through Python 3
try:
import ssl
try:
CERT_ERROR = (ssl.CertificateError,)
except AttributeError:
CERT_ERROR = tuple()
HTTP_ERRORS = (
(HTTPError, URLError, socket.error, ssl.SSLError, BadStatusLine) +
CERT_ERROR
)
except ImportError:
ssl = None
HTTP_ERRORS = (HTTPError, URLError, socket.error, BadStatusLine)
# Exception hierarchy: everything raised by this module derives from
# SpeedtestException, so callers can catch the base class to handle any
# speedtest failure.  SpeedtestHTTPError roots the transport-level errors.
class SpeedtestException(Exception):
    """Base exception for this module"""


class SpeedtestCLIError(SpeedtestException):
    """Generic exception for raising errors during CLI operation"""


class SpeedtestHTTPError(SpeedtestException):
    """Base HTTP exception for this module"""


class SpeedtestConfigError(SpeedtestException):
    """Configuration XML is invalid"""


class SpeedtestServersError(SpeedtestException):
    """Servers XML is invalid"""


class ConfigRetrievalError(SpeedtestHTTPError):
    """Could not retrieve config.php"""


class ServersRetrievalError(SpeedtestHTTPError):
    """Could not retrieve speedtest-servers.php"""


class InvalidServerIDType(SpeedtestException):
    """Server ID used for filtering was not an integer"""


class NoMatchedServers(SpeedtestException):
    """No servers matched when filtering"""


class SpeedtestMiniConnectFailure(SpeedtestException):
    """Could not connect to the provided speedtest mini server"""


class InvalidSpeedtestMiniServer(SpeedtestException):
    """Server provided as a speedtest mini server does not actually appear
    to be a speedtest mini server
    """


class ShareResultsConnectFailure(SpeedtestException):
    """Could not connect to speedtest.net API to POST results"""


class ShareResultsSubmitFailure(SpeedtestException):
    """Unable to successfully POST results to speedtest.net API after
    connection
    """


class SpeedtestUploadTimeout(SpeedtestException):
    """testlength configuration reached during upload

    Used to ensure the upload halts when no additional data should be sent
    """


class SpeedtestBestServerFailure(SpeedtestException):
    """Unable to determine best server"""


class SpeedtestMissingBestServer(SpeedtestException):
    """get_best_server not called or not able to determine best server"""
def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
                      source_address=None):
    """Connect to *address* and return the socket object.

    Convenience function. Connect to *address* (a 2-tuple ``(host,
    port)``) and return the socket object. Passing the optional
    *timeout* parameter will set the timeout on the socket instance
    before attempting to connect. If no *timeout* is supplied, the
    global default timeout setting returned by :func:`getdefaulttimeout`
    is used. If *source_address* is set it must be a tuple of (host, port)
    for the socket to bind as a source address before making the connection.
    An host of '' or port 0 tells the OS to use the default.

    Largely vendored from Python 2.7, modified to work with Python 2.4
    """

    host, port = address
    last_error = None

    # Try every address family/socktype getaddrinfo offers until one
    # connects; remember the last failure so it can be re-raised.
    for family, socktype, proto, _canonname, sockaddr in \
            socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
        sock = None
        try:
            sock = socket.socket(family, socktype, proto)
            if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
                sock.settimeout(float(timeout))
            if source_address:
                sock.bind(source_address)
            sock.connect(sockaddr)
        except socket.error:
            last_error = get_exception()
            if sock is not None:
                sock.close()
        else:
            return sock

    if last_error is None:
        # getaddrinfo returned nothing at all
        raise socket.error("getaddrinfo returns an empty list")
    raise last_error
class SpeedtestHTTPConnection(HTTPConnection):
    """Custom HTTPConnection to support source_address across
    Python 2.4 - Python 3
    """
    def __init__(self, *args, **kwargs):
        # Pop our extra keywords before delegating: older base classes
        # do not accept source_address/timeout as keyword arguments.
        source_address = kwargs.pop('source_address', None)
        timeout = kwargs.pop('timeout', 10)

        self._tunnel_host = None

        HTTPConnection.__init__(self, *args, **kwargs)

        self.source_address = source_address
        self.timeout = timeout

    def connect(self):
        """Connect to the host and port specified in __init__."""
        try:
            # Python 2.6+ socket.create_connection supports
            # source_address natively.
            self.sock = socket.create_connection(
                (self.host, self.port),
                self.timeout,
                self.source_address
            )
        except (AttributeError, TypeError):
            # Older interpreters: fall back to the vendored helper
            # defined earlier in this module.
            self.sock = create_connection(
                (self.host, self.port),
                self.timeout,
                self.source_address
            )

        if self._tunnel_host:
            # Proxy CONNECT tunnelling was requested via set_tunnel().
            self._tunnel()
if HTTPSConnection:
    class SpeedtestHTTPSConnection(HTTPSConnection):
        """Custom HTTPSConnection to support source_address across
        Python 2.4 - Python 3
        """
        default_port = 443

        def __init__(self, *args, **kwargs):
            # Pop our extra keywords before delegating: older base
            # classes do not accept them as keyword arguments.
            source_address = kwargs.pop('source_address', None)
            timeout = kwargs.pop('timeout', 10)

            self._tunnel_host = None

            HTTPSConnection.__init__(self, *args, **kwargs)

            self.timeout = timeout
            self.source_address = source_address

        def connect(self):
            "Connect to a host on a given (SSL) port."
            try:
                # Python 2.6+ path with native source_address support.
                self.sock = socket.create_connection(
                    (self.host, self.port),
                    self.timeout,
                    self.source_address
                )
            except (AttributeError, TypeError):
                # Fall back to the vendored helper for old interpreters.
                self.sock = create_connection(
                    (self.host, self.port),
                    self.timeout,
                    self.source_address
                )

            if self._tunnel_host:
                self._tunnel()

            if ssl:
                try:
                    kwargs = {}
                    if hasattr(ssl, 'SSLContext'):
                        # SNI: use the tunnel host when proxying,
                        # otherwise the target host itself.
                        if self._tunnel_host:
                            kwargs['server_hostname'] = self._tunnel_host
                        else:
                            kwargs['server_hostname'] = self.host
                    self.sock = self._context.wrap_socket(self.sock, **kwargs)
                except AttributeError:
                    # No SSLContext available; legacy module-level wrap.
                    self.sock = ssl.wrap_socket(self.sock)
                    try:
                        self.sock.server_hostname = self.host
                    except AttributeError:
                        pass
            elif FakeSocket:
                # Python 2.4/2.5 support
                try:
                    self.sock = FakeSocket(self.sock, socket.ssl(self.sock))
                except AttributeError:
                    raise SpeedtestException(
                        'This version of Python does not support HTTPS/SSL '
                        'functionality'
                    )
            else:
                raise SpeedtestException(
                    'This version of Python does not support HTTPS/SSL '
                    'functionality'
                )
def _build_connection(connection, source_address, timeout, context=None):
"""Cross Python 2.4 - Python 3 callable to build an ``HTTPConnection`` or
``HTTPSConnection`` with the args we need
Called from ``http(s)_open`` methods of ``SpeedtestHTTPHandler`` or
``SpeedtestHTTPSHandler``
"""
def inner(host, **kwargs):
kwargs.update({
'source_address': source_address,
'timeout': timeout
})
if context:
kwargs['context'] = context
return connection(host, **kwargs)
return inner
class SpeedtestHTTPHandler(AbstractHTTPHandler):
    """Custom ``HTTPHandler`` that can build a ``HTTPConnection`` with the
    args we need for ``source_address`` and ``timeout``
    """
    def __init__(self, debuglevel=0, source_address=None, timeout=10):
        AbstractHTTPHandler.__init__(self, debuglevel)
        # Remembered here; threaded into every connection by http_open.
        self.source_address = source_address
        self.timeout = timeout

    def http_open(self, req):
        return self.do_open(
            _build_connection(
                SpeedtestHTTPConnection,
                self.source_address,
                self.timeout
            ),
            req
        )

    http_request = AbstractHTTPHandler.do_request_
class SpeedtestHTTPSHandler(AbstractHTTPHandler):
    """Custom ``HTTPSHandler`` that can build a ``HTTPSConnection`` with the
    args we need for ``source_address`` and ``timeout``
    """
    def __init__(self, debuglevel=0, context=None, source_address=None,
                 timeout=10):
        AbstractHTTPHandler.__init__(self, debuglevel)
        # Optional ssl.SSLContext plus bind/timeout settings, threaded
        # into every connection by https_open.
        self._context = context
        self.source_address = source_address
        self.timeout = timeout

    def https_open(self, req):
        return self.do_open(
            _build_connection(
                SpeedtestHTTPSConnection,
                self.source_address,
                self.timeout,
                context=self._context,
            ),
            req
        )

    https_request = AbstractHTTPHandler.do_request_
def build_opener(source_address=None, timeout=10):
    """Function similar to ``urllib2.build_opener`` that will build
    an ``OpenerDirector`` with the explicit handlers we want,
    ``source_address`` for binding, ``timeout`` and our custom
    `User-Agent`
    """

    printer('Timeout set to %d' % timeout, debug=True)

    if source_address:
        # Port 0 lets the OS pick the local ephemeral port.
        source_address_tuple = (source_address, 0)
        printer('Binding to source address: %r' % (source_address_tuple,),
                debug=True)
    else:
        source_address_tuple = None

    # Deliberately a minimal handler set (no cookies, no auth); the two
    # custom handlers carry the source_address/timeout settings.
    handlers = [
        ProxyHandler(),
        SpeedtestHTTPHandler(source_address=source_address_tuple,
                             timeout=timeout),
        SpeedtestHTTPSHandler(source_address=source_address_tuple,
                              timeout=timeout),
        HTTPDefaultErrorHandler(),
        HTTPRedirectHandler(),
        HTTPErrorProcessor()
    ]

    opener = OpenerDirector()
    opener.addheaders = [('User-agent', build_user_agent())]

    for handler in handlers:
        opener.add_handler(handler)

    return opener
class GzipDecodedResponse(GZIP_BASE):
    """A file-like object to decode a response encoded with the gzip
    method, as described in RFC 1952.

    Largely copied from ``xmlrpclib``/``xmlrpc.client`` and modified
    to work for py2.4-py3

    Note: the entire compressed body is buffered into memory before
    decompression, because GzipFile needs a seekable file object.
    """
    def __init__(self, response):
        # response doesn't support tell() and read(), required by
        # GzipFile
        if not gzip:
            raise SpeedtestHTTPError('HTTP response body is gzip encoded, '
                                     'but gzip support is not available')
        IO = BytesIO or StringIO
        self.io = IO()
        while 1:
            chunk = response.read(1024)
            if len(chunk) == 0:
                break
            self.io.write(chunk)
        self.io.seek(0)
        gzip.GzipFile.__init__(self, mode='rb', fileobj=self.io)

    def close(self):
        try:
            gzip.GzipFile.close(self)
        finally:
            # Always release the in-memory buffer as well.
            self.io.close()
def get_exception():
    """Return the exception currently being handled.

    Helper that works across py2.4-py3, where the
    ``except ... as e`` binding syntax differs.
    """
    _exc_type, exc_value, _traceback = sys.exc_info()
    return exc_value
def distance(origin, destination):
    """Determine distance between 2 sets of [lat,lon] in km

    Implements the haversine formula with a mean Earth
    radius of 6371 km.
    """

    lat1, lon1 = origin
    lat2, lon2 = destination
    earth_radius_km = 6371

    delta_phi = math.radians(lat2 - lat1)
    delta_lambda = math.radians(lon2 - lon1)

    # Haversine of the central angle between the two points.
    half_chord = (math.sin(delta_phi / 2) * math.sin(delta_phi / 2) +
                  math.cos(math.radians(lat1)) *
                  math.cos(math.radians(lat2)) *
                  math.sin(delta_lambda / 2) * math.sin(delta_lambda / 2))
    central_angle = 2 * math.atan2(math.sqrt(half_chord),
                                   math.sqrt(1 - half_chord))

    return earth_radius_km * central_angle
def build_user_agent():
    """Build a Mozilla/5.0 compatible User-Agent string

    Includes OS/architecture, Python version and this module's version.
    """

    ua_tuple = (
        'Mozilla/5.0',
        '(%s; U; %s; en-us)' % (platform.platform(),
                                platform.architecture()[0]),
        'Python/%s' % platform.python_version(),
        '(KHTML, like Gecko)',
        'speedtest-cli/%s' % __version__
    )
    user_agent = ' '.join(ua_tuple)
    printer('User-Agent: %s' % user_agent, debug=True)
    return user_agent
def build_request(url, data=None, headers=None, bump='0', secure=False):
    """Build a urllib2 request object

    This function automatically adds a User-Agent header to all requests

    URLs starting with ':' get the scheme prepended based on *secure*.
    A millisecond timestamp plus *bump* is appended as a cache-busting
    query parameter.
    """

    if not headers:
        headers = {}

    if url[0] == ':':
        scheme = ('http', 'https')[bool(secure)]
        schemed_url = '%s%s' % (scheme, url)
    else:
        schemed_url = url

    if '?' in url:
        delim = '&'
    else:
        delim = '?'

    # WHO YOU GONNA CALL? CACHE BUSTERS!
    final_url = '%s%sx=%s.%s' % (schemed_url, delim,
                                 int(timeit.time.time() * 1000),
                                 bump)

    headers.update({
        'Cache-Control': 'no-cache',
    })

    printer('%s %s' % (('GET', 'POST')[bool(data)], final_url),
            debug=True)

    return Request(final_url, data=data, headers=headers)
def catch_request(request, opener=None):
    """Helper function to catch common exceptions encountered when
    establishing a connection with a HTTP/HTTPS request

    Returns ``(response, False)`` on success or ``(None, exception)``
    on failure, so callers can branch without try/except.
    """

    if opener:
        _open = opener.open
    else:
        _open = urlopen

    try:
        uh = _open(request)
        if request.get_full_url() != uh.geturl():
            printer('Redirected to %s' % uh.geturl(), debug=True)
        return uh, False
    except HTTP_ERRORS:
        e = get_exception()
        return None, e
def get_response_stream(response):
    """Return a gzip reader when ``Content-Encoding`` is ``gzip``,
    otherwise the response object itself.
    """
    try:
        # py2 httplib exposes headers.getheader; py3 has getheader
        # directly on the response.
        header_lookup = response.headers.getheader
    except AttributeError:
        header_lookup = response.getheader

    if header_lookup('content-encoding') == 'gzip':
        return GzipDecodedResponse(response)

    return response
def get_attributes_by_tag_name(dom, tag_name):
    """Return the attributes of the first *tag_name* element in *dom*
    as a plain dict.

    Only used with xml.dom.minidom, which is likely only to be used
    with python versions older than 2.5
    """
    element = dom.getElementsByTagName(tag_name)[0]
    return dict(element.attributes.items())
def print_dots(shutdown_event):
    """Built in callback function used by Thread classes for printing
    status

    Returns a callback that writes one dot per invocation (plus a
    trailing newline on the final call) unless *shutdown_event* is set.
    """
    def progress(current, total, start=False, end=False):
        if shutdown_event.isSet():
            return

        is_last = (current + 1 == total)
        sys.stdout.write('.\n' if (is_last and end is True) else '.')
        sys.stdout.flush()

    return progress
def do_nothing(*args, **kwargs):
    """No-op progress callback; accepts and ignores any arguments."""
    pass
class HTTPDownloader(threading.Thread):
    """Thread class for retrieving a URL"""

    def __init__(self, i, request, start, timeout, opener=None,
                 shutdown_event=None):
        threading.Thread.__init__(self)
        self.request = request
        # Per-read byte counts; summed by the caller for the total.
        self.result = [0]
        self.starttime = start
        self.timeout = timeout
        self.i = i
        if opener:
            self._opener = opener.open
        else:
            self._opener = urlopen

        if shutdown_event:
            self._shutdown_event = shutdown_event
        else:
            self._shutdown_event = FakeShutdownEvent()

    def run(self):
        try:
            if (timeit.default_timer() - self.starttime) <= self.timeout:
                f = self._opener(self.request)
                # Read 10 KiB chunks until shutdown, timeout, or EOF
                # (a zero-length read).
                while (not self._shutdown_event.isSet() and
                        (timeit.default_timer() - self.starttime) <=
                        self.timeout):
                    self.result.append(len(f.read(10240)))
                    if self.result[-1] == 0:
                        break
                f.close()
        except IOError:
            # Best effort: a failed thread simply contributes no bytes.
            pass
class HTTPUploaderData(object):
    """File like object to improve cutting off the upload once the timeout
    has been reached
    """

    def __init__(self, length, start, timeout, shutdown_event=None):
        self.length = length
        self.start = start
        self.timeout = timeout

        if shutdown_event:
            self._shutdown_event = shutdown_event
        else:
            self._shutdown_event = FakeShutdownEvent()

        # Lazily created by pre_allocate() on first access to .data
        self._data = None

        # Per-read byte counts; summed by HTTPUploader for the total.
        self.total = [0]

    def pre_allocate(self):
        # Build a 'content1=<AAA...>' payload of exactly self.length
        # bytes (the 9 subtracted bytes account for the field name).
        chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
        multiplier = int(round(int(self.length) / 36.0))
        IO = BytesIO or StringIO
        try:
            self._data = IO(
                ('content1=%s' %
                 (chars * multiplier)[0:int(self.length) - 9]
                 ).encode()
            )
        except MemoryError:
            raise SpeedtestCLIError(
                'Insufficient memory to pre-allocate upload data. Please '
                'use --no-pre-allocate'
            )

    @property
    def data(self):
        if not self._data:
            self.pre_allocate()
        return self._data

    def read(self, n=10240):
        # Serve data only while within the timeout and not shut down;
        # otherwise abort the upload via SpeedtestUploadTimeout.
        if ((timeit.default_timer() - self.start) <= self.timeout and
                not self._shutdown_event.isSet()):
            chunk = self.data.read(n)
            self.total.append(len(chunk))
            return chunk
        else:
            raise SpeedtestUploadTimeout()

    def __len__(self):
        return self.length
class HTTPUploader(threading.Thread):
    """Thread class for putting a URL"""

    def __init__(self, i, request, start, size, timeout, opener=None,
                 shutdown_event=None):
        threading.Thread.__init__(self)
        self.request = request
        # Share the start time with the HTTPUploaderData payload so its
        # read() can enforce the same timeout.
        self.request.data.start = self.starttime = start
        self.size = size
        self.result = None
        self.timeout = timeout
        self.i = i

        if opener:
            self._opener = opener.open
        else:
            self._opener = urlopen

        if shutdown_event:
            self._shutdown_event = shutdown_event
        else:
            self._shutdown_event = FakeShutdownEvent()

    def run(self):
        request = self.request
        try:
            if ((timeit.default_timer() - self.starttime) <= self.timeout and
                    not self._shutdown_event.isSet()):
                try:
                    f = self._opener(request)
                except TypeError:
                    # PY24 expects a string or buffer
                    # This also causes issues with Ctrl-C, but we will concede
                    # for the moment that Ctrl-C on PY24 isn't immediate
                    request = build_request(self.request.get_full_url(),
                                            data=request.data.read(self.size))
                    f = self._opener(request)
                f.read(11)
                f.close()
                self.result = sum(self.request.data.total)
            else:
                self.result = 0
        except (IOError, SpeedtestUploadTimeout):
            # Count whatever was sent before the failure/timeout.
            self.result = sum(self.request.data.total)
class SpeedtestResults(object):
    """Class for holding the results of a speedtest, including:

    Download speed
    Upload speed
    Ping/Latency to test server
    Data about server that the test was run against

    Additionally this class can return a result data as a dictionary or CSV,
    as well as submit a POST of the result data to the speedtest.net API
    to get a share results image link.
    """

    def __init__(self, download=0, upload=0, ping=0, server=None, client=None,
                 opener=None, secure=False):
        self.download = download
        self.upload = upload
        self.ping = ping
        if server is None:
            self.server = {}
        else:
            self.server = server
        self.client = client or {}

        # Share URL, populated lazily by share()
        self._share = None
        self.timestamp = '%sZ' % datetime.datetime.utcnow().isoformat()
        self.bytes_received = 0
        self.bytes_sent = 0

        if opener:
            self._opener = opener
        else:
            self._opener = build_opener()

        self._secure = secure

    def __repr__(self):
        return repr(self.dict())

    def share(self):
        """POST data to the speedtest.net API to obtain a share results
        link
        """

        if self._share:
            # Already submitted; reuse the cached URL.
            return self._share
        download = int(round(self.download / 1000.0, 0))
        ping = int(round(self.ping, 0))
        upload = int(round(self.upload / 1000.0, 0))

        # Build the request to send results back to speedtest.net
        # We use a list instead of a dict because the API expects parameters
        # in a certain order
        api_data = [
            'recommendedserverid=%s' % self.server['id'],
            'ping=%s' % ping,
            'screenresolution=',
            'promo=',
            'download=%s' % download,
            'screendpi=',
            'upload=%s' % upload,
            'testmethod=http',
            'hash=%s' % md5(('%s-%s-%s-%s' %
                             (ping, upload, download, '297aae72'))
                            .encode()).hexdigest(),
            'touchscreen=none',
            'startmode=pingselect',
            'accuracy=1',
            'bytesreceived=%s' % self.bytes_received,
            'bytessent=%s' % self.bytes_sent,
            'serverid=%s' % self.server['id'],
        ]

        headers = {'Referer': 'http://c.speedtest.net/flash/speedtest.swf'}
        request = build_request('://www.speedtest.net/api/api.php',
                                data='&'.join(api_data).encode(),
                                headers=headers, secure=self._secure)
        f, e = catch_request(request, opener=self._opener)
        if e:
            raise ShareResultsConnectFailure(e)

        response = f.read()
        code = f.code
        f.close()

        if int(code) != 200:
            raise ShareResultsSubmitFailure('Could not submit results to '
                                            'speedtest.net')

        qsargs = parse_qs(response.decode())
        resultid = qsargs.get('resultid')
        if not resultid or len(resultid) != 1:
            raise ShareResultsSubmitFailure('Could not submit results to '
                                            'speedtest.net')

        self._share = 'http://www.speedtest.net/result/%s.png' % resultid[0]

        return self._share

    def dict(self):
        """Return dictionary of result data"""
        return {
            'download': self.download,
            'upload': self.upload,
            'ping': self.ping,
            'server': self.server,
            'timestamp': self.timestamp,
            'bytes_sent': self.bytes_sent,
            'bytes_received': self.bytes_received,
            'share': self._share,
            'client': self.client,
        }

    @staticmethod
    def csv_header(delimiter=','):
        """Return CSV Headers"""
        row = ['Server ID', 'Sponsor', 'Server Name', 'Timestamp', 'Distance',
               'Ping', 'Download', 'Upload', 'Share', 'IP Address']
        out = StringIO()
        writer = csv.writer(out, delimiter=delimiter, lineterminator='')
        writer.writerow([to_utf8(v) for v in row])
        return out.getvalue()

    def csv(self, delimiter=','):
        """Return data in CSV format"""

        data = self.dict()
        out = StringIO()
        writer = csv.writer(out, delimiter=delimiter, lineterminator='')
        row = [data['server']['id'], data['server']['sponsor'],
               data['server']['name'], data['timestamp'],
               data['server']['d'], data['ping'], data['download'],
               data['upload'], self._share or '', self.client['ip']]
        writer.writerow([to_utf8(v) for v in row])
        return out.getvalue()

    def json(self, pretty=False):
        """Return data in JSON format"""

        kwargs = {}
        if pretty:
            kwargs.update({
                'indent': 4,
                'sort_keys': True
            })
        return json.dumps(self.dict(), **kwargs)
class Speedtest(object):
"""Class for performing standard speedtest.net testing operations"""
    def __init__(self, config=None, source_address=None, timeout=10,
                 secure=False, shutdown_event=None):
        """Initialize the test harness.

        Fetches the remote speedtest.net configuration immediately;
        *config* (if given) overrides values on top of it.  Raises
        ConfigRetrievalError/SpeedtestConfigError on failure.
        """
        self.config = {}

        self._source_address = source_address
        self._timeout = timeout
        self._opener = build_opener(source_address, timeout)

        self._secure = secure

        if shutdown_event:
            self._shutdown_event = shutdown_event
        else:
            self._shutdown_event = FakeShutdownEvent()

        # Network call: populates self.config and self.lat_lon.
        self.get_config()
        if config is not None:
            self.config.update(config)

        self.servers = {}
        self.closest = []
        self._best = {}

        self.results = SpeedtestResults(
            client=self.config['client'],
            opener=self._opener,
            secure=secure,
        )
    @property
    def best(self):
        """The lowest-latency server, determined lazily on first access."""
        if not self._best:
            self.get_best_server()
        return self._best
    def get_config(self):
        """Download the speedtest.net configuration and return only the data
        we are interested in

        Populates ``self.config`` (sizes/counts/threads/lengths per
        direction plus client info) and ``self.lat_lon``.  Returns None
        when the server replies with a non-200 status.
        """

        headers = {}
        if gzip:
            headers['Accept-Encoding'] = 'gzip'
        request = build_request('://www.speedtest.net/speedtest-config.php',
                                headers=headers, secure=self._secure)
        uh, e = catch_request(request, opener=self._opener)
        if e:
            raise ConfigRetrievalError(e)
        configxml_list = []

        stream = get_response_stream(uh)

        while 1:
            try:
                configxml_list.append(stream.read(1024))
            except (OSError, EOFError):
                raise ConfigRetrievalError(get_exception())
            if len(configxml_list[-1]) == 0:
                break
        stream.close()
        uh.close()

        if int(uh.code) != 200:
            return None

        configxml = ''.encode().join(configxml_list)

        printer('Config XML:\n%s' % configxml, debug=True)

        try:
            # Preferred parser: ElementTree.
            try:
                root = ET.fromstring(configxml)
            except ET.ParseError:
                e = get_exception()
                raise SpeedtestConfigError(
                    'Malformed speedtest.net configuration: %s' % e
                )
            server_config = root.find('server-config').attrib
            download = root.find('download').attrib
            upload = root.find('upload').attrib
            # times = root.find('times').attrib
            client = root.find('client').attrib

        except AttributeError:
            # ET unavailable (very old Python): fall back to minidom.
            try:
                root = DOM.parseString(configxml)
            except ExpatError:
                e = get_exception()
                raise SpeedtestConfigError(
                    'Malformed speedtest.net configuration: %s' % e
                )
            server_config = get_attributes_by_tag_name(root, 'server-config')
            download = get_attributes_by_tag_name(root, 'download')
            upload = get_attributes_by_tag_name(root, 'upload')
            # times = get_attributes_by_tag_name(root, 'times')
            client = get_attributes_by_tag_name(root, 'client')

        ratio = int(upload['ratio'])
        upload_max = int(upload['maxchunkcount'])
        up_sizes = [32768, 65536, 131072, 262144, 524288, 1048576, 7340032]
        sizes = {
            'upload': up_sizes[ratio - 1:],
            'download': [350, 500, 750, 1000, 1500, 2000, 2500,
                         3000, 3500, 4000]
        }

        size_count = len(sizes['upload'])

        upload_count = int(math.ceil(upload_max / size_count))

        counts = {
            'upload': upload_count,
            'download': int(download['threadsperurl'])
        }

        threads = {
            'upload': int(upload['threads']),
            'download': int(server_config['threadcount']) * 2
        }

        length = {
            'upload': int(upload['testlength']),
            'download': int(download['testlength'])
        }

        self.config.update({
            'client': client,
            'sizes': sizes,
            'counts': counts,
            'threads': threads,
            'length': length,
            'upload_max': upload_count * size_count
        })

        try:
            self.lat_lon = (float(client['lat']), float(client['lon']))
        except ValueError:
            raise SpeedtestConfigError(
                'Unknown location: lat=%r lon=%r' %
                (client.get('lat'), client.get('lon'))
            )

        printer('Config:\n%r' % self.config, debug=True)

        return self.config
def get_servers(self, servers=None, exclude=None):
"""Retrieve a the list of speedtest.net servers, optionally filtered
to servers matching those specified in the ``servers`` argument
"""
if servers is None:
servers = []
if exclude is None:
exclude = []
self.servers.clear()
for server_list in (servers, exclude):
for i, s in enumerate(server_list):
try:
server_list[i] = int(s)
except ValueError:
raise InvalidServerIDType(
'%s is an invalid server type, must be int' % s
)
urls = [
'://www.speedtest.net/speedtest-servers-static.php',
'http://c.speedtest.net/speedtest-servers-static.php',
'://www.speedtest.net/speedtest-servers.php',
'http://c.speedtest.net/speedtest-servers.php',
]
headers = {}
if gzip:
headers['Accept-Encoding'] = 'gzip'
errors = []
for url in urls:
try:
request = build_request(
'%s?threads=%s' % (url,
self.config['threads']['download']),
headers=headers,
secure=self._secure
)
uh, e = catch_request(request, opener=self._opener)
if e:
errors.append('%s' % e)
raise ServersRetrievalError()
stream = get_response_stream(uh)
serversxml_list = []
while 1:
try:
serversxml_list.append(stream.read(1024))
except (OSError, EOFError):
raise ServersRetrievalError(get_exception())
if len(serversxml_list[-1]) == 0:
break
stream.close()
uh.close()
if int(uh.code) != 200:
raise ServersRetrievalError()
serversxml = ''.encode().join(serversxml_list)
printer('Servers XML:\n%s' % serversxml, debug=True)
try:
try:
try:
root = ET.fromstring(serversxml)
except ET.ParseError:
e = get_exception()
raise SpeedtestServersError(
'Malformed speedtest.net server list: %s' % e
)
elements = etree_iter(root, 'server')
except AttributeError:
try:
root = DOM.parseString(serversxml)
except ExpatError:
e = get_exception()
raise SpeedtestServersError(
'Malformed speedtest.net server list: %s' % e
)
elements = root.getElementsByTagName('server')
except (SyntaxError, xml.parsers.expat.ExpatError):
raise ServersRetrievalError()
for server in elements:
try:
attrib = server.attrib
except AttributeError:
attrib = dict(list(server.attributes.items()))
if servers and int(attrib.get('id')) not in servers:
continue
try:
d = distance(self.lat_lon,
(float(attrib.get('lat')),
float(attrib.get('lon'))))
except Exception:
continue
attrib['d'] = d
try:
self.servers[d].append(attrib)
except KeyError:
self.servers[d] = [attrib]
break
except ServersRetrievalError:
continue
if (servers or exclude) and not self.servers:
raise NoMatchedServers()
return self.servers
    def set_mini_server(self, server):
        """Instead of querying for a list of servers, set a link to a
        speedtest mini server
        """

        urlparts = urlparse(server)

        # If the URL points at a file, use its directory as the base.
        name, ext = os.path.splitext(urlparts[2])
        if ext:
            url = os.path.dirname(server)
        else:
            url = server

        request = build_request(url)
        uh, e = catch_request(request, opener=self._opener)
        if e:
            raise SpeedtestMiniConnectFailure('Failed to connect to %s' %
                                              server)
        else:
            text = uh.read()
            uh.close()

        # The mini server page normally declares its upload script
        # extension; otherwise probe the common ones directly.
        extension = re.findall('upload_?[Ee]xtension: "([^"]+)"',
                               text.decode())
        if not extension:
            for ext in ['php', 'asp', 'aspx', 'jsp']:
                try:
                    f = self._opener.open(
                        '%s/speedtest/upload.%s' % (url, ext)
                    )
                except Exception:
                    pass
                else:
                    data = f.read().strip().decode()
                    if (f.code == 200 and
                            len(data.splitlines()) == 1 and
                            re.match('size=[0-9]', data)):
                        extension = [ext]
                        break
        if not urlparts or not extension:
            raise InvalidSpeedtestMiniServer('Invalid Speedtest Mini Server: '
                                             '%s' % server)

        self.servers = [{
            'sponsor': 'Speedtest Mini',
            'name': urlparts[1],
            'd': 0,
            'url': '%s/speedtest/upload.%s' % (url.rstrip('/'), extension[0]),
            'latency': 0,
            'id': 0
        }]

        return self.servers
def get_closest_servers(self, limit=5):
    """Limit servers to the closest speedtest.net servers based on
    geographic distance
    """
    # Lazily populate the full server list on first use.
    if not self.servers:
        self.get_servers()
    # Walk servers in ascending distance order, collecting until ``limit``
    # entries have been gathered.
    reached_limit = False
    for dist in sorted(self.servers):
        for candidate in self.servers[dist]:
            self.closest.append(candidate)
            if len(self.closest) == limit:
                reached_limit = True
                break
        if reached_limit:
            break
    printer('Closest Servers:\n%r' % self.closest, debug=True)
    return self.closest
def get_best_server(self, servers=None):
    """Perform a speedtest.net "ping" to determine which speedtest.net
    server has the lowest latency

    Each candidate server's latency endpoint is requested three times; a
    failed or invalid response is penalised with a 3600s sample.  The
    server with the lowest averaged latency is stored as ``self._best``
    and returned.
    """
    if not servers:
        if not self.closest:
            servers = self.get_closest_servers()
        servers = self.closest
    if self._source_address:
        source_address_tuple = (self._source_address, 0)
    else:
        source_address_tuple = None
    user_agent = build_user_agent()
    results = {}
    for server in servers:
        cum = []
        url = os.path.dirname(server['url'])
        # Cache-busting timestamp in milliseconds.
        stamp = int(timeit.time.time() * 1000)
        latency_url = '%s/latency.txt?x=%s' % (url, stamp)
        for i in range(0, 3):
            this_latency_url = '%s.%s' % (latency_url, i)
            printer('%s %s' % ('GET', this_latency_url),
                    debug=True)
            urlparts = urlparse(latency_url)
            try:
                if urlparts[0] == 'https':
                    h = SpeedtestHTTPSConnection(
                        urlparts[1],
                        source_address=source_address_tuple
                    )
                else:
                    h = SpeedtestHTTPConnection(
                        urlparts[1],
                        source_address=source_address_tuple
                    )
                headers = {'User-Agent': user_agent}
                path = '%s?%s' % (urlparts[2], urlparts[4])
                start = timeit.default_timer()
                h.request("GET", path, headers=headers)
                r = h.getresponse()
                total = (timeit.default_timer() - start)
            except HTTP_ERRORS:
                e = get_exception()
                printer('ERROR: %r' % e, debug=True)
                # Penalty value for a failed probe.
                cum.append(3600)
                continue
            # A valid latency endpoint returns the literal 'test=test'.
            text = r.read(9)
            if int(r.status) == 200 and text == 'test=test'.encode():
                cum.append(total)
            else:
                cum.append(3600)
            h.close()
        # 3 samples divided by 6 -- presumably halves the round-trip time
        # to approximate one-way latency; TODO confirm intent.
        avg = round((sum(cum) / 6) * 1000.0, 3)
        results[avg] = server
    try:
        fastest = sorted(results.keys())[0]
    except IndexError:
        raise SpeedtestBestServerFailure('Unable to connect to servers to '
                                         'test latency.')
    best = results[fastest]
    best['latency'] = fastest
    self.results.ping = fastest
    self.results.server = best
    self._best.update(best)
    printer('Best Server:\n%r' % best, debug=True)
    return best
def download(self, callback=do_nothing, threads=None):
    """Test download speed against speedtest.net
    A ``threads`` value of ``None`` will fall back to those dictated
    by the speedtest.net configuration

    Spawns HTTPDownloader threads through a bounded producer/consumer
    queue and reports the aggregate throughput in bits per second.
    """
    # Build one URL per (size, count) combination from the config.
    urls = []
    for size in self.config['sizes']['download']:
        for _ in range(0, self.config['counts']['download']):
            urls.append('%s/random%sx%s.jpg' %
                        (os.path.dirname(self.best['url']), size, size))
    request_count = len(urls)
    requests = []
    for i, url in enumerate(urls):
        requests.append(
            build_request(url, bump=i, secure=self._secure)
        )
    max_threads = threads or self.config['threads']['download']
    # Mutable counter shared between producer and consumer closures to
    # cap the number of concurrently running download threads.
    in_flight = {'threads': 0}
    def producer(q, requests, request_count):
        # NOTE: ``start`` is the outer timer value assigned below, before
        # this thread is started; it is captured by closure.
        for i, request in enumerate(requests):
            thread = HTTPDownloader(
                i,
                request,
                start,
                self.config['length']['download'],
                opener=self._opener,
                shutdown_event=self._shutdown_event
            )
            while in_flight['threads'] >= max_threads:
                timeit.time.sleep(0.001)
            thread.start()
            q.put(thread, True)
            in_flight['threads'] += 1
            callback(i, request_count, start=True)
    finished = []
    def consumer(q, request_count):
        _is_alive = thread_is_alive
        while len(finished) < request_count:
            thread = q.get(True)
            while _is_alive(thread):
                thread.join(timeout=0.001)
            in_flight['threads'] -= 1
            finished.append(sum(thread.result))
            callback(thread.i, request_count, end=True)
    q = Queue(max_threads)
    prod_thread = threading.Thread(target=producer,
                                   args=(q, requests, request_count))
    cons_thread = threading.Thread(target=consumer,
                                   args=(q, request_count))
    start = timeit.default_timer()
    prod_thread.start()
    cons_thread.start()
    _is_alive = thread_is_alive
    while _is_alive(prod_thread):
        prod_thread.join(timeout=0.001)
    while _is_alive(cons_thread):
        cons_thread.join(timeout=0.001)
    stop = timeit.default_timer()
    self.results.bytes_received = sum(finished)
    # bytes/second * 8 -> bits/second.
    self.results.download = (
        (self.results.bytes_received / (stop - start)) * 8.0
    )
    # On fast links (> 100 kbit/s) raise the upload thread count.
    if self.results.download > 100000:
        self.config['threads']['upload'] = 8
    return self.results.download
def upload(self, callback=do_nothing, pre_allocate=True, threads=None):
    """Test upload speed against speedtest.net
    A ``threads`` value of ``None`` will fall back to those dictated
    by the speedtest.net configuration

    Mirrors :meth:`download` but drives HTTPUploader threads; only the
    first ``upload_max`` prepared requests are actually sent.
    """
    sizes = []
    for size in self.config['sizes']['upload']:
        for _ in range(0, self.config['counts']['upload']):
            sizes.append(size)
    # request_count = len(sizes)
    request_count = self.config['upload_max']
    requests = []
    for i, size in enumerate(sizes):
        # We set ``0`` for ``start`` and handle setting the actual
        # ``start`` in ``HTTPUploader`` to get better measurements
        data = HTTPUploaderData(
            size,
            0,
            self.config['length']['upload'],
            shutdown_event=self._shutdown_event
        )
        if pre_allocate:
            data.pre_allocate()
        headers = {'Content-length': size}
        requests.append(
            (
                build_request(self.best['url'], data, secure=self._secure,
                              headers=headers),
                size
            )
        )
    max_threads = threads or self.config['threads']['upload']
    # Shared counter capping concurrent uploader threads.
    in_flight = {'threads': 0}
    def producer(q, requests, request_count):
        # ``start`` is captured by closure from the outer scope; it is
        # assigned just before this thread starts.
        for i, request in enumerate(requests[:request_count]):
            thread = HTTPUploader(
                i,
                request[0],
                start,
                request[1],
                self.config['length']['upload'],
                opener=self._opener,
                shutdown_event=self._shutdown_event
            )
            while in_flight['threads'] >= max_threads:
                timeit.time.sleep(0.001)
            thread.start()
            q.put(thread, True)
            in_flight['threads'] += 1
            callback(i, request_count, start=True)
    finished = []
    def consumer(q, request_count):
        _is_alive = thread_is_alive
        while len(finished) < request_count:
            thread = q.get(True)
            while _is_alive(thread):
                thread.join(timeout=0.001)
            in_flight['threads'] -= 1
            finished.append(thread.result)
            callback(thread.i, request_count, end=True)
    q = Queue(threads or self.config['threads']['upload'])
    prod_thread = threading.Thread(target=producer,
                                   args=(q, requests, request_count))
    cons_thread = threading.Thread(target=consumer,
                                   args=(q, request_count))
    start = timeit.default_timer()
    prod_thread.start()
    cons_thread.start()
    _is_alive = thread_is_alive
    while _is_alive(prod_thread):
        prod_thread.join(timeout=0.1)
    while _is_alive(cons_thread):
        cons_thread.join(timeout=0.1)
    stop = timeit.default_timer()
    self.results.bytes_sent = sum(finished)
    # bytes/second * 8 -> bits/second.
    self.results.upload = (
        (self.results.bytes_sent / (stop - start)) * 8.0
    )
    return self.results.upload
def ctrl_c(shutdown_event):
    """Catch Ctrl-C key sequence and set a SHUTDOWN_EVENT for our threaded
    operations
    """
    def handler(signum, frame):
        # Signal worker threads to stop, tell the user, then exit cleanly.
        shutdown_event.set()
        printer('\nCancelling...', error=True)
        sys.exit(0)
    return handler
def version():
    """Print the version"""
    printer('speedtest-cli %s' % __version__)
    # sys.version contains an embedded newline; strip it so the output
    # stays on a single line.
    printer('Python %s' % sys.version.replace('\n', ''))
    # Version output is terminal: exit immediately.
    sys.exit(0)
def csv_header(delimiter=','):
    """Print the CSV Headers"""
    printer(SpeedtestResults.csv_header(delimiter=delimiter))
    # Header output is terminal: exit immediately.
    sys.exit(0)
def parse_args():
    """Function to handle building and parsing of command line arguments

    Returns the parsed options namespace; works with both argparse and
    optparse (the optparse path returns an (options, args) tuple, hence
    the isinstance check at the end).
    """
    description = (
        'Command line interface for testing internet bandwidth using '
        'speedtest.net.\n'
        '------------------------------------------------------------'
        '--------------\n'
        'https://github.com/sivel/speedtest-cli')
    parser = ArgParser(description=description)
    # Give optparse.OptionParser an `add_argument` method for
    # compatibility with argparse.ArgumentParser
    try:
        parser.add_argument = parser.add_option
    except AttributeError:
        pass
    parser.add_argument('--no-download', dest='download', default=True,
                        action='store_const', const=False,
                        help='Do not perform download test')
    parser.add_argument('--no-upload', dest='upload', default=True,
                        action='store_const', const=False,
                        help='Do not perform upload test')
    parser.add_argument('--single', default=False, action='store_true',
                        help='Only use a single connection instead of '
                             'multiple. This simulates a typical file '
                             'transfer.')
    # units is a (label, divisor) pair consumed by shell() when printing.
    parser.add_argument('--bytes', dest='units', action='store_const',
                        const=('byte', 8), default=('bit', 1),
                        help='Display values in bytes instead of bits. Does '
                             'not affect the image generated by --share, nor '
                             'output from --json or --csv')
    parser.add_argument('--share', action='store_true',
                        help='Generate and provide a URL to the speedtest.net '
                             'share results image, not displayed with --csv')
    parser.add_argument('--simple', action='store_true', default=False,
                        help='Suppress verbose output, only show basic '
                             'information')
    parser.add_argument('--csv', action='store_true', default=False,
                        help='Suppress verbose output, only show basic '
                             'information in CSV format. Speeds listed in '
                             'bit/s and not affected by --bytes')
    parser.add_argument('--csv-delimiter', default=',', type=PARSER_TYPE_STR,
                        help='Single character delimiter to use in CSV '
                             'output. Default ","')
    parser.add_argument('--csv-header', action='store_true', default=False,
                        help='Print CSV headers')
    parser.add_argument('--json', action='store_true', default=False,
                        help='Suppress verbose output, only show basic '
                             'information in JSON format. Speeds listed in '
                             'bit/s and not affected by --bytes')
    parser.add_argument('--list', action='store_true',
                        help='Display a list of speedtest.net servers '
                             'sorted by distance')
    parser.add_argument('--server', type=PARSER_TYPE_INT, action='append',
                        help='Specify a server ID to test against. Can be '
                             'supplied multiple times')
    parser.add_argument('--exclude', type=PARSER_TYPE_INT, action='append',
                        help='Exclude a server from selection. Can be '
                             'supplied multiple times')
    parser.add_argument('--mini', help='URL of the Speedtest Mini server')
    parser.add_argument('--source', help='Source IP address to bind to')
    parser.add_argument('--timeout', default=10, type=PARSER_TYPE_FLOAT,
                        help='HTTP timeout in seconds. Default 10')
    parser.add_argument('--secure', action='store_true',
                        help='Use HTTPS instead of HTTP when communicating '
                             'with speedtest.net operated servers')
    parser.add_argument('--no-pre-allocate', dest='pre_allocate',
                        action='store_const', default=True, const=False,
                        help='Do not pre allocate upload data. Pre allocation '
                             'is enabled by default to improve upload '
                             'performance. To support systems with '
                             'insufficient memory, use this option to avoid a '
                             'MemoryError')
    parser.add_argument('--version', action='store_true',
                        help='Show the version number and exit')
    # Hidden option: enables DEBUG output in shell().
    parser.add_argument('--debug', action='store_true',
                        help=ARG_SUPPRESS, default=ARG_SUPPRESS)
    options = parser.parse_args()
    # optparse returns (options, args); argparse returns the namespace.
    if isinstance(options, tuple):
        args = options[0]
    else:
        args = options
    return args
def validate_optional_args(args):
    """Check if an argument was provided that depends on a module that may
    not be part of the Python standard library.
    If such an argument is supplied, and the module does not exist, exit
    with an error stating which module is missing.
    """
    # Maps an option name to (human-readable description, module object);
    # the module object is None when the optional import failed.
    optional_args = {
        'json': ('json/simplejson python module', json),
        'secure': ('SSL support', HTTPSConnection),
    }
    for arg, (description, module) in optional_args.items():
        if getattr(args, arg, False) and module is None:
            raise SystemExit('%s is not installed. --%s is '
                             'unavailable' % (description, arg))
def printer(string, quiet=False, debug=False, error=False, **kwargs):
    """Helper function print a string with various features"""
    if debug:
        # Debug messages are dropped entirely unless DEBUG is enabled.
        if not DEBUG:
            return
        if sys.stdout.isatty():
            # Dim ANSI styling when attached to a terminal.
            out = '\033[1;30mDEBUG: %s\033[0m' % string
        else:
            out = 'DEBUG: %s' % string
    else:
        out = string
    if error:
        kwargs['file'] = sys.stderr
    if not quiet:
        print_(out, **kwargs)
def shell():
    """Run the full speedtest.net test

    Orchestrates the whole CLI flow: argument handling, server selection
    (regular, --list, or --mini), the download/upload tests, and the
    final result rendering (human readable, --simple, --csv or --json).
    """
    global DEBUG
    shutdown_event = threading.Event()
    # Install a SIGINT handler so Ctrl-C stops worker threads cleanly.
    signal.signal(signal.SIGINT, ctrl_c(shutdown_event))
    args = parse_args()
    # Print the version and exit
    if args.version:
        version()
    if not args.download and not args.upload:
        raise SpeedtestCLIError('Cannot supply both --no-download and '
                                '--no-upload')
    if len(args.csv_delimiter) != 1:
        raise SpeedtestCLIError('--csv-delimiter must be a single character')
    if args.csv_header:
        csv_header(args.csv_delimiter)
    validate_optional_args(args)
    # --debug defaults to the argparse SUPPRESS sentinel when not given.
    debug = getattr(args, 'debug', False)
    if debug == 'SUPPRESSHELP':
        debug = False
    if debug:
        DEBUG = True
    # Machine-readable or --simple output suppresses progress chatter.
    if args.simple or args.csv or args.json:
        quiet = True
    else:
        quiet = False
    if args.csv or args.json:
        machine_format = True
    else:
        machine_format = False
    # Don't set a callback if we are running quietly
    if quiet or debug:
        callback = do_nothing
    else:
        callback = print_dots(shutdown_event)
    printer('Retrieving speedtest.net configuration...', quiet)
    try:
        speedtest = Speedtest(
            source_address=args.source,
            timeout=args.timeout,
            secure=args.secure
        )
    except (ConfigRetrievalError,) + HTTP_ERRORS:
        printer('Cannot retrieve speedtest configuration', error=True)
        raise SpeedtestCLIError(get_exception())
    if args.list:
        # --list only prints the server catalogue, sorted by distance.
        try:
            speedtest.get_servers()
        except (ServersRetrievalError,) + HTTP_ERRORS:
            printer('Cannot retrieve speedtest server list', error=True)
            raise SpeedtestCLIError(get_exception())
        for _, servers in sorted(speedtest.servers.items()):
            for server in servers:
                line = ('%(id)5s) %(sponsor)s (%(name)s, %(country)s) '
                        '[%(d)0.2f km]' % server)
                try:
                    printer(line)
                except IOError:
                    # Tolerate a broken pipe (e.g. piping into `head`).
                    e = get_exception()
                    if e.errno != errno.EPIPE:
                        raise
        sys.exit(0)
    printer('Testing from %(isp)s (%(ip)s)...' % speedtest.config['client'],
            quiet)
    if not args.mini:
        printer('Retrieving speedtest.net server list...', quiet)
        try:
            speedtest.get_servers(servers=args.server, exclude=args.exclude)
        except NoMatchedServers:
            raise SpeedtestCLIError(
                'No matched servers: %s' %
                ', '.join('%s' % s for s in args.server)
            )
        except (ServersRetrievalError,) + HTTP_ERRORS:
            printer('Cannot retrieve speedtest server list', error=True)
            raise SpeedtestCLIError(get_exception())
        except InvalidServerIDType:
            raise SpeedtestCLIError(
                '%s is an invalid server type, must '
                'be an int' % ', '.join('%s' % s for s in args.server)
            )
        if args.server and len(args.server) == 1:
            printer('Retrieving information for the selected server...', quiet)
        else:
            printer('Selecting best server based on ping...', quiet)
        speedtest.get_best_server()
    elif args.mini:
        speedtest.get_best_server(speedtest.set_mini_server(args.mini))
    results = speedtest.results
    printer('Hosted by %(sponsor)s (%(name)s) [%(d)0.2f km]: '
            '%(latency)s ms' % results.server, quiet)
    if args.download:
        printer('Testing download speed', quiet,
                end=('', '\n')[bool(debug)])
        speedtest.download(
            callback=callback,
            threads=(None, 1)[args.single]
        )
        # args.units is (label, divisor): bits by default, bytes with --bytes.
        printer('Download: %0.2f M%s/s' %
                ((results.download / 1000.0 / 1000.0) / args.units[1],
                 args.units[0]),
                quiet)
    else:
        printer('Skipping download test', quiet)
    if args.upload:
        printer('Testing upload speed', quiet,
                end=('', '\n')[bool(debug)])
        speedtest.upload(
            callback=callback,
            pre_allocate=args.pre_allocate,
            threads=(None, 1)[args.single]
        )
        printer('Upload: %0.2f M%s/s' %
                ((results.upload / 1000.0 / 1000.0) / args.units[1],
                 args.units[0]),
                quiet)
    else:
        printer('Skipping upload test', quiet)
    printer('Results:\n%r' % results.dict(), debug=True)
    if not args.simple and args.share:
        results.share()
    if args.simple:
        printer('Ping: %s ms\nDownload: %0.2f M%s/s\nUpload: %0.2f M%s/s' %
                (results.ping,
                 (results.download / 1000.0 / 1000.0) / args.units[1],
                 args.units[0],
                 (results.upload / 1000.0 / 1000.0) / args.units[1],
                 args.units[0]))
    elif args.csv:
        printer(results.csv(delimiter=args.csv_delimiter))
    elif args.json:
        printer(results.json())
    if args.share and not machine_format:
        printer('Share results: %s' % results.share())
def main():
    """Top-level entry point: run shell() and translate failures into a
    clean 'ERROR: ...' exit message."""
    try:
        shell()
    except KeyboardInterrupt:
        printer('\nCancelling...', error=True)
    except (SpeedtestException, SystemExit):
        err = get_exception()
        # Exit codes 0 (success) and 2 (argparse usage error) are not
        # treated as failures.
        if getattr(err, 'code', 1) in (0, 2):
            return
        # Fall back to repr() when str() of the exception is empty.
        msg = '%s' % err or '%r' % err
        raise SystemExit('ERROR: %s' % msg)
# Script entry point when executed directly (not imported).
if __name__ == '__main__':
    main()
| 32.443831 | 79 | 0.548715 |
98f0e719b7cdd169580a8ace5230ec457f4e1be1 | 13,008 | py | Python | sklearn/manifold/_isomap.py | matiasrvazquez/scikit-learn | e821a9e8a0d4ef63b1219faf9ab902ad0fd4b181 | [
"BSD-3-Clause"
] | 2 | 2017-11-22T08:20:15.000Z | 2017-11-22T08:23:14.000Z | sklearn/manifold/_isomap.py | matiasrvazquez/scikit-learn | e821a9e8a0d4ef63b1219faf9ab902ad0fd4b181 | [
"BSD-3-Clause"
] | 1 | 2022-03-06T18:49:03.000Z | 2022-03-06T18:49:03.000Z | sklearn/manifold/_isomap.py | matiasrvazquez/scikit-learn | e821a9e8a0d4ef63b1219faf9ab902ad0fd4b181 | [
"BSD-3-Clause"
] | null | null | null | """Isomap for manifold learning"""
# Author: Jake Vanderplas -- <vanderplas@astro.washington.edu>
# License: BSD 3 clause (C) 2011
import warnings
import numpy as np
import scipy
from scipy.sparse import issparse
from scipy.sparse.csgraph import shortest_path
from scipy.sparse.csgraph import connected_components
from ..base import BaseEstimator, TransformerMixin, _ClassNamePrefixFeaturesOutMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils.validation import check_is_fitted
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
from ..utils.graph import _fix_connected_components
from ..externals._packaging.version import parse as parse_version
class Isomap(_ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
    """Isomap Embedding.

    Non-linear dimensionality reduction through Isometric Mapping

    Read more in the :ref:`User Guide <isomap>`.

    Parameters
    ----------
    n_neighbors : int, default=5
        Number of neighbors to consider for each point.

    n_components : int, default=2
        Number of coordinates for the manifold.

    eigen_solver : {'auto', 'arpack', 'dense'}, default='auto'
        'auto' : Attempt to choose the most efficient solver
        for the given problem.
        'arpack' : Use Arnoldi decomposition to find the eigenvalues
        and eigenvectors.
        'dense' : Use a direct solver (i.e. LAPACK)
        for the eigenvalue decomposition.

    tol : float, default=0
        Convergence tolerance passed to arpack or lobpcg.
        not used if eigen_solver == 'dense'.

    max_iter : int, default=None
        Maximum number of iterations for the arpack solver.
        not used if eigen_solver == 'dense'.

    path_method : {'auto', 'FW', 'D'}, default='auto'
        Method to use in finding shortest path.
        'auto' : attempt to choose the best algorithm automatically.
        'FW' : Floyd-Warshall algorithm.
        'D' : Dijkstra's algorithm.

    neighbors_algorithm : {'auto', 'brute', 'kd_tree', 'ball_tree'}, \
        default='auto'
        Algorithm to use for nearest neighbors search,
        passed to neighbors.NearestNeighbors instance.

    n_jobs : int or None, default=None
        The number of parallel jobs to run.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

    metric : str, or callable, default="minkowski"
        The metric to use when calculating distance between instances in a
        feature array. If metric is a string or callable, it must be one of
        the options allowed by :func:`sklearn.metrics.pairwise_distances` for
        its metric parameter.
        If metric is "precomputed", X is assumed to be a distance matrix and
        must be square. X may be a :term:`Glossary <sparse graph>`.

        .. versionadded:: 0.22

    p : int, default=2
        Parameter for the Minkowski metric from
        sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.

        .. versionadded:: 0.22

    metric_params : dict, default=None
        Additional keyword arguments for the metric function.

        .. versionadded:: 0.22

    Attributes
    ----------
    embedding_ : array-like, shape (n_samples, n_components)
        Stores the embedding vectors.

    kernel_pca_ : object
        :class:`~sklearn.decomposition.KernelPCA` object used to implement the
        embedding.

    nbrs_ : sklearn.neighbors.NearestNeighbors instance
        Stores nearest neighbors instance, including BallTree or KDtree
        if applicable.

    dist_matrix_ : array-like, shape (n_samples, n_samples)
        Stores the geodesic distance matrix of training data.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    sklearn.decomposition.PCA : Principal component analysis that is a linear
        dimensionality reduction method.
    sklearn.decomposition.KernelPCA : Non-linear dimensionality reduction using
        kernels and PCA.
    MDS : Manifold learning using multidimensional scaling.
    TSNE : T-distributed Stochastic Neighbor Embedding.
    LocallyLinearEmbedding : Manifold learning using Locally Linear Embedding.
    SpectralEmbedding : Spectral embedding for non-linear dimensionality.

    References
    ----------
    .. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
        framework for nonlinear dimensionality reduction. Science 290 (5500)

    Examples
    --------
    >>> from sklearn.datasets import load_digits
    >>> from sklearn.manifold import Isomap
    >>> X, _ = load_digits(return_X_y=True)
    >>> X.shape
    (1797, 64)
    >>> embedding = Isomap(n_components=2)
    >>> X_transformed = embedding.fit_transform(X[:100])
    >>> X_transformed.shape
    (100, 2)
    """

    def __init__(
        self,
        *,
        n_neighbors=5,
        n_components=2,
        eigen_solver="auto",
        tol=0,
        max_iter=None,
        path_method="auto",
        neighbors_algorithm="auto",
        n_jobs=None,
        metric="minkowski",
        p=2,
        metric_params=None,
    ):
        self.n_neighbors = n_neighbors
        self.n_components = n_components
        self.eigen_solver = eigen_solver
        self.tol = tol
        self.max_iter = max_iter
        self.path_method = path_method
        self.neighbors_algorithm = neighbors_algorithm
        self.n_jobs = n_jobs
        self.metric = metric
        self.p = p
        self.metric_params = metric_params

    def _fit_transform(self, X):
        # Step 1: fit a nearest-neighbors model on the training data.
        self.nbrs_ = NearestNeighbors(
            n_neighbors=self.n_neighbors,
            algorithm=self.neighbors_algorithm,
            metric=self.metric,
            p=self.p,
            metric_params=self.metric_params,
            n_jobs=self.n_jobs,
        )
        self.nbrs_.fit(X)
        self.n_features_in_ = self.nbrs_.n_features_in_
        if hasattr(self.nbrs_, "feature_names_in_"):
            self.feature_names_in_ = self.nbrs_.feature_names_in_
        # The embedding is extracted from the geodesic-distance kernel via
        # KernelPCA with a precomputed kernel.
        self.kernel_pca_ = KernelPCA(
            n_components=self.n_components,
            kernel="precomputed",
            eigen_solver=self.eigen_solver,
            tol=self.tol,
            max_iter=self.max_iter,
            n_jobs=self.n_jobs,
        )
        # Step 2: build the k-nearest-neighbors distance graph.
        kng = kneighbors_graph(
            self.nbrs_,
            self.n_neighbors,
            metric=self.metric,
            p=self.p,
            metric_params=self.metric_params,
            mode="distance",
            n_jobs=self.n_jobs,
        )
        # Compute the number of connected components, and connect the different
        # components to be able to compute a shortest path between all pairs
        # of samples in the graph.
        # Similar fix to cluster._agglomerative._fix_connectivity.
        n_connected_components, labels = connected_components(kng)
        if n_connected_components > 1:
            if self.metric == "precomputed" and issparse(X):
                raise RuntimeError(
                    "The number of connected components of the neighbors graph"
                    f" is {n_connected_components} > 1. The graph cannot be "
                    "completed with metric='precomputed', and Isomap cannot be"
                    "fitted. Increase the number of neighbors to avoid this "
                    "issue, or precompute the full distance matrix instead "
                    "of passing a sparse neighbors graph."
                )
            warnings.warn(
                "The number of connected components of the neighbors graph "
                f"is {n_connected_components} > 1. Completing the graph to fit"
                " Isomap might be slow. Increase the number of neighbors to "
                "avoid this issue.",
                stacklevel=2,
            )
            # use array validated by NearestNeighbors
            kng = _fix_connected_components(
                X=self.nbrs_._fit_X,
                graph=kng,
                n_connected_components=n_connected_components,
                component_labels=labels,
                mode="distance",
                metric=self.nbrs_.effective_metric_,
                **self.nbrs_.effective_metric_params_,
            )
        if parse_version(scipy.__version__) < parse_version("1.3.2"):
            # make identical samples have a nonzero distance, to account for
            # issues in old scipy Floyd-Warshall implementation.
            kng.data += 1e-15
        # Step 3: geodesic distances = graph shortest paths.
        self.dist_matrix_ = shortest_path(kng, method=self.path_method, directed=False)
        # Step 4: kernel K = -0.5 * D^2 (centering is done inside KernelPCA).
        G = self.dist_matrix_ ** 2
        G *= -0.5
        self.embedding_ = self.kernel_pca_.fit_transform(G)
        self._n_features_out = self.embedding_.shape[1]

    def reconstruction_error(self):
        """Compute the reconstruction error for the embedding.

        Returns
        -------
        reconstruction_error : float
            Reconstruction error.

        Notes
        -----
        The cost function of an isomap embedding is

        ``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``

        Where D is the matrix of distances for the input data X,
        D_fit is the matrix of distances for the output embedding X_fit,
        and K is the isomap kernel:

        ``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
        """
        G = -0.5 * self.dist_matrix_ ** 2
        G_center = KernelCenterer().fit_transform(G)
        evals = self.kernel_pca_.eigenvalues_
        return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]

    def fit(self, X, y=None):
        """Compute the embedding vectors for data X.

        Parameters
        ----------
        X : {array-like, sparse graph, BallTree, KDTree, NearestNeighbors}
            Sample data, shape = (n_samples, n_features), in the form of a
            numpy array, sparse graph, precomputed tree, or NearestNeighbors
            object.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : object
            Returns a fitted instance of self.
        """
        self._fit_transform(X)
        return self

    def fit_transform(self, X, y=None):
        """Fit the model from data in X and transform X.

        Parameters
        ----------
        X : {array-like, sparse graph, BallTree, KDTree}
            Training vector, where `n_samples` is the number of samples
            and `n_features` is the number of features.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
            X transformed in the new space.
        """
        self._fit_transform(X)
        return self.embedding_

    def transform(self, X):
        """Transform X.

        This is implemented by linking the points X into the graph of geodesic
        distances of the training data. First the `n_neighbors` nearest
        neighbors of X are found in the training data, and from these the
        shortest geodesic distances from each point in X to each point in
        the training data are computed in order to construct the kernel.
        The embedding of X is the projection of this kernel onto the
        embedding vectors of the training set.

        Parameters
        ----------
        X : array-like, shape (n_queries, n_features)
            If neighbors_algorithm='precomputed', X is assumed to be a
            distance matrix or a sparse graph of shape
            (n_queries, n_samples_fit).

        Returns
        -------
        X_new : array-like, shape (n_queries, n_components)
            X transformed in the new space.
        """
        check_is_fitted(self)
        distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
        # Create the graph of shortest distances from X to
        # training data via the nearest neighbors of X.
        # This can be done as a single array operation, but it potentially
        # takes a lot of memory. To avoid that, use a loop:
        n_samples_fit = self.nbrs_.n_samples_fit_
        n_queries = distances.shape[0]
        G_X = np.zeros((n_queries, n_samples_fit))
        for i in range(n_queries):
            # geodesic(query, train) ~= min over neighbors j of
            # dist(query, j) + geodesic(j, train).
            G_X[i] = np.min(self.dist_matrix_[indices[i]] + distances[i][:, None], 0)
        # Apply the same -0.5 * D^2 kernel used at fit time.
        G_X **= 2
        G_X *= -0.5
        return self.kernel_pca_.transform(G_X)
| 35.252033 | 87 | 0.625077 |
eff777be5b493e0157f39796d52b76e0e725b656 | 2,382 | py | Python | hw4/controllers.py | jgkim2020/homework | bdab7f693c5b1f2fb5ab0e2cfb7b9052bd3fb6ce | [
"MIT"
] | null | null | null | hw4/controllers.py | jgkim2020/homework | bdab7f693c5b1f2fb5ab0e2cfb7b9052bd3fb6ce | [
"MIT"
] | null | null | null | hw4/controllers.py | jgkim2020/homework | bdab7f693c5b1f2fb5ab0e2cfb7b9052bd3fb6ce | [
"MIT"
] | null | null | null | import numpy as np
from cost_functions import trajectory_cost_fn
import time
class Controller():
    """Base interface for controllers; subclasses override get_action."""

    def __init__(self):
        pass

    def get_action(self, state):
        """Get the appropriate action(s) for this state(s)."""
        pass
class RandomController(Controller):
    """Controller that ignores the state and acts uniformly at random."""

    def __init__(self, env):
        # Keep the environment so its action space can be sampled later.
        self.env = env

    def get_action(self, state):
        """Randomly sample an action uniformly from the action space."""
        return self.env.action_space.sample()
class MPCcontroller(Controller):
    """ Controller built using the MPC method outlined in https://arxiv.org/abs/1708.02596 """
    def __init__(self,
                 env,
                 dyn_model,
                 horizon=5,
                 cost_fn=None,
                 num_simulated_paths=10,
                 ):
        self.env = env
        self.dyn_model = dyn_model
        self.horizon = horizon
        self.cost_fn = cost_fn
        self.num_simulated_paths = num_simulated_paths
        # Pre-sample a fixed pool of random actions once, so get_action()
        # can draw from it instead of calling env.action_space.sample()
        # repeatedly inside the rollout loop.
        num_action_candidates = self.num_simulated_paths*self.horizon
        self.action_candidates = np.array([self.env.action_space.sample() for i in range(num_action_candidates)]) # (num, action_dim)

    def get_action(self, state):
        """ YOUR CODE HERE """
        """ Note: be careful to batch your simulations through the model for speed """
        # Broadcast the current state across all simulated rollout paths.
        obs = state*np.ones((self.num_simulated_paths, state.shape[0])) # (paths, obs_dim)
        observations = [] # [(paths, obs_dim), ...]
        actions = [] # [(paths, action_dim), ...]
        next_observations = [] # [(paths, obs_dim), ...]
        for i in range(self.horizon):
            # sample from action candidates (instead of calling env.action_space.sample() every iteration)
            random_idx = np.random.choice(self.action_candidates.shape[0], obs.shape[0], replace=False)
            action = self.action_candidates[random_idx]
            #action = np.array([self.env.action_space.sample() for i in range(self.num_simulated_paths)])
            observations += [obs]
            actions += [action]
            # Batched one-step prediction through the learned dynamics model.
            obs = self.dyn_model.predict(obs, action)
            next_observations += [obs]
        costs = trajectory_cost_fn(self.cost_fn, observations, actions, next_observations) # (paths, )
        # MPC: execute only the first action of the cheapest simulated path.
        return actions[0][np.argmin(costs)]
| 38.419355 | 133 | 0.620067 |
5b1bb715d1aee846a73b5924b4976e266b238c3f | 2,828 | py | Python | app/models.py | kode-ai/capson | 3b5175aeb38807bbbe9ae3bc6b7824c492f75d8f | [
"MIT"
] | null | null | null | app/models.py | kode-ai/capson | 3b5175aeb38807bbbe9ae3bc6b7824c492f75d8f | [
"MIT"
] | null | null | null | app/models.py | kode-ai/capson | 3b5175aeb38807bbbe9ae3bc6b7824c492f75d8f | [
"MIT"
] | null | null | null | from . import db
from flask_login import UserMixin
from app import login_manager
from werkzeug.security import generate_password_hash,check_password_hash
from time import time
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class User(UserMixin,db.Model):
"""
Class to create users
"""
__tablename__ = "users"
id = db.Column(db.Integer,primary_key = True)
username = db.Column(db.String(40),unique = True, index=True)
email = db.Column(db.String(255),unique = True, index = True)
bio = db.Column(db.String)
image = db.Column(db.String(255))
posts = db.relationship("Post", backref = "user", lazy = "dynamic")
user_pass = db.Column(db.String)
comments = db.relationship('Comment',backref='user',lazy='dynamic')
photos = db.relationship('PhotoProfile',backref = 'user',lazy = "dynamic")
def save_user(self):
db.session.add(self)
db.session.commit()
@property
def password(self):
raise AttributeError("Gerrarahia")
@password.setter
def password(self,password):
self.user_pass = generate_password_hash(password)
def verify_pass(self,password):
return check_password_hash(self.user_pass, password)
def __repr__(self):
return f'User {self.username}'
class Post(db.Model):
__tablename__ = "posts"
id = db.Column(db.Integer,primary_key = True)
user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
post = db.Column(db.String)
time = db.Column(db.String)
comments = db.relationship("Comment",backref = "post", lazy = "dynamic")
def save_post(self):
db.session.add(self)
db.session.commit()
@classmethod
def get_post(cls,id):
posts = Post.query.filter_by(id=id).all()
return posts
@classmethod
def get_all_posts(cls):
posts = Post.query.order_by('-id').all()
return posts
def get_post_comments(self):
comments= Comment.query.filter_by(post_id = self.id)
return comments
class Comment(db.Model):
__tablename__ = "comments"
id = db.Column(db.Integer, primary_key = True)
name = db.Column(db.String)
content = db.Column(db.String)
post_id = db.Column(db.Integer, db.ForeignKey("posts.id"))
user_id = db.Column(db.Integer,db.ForeignKey('users.id'))
time = db.Column(db.String)
def save_comment(self):
db.session.add(self)
db.session.commit()
@classmethod
def get_comments(cls,id):
comments = Comment.query.filter_by(post_id=id).all()
return comments
class PhotoProfile(db.Model):
    """A profile photo uploaded by a user."""

    __tablename__ = 'profile_photos'
    id = db.Column(db.Integer,primary_key = True)
    pic_path = db.Column(db.String())  # path to the stored image file
    user_id = db.Column(db.Integer,db.ForeignKey("users.id"))  # owning user
| 29.768421 | 78 | 0.668317 |
bfcb379eefe5d27d3babb64b26cd3c7ed2a61cfd | 12,426 | py | Python | tests/riscv/state_transition/state_transition_merge_state_elements_force.py | noahsherrill/force-riscv | 500cec3017f619dbf853a497bf02eaeecca927c9 | [
"Apache-2.0"
] | 111 | 2020-06-12T22:31:30.000Z | 2022-03-19T03:45:20.000Z | tests/riscv/state_transition/state_transition_merge_state_elements_force.py | noahsherrill/force-riscv | 500cec3017f619dbf853a497bf02eaeecca927c9 | [
"Apache-2.0"
] | 34 | 2020-06-12T20:23:40.000Z | 2022-03-15T20:04:31.000Z | tests/riscv/state_transition/state_transition_merge_state_elements_force.py | noahsherrill/force-riscv | 500cec3017f619dbf853a497bf02eaeecca927c9 | [
"Apache-2.0"
] | 32 | 2020-06-12T19:15:26.000Z | 2022-02-20T11:38:31.000Z | #
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import RandomUtils
import StateTransition
import UtilityFunctions
from Enums import EStateElementType, EStateTransitionType
from State import State
import state_transition_test_utils as utils
from base.Sequence import Sequence
from base.StateTransitionHandler import StateTransitionHandler
from riscv.EnvRISCV import EnvRISCV
from riscv.GenThreadRISCV import GenThreadRISCV
# A test StateTransitionHandler that verifies the StateElements it is passed
# have been merged with current State data.
class StateTransitionHandlerTest(StateTransitionHandler):
    """StateTransitionHandler that verifies each StateElement it receives has
    been merged with the current State data recorded in mExpectedStateData."""

    def __init__(self, aGenThread):
        super().__init__(aGenThread)

        # Mapping from EStateElementType to the expected post-merge data; the
        # test must assign this before the StateTransition executes.
        self.mExpectedStateData = None

    # Execute the State change represented by the StateElement. Only instances
    # of the StateElement types for which the StateTransitionHandler has been
    # registered will be passed to this method. Other StateTransitionHandlers
    # will process the other StateElement types. It is important to avoid
    # making changes to entities represented by StateElements that have already
    # been processed. Changes to entities represented by StateElements that
    # will be processed later are permitted.
    #
    #  @param aStateElem A StateElement object.
    def processStateElement(self, aStateElem):
        state_elem_type = aStateElem.getStateElementType()
        if state_elem_type == EStateElementType.Memory:
            # Memory expectations are a set of expected start addresses.
            expected_mem_state_data = self.mExpectedStateData[state_elem_type]
            if aStateElem.getStartAddress() not in expected_mem_state_data:
                self.error(
                    "MemoryStateElement has unexpected start address: 0x%x"
                    % aStateElem.getStartAddress()
                )
        elif state_elem_type == EStateElementType.SystemRegister:
            # System-register expectations are keyed by register name.
            expected_sys_reg_state_data = self.mExpectedStateData[
                state_elem_type
            ]
            sys_reg_val = expected_sys_reg_state_data[aStateElem.getName()]
            if aStateElem.getValues()[0] != sys_reg_val:
                self.error(
                    "Value for StateElement %s was not merged as expected. "
                    "Expected=0x%x, Actual=0x%x"
                    % (
                        aStateElem.getName(),
                        sys_reg_val,
                        aStateElem.getValues()[0],
                    )
                )
        elif state_elem_type == EStateElementType.FloatingPointRegister:
            # Floating-point expectations are keyed by the containing
            # double-precision register name ("D<n>").
            expected_fp_reg_state_data = self.mExpectedStateData[
                state_elem_type
            ]
            fp_reg_name = "D%d" % aStateElem.getRegisterIndex()
            fp_reg_val = expected_fp_reg_state_data[fp_reg_name]
            if aStateElem.getValues()[0] != fp_reg_val:
                self.error(
                    "Value for StateElement %s was not merged as expected. "
                    "Expected=0x%x, Actual=0x%x"
                    % (fp_reg_name, fp_reg_val, aStateElem.getValues()[0])
                )
        elif state_elem_type == EStateElementType.VectorRegister:
            # Vector expectations are keyed by register name ("v<n>") and
            # compared as full value lists.
            expected_vec_reg_state_data = self.mExpectedStateData[
                state_elem_type
            ]
            vec_reg_name = "v%d" % aStateElem.getRegisterIndex()
            vec_reg_values = expected_vec_reg_state_data[vec_reg_name]
            if aStateElem.getValues() != vec_reg_values:
                self.error(
                    "Value for StateElement %s was not merged as expected. "
                    "Expected=%s, Actual=%s"
                    % (vec_reg_name, vec_reg_values, aStateElem.getValues())
                )

        # Report the StateElement as fully handled.
        return True
# This test verifies that StateElements are merged correctly with current
# State values when expected.
class MainSequence(Sequence):
    """Verify that StateElements are merged correctly with current State
    values when an explicit StateTransition executes."""

    def __init__(self, aGenThread, aName=None):
        super().__init__(aGenThread, aName)

        # Expected post-merge data per EStateElementType, consumed by the
        # test StateTransitionHandler.
        self._mExpectedStateData = {}

    def generate(self, **kargs):
        # Register the verifying handler for every StateElement type this
        # test creates, then trigger the explicit transition.
        state_trans_handler = StateTransitionHandlerTest(self.genThread)
        StateTransition.registerStateTransitionHandler(
            state_trans_handler,
            EStateTransitionType.Explicit,
            (
                EStateElementType.Memory,
                EStateElementType.SystemRegister,
                EStateElementType.FloatingPointRegister,
                EStateElementType.VectorRegister,
            ),
        )

        state = self._createState()

        state_trans_handler.mExpectedStateData = self._mExpectedStateData
        StateTransition.transitionToState(state)

    # Create a simple State to test an explicit StateTransition.
    def _createState(self):
        state = State()

        self._mExpectedStateData[
            EStateElementType.Memory
        ] = self._createMemoryStateElements(state)
        self._mExpectedStateData[
            EStateElementType.SystemRegister
        ] = self._createSystemRegisterStateElements(state)
        self._mExpectedStateData[
            EStateElementType.FloatingPointRegister
        ] = self._createFloatingPointRegisterStateElements(state)
        self._mExpectedStateData[
            EStateElementType.VectorRegister
        ] = self._createVectorRegisterStateElements(state)

        return state

    # Add randomly-generated memory StateElements to the State. The
    # StateElements only partially specify the memory values. Return a list of
    # expected memory StateElement name and value pairs. The expected values
    # are determined by combining what is specified in the StateElements and
    # the values currently in memory.
    #
    #  @param aState The State to which the memory StateElements should be
    #       added.
    def _createMemoryStateElements(self, aState):
        expected_mem_state_data = set()
        for _ in range(RandomUtils.random32(0, 5)):
            mem_size = RandomUtils.random32(1, 20)
            mem_start_addr = self.genVA(Size=mem_size, Align=1, Type="D")
            mem_values = []
            for _ in range(mem_size):
                mem_values.append(RandomUtils.random32(0, 0xFF))

            aState.addMemoryStateElementsAsBytes(mem_start_addr, mem_values)

            # Compute the start addresses for the resulting StateElements
            state_elem_size = 8
            aligned_mem_start_addr = UtilityFunctions.getAlignedValue(
                mem_start_addr, state_elem_size
            )
            bytes_remaining = mem_size
            # First chunk may be short when the start address isn't aligned.
            chunk_size = state_elem_size - (
                mem_start_addr - aligned_mem_start_addr
            )
            while bytes_remaining > 0:
                expected_mem_state_data.add(aligned_mem_start_addr)
                aligned_mem_start_addr += state_elem_size
                bytes_remaining -= chunk_size

                # The last StateElement may contain fewer specified bytes than
                # state_elem_size, but setting chunk_size to state_elem_size
                # will cause the loop to terminate at the correct time in
                # either case
                chunk_size = state_elem_size

        return expected_mem_state_data

    # Add system register StateElements to the State. The StateElements only
    # partially specify the system register values. Return a list of expected
    # system register StateElement name and value pairs. The expected values
    # are determined by combining what is specified in the StateElements and
    # the values currently in the registers.
    #
    #  @param aState The State to which the system register StateElements
    #       should be added.
    def _createSystemRegisterStateElements(self, aState):
        sys_reg_name = "mstatus"
        aState.addSystemRegisterStateElementByField(sys_reg_name, "MIE", 0x0)
        aState.addSystemRegisterStateElementByField(sys_reg_name, "MPRV", 0x1)

        self.randomInitializeRegister(sys_reg_name)
        (sys_reg_val, valid) = self.readRegister(sys_reg_name)
        utils.assert_valid_register_value(self, sys_reg_name, valid)

        # Merge the specified field values into the current register value.
        sys_reg_val = utils.combine_register_value_with_field_value(
            self, sys_reg_name, sys_reg_val, "MIE", 0x0
        )
        sys_reg_val = utils.combine_register_value_with_field_value(
            self, sys_reg_name, sys_reg_val, "MPRV", 0x1
        )
        return {sys_reg_name: sys_reg_val}

    # Add randomly-generated floating-point register StateElements to the
    # State. The StateElements only partially specify the floating-point
    # register values. Return a list of expected floating-point register
    # StateElement name and value pairs. The expected values are determined by
    # combining what is specified in the StateElements and the values
    # currently in the registers.
    #
    #  @param aState The State to which the floating-point register
    #       StateElements should be added.
    def _createFloatingPointRegisterStateElements(self, aState):
        expected_fp_reg_state_data = {}
        fp_reg_count = RandomUtils.random32(0, 10)
        fp_reg_indices = self.sample(range(0, 32), fp_reg_count)
        for fp_reg_index in fp_reg_indices:
            # Specify only the single-precision half ("S<n>") of the register.
            fp_reg_name = "S%d" % fp_reg_index
            fp_reg_val = RandomUtils.random32()
            aState.addRegisterStateElement(fp_reg_name, (fp_reg_val,))

            containing_fp_reg_name = "D%d" % fp_reg_index
            self.randomInitializeRegister(containing_fp_reg_name)
            (orig_fp_reg_val, valid) = self.readRegister(
                containing_fp_reg_name
            )
            utils.assert_valid_register_value(
                self, containing_fp_reg_name, valid
            )
            # Upper 32 bits keep their current value; lower 32 bits come from
            # the StateElement.
            combined_fp_reg_val = (
                orig_fp_reg_val & (0xFFFFFFFF << 32)
            ) | fp_reg_val
            expected_fp_reg_state_data[
                containing_fp_reg_name
            ] = combined_fp_reg_val

        return expected_fp_reg_state_data

    # Add randomly-generated vector register StateElements to the State. The
    # StateElements only partially specify the vector register values. Return a
    # list of expected vector register StateElement name and value list pairs.
    # The expected values are determined by combining what is specified in the
    # StateElements and the values currently in the registers.
    #
    #  @param aState The State to which the vector register StateElements
    #       should be added.
    def _createVectorRegisterStateElements(self, aState):
        expected_vec_reg_state_data = {}
        vec_reg_count = RandomUtils.random32(0, 10)
        vec_reg_indices = self.sample(range(0, 32), vec_reg_count)
        # Number of 64-bit values needed to cover the full vector register.
        max_reg_val_count = self.getLimitValue("MaxPhysicalVectorLen") // 64
        for vec_reg_index in vec_reg_indices:
            vec_reg_values = []
            state_elem_reg_val_count = RandomUtils.random32(
                1, max_reg_val_count
            )
            for val_index in range(state_elem_reg_val_count):
                vec_reg_values.append(RandomUtils.random64())

            vec_reg_name = "v%d" % vec_reg_index
            self.randomInitializeRegister(vec_reg_name)
            aState.addRegisterStateElement(vec_reg_name, vec_reg_values)

            # Fill in the unspecified tail of the register from its current
            # field values.
            for val_index in range(
                state_elem_reg_val_count, max_reg_val_count
            ):
                field_name = "%s_%d" % (vec_reg_name, val_index)
                (field_val, valid) = self.readRegister(
                    vec_reg_name, field=field_name
                )
                utils.assert_valid_register_value(self, vec_reg_name, valid)
                vec_reg_values.append(field_val)

            expected_vec_reg_state_data[vec_reg_name] = vec_reg_values

        return expected_vec_reg_state_data
# Module-level aliases picked up by the FORCE test framework (by convention)
# to locate the test's sequence, generator thread, and environment classes.
MainSequenceClass = MainSequence
GenThreadClass = GenThreadRISCV
EnvClass = EnvRISCV
| 41.558528 | 79 | 0.665942 |
e563a508c1b16127bb15a468957cd90b8fe20277 | 193 | py | Python | tests.py | guumeyer/myFlaskBook | e917caea14f448cc6dc73783db4fc4f91b845b2c | [
"Apache-2.0"
] | null | null | null | tests.py | guumeyer/myFlaskBook | e917caea14f448cc6dc73783db4fc4f91b845b2c | [
"Apache-2.0"
] | null | null | null | tests.py | guumeyer/myFlaskBook | e917caea14f448cc6dc73783db4fc4f91b845b2c | [
"Apache-2.0"
] | null | null | null | import os, sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import unittest
from user.tests import UserTest
if __name__ == '__main__':
unittest.main()
| 19.3 | 79 | 0.720207 |
f48652ff784630e9d4e796c14eb44d940293e595 | 20,086 | py | Python | third_party/crashpad/crashpad/build/run_tests.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14,668 | 2015-01-01T01:57:10.000Z | 2022-03-31T23:33:32.000Z | third_party/crashpad/crashpad/build/run_tests.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 113 | 2015-05-04T09:58:14.000Z | 2022-01-31T19:35:03.000Z | third_party/crashpad/crashpad/build/run_tests.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5,941 | 2015-01-02T11:32:21.000Z | 2022-03-31T16:35:46.000Z | #!/usr/bin/env python
# coding: utf-8
# Copyright 2014 The Crashpad Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import os
import pipes
import posixpath
import re
import subprocess
import sys
import tempfile
import uuid
CRASHPAD_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),
os.pardir)
IS_WINDOWS_HOST = sys.platform.startswith('win')
def _FindGNFromBinaryDir(binary_dir):
"""Attempts to determine the path to a GN binary used to generate the build
files in the given binary_dir. This is necessary because `gn` might not be
in the path or might be in a non-standard location, particularly on build
machines."""
build_ninja = os.path.join(binary_dir, 'build.ninja')
if os.path.isfile(build_ninja):
with open(build_ninja, 'rb') as f:
# Look for the always-generated regeneration rule of the form:
#
# rule gn
# command = <gn binary> ... arguments ...
#
# to extract the gn binary's full path.
found_rule_gn = False
for line in f:
if line.strip() == 'rule gn':
found_rule_gn = True
continue
if found_rule_gn:
if len(line) == 0 or line[0] != ' ':
return None
if line.startswith(' command = '):
gn_command_line_parts = line.strip().split(' ')
if len(gn_command_line_parts) > 2:
return os.path.join(binary_dir,
gn_command_line_parts[2])
return None
def _BinaryDirTargetOS(binary_dir):
    """Returns the apparent target OS of binary_dir, or None if none appear to
    be explicitly specified."""

    gn_path = _FindGNFromBinaryDir(binary_dir)

    if gn_path:
        # Look for a GN “target_os”.
        popen = subprocess.Popen([
            gn_path, '--root=' + CRASHPAD_DIR, 'args', binary_dir,
            '--list=target_os', '--short'
        ],
                                 shell=IS_WINDOWS_HOST,
                                 stdout=subprocess.PIPE,
                                 stderr=open(os.devnull))
        value = popen.communicate()[0]
        if popen.returncode == 0:
            # Expected output is a single line: target_os = "<os>"
            match = re.match('target_os = "(.*)"$', value.decode('utf-8'))
            if match:
                return match.group(1)

    # For GYP with Ninja, look for the appearance of “linux-android” in the path
    # to ar. This path is configured by gyp_crashpad_android.py.
    build_ninja_path = os.path.join(binary_dir, 'build.ninja')
    if os.path.exists(build_ninja_path):
        with open(build_ninja_path) as build_ninja_file:
            build_ninja_content = build_ninja_file.read()
            match = re.search('-linux-android(eabi)?-ar$', build_ninja_content,
                              re.MULTILINE)
            if match:
                return 'android'

    return None
def _EnableVTProcessingOnWindowsConsole():
    """Enables virtual terminal processing for ANSI/VT100-style escape sequences
    on a Windows console attached to standard output. Returns True on success.
    Returns False if standard output is not a console or if virtual terminal
    processing is not supported. The feature was introduced in Windows 10.
    """

    # Imported locally because pywin32 is only available (and only needed)
    # when running on a Windows host.
    import pywintypes
    import win32console
    import winerror

    stdout_console = win32console.GetStdHandle(win32console.STD_OUTPUT_HANDLE)
    try:
        console_mode = stdout_console.GetConsoleMode()
    except pywintypes.error as e:
        if e.winerror == winerror.ERROR_INVALID_HANDLE:
            # Standard output is not a console.
            return False
        raise

    try:
        # From <wincon.h>. This would be
        # win32console.ENABLE_VIRTUAL_TERMINAL_PROCESSING, but it’s too new to
        # be defined there.
        ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004

        stdout_console.SetConsoleMode(console_mode |
                                      ENABLE_VIRTUAL_TERMINAL_PROCESSING)
    except pywintypes.error as e:
        if e.winerror == winerror.ERROR_INVALID_PARAMETER:
            # ANSI/VT100-style escape sequence processing isn’t supported before
            # Windows 10.
            return False
        raise

    return True
def _RunOnAndroidTarget(binary_dir, test, android_device, extra_command_line):
    """Pushes |test| (and its data) from |binary_dir| to |android_device| with
    adb, runs it on the device with |extra_command_line| appended, and cleans
    up afterwards. Raises subprocess.CalledProcessError on any failure.
    """
    import shlex  # pipes.quote was removed with the pipes module in 3.13

    local_test_path = os.path.join(binary_dir, test)
    MAYBE_UNSUPPORTED_TESTS = (
        'crashpad_client_test',
        'crashpad_handler_test',
        'crashpad_minidump_test',
        'crashpad_snapshot_test',
    )
    if not os.path.exists(local_test_path) and test in MAYBE_UNSUPPORTED_TESTS:
        print('This test is not present and may not be supported, skipping')
        return

    def _adb(*args):
        """Runs a single adb command against the target device."""
        # Flush all of this script’s own buffered stdout output before running
        # adb, which will likely produce its own output on stdout.
        sys.stdout.flush()

        adb_command = ['adb', '-s', android_device]
        adb_command.extend(args)
        subprocess.check_call(adb_command, shell=IS_WINDOWS_HOST)

    def _adb_push(sources, destination):
        """Pushes each path in |sources| to |destination| on the device."""
        args = list(sources)
        args.append(destination)
        _adb('push', *args)

    def _adb_shell(command_args, env=None):
        """Runs |command_args| in an adb shell with |env| exported."""
        # Build a command to execute via “sh -c” instead of invoking it
        # directly. Here’s why:
        #
        # /system/bin/env isn’t normally present prior to Android 6.0 (M),
        # where toybox was introduced (Android platform/manifest
        # 9a2c01e8450b). Instead, set environment variables by using the
        # shell’s internal “export” command.
        #
        # adbd prior to Android 7.0 (N), and the adb client prior to SDK
        # platform-tools version 24, don’t know how to communicate a shell
        # command’s exit status. This was added in Android platform/system/core
        # 606835ae5c4b). With older adb servers and clients, adb will “exit 0”
        # indicating success even if the command failed on the device. This
        # makes subprocess.check_call() semantics difficult to implement
        # directly. As a workaround, have the device send the command’s exit
        # status over stdout and pick it back up in this function.
        #
        # Both workarounds are implemented by giving the device a simple
        # script, which adbd will run as an “sh -c” argument.
        # (env=None default avoids the mutable-default-argument pitfall.)
        env = {} if env is None else env
        adb_command = ['adb', '-s', android_device, 'shell']
        script_commands = []
        for k, v in env.items():
            script_commands.append('export %s=%s' %
                                   (shlex.quote(k), shlex.quote(v)))
        script_commands.extend([
            ' '.join(shlex.quote(x) for x in command_args), 'status=${?}',
            'echo "status=${status}"', 'exit ${status}'
        ])
        adb_command.append('; '.join(script_commands))

        child = subprocess.Popen(adb_command,
                                 shell=IS_WINDOWS_HOST,
                                 stdin=open(os.devnull),
                                 stdout=subprocess.PIPE)

        # Raw string: \d in a non-raw literal is an invalid escape sequence.
        FINAL_LINE_RE = re.compile(r'status=(\d+)$')
        final_line = None
        while True:
            # Use readline so that the test output appears “live” when running.
            data = child.stdout.readline().decode('utf-8')
            if data == '':
                break
            if final_line is not None:
                # It wasn’t really the final line.
                print(final_line, end='')
                final_line = None
            if FINAL_LINE_RE.match(data.rstrip()):
                final_line = data
            else:
                print(data, end='')

        if final_line is None:
            # Maybe there was some stderr output after the end of stdout. Old
            # versions of adb, prior to when the exit status could be
            # communicated, smush the two together.
            raise subprocess.CalledProcessError(-1, adb_command)
        status = int(FINAL_LINE_RE.match(final_line.rstrip()).group(1))
        if status != 0:
            raise subprocess.CalledProcessError(status, adb_command)

        child.wait()
        if child.returncode != 0:
            # Fixed: this previously raised with subprocess.returncode, an
            # attribute that does not exist on the subprocess module, which
            # would itself raise AttributeError instead of reporting the
            # adb failure.
            raise subprocess.CalledProcessError(child.returncode, adb_command)

    # /system/bin/mktemp isn’t normally present prior to Android 6.0 (M), where
    # toybox was introduced (Android platform/manifest 9a2c01e8450b). Fake it
    # with a host-generated name. This won’t retry if the name is in use, but
    # with 122 bits of randomness, it should be OK. This uses “mkdir” instead
    # of “mkdir -p” because the latter will not indicate failure if the
    # directory already exists.
    device_temp_dir = '/data/local/tmp/%s.%s' % (test, uuid.uuid4().hex)
    _adb_shell(['mkdir', device_temp_dir])

    try:
        # Specify test dependencies that must be pushed to the device. This
        # could be determined automatically in a GN build, following the
        # example used for Fuchsia. Since nothing like that exists for GYP,
        # hard-code it for supported tests.
        test_build_artifacts = [test, 'crashpad_handler']
        test_data = ['test/test_paths_test_data_root.txt']

        if test == 'crashpad_test_test':
            test_build_artifacts.append(
                'crashpad_test_test_multiprocess_exec_test_child')
        elif test == 'crashpad_util_test':
            test_data.append('util/net/testdata/')

        # Establish the directory structure on the device.
        device_out_dir = posixpath.join(device_temp_dir, 'out')
        device_mkdirs = [device_out_dir]
        for source_path in test_data:
            # A trailing slash could reasonably mean to copy an entire
            # directory, but will interfere with what’s needed from the path
            # split. All parent directories of any source_path need to be
            # represented in device_mkdirs, but it’s important that no
            # source_path itself wind up in device_mkdirs, even if source_path
            # names a directory, because that would cause the “adb push” of the
            # directory below to behave incorrectly.
            if source_path.endswith(posixpath.sep):
                source_path = source_path[:-1]

            device_source_path = posixpath.join(device_temp_dir, source_path)
            device_mkdir = posixpath.split(device_source_path)[0]
            if device_mkdir not in device_mkdirs:
                device_mkdirs.append(device_mkdir)
        adb_mkdir_command = ['mkdir', '-p']
        adb_mkdir_command.extend(device_mkdirs)
        _adb_shell(adb_mkdir_command)

        # Push the test binary and any other build output to the device.
        local_test_build_artifacts = []
        for artifact in test_build_artifacts:
            local_test_build_artifacts.append(os.path.join(
                binary_dir, artifact))
        _adb_push(local_test_build_artifacts, device_out_dir)

        # Push test data to the device.
        for source_path in test_data:
            _adb_push([os.path.join(CRASHPAD_DIR, source_path)],
                      posixpath.join(device_temp_dir, source_path))

        # Run the test on the device. Pass the test data root in the
        # environment.
        #
        # Because the test will not run with its standard output attached to a
        # pseudo-terminal device, Google Test will not normally enable colored
        # output, so mimic Google Test’s own logic for deciding whether to
        # enable color by checking this script’s own standard output
        # connection. The list of TERM values comes from Google Test’s
        # googletest/src/gtest.cc testing::internal::ShouldUseColor().
        env = {'CRASHPAD_TEST_DATA_ROOT': device_temp_dir}
        gtest_color = os.environ.get('GTEST_COLOR')
        if gtest_color in ('auto', None):
            if (sys.stdout.isatty() and
                (os.environ.get('TERM')
                 in ('xterm', 'xterm-color', 'xterm-256color', 'screen',
                     'screen-256color', 'tmux', 'tmux-256color', 'rxvt-unicode',
                     'rxvt-unicode-256color', 'linux', 'cygwin') or
                 (IS_WINDOWS_HOST and _EnableVTProcessingOnWindowsConsole()))):
                gtest_color = 'yes'
            else:
                gtest_color = 'no'
        env['GTEST_COLOR'] = gtest_color

        _adb_shell([posixpath.join(device_out_dir, test)] + extra_command_line,
                   env)
    finally:
        # Always remove the scratch directory from the device.
        _adb_shell(['rm', '-rf', device_temp_dir])
def _RunOnIOSTarget(binary_dir, test, is_xcuitest=False):
    """Runs the given iOS |test| app on iPhone 8 with the default OS version."""

    def xctest(binary_dir, test):
        """Returns a dict containing the xctestrun data needed to run an
        XCTest-based test app."""
        test_path = os.path.join(CRASHPAD_DIR, binary_dir)
        module_data = {
            'TestBundlePath': os.path.join(test_path, test + '_module.xctest'),
            'TestHostPath': os.path.join(test_path, test + '.app'),
            'TestingEnvironmentVariables': {
                'DYLD_FRAMEWORK_PATH': '__TESTROOT__/Debug-iphonesimulator:',
                'DYLD_INSERT_LIBRARIES':
                    ('__PLATFORMS__/iPhoneSimulator.platform/Developer/'
                     'usr/lib/libXCTestBundleInject.dylib'),
                'DYLD_LIBRARY_PATH': '__TESTROOT__/Debug-iphonesimulator',
                'IDEiPhoneInternalTestBundleName': test + '.app',
                'XCInjectBundleInto': '__TESTHOST__/' + test,
            }
        }
        return {test: module_data}

    def xcuitest(binary_dir, test):
        """Returns a dict containing the xctestrun data needed to run an
        XCUITest-based test app."""
        test_path = os.path.join(CRASHPAD_DIR, binary_dir)
        runner_path = os.path.join(test_path, test + '_module-Runner.app')
        bundle_path = os.path.join(runner_path, 'PlugIns',
                                   test + '_module.xctest')
        target_app_path = os.path.join(test_path, test + '.app')
        module_data = {
            'IsUITestBundle': True,
            'IsXCTRunnerHostedTestBundle': True,
            'TestBundlePath': bundle_path,
            'TestHostPath': runner_path,
            'UITargetAppPath': target_app_path,
            'DependentProductPaths': [
                bundle_path, runner_path, target_app_path
            ],
            'TestingEnvironmentVariables': {
                'DYLD_FRAMEWORK_PATH': '__TESTROOT__/Debug-iphonesimulator:',
                'DYLD_INSERT_LIBRARIES':
                    ('__PLATFORMS__/iPhoneSimulator.platform/Developer/'
                     'usr/lib/libXCTestBundleInject.dylib'),
                'DYLD_LIBRARY_PATH': '__TESTROOT__/Debug-iphonesimulator',
                'XCInjectBundleInto': '__TESTHOST__/' + test + '_module-Runner',
            },
        }
        return {test: module_data}

    with tempfile.NamedTemporaryFile() as f:
        import plistlib

        xctestrun_path = f.name
        print(xctestrun_path)
        if is_xcuitest:
            xctestrun_data = xcuitest(binary_dir, test)
        else:
            xctestrun_data = xctest(binary_dir, test)

        # Fixed: plistlib.writePlist() was deprecated in Python 3.4 and
        # removed in 3.9; plistlib.dump() writes the plist to the
        # already-open binary temp file instead. Flush so xcodebuild sees
        # the full contents at f.name.
        plistlib.dump(xctestrun_data, f)
        f.flush()

        subprocess.check_call([
            'xcodebuild', 'test-without-building', '-xctestrun', xctestrun_path,
            '-destination', 'platform=iOS Simulator,name=iPhone 8'
        ])
# This script is primarily used from the waterfall so that the list of tests
# that are run is maintained in-tree, rather than in a separate infrastructure
# location in the recipe.
def main(args):
    """Parses |args|, selects the tests to run for the build directory's
    target OS, and dispatches each test to Android, iOS, or host execution.
    Returns a process exit status (0 on success).
    """
    parser = argparse.ArgumentParser(description='Run Crashpad unittests.')
    parser.add_argument('binary_dir', help='Root of build dir')
    parser.add_argument('test', nargs='*', help='Specific test(s) to run.')
    parser.add_argument(
        '--gtest_filter',
        help='Google Test filter applied to Google Test binary runs.')
    # Fixed: parse_args() was previously called with no arguments, silently
    # ignoring the |args| parameter and always re-reading sys.argv.
    args = parser.parse_args(args)

    # Tell 64-bit Windows tests where to find 32-bit test executables, for
    # cross-bitted testing. This relies on the fact that the GYP build by
    # default uses {Debug,Release} for the 32-bit build and {Debug,Release}_x64
    # for the 64-bit build. This is not a universally valid assumption, and if
    # it’s not met, 64-bit tests that require 32-bit build output will disable
    # themselves dynamically.
    if (sys.platform == 'win32' and args.binary_dir.endswith('_x64') and
            'CRASHPAD_TEST_32_BIT_OUTPUT' not in os.environ):
        binary_dir_32 = args.binary_dir[:-4]
        if os.path.isdir(binary_dir_32):
            os.environ['CRASHPAD_TEST_32_BIT_OUTPUT'] = binary_dir_32

    target_os = _BinaryDirTargetOS(args.binary_dir)
    is_android = target_os == 'android'
    is_ios = target_os == 'ios'

    tests = [
        'crashpad_client_test',
        'crashpad_handler_test',
        'crashpad_minidump_test',
        'crashpad_snapshot_test',
        'crashpad_test_test',
        'crashpad_util_test',
    ]

    if is_android:
        android_device = os.environ.get('ANDROID_DEVICE')
        if not android_device:
            # Autodetect the device when exactly one is attached.
            adb_devices = subprocess.check_output(['adb', 'devices'],
                                                  shell=IS_WINDOWS_HOST)
            devices = []
            for line in adb_devices.splitlines():
                line = line.decode('utf-8')
                # Raw string: \* in a non-raw literal is an invalid escape.
                if (line == 'List of devices attached' or
                        re.match(r'^\* daemon .+ \*$', line) or line == ''):
                    continue
                (device, ignore) = line.split('\t')
                devices.append(device)
            if len(devices) != 1:
                print("Please set ANDROID_DEVICE to your device's id",
                      file=sys.stderr)
                return 2
            android_device = devices[0]
            print('Using autodetected Android device:', android_device)
    elif is_ios:
        tests.append('ios_crash_xcuitests')
    elif IS_WINDOWS_HOST:
        tests.append('snapshot/win/end_to_end_test.py')

    if args.test:
        # Restrict the run to the requested subset, rejecting unknown names.
        for t in args.test:
            if t not in tests:
                print('Unrecognized test:', t, file=sys.stderr)
                return 3
        tests = args.test

    for test in tests:
        print('-' * 80)
        print(test)
        print('-' * 80)
        if test.endswith('.py'):
            # Python-based tests are run with the current interpreter.
            subprocess.check_call([
                sys.executable,
                os.path.join(CRASHPAD_DIR, test), args.binary_dir
            ])
        else:
            extra_command_line = []
            if args.gtest_filter:
                extra_command_line.append('--gtest_filter=' +
                                          args.gtest_filter)
            if is_android:
                _RunOnAndroidTarget(args.binary_dir, test, android_device,
                                    extra_command_line)
            elif is_ios:
                _RunOnIOSTarget(args.binary_dir,
                                test,
                                is_xcuitest=test.startswith('ios'))
            else:
                subprocess.check_call([os.path.join(args.binary_dir, test)] +
                                      extra_command_line)

    return 0
# Script entry point: forward the command-line arguments (minus argv[0]) to
# main() and propagate its return value as the process exit status.
if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
| 41.845833 | 80 | 0.607836 |
d02b57a9b5cd53235d0a2472f859650cfc49b76c | 18,364 | py | Python | .vim/bundle/vim-wakatime/packages/wakatime/arguments.py | ELC/dotfiles | 2721242c2e8cbb48b85e3573c9ad129f91932a8e | [
"MIT"
] | 1 | 2022-03-18T19:25:55.000Z | 2022-03-18T19:25:55.000Z | .vim/bundle/vim-wakatime/packages/wakatime/arguments.py | ELC/dotfiles | 2721242c2e8cbb48b85e3573c9ad129f91932a8e | [
"MIT"
] | null | null | null | .vim/bundle/vim-wakatime/packages/wakatime/arguments.py | ELC/dotfiles | 2721242c2e8cbb48b85e3573c9ad129f91932a8e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
wakatime.arguments
~~~~~~~~~~~~~~~~~~
Command-line arguments.
:copyright: (c) 2016 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
from __future__ import print_function
import os
import re
import time
import traceback
from .__about__ import __version__
from .compat import basestring
from .configs import parseConfigFile
from .constants import AUTH_ERROR, DEFAULT_SYNC_OFFLINE_ACTIVITY
from .packages import argparse
class FileAction(argparse.Action):
    """argparse action that unquotes a file argument and, when it names an
    existing file, resolves it to its real path."""

    def __call__(self, parser, namespace, values, option_string=None):
        if isinstance(values, basestring) and values.startswith('"'):
            # Drop the surrounding quotes and unescape embedded ones.
            unquoted = values.strip('"')
            values = unquoted.replace('\\"', '"')
        try:
            if os.path.isfile(values):
                values = os.path.realpath(values)
        except:  # pragma: nocover
            pass
        setattr(namespace, self.dest, values)
class StoreWithoutQuotes(argparse.Action):
    """argparse action that stores a value after removing surrounding
    double-quotes and unescaping embedded ones."""

    def __call__(self, parser, namespace, values, option_string=None):
        if isinstance(values, basestring) and values.startswith('"'):
            values = values.strip('"').replace('\\"', '"')
        setattr(namespace, self.dest, values)
def parse_arguments():
    """Parse command line arguments and configs from ~/.wakatime.cfg.
    Command line arguments take precedence over config file settings.
    Returns instances of ArgumentParser and SafeConfigParser.
    """

    # define supported command line arguments
    parser = argparse.ArgumentParser(description='Common interface for the ' +
                                     'WakaTime api.')
    parser.add_argument('--entity', dest='entity', metavar='FILE',
                        action=FileAction,
                        help='Absolute path to file for the heartbeat. Can ' +
                             'also be a url, domain or app when ' +
                             '--entity-type is not file.')
    parser.add_argument('--file', dest='file', action=FileAction,
                        help=argparse.SUPPRESS)
    parser.add_argument('--key', dest='key', action=StoreWithoutQuotes,
                        help='Your wakatime api key; uses api_key from ' +
                             '~/.wakatime.cfg by default.')
    parser.add_argument('--write', dest='is_write', action='store_true',
                        help='When set, tells api this heartbeat was ' +
                             'triggered from writing to a file.')
    parser.add_argument('--plugin', dest='plugin', action=StoreWithoutQuotes,
                        help='Optional text editor plugin name and version ' +
                             'for User-Agent header.')
    parser.add_argument('--time', dest='timestamp', metavar='time',
                        type=float, action=StoreWithoutQuotes,
                        help='Optional floating-point unix epoch timestamp. ' +
                             'Uses current time by default.')
    parser.add_argument('--lineno', dest='lineno', action=StoreWithoutQuotes,
                        help='Optional line number. This is the current ' +
                             'line being edited.')
    parser.add_argument('--cursorpos', dest='cursorpos',
                        action=StoreWithoutQuotes,
                        help='Optional cursor position in the current file.')
    parser.add_argument('--entity-type', dest='entity_type',
                        action=StoreWithoutQuotes,
                        help='Entity type for this heartbeat. Can be ' +
                             '"file", "domain" or "app". Defaults to "file".')
    parser.add_argument('--category', dest='category',
                        action=StoreWithoutQuotes,
                        help='Category of this heartbeat activity. Can be ' +
                             '"coding", "building", "indexing", ' +
                             '"debugging", "running tests", ' +
                             '"writing tests", "manual testing", ' +
                             '"code reviewing", "browsing", or "designing". ' +
                             'Defaults to "coding".')
    parser.add_argument('--proxy', dest='proxy', action=StoreWithoutQuotes,
                        help='Optional proxy configuration. Supports HTTPS '+
                             'and SOCKS proxies. For example: '+
                             'https://user:pass@host:port or '+
                             'socks5://user:pass@host:port or ' +
                             'domain\\user:pass')
    parser.add_argument('--no-ssl-verify', dest='nosslverify',
                        action='store_true',
                        help='Disables SSL certificate verification for HTTPS '+
                             'requests. By default, SSL certificates are ' +
                             'verified.')
    parser.add_argument('--ssl-certs-file', dest='ssl_certs_file',
                        action=StoreWithoutQuotes,
                        help='Override the bundled Python Requests CA certs ' +
                             'file. By default, uses certifi for ca certs.')
    parser.add_argument('--project', dest='project', action=StoreWithoutQuotes,
                        help='Optional project name.')
    parser.add_argument('--alternate-project', dest='alternate_project',
                        action=StoreWithoutQuotes,
                        help='Optional alternate project name. ' +
                             'Auto-discovered project takes priority.')
    parser.add_argument('--alternate-language', dest='alternate_language',
                        action=StoreWithoutQuotes,
                        help=argparse.SUPPRESS)
    parser.add_argument('--language', dest='language',
                        action=StoreWithoutQuotes,
                        help='Optional language name. If valid, takes ' +
                             'priority over auto-detected language.')
    parser.add_argument('--local-file', dest='local_file', metavar='FILE',
                        action=FileAction,
                        help='Absolute path to local file for the ' +
                             'heartbeat. When --entity is a remote file, ' +
                             'this local file will be used for stats and ' +
                             'just the value of --entity sent with heartbeat.')
    parser.add_argument('--hostname', dest='hostname',
                        action=StoreWithoutQuotes,
                        help='Hostname of current machine.')
    parser.add_argument('--disable-offline', dest='offline',
                        action='store_false',
                        help='Disables offline time logging instead of ' +
                             'queuing logged time.')
    parser.add_argument('--disableoffline', dest='offline_deprecated',
                        action='store_true',
                        help=argparse.SUPPRESS)
    parser.add_argument('--hide-file-names', dest='hide_file_names',
                        action='store_true',
                        help='Obfuscate filenames. Will not send file names ' +
                             'to api.')
    parser.add_argument('--hide-filenames', dest='hide_filenames',
                        action='store_true',
                        help=argparse.SUPPRESS)
    parser.add_argument('--hidefilenames', dest='hidefilenames',
                        action='store_true',
                        help=argparse.SUPPRESS)
    parser.add_argument('--hide-project-names', dest='hide_project_names',
                        action='store_true',
                        help='Obfuscate project names. When a project ' +
                             'folder is detected instead of using the ' +
                             'folder name as the project, a ' +
                             '.wakatime-project file is created with a ' +
                             'random project name.')
    parser.add_argument('--exclude', dest='exclude', action='append',
                        help='Filename patterns to exclude from logging. ' +
                             'POSIX regex syntax. Can be used more than once.')
    parser.add_argument('--exclude-unknown-project',
                        dest='exclude_unknown_project', action='store_true',
                        help='When set, any activity where the project ' +
                             'cannot be detected will be ignored.')
    parser.add_argument('--include', dest='include', action='append',
                        help='Filename patterns to log. When used in ' +
                             'combination with --exclude, files matching ' +
                             'include will still be logged. POSIX regex ' +
                             'syntax. Can be used more than once.')
    parser.add_argument('--include-only-with-project-file',
                        dest='include_only_with_project_file',
                        action='store_true',
                        help='Disables tracking folders unless they contain ' +
                             'a .wakatime-project file. Defaults to false.')
    parser.add_argument('--ignore', dest='ignore', action='append',
                        help=argparse.SUPPRESS)
    parser.add_argument('--extra-heartbeats', dest='extra_heartbeats',
                        action='store_true',
                        help='Reads extra heartbeats from STDIN as a JSON ' +
                             'array until EOF.')
    parser.add_argument('--log-file', dest='log_file',
                        action=StoreWithoutQuotes,
                        help='Defaults to ~/.wakatime.log.')
    parser.add_argument('--logfile', dest='logfile', action=StoreWithoutQuotes,
                        help=argparse.SUPPRESS)
    parser.add_argument('--api-url', dest='api_url', action=StoreWithoutQuotes,
                        help='Heartbeats api url. For debugging with a ' +
                             'local server.')
    parser.add_argument('--apiurl', dest='apiurl', action=StoreWithoutQuotes,
                        help=argparse.SUPPRESS)
    parser.add_argument('--timeout', dest='timeout', type=int,
                        action=StoreWithoutQuotes,
                        help='Number of seconds to wait when sending ' +
                             'heartbeats to api. Defaults to 60 seconds.')
    parser.add_argument('--sync-offline-activity',
                        dest='sync_offline_activity',
                        action=StoreWithoutQuotes,
                        help='Amount of offline activity to sync from your ' +
                             'local ~/.wakatime.db sqlite3 file to your ' +
                             'WakaTime Dashboard before exiting. Can be ' +
                             '"none" or a positive integer number. Defaults ' +
                             'to 5, meaning for every heartbeat sent while ' +
                             'online 5 offline heartbeats are synced. Can ' +
                             'be used without --entity to only sync offline ' +
                             'activity without generating new heartbeats.')
    parser.add_argument('--config', dest='config', action=StoreWithoutQuotes,
                        help='Defaults to ~/.wakatime.cfg.')
    parser.add_argument('--verbose', dest='verbose', action='store_true',
                        help='Turns on debug messages in log file.')
    parser.add_argument('--version', action='version', version=__version__)

    # parse command line arguments
    args = parser.parse_args()

    # use current unix epoch timestamp by default
    if not args.timestamp:
        args.timestamp = time.time()

    # parse ~/.wakatime.cfg file
    configs = parseConfigFile(args.config)

    # update args from configs
    # (command-line values always win; configs only fill missing values)
    if not args.hostname:
        if configs.has_option('settings', 'hostname'):
            args.hostname = configs.get('settings', 'hostname')
    if not args.key:
        default_key = None
        # 'api_key' is the canonical config key; 'apikey' is the legacy spelling.
        if configs.has_option('settings', 'api_key'):
            default_key = configs.get('settings', 'api_key')
        elif configs.has_option('settings', 'apikey'):
            default_key = configs.get('settings', 'apikey')
        if default_key:
            args.key = default_key
        else:
            # parser.error() raises SystemExit(2); re-raise with the dedicated
            # AUTH_ERROR exit code instead.
            try:
                parser.error('Missing api key. Find your api key from wakatime.com/settings/api-key.')
            except SystemExit:
                raise SystemExit(AUTH_ERROR)

    # the api key must look like a UUIDv4
    is_valid = not not re.match(r'^[a-f0-9]{8}-[a-f0-9]{4}-4[a-f0-9]{3}-[89ab][a-f0-9]{3}-[a-f0-9]{12}$', args.key, re.I)
    if not is_valid:
        try:
            parser.error('Invalid api key. Find your api key from wakatime.com/settings/api-key.')
        except SystemExit:
            raise SystemExit(AUTH_ERROR)

    if not args.entity:
        if args.file:
            # --file is the deprecated alias of --entity
            args.entity = args.file
        elif not args.sync_offline_activity or args.sync_offline_activity == 'none':
            parser.error('argument --entity is required')

    # normalize --sync-offline-activity to a non-negative integer
    if not args.sync_offline_activity:
        args.sync_offline_activity = DEFAULT_SYNC_OFFLINE_ACTIVITY
    if args.sync_offline_activity == 'none':
        args.sync_offline_activity = 0
    try:
        args.sync_offline_activity = int(args.sync_offline_activity)
        if args.sync_offline_activity < 0:
            raise Exception('Error')
    except:
        parser.error('argument --sync-offline-activity must be "none" or an integer number')

    if not args.language and args.alternate_language:
        args.language = args.alternate_language

    # merge exclude patterns from --exclude and the 'ignore'/'exclude' configs
    if not args.exclude:
        args.exclude = []
    if configs.has_option('settings', 'ignore'):
        try:
            for pattern in configs.get('settings', 'ignore').split("\n"):
                if pattern.strip() != '':
                    args.exclude.append(pattern)
        except TypeError:  # pragma: nocover
            pass
    if configs.has_option('settings', 'exclude'):
        try:
            for pattern in configs.get('settings', 'exclude').split("\n"):
                if pattern.strip() != '':
                    args.exclude.append(pattern)
        except TypeError:  # pragma: nocover
            pass
    if not args.include_only_with_project_file and configs.has_option('settings', 'include_only_with_project_file'):
        args.include_only_with_project_file = configs.get('settings', 'include_only_with_project_file') == 'true'
    if not args.include:
        args.include = []
    if configs.has_option('settings', 'include'):
        try:
            for pattern in configs.get('settings', 'include').split("\n"):
                if pattern.strip() != '':
                    args.include.append(pattern)
        except TypeError:  # pragma: nocover
            pass
    if not args.exclude_unknown_project and configs.has_option('settings', 'exclude_unknown_project'):
        args.exclude_unknown_project = configs.getboolean('settings', 'exclude_unknown_project')

    # these options accept a boolean or a list of regexes; legacy spellings
    # are checked too
    boolean_or_list('hide_file_names', args, configs, alternative_names=['hide_filenames', 'hidefilenames'])
    boolean_or_list('hide_project_names', args, configs, alternative_names=['hide_projectnames', 'hideprojectnames'])

    if args.offline_deprecated:
        args.offline = False
    if args.offline and configs.has_option('settings', 'offline'):
        args.offline = configs.getboolean('settings', 'offline')
    if not args.proxy and configs.has_option('settings', 'proxy'):
        args.proxy = configs.get('settings', 'proxy')
    if args.proxy:
        # https/socks5 url form, or NTLM-style domain\user:pass when a
        # backslash is present
        pattern = r'^((https?|socks5)://)?([^:@]+(:([^:@])+)?@)?[^:]+(:\d+)?$'
        if '\\' in args.proxy:
            pattern = r'^.*\\.+$'
        is_valid = not not re.match(pattern, args.proxy, re.I)
        if not is_valid:
            parser.error('Invalid proxy. Must be in format ' +
                         'https://user:pass@host:port or ' +
                         'socks5://user:pass@host:port or ' +
                         'domain\\user:pass.')
    if configs.has_option('settings', 'no_ssl_verify'):
        args.nosslverify = configs.getboolean('settings', 'no_ssl_verify')
    if configs.has_option('settings', 'ssl_certs_file'):
        args.ssl_certs_file = configs.get('settings', 'ssl_certs_file')
    if not args.verbose and configs.has_option('settings', 'verbose'):
        args.verbose = configs.getboolean('settings', 'verbose')
    if not args.verbose and configs.has_option('settings', 'debug'):
        args.verbose = configs.getboolean('settings', 'debug')
    # --logfile and the WAKATIME_HOME env var are fallbacks for --log-file
    if not args.log_file and args.logfile:
        args.log_file = args.logfile
    if not args.log_file and configs.has_option('settings', 'log_file'):
        args.log_file = configs.get('settings', 'log_file')
    if not args.log_file and os.environ.get('WAKATIME_HOME'):
        home = os.environ.get('WAKATIME_HOME')
        args.log_file = os.path.join(os.path.expanduser(home), '.wakatime.log')
    if not args.api_url and args.apiurl:
        args.api_url = args.apiurl
    if not args.api_url and configs.has_option('settings', 'api_url'):
        args.api_url = configs.get('settings', 'api_url')
    if not args.timeout and configs.has_option('settings', 'timeout'):
        try:
            args.timeout = int(configs.get('settings', 'timeout'))
        except ValueError:
            print(traceback.format_exc())

    return args, configs
def boolean_or_list(config_name, args, configs, alternative_names=None):
    """Get a boolean or list of regexes from args and configs.

    Sets ``args.<config_name>`` to ``['.*']`` (match everything) when the
    command-line flag is set or the config value is "true", to ``[]`` when
    absent or "false", and otherwise to the newline-separated list of regex
    patterns found in the ``[settings]`` config value.

    ``alternative_names`` lists legacy spellings that are also checked,
    first on ``args`` and then in the config.
    """
    # Bug fix: the previous signature used a mutable default
    # (alternative_names=[]) and mutated it below via insert(), leaking
    # names from one call into the defaults of the next.
    if alternative_names is None:
        alternative_names = []

    # when argument flag present, set to wildcard regex
    for key in list(alternative_names) + [config_name]:
        if hasattr(args, key) and getattr(args, key):
            setattr(args, config_name, ['.*'])
            return

    setattr(args, config_name, [])

    # config lookup prefers the canonical name over legacy alternatives
    option = None
    for key in [config_name] + list(alternative_names):
        if configs.has_option('settings', key):
            option = configs.get('settings', key)
            break

    if option is not None:
        if option.strip().lower() == 'true':
            setattr(args, config_name, ['.*'])
        elif option.strip().lower() != 'false':
            # any other value is a newline-separated list of regex patterns
            for pattern in option.split("\n"):
                if pattern.strip() != '':
                    getattr(args, config_name).append(pattern)
| 50.312329 | 121 | 0.566924 |
2be31143df5dcfbe8a9582d556f398ccda293464 | 169 | py | Python | configs/swin/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py | Brym-Gyimah/mmdetection | d5d749afe57c77e2ec4500395faed3566fdfedae | [
"Apache-2.0"
] | 20,190 | 2018-09-10T01:11:53.000Z | 2022-03-31T22:31:33.000Z | configs/swin/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py | Joker-co/mmdet_pro | 96abfd90cf0e38c5ce398795f949e9328eb85c1b | [
"Apache-2.0"
] | 6,736 | 2018-09-17T09:45:51.000Z | 2022-03-31T22:54:10.000Z | configs/swin/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py | Joker-co/mmdet_pro | 96abfd90cf0e38c5ce398795f949e9328eb85c1b | [
"Apache-2.0"
] | 7,837 | 2018-09-11T02:58:23.000Z | 2022-03-31T22:31:38.000Z | _base_ = './mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py'
# you need to set mode='dynamic' if you are using pytorch<=1.5.0
fp16 = dict(loss_scale=dict(init_scale=512))
| 42.25 | 64 | 0.745562 |
1c0a13dd5688268e59ad9e13a7796200064d7055 | 4,979 | py | Python | download_atts/download.py | zpix1/vk_pm_downloader | 0f4c338aa0d4a611c8f280174445a6089b808280 | [
"MIT"
] | 2 | 2019-08-31T19:14:14.000Z | 2022-02-25T17:15:45.000Z | download_atts/download.py | zpix1/vk_pm_downloader | 0f4c338aa0d4a611c8f280174445a6089b808280 | [
"MIT"
] | 1 | 2022-02-25T18:43:38.000Z | 2022-02-26T13:22:47.000Z | download_atts/download.py | zpix1/vk_pm_downloader | 0f4c338aa0d4a611c8f280174445a6089b808280 | [
"MIT"
] | 2 | 2021-12-14T23:42:43.000Z | 2022-01-02T14:36:57.000Z | #! /usr/bin/env python3
import argparse
import sys
import os
import asyncio
import random
from time import sleep
from bs4 import BeautifulSoup
import requests
import aiohttp
from urllib.parse import urlparse
# Command-line interface: a global --wait throttle plus two sub-commands,
# "file" (single dialog dump) and "dir" (a whole folder of dumps).
parser = argparse.ArgumentParser(description='Скачать все вложения из файлов VK PM Downloader')
parser.add_argument('--wait', type=int, help='Задержка между отдельными запросами (в мс)', default=0)
sp = parser.add_subparsers(dest='mode')
sp.required = True
file_p = sp.add_parser('file', help='Скачать у одного файла')
file_p.add_argument('infile', action='store', type=argparse.FileType('r'), help='Входной HTML файл (скачанный с помощью VK PM Downloader v>=1.3)')
file_p.add_argument('outfile', action='store', type=argparse.FileType('w'), help='Выходной файл с измененными ссылками')
file_p.add_argument('--download', choices=['photo', 'audio', 'all'], default='all', help='Выбор видов вложений для загрузки, по умолчанию качается все')
file_p.add_argument('--dir', help='Папка для вложений, по умолчанию создается автоматически')
dir_p = sp.add_parser('dir', help='Скачать для всей папки')
dir_p.add_argument('indir', help='Папка с диалогами')
dir_p.add_argument('outdir', help='Папка для вывода')
dir_p.add_argument('--download', choices=['photo', 'audio', 'all'], default='all', help='Выбор видов вложений для загрузки, по умолчанию качается все')
# print(sys.argv)
args = parser.parse_args()
# print(args)
# Maximum number of attempts per attachment download.
NTRY = 5
def random_salt():
    """Return a random integer in [10**6, 10**7] as a string.

    Used to prefix downloaded attachment filenames so they do not collide.
    """
    # Fix: random.randint() requires integer bounds; the former float bounds
    # (10e5, 10e6) are deprecated since Python 3.10 and raise on 3.12+.
    return str(random.randint(10 ** 6, 10 ** 7))
def normalize_filename(filename):
    # Prefix with a random salt to avoid collisions; urlparse(...).path keeps
    # only the path component (drops any query string, e.g. URL signatures).
    return random_salt() + '_' + urlparse(filename).path
async def download_to(path, url, session, redo=NTRY):
    """Fetch *url* into the file *path*, retrying up to NTRY times.

    `redo` counts the remaining attempts; the recursion stops silently when
    it reaches 0.
    """
    if redo == 0:
        return
    try:
        async with session.get(url) as response:
            with open(path, 'wb') as f:
                f.write(await response.read())
        # NOTE(review): time.sleep blocks the whole event loop; asyncio.sleep
        # would let the other downloads proceed during the throttle delay.
        sleep(args.wait / 1000)
    except Exception as e:
        print('Возникла ошибка при загрузке вложения ({}) (попытка {}) ({})'.format(path, NTRY + 1 -redo, e))
        sleep(1)
        # retry with one fewer attempt remaining
        await download_to(path, url, session, redo=redo-1)
    else:
        s = 'Вложение ({}) загружено'.format(path)
        if redo != NTRY:
            s += ' (с {} попытки)'.format(NTRY + 1 - redo)
        print(s)
async def download_all(tasks):
    """Download every (path, url) pair concurrently, sharing one HTTP session."""
    async with aiohttp.ClientSession() as session:
        tasks = [asyncio.ensure_future(download_to(e[0], e[1], session)) for e in tasks]
        return await asyncio.gather(*tasks)
def download(directory, infile, outfile, download):
    """Rewrite attachment links of *infile* into *outfile* and download them.

    directory: target folder for the downloaded attachments (created if missing).
    infile/outfile: opened HTML file objects (a VK PM Downloader dialog dump).
    download: 'photo', 'audio' or 'all' - which attachment kinds to fetch.

    Returns False when an attachment tag is missing its 'data-src' attribute
    (HTML produced by an outdated downloader version), None otherwise.
    """
    html = infile.read()
    soup = BeautifulSoup(html, 'lxml')

    # Select the attachment tags matching the requested kind.
    to_download = []
    if download == 'all':
        to_download = soup.find_all(class_='download_photo_type') + soup.find_all(class_='download_audio_message_type')
    elif download == 'audio':
        to_download = soup.find_all(class_='download_audio_message_type')
    elif download == 'photo':
        to_download = soup.find_all(class_='download_photo_type')

    if not os.path.exists(directory):
        print('Директория {} была создана'.format(directory))
        os.makedirs(directory)

    tasks = set()
    for e_i, e in enumerate(to_download):
        # Fix: removed the dead statement `url = e.get` (it bound a method and
        # was immediately overwritten below).
        if not e.has_attr('data-src'):
            print('Ссылка не найдена, скорее всего при загрузке HTML использовалась устаревшая версия VK PM Downloader')
            return False
        url = e['data-src']
        # NOTE(review): normalize_filename prepends a fresh random salt each
        # call, so the exists()/duplicate checks below rarely trigger - verify
        # whether de-duplication by URL was intended.
        new_name = normalize_filename('{}'.format('_'.join(url.split('/')[-4:])))
        new_path = os.path.join(directory, new_name)
        if not os.path.exists(new_path) and (new_path, url) not in tasks:
            print('Загрузка вложения id={}'.format(e_i))
            tasks.add((new_path, url))
        elif (new_path, url) in tasks:
            print('Вложение ({}) повторяется, не загружаю'.format(new_path))
        else:
            print('Вложение ({}) уже было загружено'.format(new_path))
        # Point the document at the local copy.
        if e.name == 'a':
            e['href'] = new_path
        elif e.name == 'audio':
            e['src'] = new_path

    outfile.write(str(soup))

    print('Ожидание загрузки вложений')
    loop = asyncio.get_event_loop()
    future = asyncio.ensure_future(download_all(list(tasks)))
    loop.run_until_complete(future)
    print('Все вложения загружены')
# Entry logic: 'dir' mode iterates every .html dump in indir, 'file' mode
# processes the single infile/outfile pair.
if 'indir' in args:
    if not os.path.exists(args.indir):
        print('Директория indir не обнаружена')
        exit(1)
    else:
        if not os.path.exists(args.outdir):
            print('Директория {} была создана'.format(args.outdir))
            os.makedirs(args.outdir)
        for f in os.listdir(args.indir):
            print('Файл {}'.format(f))
            if f.endswith('.html'):
                # chdir into outdir so the per-file attachment folder is
                # created next to the rewritten html; input is reached via '..'
                os.chdir(args.outdir)
                download('atts_' + f + '_dir', open(os.path.join('..',args.indir, f), encoding='utf-8'), open(os.path.join(f), 'w', encoding='utf-8'), args.download)
                os.chdir('..')
else:
    download('atts_' + args.infile.name + '_dir', args.infile, args.outfile, args.download)
e8bee2cffab12bf872581aba2513ba4246d685be | 38,579 | py | Python | windows/native_exec/simple_x86.py | srounet/PythonForWindows | 83b02310e447eb9910649ff48ae9435d7cd6abb8 | [
"BSD-3-Clause"
] | null | null | null | windows/native_exec/simple_x86.py | srounet/PythonForWindows | 83b02310e447eb9910649ff48ae9435d7cd6abb8 | [
"BSD-3-Clause"
] | null | null | null | windows/native_exec/simple_x86.py | srounet/PythonForWindows | 83b02310e447eb9910649ff48ae9435d7cd6abb8 | [
"BSD-3-Clause"
] | 1 | 2021-07-28T23:24:09.000Z | 2021-07-28T23:24:09.000Z | import sys
import collections
import struct
import binascii
# py3
# Python 2/3 compatibility aliases used throughout the module.
is_py3 = (sys.version_info.major >= 3)
if is_py3:
    basestring = str
    int_types = int
else:
    int_types = (int, long)
class BitArray(object):
    """Fixed-size array of bits (most significant bit first).

    Basic building block for assembling x86 opcodes: bit arrays are
    concatenated with ``+`` and serialized to bytes with ``dump()``.
    """

    def __init__(self, size, bits):
        """Build a ``size``-bit array from an iterable of bits (0/1 or '0'/'1').

        ``bits`` is left-padded with zeroes up to ``size``.
        Raises ValueError when more bits than ``size`` are given, or when a
        bit is not 0/1.
        """
        self.size = size
        if len(bits) > size:
            # Bug fix: the former message read "size > len(bits)", the
            # opposite of the condition that actually failed.
            raise ValueError("len(bits) > size")
        bits_list = []
        for bit in bits:
            x = int(bit)
            if x not in (0, 1):
                raise ValueError("Not expected bits value {0}".format(x))
            bits_list.append(x)
        self.array = bits_list
        if size > len(self.array):
            # left-pad (most significant side) with zeroes
            self.array = ([0] * (size - len(self.array))) + self.array

    def dump(self):
        """Serialize to a bytearray, 8 bits per byte, MSB first."""
        res = []
        for i in range(self.size // 8):
            c = 0
            for x in self.array[i * 8: (i + 1) * 8]:
                c = (c << 1) + x
            res.append(c)
        return bytearray(res)

    def __getitem__(self, slice):
        return self.array[slice]

    def __setitem__(self, slice, value):
        # (the former `return True` was dropped: __setitem__ return values
        # are ignored by the subscript-assignment syntax)
        self.array[slice] = value

    def __repr__(self):
        return repr(self.array)

    def __add__(self, other):
        """Concatenate two bit arrays, self's bits first."""
        if not isinstance(other, BitArray):
            return NotImplemented
        return BitArray(self.size + other.size, self.array + other.array)

    def to_int(self):
        """Interpret the bits as an unsigned big-endian integer."""
        return int("".join(str(i) for i in self.array), 2)

    @classmethod
    def from_string(cls, str_base):
        """Build a BitArray from a byte string; the first byte becomes the
        most significant 8 bits."""
        l = []
        for c in bytearray(reversed(str_base)):
            for i in range(8):
                l.append(c & 1)
                c = c >> 1
        return cls(len(str_base) * 8, list(reversed(l)))

    @classmethod
    def from_int(cls, size, x):
        """Build a ``size``-bit BitArray from an integer.

        Negative values are encoded as ``size``-bit two's complement.
        """
        if x < 0:
            x = x & ((2 ** size) - 1)
        return cls(size, bin(x)[2:])
# Prefix
class Prefix(object):
    """Base class for one-byte x86 instruction prefixes.

    A prefix instance wraps the following instruction (``next``) and
    prepends its ``PREFIX_VALUE`` byte when code is generated.
    """
    PREFIX_VALUE = None  # concrete prefix byte, set by create_prefix()

    def __init__(self, next=None):
        self.next = next

    def __add__(self, other):
        # prefix + instruction -> new prefix of the same type chained to `other`
        return type(self)(other)

    def get_code_py3(self):
        return bytes([self.PREFIX_VALUE]) + self.next.get_code()

    def get_code(self):
        return chr(self.PREFIX_VALUE) + self.next.get_code()

    # On Python 3 the generated code must be bytes, not str.
    if is_py3:
        get_code = get_code_py3
def create_prefix(name, value):
    """Build a Prefix subclass called ``name + 'Type'`` whose PREFIX_VALUE is
    ``value`` and return a ready-to-use instance of it."""
    attributes = {'PREFIX_VALUE': value}
    new_type = type(name + "Type", (Prefix,), attributes)
    return new_type()
# Singleton instances for every one-byte x86 prefix: lock/rep group,
# segment-override group, then the two size-override prefixes.
LockPrefix = create_prefix('LockPrefix', 0xf0)
Repne = create_prefix('Repne', 0xf2)
Rep = create_prefix('Rep', 0xf3)
SSPrefix = create_prefix('SSPrefix', 0x36)
CSPrefix = create_prefix('CSPrefix', 0x2e)
DSPrefix = create_prefix('DSPrefix', 0x3e)
ESPrefix = create_prefix('ESPrefix', 0x26)
FSPrefix = create_prefix('FSPrefix', 0x64)
GSPrefix = create_prefix('GSPrefix', 0x65)
OperandSizeOverride = create_prefix('OperandSizeOverride', 0x66)
AddressSizeOverride = create_prefix('AddressSizeOverride', 0x67)
# Main informations about X86
# Description of a memory operand: optional base/index registers, scale,
# integer displacement, and an optional segment-override prefix name.
mem_access = collections.namedtuple('mem_access', ['base', 'index', 'scale', 'disp', 'prefix'])

# Register names in encoding order (index == 3-bit register number).
x86_regs = ['EAX', 'ECX', 'EDX', 'EBX', 'ESP', 'EBP', 'ESI', 'EDI']
x86_16bits_regs = ['AX', 'CX', 'DX', 'BX', 'SP', 'BP', 'SI', 'DI']
# Segment name -> the prefix singleton that selects it.
x86_segment_selectors = {'CS': CSPrefix, 'DS': DSPrefix, 'ES': ESPrefix, 'SS': SSPrefix,
                         'FS': FSPrefix, 'GS': GSPrefix}

# Man intel -> Sreg (Vol 2.a 3-6)
x86_segment_selectors_number = {
    "ES": "000",
    "CS": "001",
    "SS": "010",
    "DS": "011",
    "FS": "100",
    "GS": "101",
}
class X86(object):
    """Namespace of small predicates and helpers about x86 registers and
    memory operands."""

    @staticmethod
    def is_reg(name):
        """True when `name` is a known 16 or 32 bits register name."""
        try:
            upper_name = name.upper()
        except AttributeError:  # not a string-like object
            return False
        return upper_name in x86_regs or upper_name in x86_16bits_regs

    @staticmethod
    def is_seg_reg(name):
        """True when `name` is a segment register name (CS/DS/ES/SS/FS/GS)."""
        try:
            upper_name = name.upper()
        except AttributeError:
            return False
        return upper_name in x86_segment_selectors_number

    @staticmethod
    def reg_size(name):
        """Return the register width in bits (32 or 16); raise on unknown names."""
        upper_name = name.upper()
        if upper_name in x86_regs:
            return 32
        if upper_name in x86_16bits_regs:
            return 16
        raise ValueError("Unknow register <{0}>".format(name))

    @staticmethod
    def is_mem_acces(data):
        """True when `data` is a mem_access namedtuple."""
        return isinstance(data, mem_access)

    @staticmethod
    def mem_access_has_only(mem_access, names):
        """Check that exactly the fields listed in `names` are set on the
        mem_access (the 'prefix' field is ignored)."""
        if not X86.is_mem_acces(mem_access):
            raise ValueError("mem_access_has_only")
        for field_name in mem_access._fields:
            field_value = getattr(mem_access, field_name)
            # a set field outside `names` (other than prefix) disqualifies
            if field_value and field_name != 'prefix' and field_name not in names:
                return False
            # a field required by `names` must not be None
            if field_value is None and field_name in names:
                return False
        return True
def create_displacement(base=None, index=None, scale=None, disp=0, prefix=None):
    """Creates a X86 memory access description"""
    if scale is None and index is not None:
        scale = 1  # an index without an explicit scale means scale 1
    if scale:
        if index is None:
            raise ValueError("Cannot create displacement with scale and no index")
        if index.upper() == "ESP":
            raise ValueError("Cannot create displacement with index == ESP")
    return mem_access(base, index, scale, disp, prefix)
def deref(disp):
    """Create a memory access for an immediate value ``Ex: [0x42424242]``"""
    # displacement-only access: no base register, no index
    return create_displacement(disp=disp)
def mem(data):
    """Parse a memory access string of format ``[EXPR]`` or ``seg:[EXPR]``
    ``EXPR`` may describe: ``BASE | INDEX * SCALE | DISPLACEMENT`` or any combinaison (in this order)
    """
    if not isinstance(data, str):
        raise TypeError("mem need a string to parse")
    data = data.strip()
    prefix = None
    # Optional two-letter segment override ("fs:[...]") before the brackets.
    if not (data.startswith("[") and data.endswith("]")):
        if data[2] != ":":
            raise ValueError("mem acces expect <[EXPR]> or <seg:[EXPR]")
        prefix_name = data[:2].upper()
        if prefix_name not in x86_segment_selectors:
            raise ValueError("Unknow segment selector {0}".format(prefix_name))
        prefix = prefix_name
        data = data[3:]
        if not (data.startswith("[") and data.endswith("]")):
            raise ValueError("mem acces expect <[EXPR]> or <seg:[EXPR]")
    # Quick-and-dirty '+'-split parsing (translated from: "a l'arrache,
    # I don't like writing parsers").
    data = data[1:-1]
    items = data.split("+")
    parsed_items = {'prefix': prefix}
    for item in items:
        item = item.strip()
        # Index * scale
        if "*" in item:
            if 'index' in parsed_items:
                raise ValueError("Multiple index / index*scale in mem expression <{0}>".format(data))
            sub_items = item.split("*")
            if len(sub_items) != 2:
                raise ValueError("Invalid item <{0}> in mem access".format(item))
            index, scale = sub_items
            index, scale = index.strip(), scale.strip()
            if not X86.is_reg(index):
                raise ValueError("Invalid index <{0}> in mem access".format(index))
            if X86.reg_size(index) == 16:
                raise NotImplementedError("16bits modrm")
            try:
                scale = int(scale, 0)
            except ValueError:
                raise ValueError("Invalid scale <{0}> in mem access".format(scale))
            parsed_items['scale'] = scale
            parsed_items['index'] = index
        else:
            # displacement / base / index alone
            if X86.is_reg(item):
                if X86.reg_size(item) == 16:
                    raise NotImplementedError("16bits modrm")
                # first bare register is the base, a second one is the index
                if 'base' not in parsed_items:
                    parsed_items['base'] = item
                    continue
                # Already have base + index -> cannot avec another register in expression
                if 'index' in parsed_items:
                    raise ValueError("Multiple index / index*scale in mem expression <{0}>".format(data))
                parsed_items['index'] = item
                continue
            # not a register: must be an integer displacement (any base via int(x, 0))
            try:
                disp = int(item, 0)
            except ValueError:
                raise ValueError("Invalid base/index or displacement <{0}> in mem access".format(item))
            if 'disp' in parsed_items:
                raise ValueError("Multiple displacement in mem expression <{0}>".format(data))
            parsed_items['disp'] = disp
    return create_displacement(**parsed_items)
# Helper to get the BitArray associated to a register
class X86RegisterSelector(object):
    """Encode a register operand as its 3-bit register number."""
    size = 3  # bits
    # Register name -> 3-bit encoding; 16-bit names reuse the 32-bit numbers.
    reg_opcode = {v: BitArray.from_int(size=3, x=i) for i, v in enumerate(x86_regs)}
    reg_opcode.update({v: BitArray.from_int(size=3, x=i) for i, v in enumerate(x86_16bits_regs)})

    def accept_arg(self, args, instr_state):
        """Consume one argument when it names a register, else match nothing."""
        try:
            return (1, self.reg_opcode[args[0].upper()])
        except (KeyError, AttributeError):
            # unknown register name, or not a string at all
            return (None, None)

    @classmethod
    def get_reg_bits(cls, name):
        """Return the 3-bit BitArray encoding of register `name`."""
        return cls.reg_opcode[name.upper()]
# Instruction Parameters
class FixedRegister(object):
    """Match exactly one hard-coded register name, contributing no bits."""

    def __init__(self, register):
        self.reg = register.upper()

    def accept_arg(self, args, instr_state):
        candidate = args[0]
        if not isinstance(candidate, str):
            return None, None
        if candidate.upper() != self.reg:
            return None, None
        # one argument consumed, zero bits emitted
        return (1, BitArray(0, []))


RegisterEax = lambda: FixedRegister('EAX')
class RawBits(BitArray):
    """A fixed bit pattern: always matches and consumes no argument."""
    def accept_arg(self, args, instr_state):
        # zero arguments consumed, the pattern's own bits emitted unchanged
        return (0, self)
# Immediat value logic
# All 8/16 bits stuff are sign extended
class ImmediatOverflow(ValueError):
    """Raised when a value does not fit in the requested immediate size."""
    pass


def _pack_or_overflow(fmt, x, message):
    # Shared helper: struct.pack, converting struct.error to ImmediatOverflow.
    try:
        return struct.pack(fmt, x)
    except struct.error:
        raise ImmediatOverflow(message)


def accept_as_8immediat(x):
    """Encode x as a signed 8-bit little-endian immediate."""
    return _pack_or_overflow("<b", x, "8bits signed Immediat overflow")


def accept_as_unsigned_8immediat(x):
    """Encode x as an unsigned 8-bit little-endian immediate."""
    return _pack_or_overflow("<B", x, "8bits signed Immediat overflow")


def accept_as_16immediat(x):
    """Encode x as a signed 16-bit little-endian immediate."""
    return _pack_or_overflow("<h", x, "16bits signed Immediat overflow")


def accept_as_unsigned_16immediat(x):
    """Encode x as an unsigned 16-bit little-endian immediate."""
    return _pack_or_overflow("<H", x, "16bits unsigned Immediat overflow")


def accept_as_32immediat(x):
    """Encode x as a 32-bit little-endian immediate (signed first, then unsigned)."""
    try:
        return struct.pack("<i", x)
    except struct.error:
        return _pack_or_overflow("<I", x, "32bits signed Immediat overflow")
class Imm8(object):
def accept_arg(self, args, instr_state):
try:
x = int(args[0])
except (ValueError, TypeError):
return (None, None)
try:
imm8 = accept_as_8immediat(x)
except ImmediatOverflow:
return None, None
return (1, BitArray.from_string(imm8))
class UImm8(object):
def accept_arg(self, args, instr_state):
try:
x = int(args[0])
except (ValueError, TypeError):
return (None, None)
try:
imm8 = accept_as_unsigned_8immediat(x)
except ImmediatOverflow:
return None, None
return (1, BitArray.from_string(imm8))
class Imm16(object):
def accept_arg(self, args, instr_state):
try:
x = int(args[0])
except (ValueError, TypeError):
return (None, None)
try:
imm16 = accept_as_16immediat(x)
except ImmediatOverflow:
return None, None
return (1, BitArray.from_string(imm16))
class UImm16(object):
def accept_arg(self, args, instr_state):
try:
x = int(args[0])
except (ValueError, TypeError):
return (None, None)
try:
imm16 = accept_as_unsigned_16immediat(x)
except ImmediatOverflow:
return None, None
return (1, BitArray.from_string(imm16))
class Imm32(object):
def accept_arg(self, args, instr_state):
try:
x = int(args[0])
except (ValueError, TypeError):
return (None, None)
try:
imm32 = accept_as_32immediat(x)
except ImmediatOverflow:
return None, None
return (1, BitArray.from_string(imm32))
class SegmentSelectorAbsoluteAddr(object):
    """Matcher for a far-pointer operand: a 16-bit segment selector argument
    followed by a 32-bit absolute address argument."""

    def accept_arg(self, args, instr_state):
        # first argument: the segment selector (unsigned 16 bits)
        sizess, datass = UImm16().accept_arg(args, instr_state)
        if sizess is None:
            return None, None
        # second argument: the absolute address (32 bits)
        sizeabs, dataabs = Imm32().accept_arg(args[1:], instr_state)
        if sizeabs is None:
            return None, None
        # address bits first, selector appended after
        return (sizess + sizeabs, dataabs + datass)
class ModRM(object):
    """Operand matcher that encodes a ModR/M byte from two arguments.

    ``sub_modrm`` is a list of encoder classes (ModRM_REG__REG, ...) tried in
    order; each class provides ``match(arg1, arg2)`` and, when constructed,
    exposes ``mod``/``reg``/``rm``/``after`` bit fields plus a ``direction``
    bit.
    """

    def __init__(self, sub_modrm, accept_reverse=True, has_direction_bit=True):
        # accept_reverse: also try each encoder with the arguments swapped
        # has_direction_bit: patch the previously emitted opcode's direction bit
        self.accept_reverse = accept_reverse
        self.has_direction_bit = has_direction_bit
        self.sub = sub_modrm

    def accept_arg(self, args, instr_state):
        if len(args) < 2:
            raise ValueError("Missing arg for modrm")
        arg1 = args[0]
        arg2 = args[1]
        for sub in self.sub:
            # Problem in reverse sens -> need to fix it
            if sub.match(arg1, arg2):
                d = sub(arg1, arg2, 0, instr_state)
                if self.has_direction_bit:
                    # overwrite the second-to-last bit of the already emitted
                    # opcode bits (the x86 direction bit)
                    instr_state.previous[0][-2] = d.direction
                # two arguments consumed; bits: mod | reg | rm | trailing bytes
                return (2, d.mod + d.reg + d.rm + d.after)
            elif self.accept_reverse and sub.match(arg2, arg1):
                d = sub(arg2, arg1, 1, instr_state)
                if self.has_direction_bit:
                    instr_state.previous[0][-2] = d.direction
                return (2, d.mod + d.reg + d.rm + d.after)
        return (None, None)
class ModRM_REG__REG(object):
    """ModR/M encoder for the register-to-register form."""

    @classmethod
    def match(cls, arg1, arg2):
        return X86.is_reg(arg1) and X86.is_reg(arg2)

    def __init__(self, arg1, arg2, reversed, instr_state):
        # mod=11 selects the register-direct addressing form
        self.mod = BitArray(2, "11")
        if X86.reg_size(arg1) != X86.reg_size(arg2):
            raise ValueError("Register size mitmatch between {0} and {1}".format(arg1, arg2))
        if X86.reg_size(arg1) == 16:
            # 16-bit operands require the operand-size override prefix
            instr_state.prefixes.append(OperandSizeOverride)
        self.reg = X86RegisterSelector.get_reg_bits(arg2)
        self.rm = X86RegisterSelector.get_reg_bits(arg1)
        self.after = BitArray(0, "")
        self.direction = 0
class ModRM_REG__SEGREG(object):
    """ModR/M encoder for a general register with a segment register (Sreg)."""

    @classmethod
    def match(cls, arg1, arg2):
        return X86.is_reg(arg1) and X86.is_seg_reg(arg2)

    def __init__(self, arg1, arg2, reversed, instr_state):
        self.mod = BitArray(2, "11")
        self.rm = X86RegisterSelector.get_reg_bits(arg1)
        # the reg field carries the segment register number (Intel Sreg encoding)
        self.reg = BitArray(3, x86_segment_selectors_number[arg2.upper()])
        self.after = BitArray(0, "")
        # direction follows the argument order chosen by the ModRM driver
        self.direction = reversed
class ModRM_REG__MEM(object):
    """ModRM sub-encoder: register <-> memory operand.

    Handles the three 32-bit addressing shapes: pure disp32, base register
    (+disp), and SIB (base/index/scale, +disp).
    """
    @classmethod
    def match(cls, arg1, arg2):
        return X86.is_reg(arg1) and X86.is_mem_acces(arg2)
    def setup_reg_as_register(self, regname, instr_state):
        # Encode the register operand in the 'reg' field; 16-bit registers
        # additionally need the operand-size override prefix.
        self.reg = X86RegisterSelector.get_reg_bits(regname)
        if X86.reg_size(regname) == 16:
            instr_state.prefixes.append(OperandSizeOverride)
    def __init__(self, arg1, arg2, reversed, instr_state):
        # ARG1 : REG
        # ARG2 : prefix:[MEM]
        # Handle segment-selector prefix (e.g. fs:[...]):
        if arg2.prefix is not None:
            instr_state.prefixes.append(x86_segment_selectors[arg2.prefix])
        if X86.mem_access_has_only(arg2, ["disp"]):
            # Displacement-only operand: mod=00 with rm=101 selects the
            # disp32-only addressing form.
            self.mod = BitArray(2, "00")
            self.setup_reg_as_register(arg1, instr_state)
            self.rm = BitArray(3, "101")
            try:
                self.after = BitArray.from_string(accept_as_32immediat(arg2.disp))
            except ImmediatOverflow:
                raise ImmediatOverflow("Interger32 overflow for displacement {0}".format(hex(arg2.disp)))
            self.direction = not reversed
            return
        # Those registers cannot be addressed without SIB
        # No index -> no scale -> no SIB
        FIRE_UP_SIB = (arg2.base and arg2.base.upper() in ["ESP", "EBP"]) or arg2.index
        if not FIRE_UP_SIB:
            # Simple [base + disp] form: base register goes in rm.
            self.setup_reg_as_register(arg1, instr_state)
            self.rm = X86RegisterSelector.get_reg_bits(arg2.base)
            self.compute_displacement(arg2.disp)
            self.direction = not reversed
            return
        # FIRE UP THE SIB
        # Handle no base and base == EBP special case:
        # force_displacement is the minimum displacement width (in bytes)
        # that compute_displacement must emit for these encodings.
        if not arg2.base:
            force_displacement = 4
        elif arg2.base.upper() == "EBP":
            force_displacement = 1
        else:
            force_displacement = 0
        self.setup_reg_as_register(arg1, instr_state)
        # rm=100 signals that a SIB byte follows.
        self.rm = BitArray(3, "100")
        self.compute_displacement(arg2.disp, force_displacement)
        self.after = self.compute_sib(arg2) + self.after
        if not arg2.base:
            # Base-less SIB must use mod=00 (with the disp32 forced above).
            self.mod = BitArray(2, "00")
        self.direction = not reversed
    def compute_displacement(self, displacement, force_displacement=0):
        """Set self.mod and self.after for the displacement bytes.

        force_displacement: minimum displacement width (bytes) to emit even
        when the value itself would fit in a smaller (or zero) encoding.
        """
        if not displacement and not force_displacement:
            self.mod = BitArray(2, "00")
            self.after = BitArray(0, "")
            return
        # Pack in a byte (mod=01 -> disp8)
        try:
            v = accept_as_8immediat(displacement)
        except ImmediatOverflow:
            v = None
        if v is not None and force_displacement <= 1:
            self.mod = BitArray(2, "01")
            self.after = BitArray.from_string(v)
            return
        # Pack in a dword (mod=10 -> disp32)
        try:
            v = accept_as_32immediat(displacement)
        except ImmediatOverflow:
            v = None
        if v is not None and force_displacement <= 4:
            self.mod = BitArray(2, "10")
            self.after = BitArray.from_string(v)
            return
        raise ValueError("Displacement {0} is too big".format(hex(displacement)))
    def compute_sib(self, mem_access):
        """Build the SIB byte: scale (2 bits) + index (3 bits) + base (3 bits)."""
        scale = {1: 0, 2: 1, 4: 2, 8: 3}
        if mem_access.index is None:
            # No index: index field 100 means "none".
            return BitArray(2, "00") + BitArray(3, "100") + X86RegisterSelector.get_reg_bits(mem_access.base)
        if mem_access.scale not in scale:
            raise ValueError("Invalid scale for mem access <{0}>".format(mem_access.scale))
        if mem_access.base is None:
            # No base: base field 101 (with mod=00) means "disp32 only".
            return BitArray.from_int(2, scale[mem_access.scale]) + X86RegisterSelector.get_reg_bits(mem_access.index) + BitArray(3, "101")
        return BitArray.from_int(2, scale[mem_access.scale]) + X86RegisterSelector.get_reg_bits(mem_access.index) + X86RegisterSelector.get_reg_bits(mem_access.base)
class Slash(object):
    """ModRM with a /digit opcode extension (e.g. /7 for `cmp` in the Intel
    manual): a single operand is encoded in mod+rm while the ModRM 'reg'
    field carries a fixed digit instead of a second operand."""
    def __init__(self, reg_num):
        """reg_num: the /digit value (e.g. 7 for /7), stored via the register
        whose encoding number equals that digit."""
        self.reg = x86_regs[reg_num]
    def accept_arg(self, args, instr_state):
        if len(args) < 1:
            raise ValueError("Missing arg for Slash")
        # Reuse the full ModRM machinery by injecting self.reg as a fake
        # second operand; its register number lands in the 'reg' field,
        # which is exactly where the /digit must go.
        # NOTE(review): the operand-order / `reversed` handling in ModRM is
        # known to be odd (see the comment there) and needs a cleanup.
        arg_consum, value = ModRM([ModRM_REG__REG, ModRM_REG__MEM], has_direction_bit=False).accept_arg(args[:1] + [self.reg] + args[1:], instr_state)
        if value is None:
            return arg_consum, value
        # The injected register was not a caller argument: report one less.
        return arg_consum - 1, value
class ControlRegisterModRM(object):
    """ModRM encoder for `mov reg32, crX` / `mov crX, reg32`.

    The control register name ("cr0".."cr7", case-insensitive) is mapped to
    the register with the same encoding number and the pair is then encoded
    through the regular register/register ModRM.
    """
    def __init__(self, writecr = False):
        # writecr=True: the control register is the destination operand
        # (args[0]); writecr=False: it is the source (args[1]).
        self.writecr = writecr
    def accept_arg(self, args, instr_state):
        writecr = self.writecr
        if len(args) < 2:
            return None, None
        # Boolean indexing: writecr selects which arg is the GP register.
        reg = args[writecr]
        cr = args[not writecr]
        if not isinstance(cr, str):
            return None, None
        if not cr.lower().startswith("cr"):
            return None, None
        try:
            cr_number = int(cr[2:], 10)
        except ValueError:
            raise ValueError("Invalid ControlRegister {0}".format(cr))
        # Fixed: also reject negative numbers ("cr-1" previously slipped
        # through the `> 7` check and silently indexed x86_regs[-1]).
        if not 0 <= cr_number <= 7:
            raise ValueError("Invalid ControlRegister {0}".format(cr))
        # Stand-in register with the same encoding number as the CR.
        modrm_params = [reg, x86_regs[cr_number]] + args[2:]
        return ModRM([ModRM_REG__REG], has_direction_bit=False).accept_arg(modrm_params, instr_state)
# Per-encoding-attempt state handed to every encoder element:
#   previous: list of BitArrays already emitted for this instruction (so a
#             later element, e.g. ModRM, can patch bits of the opcode)
#   prefixes: prefix classes to prepend (segment / operand-size overrides)
instr_state = collections.namedtuple('instr_state', ['previous', 'prefixes'])
class Instruction(object):
    """Base class of instructions, use `encoding` to find a valid way to assemble the instruction"""
    # Candidate encodings; each is a tuple of elements exposing
    # accept_arg(args, instr_state) -> (nb_args_consumed, BitArray | None).
    encoding = []
    def __init__(self, *initial_args):
        # Try each candidate encoding in order; the first one that consumes
        # every argument wins.
        for type_encoding in self.encoding:
            args = list(initial_args)
            prefix = []
            res = []
            for element in type_encoding:
                # `res` is passed as instr_state.previous so later elements
                # can patch bits of already-emitted opcodes (direction bit).
                arg_consum, value = element.accept_arg(args, instr_state(res, prefix))
                if arg_consum is None:
                    break
                res.append(value)
                del args[:arg_consum]
            else: # if no break
                if args: # if still args: fail
                    continue
                self.value = sum(res, BitArray(0, ""))
                self.prefix = prefix
                return
        raise ValueError("Cannot encode <{0} {1}>:(".format(type(self).__name__, initial_args))
    def get_code(self):
        # Python 2 variant: chr() yields a 1-char str (== bytes on py2).
        prefix_opcode = b"".join(chr(p.PREFIX_VALUE) for p in self.prefix)
        return prefix_opcode + bytes(self.value.dump())
    def get_code_py3(self):
        # Python 3 variant of get_code.
        prefix_opcode = b"".join(bytes([p.PREFIX_VALUE]) for p in self.prefix)
        return prefix_opcode + bytes(self.value.dump())
    if is_py3:
        # Swap in the py3-compatible implementation at class-creation time.
        get_code = get_code_py3
    #def __add__(self, other):
    #    res = MultipleInstr()
    #    res += self
    #    res += other
    #    return res
    def __mul__(self, value):
        # `instr * n` -> a MultipleInstr repeating this instruction n times.
        if not isinstance(value, int_types):
            return NotImplemented
        res = MultipleInstr()
        for i in range(value):
            res += self
        return res
# Jump helpers
class DelayedJump(object):
    """Placeholder for a jump whose ":NAME" label is not resolved yet.

    `type` is the concrete jump Instruction subclass to instantiate once the
    label offset is known (the name is kept for API compatibility even
    though it shadows the builtin).
    """

    def __init__(self, type, label):
        self.type, self.label = type, label
class JmpType(Instruction):
    """Factory dispatcher: constructing a jump with a single ":label" string
    yields a DelayedJump placeholder instead of a real Instruction."""
    def __new__(cls, *initial_args):
        if (len(initial_args) == 1
                and isinstance(initial_args[0], str)
                and initial_args[0][0] == ":"):
            return DelayedJump(cls, initial_args[0])
        return super(JmpType, cls).__new__(cls)
class JmpImm(object):
    """Immediate operand for jump instructions.

    The encoded displacement is the requested jump size minus `sub` (the
    encoded instruction length), so jumps behave as if taken from the
    instruction's start address.
    """
    # Subclasses provide the width-checking packer (8- or 32-bit).
    accept_as_Ximmediat = None

    def __init__(self, sub):
        self.sub = sub

    def accept_arg(self, args, instr_state):
        try:
            offset = int(args[0]) - self.sub
        except (ValueError, TypeError):
            return (None, None)
        try:
            packed = self.accept_as_Ximmediat(offset)
        except ImmediatOverflow:
            # Value does not fit this width: let another encoding try.
            return (None, None)
        return (1, BitArray.from_string(packed))
class JmpImm8(JmpImm):
    # Short jump: 8-bit displacement.
    accept_as_Ximmediat = staticmethod(accept_as_8immediat)
class JmpImm32(JmpImm):
    # Near jump: 32-bit displacement.
    accept_as_Ximmediat = staticmethod(accept_as_32immediat)
# Instructions
# Each `encoding` entry is one candidate byte layout: an opcode (RawBits)
# followed by operand encoders, tried in order by Instruction.__init__.
# The integer given to JmpImm8/JmpImm32 is that form's encoded length,
# subtracted so the jump is computed from the instruction's start address.
class Call(JmpType):
    encoding = [(RawBits.from_int(8, 0xe8), JmpImm32(5)),
                (RawBits.from_int(8, 0xff), Slash(2)),
                (RawBits.from_int(8, 0x9a), SegmentSelectorAbsoluteAddr())]
class Jmp(JmpType):
    encoding = [(RawBits.from_int(8, 0xeb), JmpImm8(2)),
                (RawBits.from_int(8, 0xe9), JmpImm32(5)),
                (RawBits.from_int(8, 0xea), SegmentSelectorAbsoluteAddr())]
# Conditional jumps: 1-byte opcode for the short (rel8) form,
# 0x0f-prefixed 2-byte opcode for the near (rel32) form.
class Jz(JmpType):
    encoding = [(RawBits.from_int(8, 0x74), JmpImm8(2)),
                (RawBits.from_int(16, 0x0f84), JmpImm32(6))]
class Jnz(JmpType):
    encoding = [(RawBits.from_int(8, 0x75), JmpImm8(2)),
                (RawBits.from_int(16, 0x0f85), JmpImm32(6))]
class Jbe(JmpType):
    encoding = [(RawBits.from_int(8, 0x76), JmpImm8(2)),
                (RawBits.from_int(16, 0x0f86), JmpImm32(6))]
class Jnb(JmpType):
    encoding = [(RawBits.from_int(8, 0x73), JmpImm8(2)),
                (RawBits.from_int(16, 0x0f83), JmpImm32(6))]
class Push(Instruction):
    # 5-bit opcode + 3-bit register selector => 0x50+reg single-byte form.
    encoding = [(RawBits.from_int(5, 0x50 >> 3), X86RegisterSelector()),
                (RawBits.from_int(8, 0x68), Imm32()),
                (RawBits.from_int(8, 0xff), Slash(6))]
class Pop(Instruction):
    encoding = [(RawBits.from_int(5, 0x58 >> 3), X86RegisterSelector())]
class Dec(Instruction):
    encoding = [(RawBits.from_int(5, 0x48 >> 3), X86RegisterSelector())]
class Inc(Instruction):
    encoding = [(RawBits.from_int(5, 0x40 >> 3), X86RegisterSelector()),
                (RawBits.from_int(8, 0xff), Slash(0))]
# ALU instructions share the same shape: eax-with-imm32 shortcut,
# 0x81 /digit with imm32, and a ModRM register/memory form.
class Add(Instruction):
    encoding = [(RawBits.from_int(8, 0x05), RegisterEax(), Imm32()),
                (RawBits.from_int(8, 0x81), Slash(0), Imm32()),
                (RawBits.from_int(8, 0x01), ModRM([ModRM_REG__REG, ModRM_REG__MEM]))]
class And(Instruction):
    default_32_bits = True
    encoding = [(RawBits.from_int(8, 0x25), RegisterEax(), Imm32()),
                (RawBits.from_int(8, 0x81), Slash(4), Imm32()),
                (RawBits.from_int(8, 0x21), ModRM([ModRM_REG__REG, ModRM_REG__MEM]))]
class Or(Instruction):
    default_32_bits = True
    encoding = [(RawBits.from_int(8, 0x0d), RegisterEax(), Imm32()),
                (RawBits.from_int(8, 0x81), Slash(1), Imm32()),
                (RawBits.from_int(8, 0x09), ModRM([ModRM_REG__REG, ModRM_REG__MEM]))]
class Sub(Instruction):
    encoding = [(RawBits.from_int(8, 0x2D), RegisterEax(), Imm32()),
                (RawBits.from_int(8, 0x81), Slash(5), Imm32()),
                (RawBits.from_int(8, 0x29), ModRM([ModRM_REG__REG, ModRM_REG__MEM]))]
class Mov(Instruction):
    encoding = [(RawBits.from_int(8, 0x89), ModRM([ModRM_REG__REG, ModRM_REG__MEM])),
                (RawBits.from_int(8, 0xc7), Slash(0), Imm32()),
                (RawBits.from_int(5, 0xB8 >> 3), X86RegisterSelector(), Imm32()),
                (RawBits.from_int(8, 0x8C), ModRM([ModRM_REG__SEGREG])),
                (RawBits.from_int(16, 0x0f20), ControlRegisterModRM(writecr=False)),
                (RawBits.from_int(16, 0x0f22), ControlRegisterModRM(writecr=True))]
class Movsb(Instruction):
    encoding = [(RawBits.from_int(8, 0xa4),)]
class Movsd(Instruction):
    encoding = [(RawBits.from_int(8, 0xa5),)]
class Lea(Instruction):
    # lea only goes reg <- mem: no reversed matching, no direction bit.
    encoding = [(RawBits.from_int(8, 0x8d), ModRM([ModRM_REG__MEM], accept_reverse=False, has_direction_bit=False))]
class Cmp(Instruction):
    encoding = [(RawBits.from_int(8, 0x3d), RegisterEax(), Imm32()),
                (RawBits.from_int(8, 0x81), Slash(7), Imm32()),
                (RawBits.from_int(8, 0x3b), ModRM([ModRM_REG__REG, ModRM_REG__MEM]))]
class Test(Instruction):
    encoding = [(RawBits.from_int(8, 0xf7), Slash(0), Imm32()),
                (RawBits.from_int(8, 0x85), ModRM([ModRM_REG__REG, ModRM_REG__MEM], has_direction_bit=False))]
class Out(Instruction):
    encoding = [(RawBits.from_int(8, 0xee), FixedRegister('DX'), FixedRegister('AL')),
                (RawBits.from_int(16, 0x66ef), FixedRegister('DX'), FixedRegister('AX')), # 0x66 operand-size prefix hardcoded in the opcode for the 16-bit form
                (RawBits.from_int(8, 0xef), FixedRegister('DX'), FixedRegister('EAX'))]
class In(Instruction):
    encoding = [(RawBits.from_int(8, 0xec), FixedRegister('AL'), FixedRegister('DX')),
                (RawBits.from_int(16, 0x66ed), FixedRegister('AX'), FixedRegister('DX')), # 0x66 operand-size prefix hardcoded in the opcode for the 16-bit form
                (RawBits.from_int(8, 0xed), FixedRegister('EAX'), FixedRegister('DX'))]
class Xor(Instruction):
    encoding = [(RawBits.from_int(8, 0x31), ModRM([ModRM_REG__REG]))]
class Xchg(Instruction):
    # xchg eax, reg is 0x90+reg; accept the operands in either order.
    encoding = [(RawBits.from_int(5, 0x90 >> 3), RegisterEax(), X86RegisterSelector()), (RawBits.from_int(5, 0x90 >> 3), X86RegisterSelector(), RegisterEax())]
# Shift/rotate by imm8: 0xC1 with the operation selected by the /digit.
class Rol(Instruction):
    encoding = [(RawBits.from_int(8, 0xC1), Slash(0), Imm8())]
class Ror(Instruction):
    encoding = [(RawBits.from_int(8, 0xC1), Slash(1), Imm8())]
class Shr(Instruction):
    encoding = [(RawBits.from_int(8, 0xC1), Slash(5), Imm8())]
class Shl(Instruction):
    encoding = [(RawBits.from_int(8, 0xC1), Slash(4), Imm8())]
class Cpuid(Instruction):
    encoding = [(RawBits.from_int(16, 0x0fa2),)]
class Ret(Instruction):
    encoding = [(RawBits.from_int(8, 0xc3),),
                (RawBits.from_int(8, 0xc2), UImm16())] # ret imm16
# String instructions; the 16-bit (W) variants bake the 0x66 operand-size
# prefix directly into the 16-bit opcode constant.
class ScasB(Instruction):
    encoding = [(RawBits.from_int(8, 0xAE),)]
class ScasW(Instruction):
    encoding = [(RawBits.from_int(16, 0x66AF),)]
class ScasD(Instruction):
    encoding = [(RawBits.from_int(8, 0xAF),)]
class StosB(Instruction):
    encoding = [(RawBits.from_int(8, 0xAA),)]
class StosW(Instruction):
    encoding = [(RawBits.from_int(16, 0x66AB),)]
class StosD(Instruction):
    encoding = [(RawBits.from_int(8, 0xAB),)]
class CmpsB(Instruction):
    default_32_bits = True
    encoding = [(RawBits.from_int(8, 0xa6),)]
class CmpsW(Instruction):
    default_32_bits = True
    encoding = [(RawBits.from_int(16, 0x66A7),)]
class CmpsD(Instruction):
    default_32_bits = True
    encoding = [(RawBits.from_int(8, 0xa7),)]
class Nop(Instruction):
    encoding = [(RawBits.from_int(8, 0x90),)]
class Not(Instruction):
    encoding = [(RawBits.from_int(8, 0xF7), Slash(2))]
class Retf(Instruction):
    encoding = [(RawBits.from_int(8, 0xcb),)]
class Int(Instruction):
    encoding = [(RawBits.from_int(8, 0xcd), UImm8())]
class Int3(Instruction):
    encoding = [(RawBits.from_int(8, 0xcc),)]
class Iret(Instruction):
    encoding = [(RawBits.from_int(8, 0xcf),)]
class _NopArtifact(Nop):
    """Special NOP used in shellcode reduction"""
    # Distinguished from a user-written Nop so MultipleInstr can later
    # remove the padding it inserted for shrunk label jumps
    # (see _reduce_shellcode).
    pass
class Byte(Instruction):
    """Output a raw byte"""
    encoding = [(UImm8(),)]
class Raw(Instruction):
    """Output raw data"""
    def __init__(self, *initial_args):
        if len(initial_args) != 1:
            raise ValueError("raw 'opcode' only accept one argument")
        # Accept space-separated hex: "41 42" is treated like "4142".
        self.data = binascii.unhexlify(initial_args[0].replace(" ", ""))
    def get_code(self):
        # Bypass the BitArray machinery entirely: emit the bytes verbatim.
        return self.data
class Label(object):
    """Named position marker usable inside a MultipleInstr sequence."""

    def __init__(self, name):
        self.name = name
def JmpAt(addr):
    """Return a push/ret sequence that transfers control to absolute `addr`."""
    seq = MultipleInstr()
    seq += Push(addr)
    seq += Ret()
    return seq
class MultipleInstr(object):
    """Ordered instruction sequence with label / forward-jump resolution.

    `instrs` maps byte offset -> instruction. A jump to a not-yet-defined
    label reserves JUMP_SIZE bytes; once the label is known the real jump is
    written, the slack is filled with _NopArtifact padding, and the whole
    sequence is then compacted byte by byte (_reduce_shellcode).
    """
    # Bytes reserved for an unresolved labeled jump (largest jump form
    # emitted here: 0x0f-prefixed jcc + rel32 = 6 bytes).
    JUMP_SIZE = 6
    def __init__(self, init_instrs=()):
        self.instrs = {}
        self.labels = {}
        # label name -> list of DelayedJump waiting for that label.
        self.expected_labels = {}
        # List of all labeled jump already resolved
        # Will be used for 'relocation'
        self.computed_jump = []
        self.size = 0
        for i in init_instrs:
            self += i
    def get_code(self):
        """Return the assembled bytes; fails if any label is unresolved."""
        if self.expected_labels:
            raise ValueError("Unresolved labels: {0}".format(self.expected_labels.keys()))
        return b"".join([x[1].get_code() for x in sorted(self.instrs.items())])
    def add_instruction(self, instruction):
        if isinstance(instruction, Label):
            return self.add_label(instruction)
        # Change DelayedJump to LabeledJump ?
        if isinstance(instruction, DelayedJump):
            return self.add_delayed_jump(instruction)
        if isinstance(instruction, (Instruction, Prefix)):
            self.instrs[self.size] = instruction
            self.size += len(instruction.get_code())
            return
        raise ValueError("Don't know what to do with {0} of type {1}".format(instruction, type(instruction)))
    def add_label(self, label):
        if label.name not in self.expected_labels:
            # Label that have no jump before definition
            # Just registed the address of the label
            self.labels[label.name] = self.size
            return
        # Label with jmp before definition
        # Lot of stuff todo:
        # Find all delayed jump that refer to this jump
        # Replace them with real jump
        # If size of jump < JUMP_SIZE: relocate everything we can
        # Update expected_labels
        for jump_to_label in self.expected_labels[label.name]:
            if jump_to_label.offset in self.instrs:
                raise ValueError("WTF REPLACE EXISTING INSTR...")
            distance = self.size - jump_to_label.offset
            real_jump = jump_to_label.type(distance)
            self.instrs[jump_to_label.offset] = real_jump
            self.computed_jump.append((jump_to_label.offset, self.size))
            # Fill the slack between the real jump and the reserved
            # JUMP_SIZE bytes with removable NOP padding.
            for i in range(self.JUMP_SIZE - len(real_jump.get_code())):
                self.instrs[jump_to_label.offset + len(real_jump.get_code()) + i] = _NopArtifact()
        del self.expected_labels[label.name]
        self.labels[label.name] = self.size
        if not self.expected_labels:
            # No more un-resolved label (for now): time to reduce the shellcode
            self._reduce_shellcode()
    def add_delayed_jump(self, jump):
        dst = jump.label
        if dst in self.labels:
            # Jump to already defined labels
            # Nothing fancy: get offset of label and jump to it !
            distance = self.size - self.labels[dst]
            jump_instruction = jump.type(-distance)
            self.computed_jump.append((self.size, self.labels[dst]))
            return self.add_instruction(jump_instruction)
        # Jump to undefined label
        # Add label to expected ones
        # Add jump info -> offset of jump | type
        # Reserve space for call !
        jump.offset = self.size
        self.expected_labels.setdefault(dst, []).append(jump)
        self.size += self.JUMP_SIZE
        return
    def _reduce_shellcode(self):
        # Remove NOP padding one byte at a time until none is left.
        to_remove = [offset for offset, instr in self.instrs.items() if type(instr) == _NopArtifact]
        while to_remove:
            self._remove_nop_artifact(to_remove[0])
            # _remove_nop_artifact will change the offsets of the nop
            # Need to refresh these offset
            to_remove = [offset for offset, instr in self.instrs.items() if type(instr) == _NopArtifact]
    def _remove_nop_artifact(self, offset):
        # Remove a NOP from the shellcode
        for src, dst in self.computed_jump:
            # Reduce size of Jump over the nop (both sens)
            if src < offset < dst or dst < offset < src:
                old_jmp = self.instrs[src]
                old_jump_size = len(old_jmp.get_code())
                # Re-encode the jump with a 1-byte-shorter distance.
                if src < offset < dst:
                    new_jmp = type(old_jmp)(dst - src - 1)
                else:
                    new_jmp = type(old_jmp)(dst - src + 1)
                new_jmp_size = len(new_jmp.get_code())
                if new_jmp_size > old_jump_size:
                    raise ValueError("Wtf jump of smaller size is bigger.. ABORT")
                self.instrs[src] = new_jmp
                # Add other _NopArtifact if jump instruction size is reduced
                for i in range(old_jump_size - new_jmp_size):
                    self.instrs[src + new_jmp_size + i] = _NopArtifact()
        # dec offset of all Label after the NOP
        for name, labeloffset in self.labels.items():
            if labeloffset > offset:
                self.labels[name] = labeloffset - 1
        # dec offset of all instr after the NOP
        new_instr = {}
        for instroffset, instr in self.instrs.items():
            if instroffset == offset:
                continue
            if instroffset > offset:
                instroffset -= 1
            new_instr[instroffset] = instr
        self.instrs = new_instr
        # Update all computed jump
        new_computed_jump = []
        for src, dst in self.computed_jump:
            if src > offset:
                src -= 1
            if dst > offset:
                dst -= 1
            new_computed_jump.append((src, dst))
        self.computed_jump = new_computed_jump
        # dec size of the shellcode
        self.size -= 1
    def merge_shellcode(self, other):
        """Append `other`'s instructions (and labels) after this sequence."""
        shared_labels = set(self.labels) & set(other.labels)
        if shared_labels:
            raise ValueError("Cannot merge shellcode: shared labels {0}".format(shared_labels))
        for offset, instr in sorted(other.instrs.items()):
            # Re-emit any label(s) of `other` that point at this offset.
            for label_name in [name for name, label_offset in other.labels.items() if label_offset == offset]:
                self.add_instruction(Label(label_name))
            self.add_instruction(instr)
    def __iadd__(self, other):
        # `seq += x` accepts another sequence, an assembly string, or a
        # single instruction/label/prefix.
        if isinstance(other, MultipleInstr):
            self.merge_shellcode(other)
        elif isinstance(other, basestring):
            self.assemble(other)
        else:
            self.add_instruction(other)
        return self
    def assemble(self, code):
        """Parse and append every instruction of the assembly listing `code`."""
        for instr in assemble_instructions_generator(code):
            self.add_instruction(instr)
def split_in_instruction(str):
    """Yield individual instruction strings from a newline/';'-separated blob.

    (The parameter keeps its historical name `str` for API compatibility,
    even though it shadows the builtin.)
    """
    for raw_line in str.split("\n"):
        if not raw_line:
            continue
        for chunk in raw_line.split(";"):
            if not chunk:
                continue
            yield chunk.strip()
def assemble_instructions_generator(str):
    """Parse an assembly listing and yield one Instruction object per mnemonic.

    The mnemonic is resolved by capitalized name against this module's
    globals (the Instruction subclasses above). Arguments are split on
    commas; "[...]"-style operands become mem() accesses, numeric literals
    become ints, anything else stays a string (register names, labels).
    """
    for instr in split_in_instruction(str):
        data = instr.split(" ", 1)
        mnemo, args_raw = data[0], data[1:]
        try:
            instr_object = globals()[mnemo.capitalize()]
        # Fixed: was a bare `except:` that swallowed every exception;
        # only the dict lookup's KeyError means "no such mnemonic".
        except KeyError:
            raise ValueError("Unknown mnemonic <{0}>".format(mnemo))
        args = []
        if args_raw:
            for arg in args_raw[0].split(","):
                arg = arg.strip()
                # Memory operand: "[...]" or a "xx:[...]" segment-prefixed form.
                if (arg[0] == "[" or arg[2:4] == ":[") and arg[-1] == "]":
                    arg = mem(arg)
                else:
                    # Numeric literal (base auto-detected); otherwise keep
                    # the raw string (register name, ":label", ...).
                    try:
                        arg = int(arg, 0)
                    except ValueError:
                        pass
                args.append(arg)
        yield instr_object(*args)
def assemble(str):
    """Assemble the listing `str` and return the raw opcode bytes."""
    sequence = MultipleInstr()
    sequence += str
    return sequence.get_code()
def shellcode(str):
    """Assemble the listing `str` and return the MultipleInstr object itself."""
    sequence = MultipleInstr()
    sequence += str
    return sequence
# IDA : import windows.native_exec.simple_x86 as x86
# IDA testing
# Optional smoke-test helpers, active only when this module is imported
# from inside IDA (where the `midap` and `idc` modules are importable).
try:
    import midap
    import idc
    in_IDA = True
except ImportError:
    in_IDA = False
if in_IDA:
    def test_code():
        # Assemble a tiny mov/inc/dec/ret sequence.
        s = MultipleInstr()
        s += Mov("Eax", "ESI")
        s += Inc("Ecx")
        s += Dec("edi")
        s += Ret()
        return s
    def reset():
        # Undefine and zero the first 0x1000 bytes of the IDA database.
        idc.MakeUnknown(idc.MinEA(), 0x1000, 0)
        for i in range(0x1000):
            idc.PatchByte(idc.MinEA() + i, 0)
    s = test_code()
    def tst():
        # Write the assembled bytes at the database start and mark a function
        # there so IDA disassembles them.
        reset()
        midap.here(idc.MinEA()).write(s.get_code())
        idc.MakeFunction(idc.MinEA())
| 33.171969 | 165 | 0.598538 |
2b3d6ced1bae06cc953ee3c69a585a07caee1a6e | 6,577 | py | Python | tests/core/test_tasks.py | MajesticFalcon/nornir | 75f82dbb7f492d0f283abcc5eb6b5fee08db9487 | [
"Apache-2.0"
] | 955 | 2018-05-16T17:10:12.000Z | 2022-03-30T20:14:26.000Z | tests/core/test_tasks.py | MajesticFalcon/nornir | 75f82dbb7f492d0f283abcc5eb6b5fee08db9487 | [
"Apache-2.0"
] | 490 | 2018-05-16T08:00:22.000Z | 2022-03-28T21:14:39.000Z | tests/core/test_tasks.py | MajesticFalcon/nornir | 75f82dbb7f492d0f283abcc5eb6b5fee08db9487 | [
"Apache-2.0"
] | 243 | 2018-05-17T11:07:24.000Z | 2022-03-27T18:01:07.000Z | import logging
from nornir.core.task import Result
from nornir.core.exceptions import NornirSubTaskError
class CustomException(Exception):
pass
def a_task_for_testing(task, fail_on=None):
fail_on = fail_on or []
if task.host.name in fail_on:
raise CustomException()
return Result(host=task.host, stdout=task.host.name)
def a_failed_task_for_testing(task):
return Result(host=task.host, stdout=task.host.name, failed=True)
def a_failed_task_for_testing_overrides_severity(task):
return Result(
host=task.host,
stdout=task.host.name,
failed=True,
severity_level=logging.CRITICAL,
)
def a_task_to_test_dry_run(task, expected_dry_run_value, dry_run=None):
assert task.is_dry_run(dry_run) is expected_dry_run_value
def sub_task_for_testing(task, fail_on=None):
task.run(
a_task_for_testing, fail_on=fail_on,
)
def sub_task_for_testing_overrides_severity(task, fail_on=None):
task.run(
a_task_for_testing, fail_on=fail_on, severity_level=logging.DEBUG,
)
def fail_command_subtask_no_capture(task, fail_on=None):
task.run(a_task_for_testing, fail_on=fail_on)
return "I shouldn't be here"
def fail_command_subtask_capture(task, fail_on=None):
try:
task.run(a_task_for_testing, fail_on=fail_on)
except Exception:
return "I captured this succcessfully"
class Test(object):
    """Nornir core task tests: result propagation, failed-host skipping,
    severity levels, dry-run handling and subtask exception wrapping.
    The `nornir` argument is a fixture providing a configured inventory."""
    def test_task(self, nornir):
        result = nornir.run(a_task_for_testing)
        assert result
        for h, r in result.items():
            assert r.stdout.strip() == h
    def test_sub_task(self, nornir):
        result = nornir.run(sub_task_for_testing)
        assert result
        for h, r in result.items():
            # r[0] is the parent task's result, r[1] the subtask's.
            assert r[0].name == "sub_task_for_testing"
            assert r[1].name == "a_task_for_testing"
            assert h == r[1].stdout.strip()
    def test_skip_failed_host(self, nornir):
        result = nornir.run(sub_task_for_testing, fail_on=["dev3.group_2"])
        assert result.failed
        assert "dev3.group_2" in result
        for h, r in result.items():
            if h == "dev3.group_2":
                assert r.failed
            else:
                assert not r.failed
                assert h == r[1].stdout.strip()
        # A host that failed once is skipped by subsequent runs.
        result = nornir.run(a_task_for_testing)
        assert not result.failed
        assert "dev3.group_2" not in result
    def test_run_on(self, nornir):
        # Exercise the on_failed / on_good host selection flags.
        result = nornir.run(a_task_for_testing, fail_on=["dev3.group_2"])
        assert result.failed
        assert "dev3.group_2" in result
        assert "dev1.group_1" in result
        result = nornir.run(
            a_task_for_testing, fail_on=["dev3.group_2"], on_failed=True
        )
        assert result.failed
        assert "dev3.group_2" in result
        assert "dev1.group_1" in result
        result = nornir.run(
            a_task_for_testing, fail_on=["dev3.group_2"], on_failed=True, on_good=False
        )
        assert result.failed
        assert "dev3.group_2" in result
        assert "dev1.group_1" not in result
        result = nornir.run(
            a_task_for_testing, fail_on=["dev3.group_2"], on_failed=False, on_good=True
        )
        assert not result.failed
        assert "dev3.group_2" not in result
        assert "dev1.group_1" in result
    def test_severity(self, nornir):
        r = nornir.run(a_task_for_testing)
        for host, result in r.items():
            assert result[0].severity_level == logging.INFO
        r = nornir.run(a_task_for_testing, severity_level=logging.WARN)
        for host, result in r.items():
            assert result[0].severity_level == logging.WARN
        r = nornir.run(sub_task_for_testing, severity_level=logging.WARN)
        for host, result in r.items():
            for sr in result:
                assert sr.severity_level == logging.WARN
        r = nornir.run(
            sub_task_for_testing_overrides_severity,
            fail_on=["dev3.group_2"],
            severity_level=logging.WARN,
        )
        for host, result in r.items():
            if host == "dev3.group_2":
                # Failures are always reported at ERROR.
                assert result[0].severity_level == logging.ERROR
            else:
                assert result[0].severity_level == logging.WARN
                assert result[1].severity_level == logging.DEBUG
        r = nornir.run(a_failed_task_for_testing)
        for host, result in r.items():
            assert result[0].severity_level == logging.ERROR
        # Reset all failed host for next test
        nornir.data.reset_failed_hosts()
        r = nornir.run(a_failed_task_for_testing, severity_level=logging.WARN)
        for host, result in r.items():
            assert result[0].severity_level == logging.ERROR
        # Reset all failed host for next test
        nornir.data.reset_failed_hosts()
        r = nornir.run(a_failed_task_for_testing_overrides_severity)
        for host, result in r.items():
            assert result[0].severity_level == logging.CRITICAL
        # Reset all failed host for next test
        nornir.data.reset_failed_hosts()
    def test_dry_run(self, nornir):
        # Per-call dry_run overrides the global nornir.data.dry_run flag.
        host = nornir.filter(name="dev3.group_2")
        r = host.run(a_task_to_test_dry_run, expected_dry_run_value=True)
        assert not r["dev3.group_2"].failed
        r = host.run(
            a_task_to_test_dry_run, dry_run=False, expected_dry_run_value=False
        )
        assert not r["dev3.group_2"].failed
        nornir.data.dry_run = False
        r = host.run(a_task_to_test_dry_run, expected_dry_run_value=False)
        assert not r["dev3.group_2"].failed
        nornir.data.dry_run = True
        r = host.run(a_task_to_test_dry_run, expected_dry_run_value=False)
        assert r["dev3.group_2"].failed
    def test_subtask_exception_no_capture(self, nornir):
        # Uncaptured subtask failure surfaces as NornirSubTaskError on the
        # parent result, with the original exception on the subtask result.
        host = nornir.filter(name="dev1.group_1")
        r = host.run(task=fail_command_subtask_no_capture, fail_on=["dev1.group_1"])
        assert r.failed
        assert r["dev1.group_1"][0].exception.__class__ is NornirSubTaskError
        assert r["dev1.group_1"][1].exception.__class__ is CustomException
    def test_subtask_exception_capture(self, nornir):
        host = nornir.filter(name="dev1.group_1")
        r = host.run(task=fail_command_subtask_capture, fail_on=["dev1.group_1"])
        assert r.failed
        assert not r["dev1.group_1"][0].exception
        assert r["dev1.group_1"][0].result == "I captured this succcessfully"
        assert r["dev1.group_1"][1].exception.__class__ is CustomException
| 34.07772 | 87 | 0.650753 |
a961e6ed38177ba021451237a61939b0e67f2c9f | 5,220 | py | Python | Modules/Scripted/EditorLib/Testing/ThresholdThreadingTest.py | TheInterventionCentre/NorMIT-Plan-App | 765ed9a5dccc1cc134b65ccabe93fc132baeb2ea | [
"MIT"
] | null | null | null | Modules/Scripted/EditorLib/Testing/ThresholdThreadingTest.py | TheInterventionCentre/NorMIT-Plan-App | 765ed9a5dccc1cc134b65ccabe93fc132baeb2ea | [
"MIT"
] | null | null | null | Modules/Scripted/EditorLib/Testing/ThresholdThreadingTest.py | TheInterventionCentre/NorMIT-Plan-App | 765ed9a5dccc1cc134b65ccabe93fc132baeb2ea | [
"MIT"
] | null | null | null |
import unittest
import qt
import slicer
import EditorLib
from EditorLib.EditUtil import EditUtil
class ThresholdThreading(unittest.TestCase):
    """Regression test for Slicer bug 1822: a split + merge round-trip of a
    grow-cut produced label volume must leave the voxel data unchanged."""
    def setUp(self):
        pass
    def delayDisplay(self,message,msec=1000):
        """This utility method displays a small dialog and waits.
        This does two things: 1) it lets the event loop catch up
        to the state of the test so that rendering and widget updates
        have all taken place before the test continues and 2) it
        shows the user/developer/tester the state of the test
        so that we'll know when it breaks.
        """
        print(message)
        self.info = qt.QDialog()
        self.infoLayout = qt.QVBoxLayout()
        self.info.setLayout(self.infoLayout)
        self.label = qt.QLabel(message,self.info)
        self.infoLayout.addWidget(self.label)
        # Auto-close the dialog after `msec`; exec_() blocks until then.
        qt.QTimer.singleShot(msec, self.info.close)
        self.info.exec_()
    def runTest(self):
        self.test_ThresholdThreading()
    def test_ThresholdThreading(self):
        """
        Replicate the issue reported in bug 1822 where spliting
        a grow-cut produced volume causes a multi-threading related
        issue on mac release builds
        """
        #
        # first, get some sample data
        #
        self.delayDisplay("Get some data")
        import SampleData
        sampleDataLogic = SampleData.SampleDataLogic()
        head = sampleDataLogic.downloadMRHead()
        #
        # now, define an ROI in it
        #
        roi = slicer.vtkMRMLAnnotationROINode()
        slicer.mrmlScene.AddNode(roi)
        roi.SetXYZ(-2, 104, -80)
        roi.SetRadiusXYZ(30, 30, 30)
        #
        # apply the cropping to the head
        #
        cropLogic = slicer.modules.cropvolume.logic()
        cvpn = slicer.vtkMRMLCropVolumeParametersNode()
        cvpn.SetROINodeID( roi.GetID() )
        cvpn.SetInputVolumeNodeID( head.GetID() )
        cropLogic.Apply( cvpn )
        croppedHead = slicer.mrmlScene.GetNodeByID( cvpn.GetOutputVolumeNodeID() )
        #
        # create a label map and set it for editing
        #
        volumesLogic = slicer.modules.volumes.logic()
        croppedHeadLabel = volumesLogic.CreateAndAddLabelVolume( slicer.mrmlScene, croppedHead, croppedHead.GetName() + '-label' )
        selectionNode = slicer.app.applicationLogic().GetSelectionNode()
        selectionNode.SetReferenceActiveVolumeID( croppedHead.GetID() )
        selectionNode.SetReferenceActiveLabelVolumeID( croppedHeadLabel.GetID() )
        slicer.app.applicationLogic().PropagateVolumeSelection(0)
        #
        # got to the editor and do some drawing
        #
        self.delayDisplay("Paint some things")
        parameterNode = EditUtil.getParameterNode()
        lm = slicer.app.layoutManager()
        paintEffect = EditorLib.PaintEffectOptions()
        paintEffect.setMRMLDefaults()
        paintEffect.__del__()
        sliceWidget = lm.sliceWidget('Red')
        paintTool = EditorLib.PaintEffectTool(sliceWidget)
        # Paint two strokes with two different label values as grow-cut seeds.
        EditUtil.setLabel(1)
        paintTool.paintAddPoint(100,100)
        paintTool.paintApply()
        EditUtil.setLabel(2)
        paintTool.paintAddPoint(200,200)
        paintTool.paintApply()
        paintTool.cleanup()
        paintTool = None
        self.delayDisplay("Now grow cut")
        #
        # now do GrowCut
        #
        growCutLogic = EditorLib.GrowCutEffectLogic(sliceWidget.sliceLogic())
        growCutLogic.growCut()
        #
        # now split the volume, merge it back, and see if it looks right
        #
        preArray = slicer.util.array(croppedHeadLabel.GetName())
        slicer.util.selectModule('Editor')
        slicer.util.findChildren(text='Split Merge Volume')[0].clicked()
        slicer.util.findChildren(text='Merge All')[0].clicked()
        postArray = slicer.util.array(croppedHeadLabel.GetName())
        # Split + merge must be a no-op on the label data.
        if (postArray - preArray).max() != 0:
            print("!$!$!#!@#!@!@$%! Test Failed!!")
        else:
            print("Ahh... test passed.")
        self.assertEqual((postArray - preArray).max(), 0)
        self.delayDisplay("Test passed!")
#
# ThresholdThreadingTest
#
class ThresholdThreadingTest:
    """
    This class is the 'hook' for slicer to detect and recognize the test
    as a loadable scripted module (with a hidden interface)
    """
    def __init__(self, parent):
        # Slicer scripted-module metadata, read from the `parent` handle.
        parent.title = "ThresholdThreadingTest"
        parent.categories = ["Testing"]
        parent.contributors = ["Steve Pieper (Isomics Inc.)"]
        parent.helpText = """
    Self test for the editor.
    No module interface here, only used in SelfTests module
    """
        parent.acknowledgementText = """
    This DICOM Plugin was developed by
    Steve Pieper, Isomics, Inc.
    and was partially funded by NIH grant 3P41RR013218.
"""
        # don't show this module
        parent.hidden = True
        # Add this test to the SelfTest module's list for discovery when the module
        # is created. Since this module may be discovered before SelfTests itself,
        # create the list if it doesn't already exist.
        try:
            slicer.selfTests
        except AttributeError:
            slicer.selfTests = {}
        slicer.selfTests['ThresholdThreadingTest'] = self.runTest
    def runTest(self):
        # Entry point invoked by the SelfTests module.
        tester = ThresholdThreading()
        tester.setUp()
        tester.runTest()
#
# EditorLibSelfTestWidget
#
class EditorLibSelfTestWidget:
    """Placeholder widget for the hidden self-test module: shows no UI."""

    def __init__(self, parent = None):
        self.parent = parent

    def setup(self):
        # Intentionally empty -- the module is hidden anyway.
        pass

    def enter(self):
        pass

    def exit(self):
        pass
c7eaaa46a427bf22fafd87c5106762d9114f29d3 | 807 | py | Python | api/tests/integration/tests/substructure/sub_components.py | f1nzer/Indigo | 59efbd0be0b42f449f706c3a3c8d094e483e5ef4 | [
"Apache-2.0"
] | null | null | null | api/tests/integration/tests/substructure/sub_components.py | f1nzer/Indigo | 59efbd0be0b42f449f706c3a3c8d094e483e5ef4 | [
"Apache-2.0"
] | null | null | null | api/tests/integration/tests/substructure/sub_components.py | f1nzer/Indigo | 59efbd0be0b42f449f706c3a3c8d094e483e5ef4 | [
"Apache-2.0"
] | null | null | null | import os
import sys
sys.path.append(
os.path.normpath(
os.path.join(os.path.abspath(__file__), "..", "..", "..", "common")
)
)
from env_indigo import *
indigo = Indigo()
# Target molecules: chlorine paired with cyclohexane / benzene in various
# notations (separate components, covalently bound, aromatic vs Kekule).
smiles = [
    "Cl.C1CCCCC1",
    "Cl.C1=CC=CC=C1",
    "Cl.c1ccccc1",
    "Clc1ccccc1",
    "ClC1=CC=CC=C1",
    "ClC1CCC=CC1",
    "Cl.ClC1=CC=CC=C1",
]
# SMARTS queries differing only in component-level grouping: ([A]).([B])
# requires separate components, ([A].[B]) requires the same component,
# [A].[B] has no component constraint.
qsmiles = ["([Cl]).([c])", "([Cl].[c])", "[Cl].[c]"]
for item in smiles:
    print(item)
    mol = indigo.loadMolecule(item)
    matcher = indigo.substructureMatcher(mol)
    for q in qsmiles:
        qmol = indigo.loadSmarts(q)
        cnt = matcher.countMatches(qmol)
        if cnt > 0:
            print(" %s: %d" % (q, cnt))
        # Re-count after query optimization; results should agree.
        qmol.optimize()
        cnt = matcher.countMatches(qmol)
        if cnt > 0:
            print(" %s (opt): %d" % (q, cnt))
87a4c4c40d9d8d74b94a40251c85bf34b8d67eb6 | 2,065 | py | Python | Num-15.py | SESCNCFUARTYOM/robot | 7d3680dee3c7f9de7edf43300749bbf2bf3bbca2 | [
"MIT"
] | null | null | null | Num-15.py | SESCNCFUARTYOM/robot | 7d3680dee3c7f9de7edf43300749bbf2bf3bbca2 | [
"MIT"
] | null | null | null | Num-15.py | SESCNCFUARTYOM/robot | 7d3680dee3c7f9de7edf43300749bbf2bf3bbca2 | [
"MIT"
] | null | null | null | XAXIS = "ABCDEFGH"
YAXIS = "12345678"
FORBIDDEN = {
"A2": ["right"],
"B2": ["left"],
"B6": ["up"],
"B7": ["down"],
"C5": ["up"],
"C6": ["down"],
"D4": ["up"],
"D5": ["down"],
"D7": ["up"],
"D8": ["down"],
"E2": ["up"],
"E3": ["down"],
"E8": ["right"],
"F8": ["left"],
"F6": ["right"],
"G6": ["left"],
"G1": ["up"],
"G2": ["down"],
"G4": ["right"],
"H4": ["left"],
"H6": ["up"],
"H7": ["down"]
}
def move(position, direction):
    """Return the cell reached by one step from *position* in *direction*.

    *position* is a two-character cell name such as "C5" (column letter from
    XAXIS, row digit from YAXIS).  If the step would leave the board, or the
    direction is not one of left/right/up/down, the original position is
    returned unchanged.  Walls are NOT checked here (see can_move).
    """
    col = XAXIS.index(position[0])
    row = YAXIS.index(position[1])
    # Column/row deltas per direction; unknown directions map to no movement.
    offsets = {
        "left": (-1, 0),
        "right": (1, 0),
        "up": (0, 1),
        "down": (0, -1),
    }
    d_col, d_row = offsets.get(direction, (0, 0))
    new_col = col + d_col
    new_row = row + d_row
    if 0 <= new_col < len(XAXIS) and 0 <= new_row < len(YAXIS):
        return XAXIS[new_col] + YAXIS[new_row]
    return position
def can_move(position, direction):
    """Return True if a step in *direction* from *position* is allowed.

    A step is disallowed when it would leave the 8x8 board or when the
    FORBIDDEN table lists the direction as walled off from that cell.
    """
    # Board-edge checks: each direction is blocked on exactly one border.
    at_edge = {
        "left": position[0] == "A",
        "right": position[0] == "H",
        "up": position[1] == "8",
        "down": position[1] == "1",
    }
    if at_edge.get(direction, False):
        return False
    # Internal walls: FORBIDDEN maps a cell to the directions blocked there.
    return direction not in FORBIDDEN.get(position, [])
def execute(start):
    """Run one full movement cycle from *start* and return the final cell.

    The robot slides as far as possible in each of the four directions in
    fixed order — right, down, left, up — stopping at walls and board edges.
    """
    current = start
    for heading in ("right", "down", "left", "up"):
        # Keep stepping in this heading until a wall or edge blocks it.
        while can_move(current, heading):
            current = move(current, heading)
    return current
if __name__ == "__main__":
    # Run the fixed right/down/left/up cycle from every cell on the board and
    # collect the starting cells that are fixed points, i.e. where the robot
    # ends exactly where it began.
    result = []
    for x in XAXIS:
        for y in YAXIS:
            start = x + y
            finish = execute(start)
            if start == finish:
                result.append(start)
    # Report how many such cells exist.
    print(len(result))
print(", ".join(result)) | 19.855769 | 53 | 0.489588 |
3bbae437ccd4ccca7700b5f17d149941e66aa52e | 1,741 | py | Python | scripts/run_submission.py | edervishaj/spotify-recsys-challenge | 4077201ac7e4ed9da433bd10a92c183614182437 | [
"Apache-2.0"
] | 3 | 2018-10-12T20:19:57.000Z | 2019-12-11T01:11:38.000Z | scripts/run_submission.py | kiminh/spotify-recsys-challenge | 5e7844a77ce3c26658400f161d2d74d682f30e69 | [
"Apache-2.0"
] | null | null | null | scripts/run_submission.py | kiminh/spotify-recsys-challenge | 5e7844a77ce3c26658400f161d2d74d682f30e69 | [
"Apache-2.0"
] | 4 | 2018-10-27T20:30:18.000Z | 2020-10-14T07:43:27.000Z | from scipy import sparse
from boosts.hole_boost import HoleBoost
from boosts.tail_boost import TailBoost
from boosts.album_boost import AlbumBoost
from boosts.match_boost import MatchBoost
from utils.post_processing import *
from utils.submitter import Submitter
from utils.pre_processing import *
def submission(boost, eurm_ens, sim, name):
    """
    Create and upload a challenge submission from an ensemble EURM,
    optionally boosting it first.

    :param boost: if True, apply HoleBoost, TailBoost and AlbumBoost
                  (in that order) before building the recommendation list
    :param eurm_ens: estimated user-rating matrix from the ensemble
                     (playlists x tracks, 10k x 2.2M per original docs)
    :param sim: track-track similarity matrix used by the boosters
    :param name: name under which the submission is uploaded
    """
    # INIT: online-mode datareader and the submitter that talks to the server.
    dr = Datareader(mode='online', only_load=True, verbose=False)
    sb = Submitter(dr)
    if boost:
        # HOLEBOOST: re-rank using similarity with L1 row normalization.
        # Category/k/gamma values are tuned constants — meaning of the
        # category ids is defined elsewhere in the project; not visible here.
        hb = HoleBoost(similarity=sim, eurm=eurm_ens, datareader=dr, norm=norm_l1_row)
        eurm_ens = hb.boost_eurm(categories=[8, 10], k=300, gamma=5)
        # TAILBOOST: per-category parameters are parallel lists
        # (one last_tracks/k/gamma entry per category).
        tb = TailBoost(similarity=sim, eurm=eurm_ens, datareader=dr, norm=norm_l2_row)
        eurm_ens = tb.boost_eurm(categories=[9, 7, 6, 5],
                                 last_tracks=[10, 3, 3, 3],
                                 k=[100, 80, 100, 100],
                                 gamma=[0.01, 0.01, 0.01, 0.01])
        # ALBUMBOOST: operates on the already hole/tail-boosted matrix.
        ab = AlbumBoost(dr, eurm_ens)
        eurm_ens = ab.boost_eurm(categories=[3, 4, 7, 9], gamma=2, top_k=[3, 3, 10, 40])
    # SUBMISSION: convert the (possibly boosted) EURM to a ranked list and upload.
    rec_list = eurm_to_recommendation_list_submission(eurm_ens, datareader=dr)
    sb.submit(rec_list, name=name)
if __name__ == '__main__':
    # SETTINGS: fill in the two .npz paths below before running — they are
    # left as placeholders in the repository.
    boost = True
    eurm = sparse.load_npz(ROOT_DIR + '')        # ensemble EURM
    similarity = sparse.load_npz(ROOT_DIR + '')  # track-track similarity
    # BUG FIX: `submission` has no default for its required `name` parameter,
    # so the original call (which omitted it) raised TypeError before doing
    # any work.  Pass an explicit submission name.
    submission(boost=boost, eurm_ens=eurm, sim=similarity, name='ensemble_boosted')
| 32.240741 | 88 | 0.64618 |
390b8934d6f2031e8884266394456ba20dde1416 | 71,735 | py | Python | src/sentry/south_migrations/0300_auto__add_processingissue__add_unique_processingissue_project_checksum.py | seukjung/sentry-custom | c5f6bb2019aef3caff7f3e2b619f7a70f2b9b963 | [
"BSD-3-Clause"
] | null | null | null | src/sentry/south_migrations/0300_auto__add_processingissue__add_unique_processingissue_project_checksum.py | seukjung/sentry-custom | c5f6bb2019aef3caff7f3e2b619f7a70f2b9b963 | [
"BSD-3-Clause"
] | 8 | 2019-12-28T23:49:55.000Z | 2022-03-02T04:34:18.000Z | src/sentry/south_migrations/0300_auto__add_processingissue__add_unique_processingissue_project_checksum.py | seukjung/sentry-custom | c5f6bb2019aef3caff7f3e2b619f7a70f2b9b963 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Create the event-reprocessing tables — ProcessingIssue,
        ReprocessingReport, RawEvent and EventProcessingIssue — each followed
        by its composite unique constraint.  Mirrored exactly by backwards().
        """
        # Adding model 'ProcessingIssue'
        db.create_table('sentry_processingissue', (
            ('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
            ('project', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(to=orm['sentry.Project'])),
            ('checksum', self.gf('django.db.models.fields.CharField')(max_length=40, db_index=True)),
            ('type', self.gf('django.db.models.fields.CharField')(max_length=30)),
            ('data', self.gf('sentry.db.models.fields.gzippeddict.GzippedDictField')()),
            ('datetime', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
        ))
        db.send_create_signal('sentry', ['ProcessingIssue'])
        # Adding unique constraint on 'ProcessingIssue', fields ['project', 'checksum', 'type']
        db.create_unique('sentry_processingissue', ['project_id', 'checksum', 'type'])
        # Adding model 'ReprocessingReport'
        db.create_table('sentry_reprocessingreport', (
            ('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
            ('project', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(to=orm['sentry.Project'])),
            ('event_id', self.gf('django.db.models.fields.CharField')(max_length=32, null=True)),
            ('datetime', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
        ))
        db.send_create_signal('sentry', ['ReprocessingReport'])
        # Adding unique constraint on 'ReprocessingReport', fields ['project', 'event_id']
        db.create_unique('sentry_reprocessingreport', ['project_id', 'event_id'])
        # Adding model 'RawEvent'
        db.create_table('sentry_rawevent', (
            ('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
            ('project', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(to=orm['sentry.Project'])),
            ('event_id', self.gf('django.db.models.fields.CharField')(max_length=32, null=True)),
            ('datetime', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('data', self.gf('sentry.db.models.fields.node.NodeField')(null=True, blank=True)),
        ))
        db.send_create_signal('sentry', ['RawEvent'])
        # Adding unique constraint on 'RawEvent', fields ['project', 'event_id']
        db.create_unique('sentry_rawevent', ['project_id', 'event_id'])
        # Adding model 'EventProcessingIssue' (join table: RawEvent <-> ProcessingIssue)
        db.create_table('sentry_eventprocessingissue', (
            ('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
            ('raw_event', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(to=orm['sentry.RawEvent'])),
            ('processing_issue', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(to=orm['sentry.ProcessingIssue'])),
        ))
        db.send_create_signal('sentry', ['EventProcessingIssue'])
        # Adding unique constraint on 'EventProcessingIssue', fields ['raw_event', 'processing_issue']
        db.create_unique('sentry_eventprocessingissue', ['raw_event_id', 'processing_issue_id'])
    def backwards(self, orm):
        """Reverse forwards(): drop the unique constraints first (so the
        tables can be deleted cleanly), then delete the four tables.
        """
        # Removing unique constraint on 'EventProcessingIssue', fields ['raw_event', 'processing_issue']
        db.delete_unique('sentry_eventprocessingissue', ['raw_event_id', 'processing_issue_id'])
        # Removing unique constraint on 'RawEvent', fields ['project', 'event_id']
        db.delete_unique('sentry_rawevent', ['project_id', 'event_id'])
        # Removing unique constraint on 'ReprocessingReport', fields ['project', 'event_id']
        db.delete_unique('sentry_reprocessingreport', ['project_id', 'event_id'])
        # Removing unique constraint on 'ProcessingIssue', fields ['project', 'checksum', 'type']
        db.delete_unique('sentry_processingissue', ['project_id', 'checksum', 'type'])
        # Deleting model 'ProcessingIssue'
        db.delete_table('sentry_processingissue')
        # Deleting model 'ReprocessingReport'
        db.delete_table('sentry_reprocessingreport')
        # Deleting model 'RawEvent'
        db.delete_table('sentry_rawevent')
        # Deleting model 'EventProcessingIssue'
        db.delete_table('sentry_eventprocessingissue')
models = {
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.apitoken': {
'Meta': {'object_name': 'ApiToken'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True'}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'token': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authenticator': {
'Meta': {'unique_together': "(('user', 'type'),)", 'object_name': 'Authenticator', 'db_table': "'auth_authenticator'"},
'config': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2017, 3, 2, 0, 0)', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'upstream_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'sentry.broadcastseen': {
'Meta': {'unique_together': "(('broadcast', 'user'),)", 'object_name': 'BroadcastSeen'},
'broadcast': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Broadcast']"}),
'date_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.commit': {
'Meta': {'unique_together': "(('repository_id', 'key'),)", 'object_name': 'Commit', 'index_together': "(('repository_id', 'date_added'),)"},
'author': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.CommitAuthor']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.commitauthor': {
'Meta': {'unique_together': "(('organization_id', 'email'),)", 'object_name': 'CommitAuthor'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.commitfilechange': {
'Meta': {'unique_together': "(('commit', 'filename'),)", 'object_name': 'CommitFileChange'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '1'})
},
'sentry.counter': {
'Meta': {'object_name': 'Counter', 'db_table': "'sentry_projectcounter'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.dsymbundle': {
'Meta': {'object_name': 'DSymBundle'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymObject']"}),
'sdk': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymSDK']"})
},
'sentry.dsymobject': {
'Meta': {'object_name': 'DSymObject'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_path': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'db_index': 'True'}),
'vmaddr': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'vmsize': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'})
},
'sentry.dsymsdk': {
'Meta': {'object_name': 'DSymSDK', 'index_together': "[('version_major', 'version_minor', 'version_patchlevel', 'version_build')]"},
'dsym_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'sdk_name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'version_build': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'version_major': ('django.db.models.fields.IntegerField', [], {}),
'version_minor': ('django.db.models.fields.IntegerField', [], {}),
'version_patchlevel': ('django.db.models.fields.IntegerField', [], {})
},
'sentry.dsymsymbol': {
'Meta': {'unique_together': "[('object', 'address')]", 'object_name': 'DSymSymbol'},
'address': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymObject']"}),
'symbol': ('django.db.models.fields.TextField', [], {})
},
'sentry.environment': {
'Meta': {'unique_together': "(('project_id', 'name'),)", 'object_name': 'Environment'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'through': "orm['sentry.EnvironmentProject']", 'symmetrical': 'False'})
},
'sentry.environmentproject': {
'Meta': {'unique_together': "(('project', 'environment'),)", 'object_name': 'EnvironmentProject'},
'environment': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Environment']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.event': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group_id', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventprocessingissue': {
'Meta': {'unique_together': "(('raw_event', 'processing_issue'),)", 'object_name': 'EventProcessingIssue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'processing_issue': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ProcessingIssue']"}),
'raw_event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.RawEvent']"})
},
'sentry.eventtag': {
'Meta': {'unique_together': "(('event_id', 'key_id', 'value_id'),)", 'object_name': 'EventTag', 'index_together': "(('project_id', 'key_id', 'value_id'), ('group_id', 'key_id', 'value_id'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {'unique_together': "(('project', 'ident'), ('project', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'legacy_blob'", 'null': 'True', 'to': "orm['sentry.FileBlob']"}),
'blobs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.FileBlob']", 'through': "orm['sentry.FileBlobIndex']", 'symmetrical': 'False'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'headers': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.fileblob': {
'Meta': {'object_name': 'FileBlob'},
'checksum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'})
},
'sentry.fileblobindex': {
'Meta': {'unique_together': "(('file', 'blob', 'offset'),)", 'object_name': 'FileBlobIndex'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.globaldsymfile': {
'Meta': {'object_name': 'GlobalDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'short_id'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'short_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupcommitresolution': {
'Meta': {'unique_together': "(('group_id', 'commit_id'),)", 'object_name': 'GroupCommitResolution'},
'commit_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.groupemailthread': {
'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Project']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {'object_name': 'GroupRedirect'},
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'unique': 'True'})
},
'sentry.grouprelease': {
'Meta': {'unique_together': "(('group_id', 'release_id', 'environment'),)", 'object_name': 'GroupRelease'},
'environment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.groupresolution': {
'Meta': {'object_name': 'GroupResolution'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.groupsnooze': {
'Meta': {'object_name': 'GroupSnooze'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'until': ('django.db.models.fields.DateTimeField', [], {})
},
'sentry.groupsubscription': {
'Meta': {'unique_together': "(('group', 'user'),)", 'object_name': 'GroupSubscription'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Project']"}),
'reason': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('group', 'key', 'value'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'", 'index_together': "(('project', 'key', 'value', 'last_seen'),)"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationavatar': {
'Meta': {'object_name': 'OrganizationAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.Organization']"})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationonboardingtask': {
'Meta': {'unique_together': "(('organization', 'task'),)", 'object_name': 'OrganizationOnboardingTask'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.organizationoption': {
'Meta': {'unique_together': "(('organization', 'key'),)", 'object_name': 'OrganizationOption', 'db_table': "'sentry_organizationoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.processingissue': {
'Meta': {'unique_together': "(('project', 'checksum', 'type'),)", 'object_name': 'ProcessingIssue'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0', 'null': 'True'}),
'forced_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.projectbookmark': {
'Meta': {'unique_together': "(('project_id', 'user'),)", 'object_name': 'ProjectBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.projectdsymfile': {
'Meta': {'unique_together': "(('project', 'uuid'),)", 'object_name': 'ProjectDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.projectplatform': {
'Meta': {'unique_together': "(('project_id', 'platform'),)", 'object_name': 'ProjectPlatform'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.rawevent': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'RawEvent'},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.release': {
'Meta': {'unique_together': "(('organization', 'version'),)", 'object_name': 'Release'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'blank': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'releases'", 'symmetrical': 'False', 'through': "orm['sentry.ReleaseProject']", 'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.releasecommit': {
'Meta': {'unique_together': "(('release', 'commit'), ('release', 'order'))", 'object_name': 'ReleaseCommit'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseenvironment': {
'Meta': {'unique_together': "(('project_id', 'release_id', 'environment_id'),)", 'object_name': 'ReleaseEnvironment', 'db_table': "'sentry_environmentrelease'"},
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'},
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseproject': {
'Meta': {'unique_together': "(('project', 'release'),)", 'object_name': 'ReleaseProject', 'db_table': "'sentry_release_project'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.repository': {
'Meta': {'unique_together': "(('organization_id', 'name'), ('organization_id', 'provider', 'external_id'))", 'object_name': 'Repository'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'external_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'})
},
'sentry.reprocessingreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'ReprocessingReport'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.savedsearch': {
'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'SavedSearchUserDefault', 'db_table': "'sentry_savedsearch_userdefault'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'savedsearch': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.SavedSearch']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_password_expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_column': "'first_name'", 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'session_nonce': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useravatar': {
'Meta': {'object_name': 'UserAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.User']"})
},
'sentry.useremail': {
'Meta': {'unique_together': "(('user', 'email'),)", 'object_name': 'UserEmail'},
'date_hash_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'emails'", 'to': "orm['sentry.User']"}),
'validation_hash': ('django.db.models.fields.CharField', [], {'default': "u'NnUPPiyxZyr9NQE2MMofvocS0qcYoTeN'", 'max_length': '32'})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'UserReport', 'index_together': "(('project', 'event_id'), ('project', 'date_added'))"},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
}
}
complete_apps = ['sentry'] | 86.951515 | 217 | 0.590089 |
9abeb3c24676a94c01f9189a752153edc4b139a6 | 283 | py | Python | tests/core/test_random_forest.py | m-novikov/hytra | 0dc28deaa2571fa8bea63ca178f0e53cc1cd7508 | [
"MIT"
] | null | null | null | tests/core/test_random_forest.py | m-novikov/hytra | 0dc28deaa2571fa8bea63ca178f0e53cc1cd7508 | [
"MIT"
] | null | null | null | tests/core/test_random_forest.py | m-novikov/hytra | 0dc28deaa2571fa8bea63ca178f0e53cc1cd7508 | [
"MIT"
] | null | null | null | from hytra.core.probabilitygenerator import RandomForestClassifier
def test_rf():
    """Smoke test: loading the count-classification forest from the bundled
    test .ilp project yields one random forest and four selected features."""
    classifier = RandomForestClassifier(
        "/CountClassification", "tests/mergerResolvingTestDataset/tracking.ilp"
    )
    assert len(classifier._randomForests) == 1
    assert len(classifier.selectedFeatures) == 4
| 28.3 | 79 | 0.742049 |
b3ccfbf3a2af2e2dcf1864046055a1f4972a2ca6 | 125 | py | Python | weapon.py | alykane20/Robot_Dino_Battle | 5f78d360ca1dda10cdb91ec1036573442cd95856 | [
"MIT"
] | null | null | null | weapon.py | alykane20/Robot_Dino_Battle | 5f78d360ca1dda10cdb91ec1036573442cd95856 | [
"MIT"
] | null | null | null | weapon.py | alykane20/Robot_Dino_Battle | 5f78d360ca1dda10cdb91ec1036573442cd95856 | [
"MIT"
] | null | null | null | class Weapon:
    def __init__(self, name, attack_power):
        """Create a weapon.

        Args:
            name: display name of the weapon
            attack_power: damage value this weapon contributes in battle
        """
        self.name = name
        self.attack_power = attack_power
| 20.833333 | 43 | 0.656 |
1b91f336c959812f9d233a68bb80e0c91abbefa1 | 1,121 | py | Python | py/fiberassign/test/fiberassign_test_suite.py | rstaten/fiberassign | d336e7df37380dcb09054acf7a89491701dcec73 | [
"BSD-3-Clause"
] | 7 | 2016-04-05T20:43:34.000Z | 2021-10-19T06:03:00.000Z | py/fiberassign/test/fiberassign_test_suite.py | rstaten/fiberassign | d336e7df37380dcb09054acf7a89491701dcec73 | [
"BSD-3-Clause"
] | 323 | 2015-07-29T15:19:52.000Z | 2022-03-29T00:15:57.000Z | py/fiberassign/test/fiberassign_test_suite.py | rstaten/fiberassign | d336e7df37380dcb09054acf7a89491701dcec73 | [
"BSD-3-Clause"
] | 10 | 2015-04-10T14:16:15.000Z | 2021-05-10T23:14:03.000Z | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
fiberassign.test.fiberassign_test_suite
===================================================
Used to initialize the unit test framework via ``python setup.py test``.
"""
from __future__ import absolute_import, division, print_function
import sys
import unittest
def fiberassign_test_suite():
    """Returns unittest.TestSuite of desiutil tests.
    This is factored out separately from runtests() so that it can be used by
    ``python setup.py test``.
    """
    import os.path
    # The package directory is the parent of this test sub-package.
    package_dir = os.path.dirname(os.path.dirname(__file__))
    return unittest.defaultTestLoader.discover(
        package_dir, top_level_dir=os.path.dirname(package_dir))
def runtests():
    """Run all tests in fiberassign.test.test_* and exit non-zero on failure."""
    # Discover every TestCase class under the package's test directory.
    suite = fiberassign_test_suite()
    outcome = unittest.TextTestRunner(verbosity=2).run(suite)
    # Force a non-zero process return value when any test failed.
    if not outcome.wasSuccessful():
        sys.exit(outcome)
# Allow executing the suite directly: ``python fiberassign_test_suite.py``.
if __name__ == "__main__":
    runtests()
| 28.025 | 85 | 0.685103 |
354032c04d8c4e4e17549e52aceb15e2962c4ddf | 17,026 | py | Python | scibert/helper.py | se4en/scibert | c74ed633699727ce84851ac2787b009b24c84586 | [
"Apache-2.0"
] | null | null | null | scibert/helper.py | se4en/scibert | c74ed633699727ce84851ac2787b009b24c84586 | [
"Apache-2.0"
] | null | null | null | scibert/helper.py | se4en/scibert | c74ed633699727ce84851ac2787b009b24c84586 | [
"Apache-2.0"
] | null | null | null | """ Module including helper functions for feature extraction and other stuff including metrics, jsonhandler, etc"""
import json
from collections import Counter
import logging
import re
from typing import List, Dict
import numpy as np
import string
from scibert.resources.lexicons import ALL_CONCEPT_LEXICONS, ALL_ACTION_LEXICONS
# Module-wide logger shared by the classifier helper functions.
logger = logging.getLogger('classifier')
# Matches common in-text citation styles so they can be located/stripped:
# "(Author Name, 2019a; Other, 2020)", "[1, 12, 3]", "[1,-3]",
# "(Smith, 2019)", "Smith et al. (2019b)" and "Smith and Jones (2019)".
regex_find_citation = re.compile(r"\(\s?(([A-Za-z\-]+\s)+([A-Za-z\-]+\.?)?,?\s\d{2,4}[a-c]?(;\s)?)+\s?\)|"
                                 r"\[(\d{1,3},\s?)+\d{1,3}\]|"
                                 r"\[[\d,-]+\]|(\([A-Z][a-z]+, \d+[a-c]?\))|"
                                 r"([A-Z][a-z]+ (et al\.)? \(\d+[a-c]?\))|"
                                 r"[A-Z][a-z]+ and [A-Z][a-z]+ \(\d+[a-c]?\)]")
def print_top_words(model, feature_names, n_top_words):
    """Print the n_top_words highest-weighted words of every topic in an
    LDA topic model, one line per topic, followed by a blank line."""
    for topic_idx, topic in enumerate(model.components_):
        # argsort is ascending; the reversed tail slice gives the top weights.
        top_indices = topic.argsort()[:-n_top_words - 1:-1]
        top_terms = " ".join(feature_names[i] for i in top_indices)
        print("Topic #%d: %s" % (topic_idx, top_terms))
    print()
def get_values_from_list(inplst, key, is_class=True):
    """Collect the value stored under *key* from every element of *inplst*.

    Args:
        inplst: list of objects (class instances or dictionaries)
        key: attribute name / dictionary key to extract
        is_class: True when elements are class instances (attribute access),
            False when they are dictionaries (item access)
    """
    if is_class:
        return [getattr(elem, key) for elem in inplst]
    return [elem[key] for elem in inplst]
def partial_fmeasure_multilabel(y_true,
                                y_pred,
                                pos_labels_index: list,
                                neg_labels_index: list):
    """Calculate F-measure when only partial annotations per class exist.

    Only instances annotated for a class (positively OR negatively) count
    toward that class's metrics; unannotated instances are ignored.  The
    inputs are one-hot encodings of the labels *plus* their negative
    counterparts, e.g. for classes ['cat', 'dog'] the columns are
    ['cat', 'dog', 'not-cat', 'not-dog'], shape (num_instances, 2*num_classes).

    Args:
        y_true: 2D array of gold labels, shape (num_instances, 2*num_classes)
        y_pred: 2D array of predictions, same shape as y_true
        pos_labels_index: column indices of the positive labels, len num_classes
        neg_labels_index: column indices of the matching negative labels

    Returns:
        (precisions, recalls, f1s, supports) -- one entry per class, in the
        order of pos_labels_index.  support is the number of instances
        annotated (positively or negatively) for that class.
    """
    if not isinstance(y_true, np.ndarray):
        y_true = np.array(y_true)
    if not isinstance(y_pred, np.ndarray):
        y_pred = np.array(y_pred)
    precisions = []
    recalls = []
    f1s = []
    supports = []
    for pos_class, neg_class in zip(pos_labels_index, neg_labels_index):
        predictions_pos = y_pred[:, pos_class]
        gold_pos = y_true[:, pos_class]
        # Fix: gold negatives must come from the *negative* label column.
        # Previously this read the positive column again, which double-counted
        # positives in the support and made the negative-annotation count wrong.
        gold_neg = y_true[:, neg_class]
        # ``float`` replaces the ``np.float`` alias removed in numpy >= 1.24.
        # True Positives: correct positively labeled predictions.
        _true_positives = ((predictions_pos == gold_pos).astype(float) * predictions_pos).sum()
        # False Negatives: gold positives that were not predicted positive.
        _false_negatives = ((predictions_pos != gold_pos).astype(float) * gold_pos).sum()
        # False Positives: predicted positives that are not gold positives.
        _false_positives = ((predictions_pos != gold_pos).astype(float) * predictions_pos).sum()
        # (True negatives are not needed for precision/recall/F1.)
        precision = float(_true_positives) / float(_true_positives + _false_positives + 1e-13)
        recall = float(_true_positives) / float(_true_positives + _false_negatives + 1e-13)
        f1_measure = 2. * ((precision * recall) / (precision + recall + 1e-13))
        # Number of instances annotated for this class at all.
        support = (gold_pos + gold_neg).sum()
        precisions.append(precision)
        recalls.append(recall)
        f1s.append(f1_measure)
        supports.append(support)
    return precisions, recalls, f1s, supports
def format_classification_report(precisions, recalls, f1s, supports, labels, digits=4):
    """Render an sklearn-style plain-text classification report.

    Args:
        precisions, recalls, f1s, supports: per-class metric lists, aligned
            with *labels*
        labels: per-class label names (coerced to str for display)
        digits: number of decimal places for the metric columns

    Returns:
        The formatted report string, ending with a support-weighted
        'avg / total' line.
    """
    last_line_heading = 'avg / total'
    # Coerce labels to strings so both width measurement and '{:s}' row
    # formatting work.  (The original guarded this with ``if labels is None``
    # -- inverted -- which would have crashed on the only case it targeted.)
    target_names = [u'%s' % l for l in labels]
    name_width = max(len(cn) for cn in target_names)
    width = max(name_width, len(last_line_heading), digits)
    headers = ["precision", "recall", "f1-score", "support"]
    head_fmt = u'{:>{width}s} ' + u' {:>9}' * len(headers)
    report = head_fmt.format(u'', *headers, width=width)
    report += u'\n\n'
    row_fmt = u'{:>{width}s} ' + u' {:>9.{digits}f}' * 3 + u' {:>9}\n'
    for row in zip(target_names, precisions, recalls, f1s, supports):
        # Use the computed column width (was hard-coded to 5, which misaligned
        # the per-class rows against the header and average line).
        report += row_fmt.format(*row, width=width, digits=digits)
    report += u'\n'
    # Support-weighted averages across all classes.
    report += row_fmt.format(last_line_heading,
                             np.average(precisions, weights=supports),
                             np.average(recalls, weights=supports),
                             np.average(f1s, weights=supports),
                             np.sum(supports),
                             width=width, digits=digits)
    return report
class JsonFloatEncoder(json.JSONEncoder):
    """JSON encoder that understands numpy scalar and array types.

    numpy numbers are not natively json serializable; use
    ``json.dumps(obj, cls=JsonFloatEncoder)`` to convert them on the fly:
    integers/floats become plain Python numbers and arrays become lists.
    """
    def default(self, obj):
        # The isinstance checks are mutually exclusive, so order is free.
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        # Anything else: defer to the base encoder (raises TypeError).
        return super(JsonFloatEncoder, self).default(obj)
# Heuristic thresholds used by is_sentence() below.
MIN_TOKEN_COUNT = 8
MAX_TOKEN_COUNT = 200
MIN_WORD_TOKENS_RATIO = 0.40
MIN_LETTER_CHAR_RATIO = 0.50
MIN_PRINTABLE_CHAR_RATIO = 0.95
# util function adopted from github.com/allenai/relex
def is_sentence(sentence: str) -> bool:
    """Heuristically decide whether *sentence* looks like real English prose.

    Non-strings, fragments/run-ons outside the accepted token range, and
    strings dominated by non-word tokens, non-letter characters, or
    unprintable characters are all rejected.
    """
    if not isinstance(sentence, str):
        return False
    char_count = len(sentence)
    words = sentence.split(' ')
    word_count = len(words)
    # Reject anything outside the accepted token-count window.
    if not (MIN_TOKEN_COUNT <= word_count <= MAX_TOKEN_COUNT):
        return False
    # Most tokens should be purely alphabetic words.
    alpha_tokens = sum(w.isalpha() for w in words)
    if alpha_tokens / word_count < MIN_WORD_TOKENS_RATIO:
        return False
    # Most characters should be ASCII letters, not digits or symbols.
    letters = sum(c in string.ascii_letters for c in sentence)
    if letters / char_count < MIN_LETTER_CHAR_RATIO:
        return False
    # Nearly every character should be printable.
    printable = sum(c in string.printable for c in sentence)
    return printable / char_count >= MIN_PRINTABLE_CHAR_RATIO
def _is_in_lexicon(lexicon, sentence, si, ArgType=None, required_pos=None):
for cur_phrase in lexicon:
phrase = cur_phrase.split(' ')
# Can't match phrases that would extend beyond this sentence
if len(phrase) + si > len(sentence):
continue
found = True
found_arg = False
for i, lemma in enumerate(phrase):
# Check the word form too, just to prevent weird lemmatization
# issues (usually for adjectives)
if not (sentence[si+i]['lemma'] == lemma or sentence[si+i]['word'] == lemma) \
or not (required_pos is None or sentence[si+i]['pos'][0] == required_pos):
found = False
break
if ArgType is not None and sentence[si+i]['ArgType'] == ArgType:
found_arg = True
if found and (ArgType is None or found_arg):
#if len(phrase) > 1:
# print '~~~~~~Matched %s' % (' '.join(phrase))
return True, len(phrase)
return False, 0
def find(pattern: List[str], sentence: List[dict], must_have_subj_value: bool, feature=None) -> int:
    """Return the first sentence position where *pattern* matches, or -1.

    Each pattern element is matched against one (or, for multi-word lexicon
    phrases, several) consecutive sentence tokens:

    * ``@Label``        -- token(s) must appear in the concept/action lexicon
                           named ``Label``; action lexicons additionally
                           require a verb POS tag.  An unknown label aborts
                           the whole search with -1.
    * ``SELFCITATION``  -- token's word must be exactly ``SELFCITATION``
    * ``CITATION``      -- token's word must *end with* ``CITATION``
    * ``CREF``          -- token must be a ``CD``-tagged ``CREF`` placeholder
    * ``#X``            -- token's coarse POS tag must be ``X`` (``#J`` also
                           accepts ``VBN``, i.e. verbal adjectives)
    * anything else     -- token's lemma must equal the pattern element

    Args:
        pattern: list of pattern elements as described above
        sentence: token dicts with 'lemma', 'word', 'pos' and 'ArgType' keys
        must_have_subj_value: when not None, the matched span must (True) or
            must not (False) contain a token whose ArgType is 'subj'
        feature: unused; kept for interface compatibility with callers

    Returns:
        Index of the first matching start position, or -1 when none exists.
    """
    for sent_pos in range(0, (len(sentence) - len(pattern)) + 1):
        match = True
        is_subj = False
        # Extra token offset accumulated when a multi-word lexicon phrase
        # consumes more than one sentence token.
        k = 0
        for pat_pos in range(0, len(pattern)):
            # A multi-word match may have pushed us past the sentence end.
            if sent_pos + pat_pos + k >= len(sentence):
                match = False
                break
            if sentence[sent_pos + pat_pos + k]['ArgType'] == 'subj':
                is_subj = True
            cur_pat_i = pattern[pat_pos]
            if cur_pat_i[0] == '@':
                # Lexicon lookup: concept lexicons have no POS constraint,
                # action lexicons require verbs.
                label = cur_pat_i[1:]
                lexicon = None
                required_pos = None
                if label in ALL_CONCEPT_LEXICONS:
                    lexicon = ALL_CONCEPT_LEXICONS[label]
                elif label in ALL_ACTION_LEXICONS:
                    lexicon = ALL_ACTION_LEXICONS[label]
                    required_pos = 'V'
                if lexicon is None:
                    # Unknown lexicon reference: the pattern is unusable.
                    return -1
                is_match, matched_phrase_length = _is_in_lexicon(
                    lexicon, sentence, sent_pos + pat_pos + k,
                    required_pos=required_pos)
                if not is_match:
                    match = False
                    break
                # Multi-word expressions consume extra tokens; the loop
                # already advances one token, so add length - 1.
                k += (matched_phrase_length - 1)
            elif cur_pat_i == 'SELFCITATION':
                if sentence[sent_pos + pat_pos + k]['word'] != cur_pat_i:
                    match = False
                    break
            elif cur_pat_i == 'CITATION':
                if not sentence[sent_pos + pat_pos + k]['word'].endswith(cur_pat_i):
                    match = False
                    break
            # Numeric cross-reference placeholder.
            elif cur_pat_i == 'CREF':
                if sentence[sent_pos + pat_pos + k]['pos'] != 'CD' \
                        or sentence[sent_pos + pat_pos + k]['word'] != 'CREF':
                    match = False
                    break
            elif cur_pat_i[0] == '#':
                # Compare only the coarsest POS level (N/V/J); allow VBN where
                # an adjective (#J) is requested to work around tagger quirks
                # with verbal adjectives.
                if sentence[sent_pos + pat_pos + k]['pos'][0] != cur_pat_i[1] \
                        and not (cur_pat_i[1] == 'J'
                                 and sentence[sent_pos + pat_pos + k]['pos'] == 'VBN'):
                    match = False
                    break
            else:
                # Literal lemma match.
                if sentence[sent_pos + pat_pos + k]['lemma'] != cur_pat_i:
                    match = False
                    break
        # Enforce the subject-containment requirement, if any.
        if match and (must_have_subj_value is not None) \
                and (is_subj is not must_have_subj_value):
            continue
        if match:
            return sent_pos
    return -1
| 43.65641 | 157 | 0.560496 |
8768a445ac1144dc2b739b964d8eade14f0e414b | 662 | py | Python | Binary Tree/findMaximumBST.py | hoaiphun96/Leet-Code-Problems | bc4651fa9c8eae261bb280bb25b7537722d3b1f9 | [
"MIT",
"Unlicense"
] | 8 | 2019-01-17T23:45:41.000Z | 2021-07-08T02:06:16.000Z | Binary Tree/findMaximumBST.py | hoaiphun96/Leet-Code-Problems | bc4651fa9c8eae261bb280bb25b7537722d3b1f9 | [
"MIT",
"Unlicense"
] | null | null | null | Binary Tree/findMaximumBST.py | hoaiphun96/Leet-Code-Problems | bc4651fa9c8eae261bb280bb25b7537722d3b1f9 | [
"MIT",
"Unlicense"
] | 9 | 2018-04-27T04:50:06.000Z | 2022-03-03T14:17:13.000Z | """
Given a
Binary Search Tree
, return the node with the maximum data.
Example:
4
/ \
2 8
/ \
5 10
Output ==> 10 (TreeNode)
Note: Each node of BinaryTree is a TreeNode.
Check out the Use Me section to find out its structure.
"""
class BinaryTree:
    """Minimal binary-search-tree wrapper exposing a maximum-node lookup."""

    def __init__(self, root_node=None):
        # Check out Use Me section to find out Node Structure
        self.root = root_node

    def find_max(self, root):
        """Return the node holding the maximum value in the BST rooted at *root*.

        In a BST the maximum always lives at the right-most node, so we simply
        follow ``right_child`` links.  Returns None for an empty (sub)tree.
        """
        if not root:
            return None
        curr = root
        # Descend right until there is no right child left.
        # (``is not None`` replaces the non-idiomatic ``!= None``.)
        while curr.right_child is not None:
            curr = curr.right_child
        return curr
45504b112822e9f169688bf2013fc404b8962870 | 932 | py | Python | froide/foirequest/urls/make_request_urls.py | OpendataCH/froide | 8136bac0d8caa56f9cfc7ba15480be987280e55d | [
"MIT"
] | null | null | null | froide/foirequest/urls/make_request_urls.py | OpendataCH/froide | 8136bac0d8caa56f9cfc7ba15480be987280e55d | [
"MIT"
] | null | null | null | froide/foirequest/urls/make_request_urls.py | OpendataCH/froide | 8136bac0d8caa56f9cfc7ba15480be987280e55d | [
"MIT"
] | null | null | null | from django.urls import path, re_path
from django.utils.translation import pgettext_lazy
from ..views import DraftRequestView, MakeRequestView, RequestSentView
urlpatterns = [
    # Translators: part in /request/to/public-body-slug URL
    # Bare /request/ URL: start a request without a preselected public body.
    path("", MakeRequestView.as_view(), name="foirequest-make_request"),
    # /request/to/<id>+<id>.../ -- one or more public bodies by numeric id.
    re_path(
        pgettext_lazy("url part", r"^to/(?P<publicbody_ids>\d+(?:\+\d+)*)/$"),
        MakeRequestView.as_view(),
        name="foirequest-make_request",
    ),
    # /request/to/<slug>/ -- a single public body addressed by slug.
    path(
        pgettext_lazy("url part", "to/<slug:publicbody_slug>/"),
        MakeRequestView.as_view(),
        name="foirequest-make_request",
    ),
    # Resume editing a previously saved request draft.
    path(
        pgettext_lazy("url part", "draft/<int:pk>/"),
        DraftRequestView.as_view(),
        name="foirequest-make_draftrequest",
    ),
    # Confirmation page shown after a request has been sent.
    path(
        pgettext_lazy("url part", "sent/"),
        RequestSentView.as_view(),
        name="foirequest-request_sent",
    ),
]
| 31.066667 | 78 | 0.638412 |
6b7fc4d23eeba155b97fd4c3cdc2dbc4173cd283 | 15,400 | py | Python | docs/sphinxext/sphinxarg/ext.py | aarontuor/cpp | 97f975b9a3b816bc59b912237daab7118667e01e | [
"MIT"
] | 117 | 2017-12-19T08:14:05.000Z | 2022-03-28T11:22:14.000Z | docs/sphinxext/sphinxarg/ext.py | aarontuor/cpp | 97f975b9a3b816bc59b912237daab7118667e01e | [
"MIT"
] | 7 | 2018-04-12T06:50:18.000Z | 2021-06-30T21:05:36.000Z | docs/sphinxext/sphinxarg/ext.py | aarontuor/cpp | 97f975b9a3b816bc59b912237daab7118667e01e | [
"MIT"
] | 44 | 2018-01-11T06:59:28.000Z | 2022-03-23T10:55:50.000Z | from argparse import ArgumentParser
import os
from docutils import nodes
from docutils.statemachine import StringList
from docutils.parsers.rst.directives import flag, unchanged
from sphinx.util.compat import Directive
from sphinx.util.nodes import nested_parse_with_titles
from sphinxarg.parser import parse_parser, parser_navigate
def map_nested_definitions(nested_content):
    """Collect definition-list entries nested inside the directive body.

    Returns a dict mapping each term to its docutils definition node; the
    node is tagged with a 'classifier' attribute ('@replace', '@before' or
    '@after', defaulting to '@after') that tells apply_definition() how to
    merge the hand-written text with the auto-generated help.
    """
    if nested_content is None:
        raise Exception('Nested content should be iterable, not null')
    # build definition dictionary
    definitions = {}
    for item in nested_content:
        # Only definition lists contribute entries; other nodes are ignored.
        if not isinstance(item, nodes.definition_list):
            continue
        for subitem in item:
            if not isinstance(subitem, nodes.definition_list_item):
                continue
            if not len(subitem.children) > 0:
                continue
            # Default merge strategy when no classifier is given.
            classifier = '@after'
            idx = subitem.first_child_matching_class(nodes.classifier)
            if idx is not None:
                ci = subitem[idx]
                if len(ci.children) > 0:
                    classifier = ci.children[0].astext()
            if classifier is not None and classifier not in (
                    '@replace', '@before', '@after'):
                raise Exception('Unknown classifier: %s' % classifier)
            idx = subitem.first_child_matching_class(nodes.term)
            if idx is not None:
                ch = subitem[idx]
                if len(ch.children) > 0:
                    term = ch.children[0].astext()
                    idx = subitem.first_child_matching_class(nodes.definition)
                    if idx is not None:
                        def_node = subitem[idx]
                        # Remember the merge strategy on the node itself so
                        # apply_definition() can read it back later.
                        def_node.attributes['classifier'] = classifier
                        definitions[term] = def_node
    return definitions
def print_arg_list(data, nested_content):
    """Build a docutils option list for the parser's positional arguments.

    Help text may be replaced/extended by nested definition-list entries;
    arguments without any help are marked 'Undocumented'.  Returns None when
    *data* has no positional arguments to render.
    """
    definitions = map_nested_definitions(nested_content)
    items = []
    for arg in data.get('args', []):
        name = arg['name']
        description = [nodes.paragraph(text=arg['help'])] if arg['help'] else []
        description = apply_definition(definitions, description, name)
        if not description:
            description.append(nodes.paragraph(text='Undocumented'))
        if 'choices' in arg:
            choices = ', '.join(str(c) for c in arg['choices'])
            description.append(nodes.paragraph(text='Possible choices: %s' % choices))
        items.append(nodes.option_list_item(
            '',
            nodes.option_group('', nodes.option_string(text=name)),
            nodes.description('', *description)))
    return nodes.option_list('', *items) if items else None
def print_opt_list(data, nested_content):
    """Build a docutils option list for the parser's optional arguments.

    Each option shows all its aliases (with ``=default`` appended when a real
    default exists), its help text -- possibly overridden or extended by a
    nested definition-list entry -- and its choices.  Returns None when the
    parser defines no options.
    """
    definitions = map_nested_definitions(nested_content)
    items = []
    if 'options' in data:
        for opt in data['options']:
            names = []
            my_def = [nodes.paragraph(text=opt['help'])] if opt['help'] else []
            for name in opt['name']:
                option_declaration = [nodes.option_string(text=name)]
                # Suppressed defaults (argparse.SUPPRESS) are not displayed.
                if opt['default'] is not None \
                        and opt['default'] != '==SUPPRESS==':
                    option_declaration += nodes.option_argument(
                        '', text='=' + str(opt['default']))
                names.append(nodes.option('', *option_declaration))
                my_def = apply_definition(definitions, my_def, name)
            if len(my_def) == 0:
                my_def.append(nodes.paragraph(text='Undocumented'))
            if 'choices' in opt:
                my_def.append(nodes.paragraph(
                    text=('Possible choices: %s' % ', '.join([str(c) for c in opt['choices']]))))
            items.append(
                nodes.option_list_item(
                    '', nodes.option_group('', *names),
                    nodes.description('', *my_def)))
    return nodes.option_list('', *items) if items else None
def print_command_args_and_opts(arg_list, opt_list, sub_list=None):
    """Assemble a definition list with whichever sections are present:
    positional arguments, options, and (optionally) sub-commands."""
    sections = []
    if arg_list:
        sections.append(('Positional arguments:', arg_list))
    if opt_list:
        sections.append(('Options:', opt_list))
    if sub_list and len(sub_list):
        sections.append(('Sub-commands:', sub_list))
    items = [
        nodes.definition_list_item(
            '', nodes.term(text=title), nodes.definition('', body))
        for title, body in sections
    ]
    return nodes.definition_list('', *items)
def apply_definition(definitions, my_def, name):
    """Merge a hand-written definition for *name* into the generated help.

    The definition node's 'classifier' attribute selects the merge strategy:
    '@replace' discards the generated help, '@before' prepends the
    hand-written text and '@after' appends it.  When *name* has no
    hand-written definition, *my_def* is returned unchanged.
    """
    if name not in definitions:
        return my_def
    definition = definitions[name]
    classifier = definition['classifier']
    if classifier == '@replace':
        return definition.children
    if classifier == '@after':
        return my_def + definition.children
    if classifier == '@before':
        return definition.children + my_def
    raise Exception('Unknown classifier: %s' % classifier)
def print_subcommand_list(data, nested_content):
    """Build a docutils definition list describing each sub-command.

    For every child parser this renders its help, optional description, its
    usage string, and -- recursively -- its own arguments, options and
    sub-commands.
    """
    definitions = map_nested_definitions(nested_content)
    items = []
    if 'children' in data:
        for child in data['children']:
            my_def = [nodes.paragraph(
                text=child['help'])] if child['help'] else []
            name = child['name']
            my_def = apply_definition(definitions, my_def, name)
            if len(my_def) == 0:
                my_def.append(nodes.paragraph(text='Undocumented'))
            if 'description' in child:
                my_def.append(nodes.paragraph(text=child['description']))
            my_def.append(nodes.literal_block(text=child['usage']))
            # Recurse: sub-commands may themselves define sub-commands.
            my_def.append(print_command_args_and_opts(
                print_arg_list(child, nested_content),
                print_opt_list(child, nested_content),
                print_subcommand_list(child, nested_content)
            ))
            items.append(
                nodes.definition_list_item(
                    '',
                    nodes.term('', '', nodes.strong(text=name)),
                    nodes.definition('', *my_def)
                )
            )
    return nodes.definition_list('', *items)
class ArgParseDirective(Directive):
    """Sphinx directive ``.. argparse::`` that documents an ArgumentParser.

    The parser is located via the :module:/:func: options, a dotted :ref:,
    or by passing a fresh parser to the referenced callable (:passparser:).
    Output is either regular document nodes or, when :manpage: is set, a
    man-page-shaped structure.
    """
    has_content = True
    option_spec = dict(module=unchanged, func=unchanged, ref=unchanged,
                       prog=unchanged, path=unchanged, nodefault=flag,
                       manpage=unchanged, nosubcommands=unchanged, passparser=flag)
    def _construct_manpage_specific_structure(self, parser_info):
        """
        Construct a typical man page consisting of the following elements:
            NAME (automatically generated, out of our control)
            SYNOPSIS
            DESCRIPTION
            OPTIONS
            FILES
            SEE ALSO
            BUGS
        """
        # SYNOPSIS section
        synopsis_section = nodes.section(
            '',
            nodes.title(text='Synopsis'),
            nodes.literal_block(text=parser_info["bare_usage"]),
            ids=['synopsis-section'])
        # DESCRIPTION section
        description_section = nodes.section(
            '',
            nodes.title(text='Description'),
            nodes.paragraph(text=parser_info.get(
                'description', parser_info.get(
                    'help', "undocumented").capitalize())),
            ids=['description-section'])
        nested_parse_with_titles(
            self.state, self.content, description_section)
        if parser_info.get('epilog'):
            # TODO: do whatever sphinx does to understand ReST inside
            # docstrings magically imported from other places. The nested
            # parse method invoked above seem to be able to do this but
            # I haven't found a way to do it for arbitrary text
            description_section += nodes.paragraph(
                text=parser_info['epilog'])
        # OPTIONS section
        options_section = nodes.section(
            '',
            nodes.title(text='Options'),
            ids=['options-section'])
        if 'args' in parser_info:
            options_section += nodes.paragraph()
            options_section += nodes.subtitle(text='Positional arguments:')
            options_section += self._format_positional_arguments(parser_info)
        if 'options' in parser_info:
            options_section += nodes.paragraph()
            options_section += nodes.subtitle(text='Optional arguments:')
            options_section += self._format_optional_arguments(parser_info)
        items = [
            # NOTE: we cannot generate NAME ourselves. It is generated by
            # docutils.writers.manpage
            synopsis_section,
            description_section,
            # TODO: files
            # TODO: see also
            # TODO: bugs
        ]
        if len(options_section.children) > 1:
            items.append(options_section)
        if 'nosubcommands' not in self.options:
            # SUBCOMMANDS section (non-standard)
            subcommands_section = nodes.section(
                '',
                nodes.title(text='Sub-Commands'),
                ids=['subcommands-section'])
            if 'children' in parser_info:
                subcommands_section += self._format_subcommands(parser_info)
            if len(subcommands_section) > 1:
                items.append(subcommands_section)
        if os.getenv("INCLUDE_DEBUG_SECTION"):
            import json
            # DEBUG section (non-standard)
            debug_section = nodes.section(
                '',
                nodes.title(text="Argparse + Sphinx Debugging"),
                nodes.literal_block(text=json.dumps(parser_info, indent='  ')),
                ids=['debug-section'])
            items.append(debug_section)
        return items
    def _format_positional_arguments(self, parser_info):
        """Render parser_info['args'] as a docutils option list."""
        assert 'args' in parser_info
        items = []
        for arg in parser_info['args']:
            arg_items = []
            if arg['help']:
                arg_items.append(nodes.paragraph(text=arg['help']))
            else:
                arg_items.append(nodes.paragraph(text='Undocumented'))
            if 'choices' in arg:
                arg_items.append(
                    nodes.paragraph(
                        text='Possible choices: ' + ', '.join(arg['choices'])))
            items.append(
                nodes.option_list_item(
                    '',
                    nodes.option_group(
                        '', nodes.option(
                            '', nodes.option_string(text=arg['metavar'])
                        )
                    ),
                    nodes.description('', *arg_items)))
        return nodes.option_list('', *items)
    def _format_optional_arguments(self, parser_info):
        """Render parser_info['options'] (flags and their defaults/choices)
        as a docutils option list."""
        assert 'options' in parser_info
        items = []
        for opt in parser_info['options']:
            names = []
            opt_items = []
            for name in opt['name']:
                option_declaration = [nodes.option_string(text=name)]
                # Suppressed defaults (argparse.SUPPRESS) are not displayed.
                if opt['default'] is not None \
                        and opt['default'] != '==SUPPRESS==':
                    option_declaration += nodes.option_argument(
                        '', text='=' + str(opt['default']))
                names.append(nodes.option('', *option_declaration))
            if opt['help']:
                opt_items.append(nodes.paragraph(text=opt['help']))
            else:
                opt_items.append(nodes.paragraph(text='Undocumented'))
            if 'choices' in opt:
                opt_items.append(
                    nodes.paragraph(
                        text='Possible choices: ' + ', '.join(opt['choices'])))
            items.append(
                nodes.option_list_item(
                    '', nodes.option_group('', *names),
                    nodes.description('', *opt_items)))
        return nodes.option_list('', *items)
    def _format_subcommands(self, parser_info):
        """Render parser_info['children'] (sub-parsers) as a definition list
        keyed by each sub-command's bare usage string."""
        assert 'children' in parser_info
        items = []
        for subcmd in parser_info['children']:
            subcmd_items = []
            if subcmd['help']:
                subcmd_items.append(nodes.paragraph(text=subcmd['help']))
            else:
                subcmd_items.append(nodes.paragraph(text='Undocumented'))
            items.append(
                nodes.definition_list_item(
                    '',
                    nodes.term('', '', nodes.strong(
                        text=subcmd['bare_usage'])),
                    nodes.definition('', *subcmd_items)))
        return nodes.definition_list('', *items)
    def _nested_parse_paragraph(self, text):
        """Parse *text* as ReST and return it wrapped in a paragraph node."""
        content = nodes.paragraph()
        self.state.nested_parse(StringList(text.split("\n")), 0, content)
        return content
    def run(self):
        """Directive entry point: resolve the parser, convert it to a dict
        via parse_parser(), and emit the corresponding document nodes."""
        # Locate the parser object: explicit module/func pair, or a dotted ref.
        if 'module' in self.options and 'func' in self.options:
            module_name = self.options['module']
            attr_name = self.options['func']
        elif 'ref' in self.options:
            _parts = self.options['ref'].split('.')
            module_name = '.'.join(_parts[0:-1])
            attr_name = _parts[-1]
        else:
            raise self.error(
                ':module: and :func: should be specified, or :ref:')
        mod = __import__(module_name, globals(), locals(), [attr_name])
        if not hasattr(mod, attr_name):
            raise self.error((
                'Module "%s" has no attribute "%s"\n'
                'Incorrect argparse :module: or :func: values?'
            ) % (module_name, attr_name))
        func = getattr(mod, attr_name)
        # The attribute may be a ready parser, a function that accepts a
        # parser (:passparser:), or a factory returning one.
        if isinstance(func, ArgumentParser):
            parser = func
        elif 'passparser' in self.options:
            parser = ArgumentParser()
            func(parser)
        else:
            parser = func()
        if 'path' not in self.options:
            self.options['path'] = ''
        path = str(self.options['path'])
        if 'prog' in self.options:
            parser.prog = self.options['prog']
        result = parse_parser(
            parser, skip_default_values='nodefault' in self.options)
        # Navigate into a sub-command when :path: points below the root.
        result = parser_navigate(result, path)
        if 'manpage' in self.options:
            return self._construct_manpage_specific_structure(result)
        nested_content = nodes.paragraph()
        self.state.nested_parse(
            self.content, self.content_offset, nested_content)
        nested_content = nested_content.children
        items = []
        # add common content between (definition lists are consumed as
        # per-argument overrides elsewhere, so they are not emitted here)
        for item in nested_content:
            if not isinstance(item, nodes.definition_list):
                items.append(item)
        if 'description' in result:
            items.append(self._nested_parse_paragraph(result['description']))
        items.append(nodes.literal_block(text=result['usage']))
        items.append(print_command_args_and_opts(
            print_arg_list(result, nested_content),
            print_opt_list(result, nested_content),
            print_subcommand_list(result, nested_content)
        ))
        if 'epilog' in result:
            items.append(self._nested_parse_paragraph(result['epilog']))
        return items
def setup(app):
    """Sphinx extension entry point: registers the ``argparse`` directive."""
    app.add_directive('argparse', ArgParseDirective)
| 41.397849 | 97 | 0.561104 |
fbeef5c219837835f8797ef42b441aecf6a7dcca | 1,099 | py | Python | server/djangoapp/urls.py | gsvr30/CloudAppDevelopment_Capstone | 138c1b8ea35c5af5bcdd1b15a3154ef01585bbe6 | [
"Apache-2.0"
] | null | null | null | server/djangoapp/urls.py | gsvr30/CloudAppDevelopment_Capstone | 138c1b8ea35c5af5bcdd1b15a3154ef01585bbe6 | [
"Apache-2.0"
] | null | null | null | server/djangoapp/urls.py | gsvr30/CloudAppDevelopment_Capstone | 138c1b8ea35c5af5bcdd1b15a3154ef01585bbe6 | [
"Apache-2.0"
] | null | null | null | from django.urls import path
from django.conf.urls.static import static
from django.conf import settings
from . import views
# URL namespace for reversing, e.g. ``{% url 'djangoapp:about' %}``.
app_name = 'djangoapp'
urlpatterns = [
    # Each entry maps a URL pattern (route) to a view function (view)
    # under a reversible name (name).
    # Static "about" page.
    path(route='about/', view=views.about, name='about'),
    # Contact-us page.
    path(route='contact/', view=views.contact, name='contact'),
    # Account registration form.
    path(route='registration/', view=views.registration_request, name='registration'),
    # Session login.
    path(route='login/', view=views.login_request, name='login'),
    # Session logout.
    path(route='logout/', view=views.logout_request, name='logout'),
    # Index: list of dealerships.
    path(route='', view=views.get_dealerships, name='index'),
    # Reviews for a single dealer.
    path('dealer/<int:dealer_id>/', views.get_dealer_details, name='dealer_details'),
    # Submit a new review for a dealer.
    path('review/<int:dealer_id>/', views.add_review, name='add_review')
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 37.896552 | 86 | 0.705187 |
8ef7687a5bc19dd8d428ca95f8472d1519a10cb7 | 1,280 | py | Python | setup.py | lsetiawan/ooipy | 23c23e84be0a05a74ed69d7b79dcc3429c4c9fc4 | [
"MIT"
] | null | null | null | setup.py | lsetiawan/ooipy | 23c23e84be0a05a74ed69d7b79dcc3429c4c9fc4 | [
"MIT"
] | null | null | null | setup.py | lsetiawan/ooipy | 23c23e84be0a05a74ed69d7b79dcc3429c4c9fc4 | [
"MIT"
] | null | null | null | from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import os
import setuptools
import versioneer
here = os.path.abspath(os.path.dirname(__file__))
# Dependencies.
with open("requirements.txt") as f:
requirements = f.readlines()
install_requires = [t.strip() for t in requirements]
with open(os.path.join(here, "README.md"), encoding="utf-8") as f:
long_description = f.read()
setuptools.setup(
name="ooipy",
author="OOIPy",
author_email="ooipython@gmail.com",
description="A python toolbox for acquiring and analyzing Ocean Obvservatories Initiative (OOI) Data",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ooipy/ooipy",
packages=setuptools.find_packages(exclude=("tests")),
license="MIT",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
install_requires=install_requires,
use_scm_version={
"write_to": "_version.py",
"write_to_template": 'version = "{version}"\n',
},
setup_requires=["setuptools>=30.3.0", "wheel", "setuptools_scm"]
) | 29.090909 | 106 | 0.682031 |
44347a0ff86da519089e9b90b89dc261781852e2 | 1,661 | py | Python | neseg/seg.py | myosmatrix/neseg | de3215534e940ebb41b543eb407de43c093548a8 | [
"MIT"
] | null | null | null | neseg/seg.py | myosmatrix/neseg | de3215534e940ebb41b543eb407de43c093548a8 | [
"MIT"
] | null | null | null | neseg/seg.py | myosmatrix/neseg | de3215534e940ebb41b543eb407de43c093548a8 | [
"MIT"
] | null | null | null | import sys
import os
import xlrd
import ahocorasick
import neseg.lib.FMM as fmm
import neseg.lib.RMM as rmm
NESEG_VERSION='0.7'
def readCSV2List(filePath):
try:
file=open(filePath,'r',encoding="utf-8")
context = file.read()
list_result=context.split("\n")# 以回车符\n分割成单独的行
length=len(list_result)
for i in range(length):
list_result[i] =list_result[i].split(",")
return list_result
except Exception as e:
print("文件[%s]读取失败" % filePath, "异常: %s" % e,sep=',')
return
finally:
file.close();# 操作完成一定要关闭
# 令牌分割
def dictTokenizer(sent,dict):
# 国名
dictWords = []
while True:
strm = fmm.cut_words(sent,dict,'')
dictWords = dictWords + strm
strmlen = len(''.join(strm))
#print("While country:",sentence,"strm:",''.join(strm),"strmlen:",strmlen)
sent = sent[strmlen:]
if strmlen <=1 :
break
return (''.join(dictWords), sent)
def segbydict(ne,dcountry,dprovince,dcity,dcounty,dsuffix):
dicCountry = fmm.load_dic(dcountry)
dicProvince = fmm.load_dic(dprovince)
dicCity = fmm.load_dic(dcity)
dicCounty = fmm.load_dic(dcounty)
token1, st1 = dictTokenizer(ne,dicCountry)
token2, st2 = dictTokenizer(st1,dicProvince)
token3, st3 = dictTokenizer(st2,dicCity)
token4, st4 = dictTokenizer(st3,dicCounty)
lst1 = readCSV2List(dsuffix)
lst_suffix = [row[0] for row in lst1]
token5 = rmm.cut_words(st4, lst_suffix)
st5 = st4.rstrip(''.join(token5))
strtemp = ','.join([token1,token2,token3,token4,st5,','.join(token5)])
return strtemp
| 27.683333 | 82 | 0.627935 |
b0a45b123b86ae01c541c049f1d26906acda7d82 | 3,020 | py | Python | assistant/filemanager/tests.py | personal-assisntant-2/personal-assistant | 5dfc2ba100285cfa9b50058ecd1212912b80b5b4 | [
"MIT"
] | null | null | null | assistant/filemanager/tests.py | personal-assisntant-2/personal-assistant | 5dfc2ba100285cfa9b50058ecd1212912b80b5b4 | [
"MIT"
] | 22 | 2021-11-03T05:47:28.000Z | 2021-11-08T10:19:46.000Z | assistant/filemanager/tests.py | personal-assisntant-2/personal-assistant | 5dfc2ba100285cfa9b50058ecd1212912b80b5b4 | [
"MIT"
] | null | null | null | from django.contrib.auth.models import User
from django.core.files.base import ContentFile
from django.core.files.uploadedfile import SimpleUploadedFile
from django.contrib import auth
from django.test import TestCase
from django.urls import reverse
from .models import UploadedFiles
class BaseFileManagerTestCase(TestCase):
...
class BaseFileManagerTestCaseWithUser(BaseFileManagerTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.username = 'test'
cls.password = 'test'
cls.user = User.objects.create_user(
username=cls.username,
password=cls.password
)
def setUp(self):
super().setUp()
self.client.force_login(self.user)
# @classmethod
# def tearDownClass(cls):
# super().tearDownClass()
# User.objects.all().delete()
# cls.user = None
class TestFileManagerViewRedirect(BaseFileManagerTestCase):
def test_redirect(self):
response = self.client.get(reverse('file_manager:file'))
self.assertEqual(response.status_code, 302)
self.assertURLEqual(
response.headers['Location'],
reverse('login') + '?next=/file_manager/'
)
class TestFileManagerView(BaseFileManagerTestCaseWithUser):
# def _login_user(self, username, password):
# response = self.client.post(
# reverse('login'),
# data=dict(username=username, password=password)
# )
def displaying_the_upload_file_in_file_list(self, file_name):
response = self.client.get(reverse('file_manager:file'))
self.assertEqual(response.status_code, 200)
response_redirect_html = response.content.decode('utf-8')
self.assertIn(file_name, response_redirect_html)
def test_get(self):
response = self.client.get(reverse('file_manager:file'))
self.assertEqual(response.status_code, 200)
# user = auth.get_user(self.client)
# print('.....', user.is_authenticated)
response_html = response.content.decode('utf-8')
self.assertIn('Upload file', response_html)
self.assertIn('Sort by category', response_html)
def test_upload_video(self):
file_name = "file.mp4"
video = SimpleUploadedFile(file_name, b"file_content", content_type="video/mp4")
response = self.client.post(reverse('file_manager:file'), {'file': video})
self.assertEquals(response.status_code, 302)
self.displaying_the_upload_file_in_file_list(file_name)
# class TestFileDownloadView(BaseFileManagerTestCaseWithUser):
#
# def setUp(self):
# super().setUp()
# self.file = ContentFile('text', 'name')
# print('.........self.file', vars(self.file))
# # self.f = b''
# # # Reading file
# # for chunk in file.chunks():
# # self.f += chunk
# UploadedFiles.objects.create(file=self.file)
#
# def test_download_video(self):
# pass
| 29.320388 | 88 | 0.653642 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.