code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
import cv2
import retinex
import os
import glob
if __name__ == '__main__':
DATA_ROOT = './train/0000045'
for name in os.listdir(DATA_ROOT):
img_path = os.path.join(DATA_ROOT, name)
sigma = [15, 80, 200]
print(img_path)
img = cv2.imread(img_path)
img_en = retinex.automatedMSRCR(img, sigma)
cv2.namedWindow("img", 0)
cv2.resizeWindow("img", 300, 300)
cv2.moveWindow("img", 100, 100)
cv2.namedWindow("img_en", 0)
cv2.resizeWindow("img_en", 300, 300)
cv2.moveWindow("img_en", 400, 100)
cv2.imshow('img', img)
cv2.imshow('img_en', img_en)
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"cv2.moveWindow",
"os.listdir",
"cv2.resizeWindow",
"os.path.join",
"retinex.automatedMSRCR",
"cv2.imshow",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.imread",
"cv2.namedWindow"
] | [((126, 147), 'os.listdir', 'os.listdir', (['DATA_ROOT'], {}), '(DATA_ROOT)\n', (136, 147), False, 'import os\n'), ((168, 197), 'os.path.join', 'os.path.join', (['DATA_ROOT', 'name'], {}), '(DATA_ROOT, name)\n', (180, 197), False, 'import os\n'), ((266, 286), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (276, 286), False, 'import cv2\n'), ((304, 338), 'retinex.automatedMSRCR', 'retinex.automatedMSRCR', (['img', 'sigma'], {}), '(img, sigma)\n', (326, 338), False, 'import retinex\n'), ((348, 373), 'cv2.namedWindow', 'cv2.namedWindow', (['"""img"""', '(0)'], {}), "('img', 0)\n", (363, 373), False, 'import cv2\n'), ((382, 415), 'cv2.resizeWindow', 'cv2.resizeWindow', (['"""img"""', '(300)', '(300)'], {}), "('img', 300, 300)\n", (398, 415), False, 'import cv2\n'), ((424, 455), 'cv2.moveWindow', 'cv2.moveWindow', (['"""img"""', '(100)', '(100)'], {}), "('img', 100, 100)\n", (438, 455), False, 'import cv2\n'), ((464, 492), 'cv2.namedWindow', 'cv2.namedWindow', (['"""img_en"""', '(0)'], {}), "('img_en', 0)\n", (479, 492), False, 'import cv2\n'), ((501, 537), 'cv2.resizeWindow', 'cv2.resizeWindow', (['"""img_en"""', '(300)', '(300)'], {}), "('img_en', 300, 300)\n", (517, 537), False, 'import cv2\n'), ((546, 580), 'cv2.moveWindow', 'cv2.moveWindow', (['"""img_en"""', '(400)', '(100)'], {}), "('img_en', 400, 100)\n", (560, 580), False, 'import cv2\n'), ((590, 612), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'img'], {}), "('img', img)\n", (600, 612), False, 'import cv2\n'), ((621, 649), 'cv2.imshow', 'cv2.imshow', (['"""img_en"""', 'img_en'], {}), "('img_en', img_en)\n", (631, 649), False, 'import cv2\n'), ((658, 672), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (669, 672), False, 'import cv2\n'), ((681, 704), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (702, 704), False, 'import cv2\n')] |
import argparse
import subprocess
import logging
import time
import re
import os
from datetime import datetime
from contextlib import closing, contextmanager
import pymysql
import pymysql.cursors
import boto3
import botocore.exceptions
logger = logging.getLogger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--debug", action="store_true", help="print debug logs")
parser.add_argument("--region", required=True, help="AWS region name")
parser.add_argument(
"--source-instance-id",
required=True,
help="name of the existing instance (This is going to be master when replication is setup.)")
parser.add_argument(
"--new-instance-id", required=True, help="name of the slave instance that is going to be created")
parser.add_argument(
"--master-user-name", required=True, help="master username of instance specified with --source-instance-id")
parser.add_argument(
"--master-user-password",
required=True,
help="master user password of instance specified with --source-instance-id")
parser.add_argument(
"--databases", required=True, help="comma separated database names that need to be copied to slave")
parser.add_argument("--users", help="comma separated user names that need to be copied to slave")
parser.add_argument("--availability-zone", help="set it if you want slave on different availability zone")
parser.add_argument("--db-instance-class", help="set it if you want different instance class on slave")
parser.add_argument("--engine-version", help="set it if you want different engine version on slave")
parser.add_argument("--parameter-group", help="set it if you want different parameter group on slave")
parser.add_argument("--option-group", help="set it if you want different option group on slave")
parser.add_argument(
"--allocated-storage", type=int, help="set it if you want to grow/shrink storage space on slave")
parser.add_argument(
"--iops",
type=int,
help="set it if you want different IOPS on slave (must be valid for given --allocated-storage)")
parser.add_argument(
"--binlog-retention-hours",
type=int,
default=24,
help="Darbe set 'binlog retention hours' on master to allow enough time for copying data between instances."
"Increase if your data is too big so that it cannot be copied in 24 hours.")
args = parser.parse_args()
formatter = logging.Formatter('%(asctime)s %(levelname)-5.5s L%(lineno)-3d %(message)s', datefmt='%H:%M:%S')
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
if args.debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
logger.info("checking required programs")
subprocess.check_call(['which', 'mysqldump'])
subprocess.check_call(['which', 'mysql'])
# put root password to environ for not using -p flag with mysql commands below.
os.environ['MYSQL_PWD'] = args.master_user_password
rds = boto3.client('rds', region_name=args.region)
ec2 = boto3.client('ec2', region_name=args.region)
db_instance_available = rds.get_waiter('db_instance_available')
# unique string representing current second like 20160101090500
timestamp = str(datetime.utcnow()).replace('-', '').replace(':', '').replace(' ', '')[:14]
@contextmanager
def connect_db(instance, cursorclass=pymysql.cursors.Cursor):
"""Yields a cursor on a new connection to a database."""
conn = pymysql.connect(
user=args.master_user_name,
password=args.master_user_password,
host=instance['Endpoint']['Address'],
port=instance['Endpoint']['Port'],
autocommit=True,
cursorclass=cursorclass)
with closing(conn):
cursor = conn.cursor()
with closing(cursor):
yield cursor
def wait_db_instance_available(instance_id):
"""Timeout on waiter cannot be changed. We keep continue to wait on timeout error."""
while True:
try:
db_instance_available.wait(DBInstanceIdentifier=instance_id)
except botocore.exceptions.WaiterError:
continue
else:
break
def wait_until_zero_lag(instance):
"""Blocks until replication lag is zero."""
while True:
time.sleep(4)
try:
with connect_db(instance, cursorclass=pymysql.cursors.DictCursor) as cursor:
cursor.execute("SHOW SLAVE STATUS")
slave_status = cursor.fetchone()
except Exception as e:
logger.error(str(e))
else:
seconds_behind_master = slave_status['Seconds_Behind_Master']
logger.info("seconds behind master: %s", seconds_behind_master)
if seconds_behind_master is None:
continue
if seconds_behind_master < 1:
break
logger.info("getting details of source instance")
source_instance = rds.describe_db_instances(DBInstanceIdentifier=args.source_instance_id)['DBInstances'][0]
logger.info("creating replication security group")
vpc_id = source_instance['DBSubnetGroup']['VpcId']
try:
response = ec2.create_security_group(
GroupName="darbe-replication",
VpcId=vpc_id,
Description="created by darbe for replication between instances")
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'InvalidGroup.Duplicate':
raise
logger.info("security group already exists")
security_group_id = ec2.describe_security_groups(Filters=[{
'Name': 'vpc-id',
"Values": [vpc_id]
}, {
'Name': 'group-name',
'Values': ['darbe-replication']
}])['SecurityGroups'][0]['GroupId']
else:
security_group_id = response['GroupId']
logger.info("modifying security group rules: %s", security_group_id)
try:
ec2.authorize_security_group_ingress(
GroupId=security_group_id,
IpPermissions=[{
'IpProtocol': 'tcp',
'FromPort': 3306,
'ToPort': 3306,
'IpRanges': [{
'CidrIp': '0.0.0.0/0'
}]
}])
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] != 'InvalidPermission.Duplicate':
raise
logger.info("security group permission already exists")
security_group_ids = [g['VpcSecurityGroupId'] for g in source_instance['VpcSecurityGroups']]
if security_group_id in security_group_ids:
logger.info("replication security group is already attached to the source instance")
else:
logger.info("adding replication security group to the source instance")
security_group_ids.append(security_group_id)
rds.modify_db_instance(DBInstanceIdentifier=args.source_instance_id, VpcSecurityGroupIds=security_group_ids)
logger.info("waiting for source instance to become available")
time.sleep(60) # instance state does not switch to "modifying" immediately
wait_db_instance_available(args.source_instance_id)
with connect_db(source_instance) as cursor:
cursor.execute("SELECT VERSION()")
version_string = cursor.fetchone()[0]
match = re.match(r'(\d+)\.(\d+)\.(\d+)', version_string)
version = tuple(map(int, match.groups())) # type: ignore
logger.info("source instance mysql version: %s", version)
grants = []
if args.users:
logger.info("getting grants from source instance")
with connect_db(source_instance) as cursor:
if version < (5, 7, 6):
password_column = 'Password'
else:
password_column = 'authentication_string'
users_in = ','.join(map(lambda x: "'%s'" % x, args.users.split(',')))
sql = "SELECT User, Host, %s FROM mysql.user WHERE User in (%s)" % (password_column, users_in)
logger.debug("running sql: %s", sql)
cursor.execute(sql)
for user, host, password in cursor.fetchall():
logger.debug("user: %s, host: %s, password: %s", user, host, password)
full_user = "'{}'@'{}'".format(user, host)
logger.debug("full user: %r", full_user)
if version >= (5, 7, 6):
cursor.execute("SHOW CREATE USER %s" % full_user)
create_user_sql = cursor.fetchall()[0][0]
grants.append(create_user_sql)
cursor.execute("SHOW GRANTS FOR %s" % full_user)
for grant in cursor.fetchall():
grant = str(grant[0])
logger.debug("grant: %s", grant)
if version < (5, 7, 6) and 'IDENTIFIED BY' in grant:
grant = grant.replace("<secret>", "'%s'" % password)
grants.append(grant)
logger.info("setting binlog retention hours on source instance to: %s", args.binlog_retention_hours)
with connect_db(source_instance) as cursor:
sql = "call mysql.rds_set_configuration('binlog retention hours', %i)" % args.binlog_retention_hours
logger.debug("running sql: %s", sql)
cursor.execute(sql)
original_parameter_group = args.parameter_group or source_instance['DBParameterGroups'][0]['DBParameterGroupName']
match = re.match(r'.+-darbe-(\d+)', original_parameter_group)
if match:
new_parameter_group = original_parameter_group.replace(match.groups()[0], timestamp)
else:
new_parameter_group = "%s-darbe-%s" % (original_parameter_group, timestamp)
logger.info("copying parameter group as: %s", new_parameter_group)
rds.copy_db_parameter_group(
SourceDBParameterGroupIdentifier=original_parameter_group,
TargetDBParameterGroupIdentifier=new_parameter_group,
TargetDBParameterGroupDescription="copied from %s then modified" % original_parameter_group)
logger.info("modifying new parameter group")
rds.modify_db_parameter_group(
DBParameterGroupName=new_parameter_group,
# these parameters makes slave sql thread run faster,
# otherwise slave may not catch up with the master for write intensive load.
Parameters=[
{
'ParameterName': 'innodb_flush_log_at_trx_commit',
'ParameterValue': '2',
'ApplyMethod': 'immediate',
},
{
'ParameterName': 'sync_binlog',
'ParameterValue': '0',
'ApplyMethod': 'immediate',
},
])
logger.info("creating new db instance: %s", args.new_instance_id)
new_instance_params = dict(
AllocatedStorage=args.allocated_storage or source_instance['AllocatedStorage'],
AutoMinorVersionUpgrade=source_instance['AutoMinorVersionUpgrade'],
AvailabilityZone=args.availability_zone or source_instance['AvailabilityZone'],
BackupRetentionPeriod=0, # should be disabled for fast import, will be enabled after import
CopyTagsToSnapshot=source_instance['CopyTagsToSnapshot'],
DBInstanceClass=args.db_instance_class or source_instance['DBInstanceClass'],
DBInstanceIdentifier=args.new_instance_id,
DBParameterGroupName=new_parameter_group,
DBSubnetGroupName=source_instance['DBSubnetGroup']['DBSubnetGroupName'],
Engine=source_instance['Engine'],
EngineVersion=args.engine_version or source_instance['EngineVersion'],
LicenseModel=source_instance['LicenseModel'],
MasterUserPassword=args.master_user_password,
MasterUsername=args.master_user_name,
OptionGroupName=args.option_group or source_instance['OptionGroupMemberships'][0]['OptionGroupName'],
MultiAZ=False, # should be disabled for fast import, will be enabled after import
Port=source_instance['Endpoint']['Port'],
PreferredBackupWindow=source_instance['PreferredBackupWindow'],
PreferredMaintenanceWindow=source_instance['PreferredMaintenanceWindow'],
PubliclyAccessible=source_instance['PubliclyAccessible'],
StorageEncrypted=source_instance['StorageEncrypted'],
StorageType=source_instance['StorageType'],
VpcSecurityGroupIds=security_group_ids, )
if source_instance.get('Iops', 0) > 0:
new_instance_params['Iops'] = args.iops or source_instance['Iops']
if source_instance.get('MonitoringInterval', 0) > 0:
new_instance_params['MonitoringInterval'] = source_instance['MonitoringInterval']
new_instance_params['MonitoringRoleArn'] = source_instance['MonitoringRoleArn']
rds.create_db_instance(**new_instance_params)
read_replica_instance_id = "%s-readreplica-%s" % (source_instance['DBInstanceIdentifier'], timestamp)
logger.info("crating read replica: %s", read_replica_instance_id)
rds.create_db_instance_read_replica(
DBInstanceIdentifier=read_replica_instance_id,
SourceDBInstanceIdentifier=source_instance['DBInstanceIdentifier'],
DBInstanceClass=source_instance['DBInstanceClass'],
AvailabilityZone=source_instance['AvailabilityZone'])['DBInstance']
logger.info("waiting for new instance to become available")
wait_db_instance_available(args.new_instance_id)
logger.info("getting details of new instance")
new_instance = rds.describe_db_instances(DBInstanceIdentifier=args.new_instance_id)['DBInstances'][0]
logger.info("waiting for read replica to become available")
wait_db_instance_available(read_replica_instance_id)
logger.info("getting details of created read replica")
read_replica_instance = rds.describe_db_instances(DBInstanceIdentifier=read_replica_instance_id)['DBInstances'][0]
logger.info("stopping replication on read replica")
with connect_db(read_replica_instance, cursorclass=pymysql.cursors.DictCursor) as cursor:
cursor.callproc("mysql.rds_stop_replication")
logger.info("finding binlog position")
cursor.execute("SHOW SLAVE STATUS")
slave_status = cursor.fetchone()
binlog_filename, binlog_position = slave_status['Relay_Master_Log_File'], slave_status['Exec_Master_Log_Pos']
logger.info("master status: filename: %s position: %s", binlog_filename, binlog_position)
logger.info("dumping data from read replica")
dump_args = [
'mysqldump',
'-h',
read_replica_instance['Endpoint']['Address'],
'-P',
str(read_replica_instance['Endpoint']['Port']),
'-u',
args.master_user_name,
'--single-transaction',
'--order-by-primary',
'--set-gtid-purged=OFF',
'--databases',
]
logger.debug("running mysqldump: %s", " ".join(dump_args))
dump_args.extend(args.databases.split(','))
dump = subprocess.Popen(dump_args, stdout=subprocess.PIPE)
logger.info("loading data to new instance")
load_args = [
'mysql',
'-h',
new_instance['Endpoint']['Address'],
'-P',
str(new_instance['Endpoint']['Port']),
'-u',
args.master_user_name,
'-f',
]
logger.debug("running mysql for loding data: %s", " ".join(load_args))
load = subprocess.Popen(load_args, stdin=dump.stdout)
logger.info("waiting for data transfer to finish")
load.wait()
assert load.returncode == 0
dump.wait()
assert dump.returncode == 0
logger.info("data transfer is finished")
logger.info("deleting read replica instance")
rds.delete_db_instance(DBInstanceIdentifier=read_replica_instance_id, SkipFinalSnapshot=True)
logger.info("setting master on new instance")
with connect_db(new_instance) as cursor:
cursor.callproc("mysql.rds_set_external_master",
(source_instance['Endpoint']['Address'], source_instance['Endpoint']['Port'],
args.master_user_name, args.master_user_password, binlog_filename, binlog_position, 0))
logger.info("starting replication on new instance")
with connect_db(new_instance) as cursor:
cursor.callproc("mysql.rds_start_replication")
if grants:
logger.info("creating users on new instance")
with connect_db(new_instance) as cursor:
for grant in grants:
logger.debug("executing grant sql: %s", grant)
cursor.execute(grant)
logger.info("wating until new instance catches source instance")
wait_until_zero_lag(new_instance)
changes = {}
if source_instance['BackupRetentionPeriod'] > 0:
changes['BackupRetentionPeriod'] = source_instance['BackupRetentionPeriod']
changes['PreferredBackupWindow'] = source_instance['PreferredBackupWindow']
if source_instance['MultiAZ']:
changes['MultiAZ'] = source_instance['MultiAZ']
if source_instance['PerformanceInsightsEnabled']:
changes['EnablePerformanceInsights'] = source_instance['PerformanceInsightsEnabled']
changes['PerformanceInsightsKMSKeyId'] = source_instance['PerformanceInsightsKMSKeyId']
changes['PerformanceInsightsRetentionPeriod'] = source_instance['PerformanceInsightsRetentionPeriod']
if changes:
logger.info("modifying new instance last time")
rds.modify_db_instance(DBInstanceIdentifier=args.new_instance_id, ApplyImmediately=True, **changes)
logger.info("waiting for new instance to become available")
time.sleep(60) # instance state does not switch to "modifying" immediately
wait_db_instance_available(args.new_instance_id)
logger.info("wating until new instance catches source instance")
wait_until_zero_lag(new_instance)
logger.info("all done")
if __name__ == '__main__':
main()
| [
"logging.getLogger",
"logging.StreamHandler",
"boto3.client",
"argparse.ArgumentParser",
"subprocess.check_call",
"datetime.datetime.utcnow",
"logging.Formatter",
"subprocess.Popen",
"re.match",
"pymysql.connect",
"time.sleep",
"contextlib.closing"
] | [((247, 274), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (264, 274), False, 'import logging\n'), ((302, 327), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (325, 327), False, 'import argparse\n'), ((2521, 2621), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(levelname)-5.5s L%(lineno)-3d %(message)s"""'], {'datefmt': '"""%H:%M:%S"""'}), "('%(asctime)s %(levelname)-5.5s L%(lineno)-3d %(message)s',\n datefmt='%H:%M:%S')\n", (2538, 2621), False, 'import logging\n'), ((2640, 2663), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (2661, 2663), False, 'import logging\n'), ((2946, 2991), 'subprocess.check_call', 'subprocess.check_call', (["['which', 'mysqldump']"], {}), "(['which', 'mysqldump'])\n", (2967, 2991), False, 'import subprocess\n'), ((2996, 3037), 'subprocess.check_call', 'subprocess.check_call', (["['which', 'mysql']"], {}), "(['which', 'mysql'])\n", (3017, 3037), False, 'import subprocess\n'), ((3190, 3234), 'boto3.client', 'boto3.client', (['"""rds"""'], {'region_name': 'args.region'}), "('rds', region_name=args.region)\n", (3202, 3234), False, 'import boto3\n'), ((3245, 3289), 'boto3.client', 'boto3.client', (['"""ec2"""'], {'region_name': 'args.region'}), "('ec2', region_name=args.region)\n", (3257, 3289), False, 'import boto3\n'), ((9799, 9852), 're.match', 're.match', (['""".+-darbe-(\\\\d+)"""', 'original_parameter_group'], {}), "('.+-darbe-(\\\\d+)', original_parameter_group)\n", (9807, 9852), False, 'import re\n'), ((15272, 15323), 'subprocess.Popen', 'subprocess.Popen', (['dump_args'], {'stdout': 'subprocess.PIPE'}), '(dump_args, stdout=subprocess.PIPE)\n', (15288, 15323), False, 'import subprocess\n'), ((15679, 15725), 'subprocess.Popen', 'subprocess.Popen', (['load_args'], {'stdin': 'dump.stdout'}), '(load_args, stdin=dump.stdout)\n', (15695, 15725), False, 'import subprocess\n'), ((3690, 3894), 'pymysql.connect', 'pymysql.connect', ([], {'user': 
'args.master_user_name', 'password': 'args.master_user_password', 'host': "instance['Endpoint']['Address']", 'port': "instance['Endpoint']['Port']", 'autocommit': '(True)', 'cursorclass': 'cursorclass'}), "(user=args.master_user_name, password=args.\n master_user_password, host=instance['Endpoint']['Address'], port=\n instance['Endpoint']['Port'], autocommit=True, cursorclass=cursorclass)\n", (3705, 3894), False, 'import pymysql\n'), ((7399, 7413), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (7409, 7413), False, 'import time\n'), ((7689, 7741), 're.match', 're.match', (['"""(\\\\d+)\\\\.(\\\\d+)\\\\.(\\\\d+)"""', 'version_string'], {}), "('(\\\\d+)\\\\.(\\\\d+)\\\\.(\\\\d+)', version_string)\n", (7697, 7741), False, 'import re\n'), ((17898, 17912), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (17908, 17912), False, 'import time\n'), ((3971, 3984), 'contextlib.closing', 'closing', (['conn'], {}), '(conn)\n', (3978, 3984), False, 'from contextlib import closing, contextmanager\n'), ((4583, 4596), 'time.sleep', 'time.sleep', (['(4)'], {}), '(4)\n', (4593, 4596), False, 'import time\n'), ((4038, 4053), 'contextlib.closing', 'closing', (['cursor'], {}), '(cursor)\n', (4045, 4053), False, 'from contextlib import closing, contextmanager\n'), ((3448, 3465), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (3463, 3465), False, 'from datetime import datetime\n')] |
import datetime
import requests
import json
from bitcoin import ecdsa_verify, ecdsa_recover, ecdsa_sign, pubtoaddr, privtoaddr
from .exceptions import InvalidSignature
from .consensus import validate_timestamp
from .network import SEED_NODES
import dateutil.parser
def make_peer_registration(pk, domain):
timestamp = datetime.datetime.now().isoformat()
address = privtoaddr(pk)
to_sign = "%s%s%s" % (domain, address, timestamp)
return {
'domain': domain,
'payout_address': address,
'timestamp': timestamp,
'signature': ecdsa_sign(to_sign, pk)
}
def validate_peer_registration(reg, now=None):
ts = dateutil.parser.parse(reg['timestamp'])
validate_timestamp(ts, now=now)
to_sign = "{domain}{payout_address}{timestamp}".format(**reg)
try:
pubkey = ecdsa_recover(to_sign, reg['signature'])
except:
raise InvalidSignature("Can't recover pubkey from signature")
valid_address = pubtoaddr(pubkey) == reg['payout_address']
valid_sig = ecdsa_verify(to_sign, reg['signature'], pubkey)
if not valid_sig or not valid_address:
raise InvalidSignature("Invalid Signature")
return True
def get_peerlist():
"""
Tries seed nodes until a peerlist is returned
"""
response = None
for seed in SEED_NODES:
url = "http://%s/staeon/peerlist?top" % seed
print(url)
try:
response = requests.get(url).json()
except (requests.exceptions.ConnectionError, ValueError) as exc:
print(exc)
continue
break
if not response:
raise Exception("Can't get peerlist")
return response['peers']
def push_peer_registration(reg, peers=None, verbose=True):
if not peers: peers = get_peerlist()
for peer in peers:
domain = peer['domain']
url = "http://%s/peerlist" % domain
if verbose: print("Pushing to: " + domain)
try:
response = requests.post(url, {'registration': json.dumps(reg)})
except requests.exceptions.ConnectionError as exc:
print(exc)
if verbose: print("..." + response.content)
def register_peer(domain, pk, peers=None, verbose=True):
reg = make_peer_registration(pk, domain)
push_peer_registration(reg, peers=peers, verbose=verbose)
| [
"bitcoin.ecdsa_recover",
"json.dumps",
"bitcoin.pubtoaddr",
"requests.get",
"datetime.datetime.now",
"bitcoin.privtoaddr",
"bitcoin.ecdsa_verify",
"bitcoin.ecdsa_sign"
] | [((373, 387), 'bitcoin.privtoaddr', 'privtoaddr', (['pk'], {}), '(pk)\n', (383, 387), False, 'from bitcoin import ecdsa_verify, ecdsa_recover, ecdsa_sign, pubtoaddr, privtoaddr\n'), ((1028, 1075), 'bitcoin.ecdsa_verify', 'ecdsa_verify', (['to_sign', "reg['signature']", 'pubkey'], {}), "(to_sign, reg['signature'], pubkey)\n", (1040, 1075), False, 'from bitcoin import ecdsa_verify, ecdsa_recover, ecdsa_sign, pubtoaddr, privtoaddr\n'), ((569, 592), 'bitcoin.ecdsa_sign', 'ecdsa_sign', (['to_sign', 'pk'], {}), '(to_sign, pk)\n', (579, 592), False, 'from bitcoin import ecdsa_verify, ecdsa_recover, ecdsa_sign, pubtoaddr, privtoaddr\n'), ((825, 865), 'bitcoin.ecdsa_recover', 'ecdsa_recover', (['to_sign', "reg['signature']"], {}), "(to_sign, reg['signature'])\n", (838, 865), False, 'from bitcoin import ecdsa_verify, ecdsa_recover, ecdsa_sign, pubtoaddr, privtoaddr\n'), ((969, 986), 'bitcoin.pubtoaddr', 'pubtoaddr', (['pubkey'], {}), '(pubkey)\n', (978, 986), False, 'from bitcoin import ecdsa_verify, ecdsa_recover, ecdsa_sign, pubtoaddr, privtoaddr\n'), ((323, 346), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (344, 346), False, 'import datetime\n'), ((1431, 1448), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (1443, 1448), False, 'import requests\n'), ((2010, 2025), 'json.dumps', 'json.dumps', (['reg'], {}), '(reg)\n', (2020, 2025), False, 'import json\n')] |
from pathlib import Path
from ontopy import get_ontology, World
from ontopy.utils import write_catalog
# Setup dlite paths
thisdir = Path(__file__).parent.absolute()
rootdir = thisdir.parent.parent
# Load ontologies into a common world
world = World()
mapsTo_onto = world.get_ontology(f'{rootdir}/ontology/mapsTo.ttl').load(
EMMObased=False)
chemistry_onto = world.get_ontology(f'{rootdir}/ontology/chemistry.ttl').load()
dlite_onto = world.get_ontology('https://raw.githubusercontent.com/'
'emmo-repo/datamodel-ontology/master'
'/dlitemodel.ttl').load(EMMObased=False)
mapping = world.get_ontology('http://onto-ns.com/ontology/mapping#')
mapping.set_version('0.1')
mapping.imported_ontologies.extend([mapsTo_onto, chemistry_onto, dlite_onto])
molecule = dlite_onto.Metadata()
molecule.iri = 'http://onto-ns.com/meta/0.1/Molecule'
molecule_energy = dlite_onto.Metadata()
molecule_energy.iri = ('http://onto-ns.com/meta/0.1/'
'Molecule#groundstate_energy')
molecule_name = dlite_onto.Metadata()
molecule_name.iri = 'http://onto-ns.com/meta/0.1/Molecule#name'
with mapping:
molecule.mapsTo.append(chemistry_onto.MoleculeModel)
molecule_energy.mapsTo.append(chemistry_onto.GroundStateEnergy)
molecule_name.mapsTo.append(chemistry_onto.Identifier)
# XXX
onto = chemistry_onto
onto.Identifier.mapsTo.append(onto.Atom)
onto.BondedAtom.mapsTo.append(onto.Field)
mapping.save(f'{thisdir}/mapping_mols.ttl')
# Since the iris are not directly findable on the www, a catalog file
# with pointers to the imported ontologies must be made in order
# to ensure correct loading of mapping_mols.ttl in EMMOntopy or Protege
catalog = {'http://emmo.info/datamodel/dlitemodel':
'https://raw.githubusercontent.com/emmo-repo/datamodel-ontology/master/dlitemodel.ttl',
'http://emmo.info/datamodel/0.0.1':
'https://raw.githubusercontent.com/emmo-repo/datamodel-ontology/master/datamodel.ttl',
'http://emmo.info/datamodel/0.0.1/entity':
'https://raw.githubusercontent.com/emmo-repo/datamodel-ontology/master/entity.ttl',
'http://onto-ns.com/ontology/chemistry': '../../ontology/chemistry.ttl',
'http://onto-ns.com/ontology/mapsTo': '../../ontology/mapsTo.ttl'}
write_catalog(catalog, f'{thisdir}/catalog-v001.xml')
#import owlready2
#
#def related(onto, source, relation, route=[]):
# """Returns a generator over all entities that `source` relates to via
# `relation`.
# """
# for e1 in getattr(source, relation.name):
# r1 = route + [(source.iri, relation.iri, e1.iri)]
# for e2 in e1.descendants():
# r2 = r1 + [(e2.iri, 'rdfs:subClassOf', e1.iri)] if e1 != e1 else r1
# for rel in relation.descendants():
# r3 = r2 + [(rel.iri, 'rdfs:subProperty', relation.iri)
# ] if rel != relation else r2
# yield e2, r3
# if issubclass(rel, owlready2.TransitiveProperty):
# yield from related(onto, e2, relation, r3)
#
#m = mapping.world['http://onto-ns.com/meta/0.1/Molecule#name']
#for e, r in related(mapping, m, mapping.mapsTo):
# print()
# print(e)
# print(r)
| [
"ontopy.utils.write_catalog",
"ontopy.World",
"pathlib.Path"
] | [((249, 256), 'ontopy.World', 'World', ([], {}), '()\n', (254, 256), False, 'from ontopy import get_ontology, World\n'), ((2323, 2376), 'ontopy.utils.write_catalog', 'write_catalog', (['catalog', 'f"""{thisdir}/catalog-v001.xml"""'], {}), "(catalog, f'{thisdir}/catalog-v001.xml')\n", (2336, 2376), False, 'from ontopy.utils import write_catalog\n'), ((136, 150), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (140, 150), False, 'from pathlib import Path\n')] |
import torch
import numpy as np
from torch import nn
from torch import optim
from torch.utils.data import TensorDataset, DataLoader
from forPython.datasets.uci import load_mhealth
from forPython.models.torch.cnn import SimpleCNN
from forPython.utility.trainer import TorchSimpleTrainer
np.random.seed(0)
torch.random.manual_seed(0)
(x_train, y_train), (x_test, y_test) = load_mhealth()
y_train -= 1
y_test -= 1
n_timesteps, n_features, n_outputs = x_train.shape[1], x_train.shape[2], 12
batch_size, epochs = 32, 10
x_train = torch.tensor(x_train).float()
x_test = torch.tensor(x_test).float()
y_train = torch.tensor(y_train[:, 0]).long()
y_test = torch.tensor(y_test[:, 0]).long()
mid_size = 128 * 62
model = SimpleCNN(n_features, mid_size, n_outputs)
loss_func = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters())
train_ds = TensorDataset(x_train, y_train)
test_ds = TensorDataset(x_test, y_test)
train_loader = DataLoader(train_ds, batch_size, False)
test_loader = DataLoader(test_ds, batch_size, False)
clf = TorchSimpleTrainer(model, loss_func, optimizer)
clf.fit(train_loader, epochs)
clf.evaluate(test_loader)
| [
"torch.random.manual_seed",
"forPython.utility.trainer.TorchSimpleTrainer",
"forPython.models.torch.cnn.SimpleCNN",
"torch.nn.CrossEntropyLoss",
"torch.utils.data.TensorDataset",
"torch.tensor",
"numpy.random.seed",
"torch.utils.data.DataLoader",
"forPython.datasets.uci.load_mhealth"
] | [((288, 305), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (302, 305), True, 'import numpy as np\n'), ((306, 333), 'torch.random.manual_seed', 'torch.random.manual_seed', (['(0)'], {}), '(0)\n', (330, 333), False, 'import torch\n'), ((375, 389), 'forPython.datasets.uci.load_mhealth', 'load_mhealth', ([], {}), '()\n', (387, 389), False, 'from forPython.datasets.uci import load_mhealth\n'), ((718, 760), 'forPython.models.torch.cnn.SimpleCNN', 'SimpleCNN', (['n_features', 'mid_size', 'n_outputs'], {}), '(n_features, mid_size, n_outputs)\n', (727, 760), False, 'from forPython.models.torch.cnn import SimpleCNN\n'), ((773, 794), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (792, 794), False, 'from torch import nn\n'), ((850, 881), 'torch.utils.data.TensorDataset', 'TensorDataset', (['x_train', 'y_train'], {}), '(x_train, y_train)\n', (863, 881), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((892, 921), 'torch.utils.data.TensorDataset', 'TensorDataset', (['x_test', 'y_test'], {}), '(x_test, y_test)\n', (905, 921), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((938, 977), 'torch.utils.data.DataLoader', 'DataLoader', (['train_ds', 'batch_size', '(False)'], {}), '(train_ds, batch_size, False)\n', (948, 977), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((992, 1030), 'torch.utils.data.DataLoader', 'DataLoader', (['test_ds', 'batch_size', '(False)'], {}), '(test_ds, batch_size, False)\n', (1002, 1030), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((1038, 1085), 'forPython.utility.trainer.TorchSimpleTrainer', 'TorchSimpleTrainer', (['model', 'loss_func', 'optimizer'], {}), '(model, loss_func, optimizer)\n', (1056, 1085), False, 'from forPython.utility.trainer import TorchSimpleTrainer\n'), ((531, 552), 'torch.tensor', 'torch.tensor', (['x_train'], {}), '(x_train)\n', (543, 552), False, 'import torch\n'), ((570, 590), 'torch.tensor', 
'torch.tensor', (['x_test'], {}), '(x_test)\n', (582, 590), False, 'import torch\n'), ((610, 637), 'torch.tensor', 'torch.tensor', (['y_train[:, 0]'], {}), '(y_train[:, 0])\n', (622, 637), False, 'import torch\n'), ((654, 680), 'torch.tensor', 'torch.tensor', (['y_test[:, 0]'], {}), '(y_test[:, 0])\n', (666, 680), False, 'import torch\n')] |
import pytest
from marshmallow import Schema, fields
from lambda_handlers import validators
from lambda_handlers.handlers import http_handler
from lambda_handlers.response import cors
class TestHTTPHandlerDefaults:
@pytest.fixture
def handler(self):
@http_handler()
def handler(event, context):
return {'user_id': 12}
return handler
def test_empty_body_validation(self, handler):
response = handler({}, None)
assert isinstance(response, dict)
assert response['statusCode'] == 200
def test_invalid_body_validation(self, handler):
response = handler({'body': '{.x'}, None)
assert isinstance(response, dict)
assert response['statusCode'] == 400
assert response['body'] == '{"errors": [{"body": ["Invalid JSON input."]}]}'
def test_handler_response(self, handler):
response = handler({}, None)
assert isinstance(response, dict)
assert response['statusCode'] == 200
assert response['body'] == '{"user_id": 12}'
assert response['headers'] == {
'Access-Control-Allow-Credentials': True,
'Access-Control-Allow-Origin': '*',
}
class PipeFormatter:
@staticmethod
def parse(content):
items = content.split('|')
return dict(zip(items[::2], items[1::2]))
@staticmethod
def serialize(content):
content_items = [
item for pairs in list(content.items())
for item in pairs
]
return '|'.join(content_items)
class TestHTTPHandlerCustomBodyFormat:
@pytest.fixture
def handler(self):
@http_handler(
input_format=PipeFormatter.parse,
)
def handler(event, context):
return event['body']
return handler
def test_custom_body_formatting(self, handler):
event = {'body': 'user_id|peter'}
response = handler(event, None)
assert isinstance(response, dict)
assert response['statusCode'] == 200
assert response['body'] == '{"user_id": "peter"}'
class TestHTTPHandlerCORS:
@pytest.fixture
def handler(self):
@http_handler(
cors=cors(origin='localhost', credentials=False),
)
def handler(event, context):
return event
return handler
def test_custom_cors_headers(self, handler):
response = handler({}, None)
assert isinstance(response, dict)
assert response['statusCode'] == 200
assert response['headers'] == {'Access-Control-Allow-Origin': 'localhost'}
class TestHTTPHandlerCustomOutputFormat:
@pytest.fixture
def handler(self):
@http_handler(
output_format=PipeFormatter.serialize,
)
def handler(event, context):
return {'user_id': 'peter'}
return handler
def test_custom_output_format(self, handler):
response = handler({}, None)
assert isinstance(response, dict)
assert response['statusCode'] == 200
assert response['body'] == 'user_id|peter'
class TestHTTPHandlerCustomMarshmallowValidator:
@pytest.fixture
def handler(self):
class UserSchema(Schema):
user_id = fields.Integer(required=True)
class ResponseSchema(Schema):
body = fields.Nested(UserSchema, required=True)
headers = fields.Dict(required=True)
statusCode = fields.Integer(required=True)
@http_handler(
validator=validators.http.marshmallow(
body=UserSchema(),
response=ResponseSchema(),
),
)
def handler(event, context):
return event['body']
return handler
@pytest.mark.parametrize(
'body,expected',
[
('{"user_id": 1}', '{"user_id": 1}'),
('{"user_id": "1"}', '{"user_id": 1}'),
],
)
def test_custom_body_validator(self, handler, body, expected):
event = {'body': body}
response = handler(event, None)
assert isinstance(response, dict)
assert response['statusCode'] == 200
assert response['body'] == expected
| [
"lambda_handlers.handlers.http_handler",
"marshmallow.fields.Nested",
"pytest.mark.parametrize",
"lambda_handlers.response.cors",
"marshmallow.fields.Dict",
"marshmallow.fields.Integer"
] | [((3768, 3892), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""body,expected"""', '[(\'{"user_id": 1}\', \'{"user_id": 1}\'), (\'{"user_id": "1"}\', \'{"user_id": 1}\')]'], {}), '(\'body,expected\', [(\'{"user_id": 1}\',\n \'{"user_id": 1}\'), (\'{"user_id": "1"}\', \'{"user_id": 1}\')])\n', (3791, 3892), False, 'import pytest\n'), ((271, 285), 'lambda_handlers.handlers.http_handler', 'http_handler', ([], {}), '()\n', (283, 285), False, 'from lambda_handlers.handlers import http_handler\n'), ((1657, 1703), 'lambda_handlers.handlers.http_handler', 'http_handler', ([], {'input_format': 'PipeFormatter.parse'}), '(input_format=PipeFormatter.parse)\n', (1669, 1703), False, 'from lambda_handlers.handlers import http_handler\n'), ((2706, 2757), 'lambda_handlers.handlers.http_handler', 'http_handler', ([], {'output_format': 'PipeFormatter.serialize'}), '(output_format=PipeFormatter.serialize)\n', (2718, 2757), False, 'from lambda_handlers.handlers import http_handler\n'), ((3258, 3287), 'marshmallow.fields.Integer', 'fields.Integer', ([], {'required': '(True)'}), '(required=True)\n', (3272, 3287), False, 'from marshmallow import Schema, fields\n'), ((3346, 3386), 'marshmallow.fields.Nested', 'fields.Nested', (['UserSchema'], {'required': '(True)'}), '(UserSchema, required=True)\n', (3359, 3386), False, 'from marshmallow import Schema, fields\n'), ((3409, 3435), 'marshmallow.fields.Dict', 'fields.Dict', ([], {'required': '(True)'}), '(required=True)\n', (3420, 3435), False, 'from marshmallow import Schema, fields\n'), ((3461, 3490), 'marshmallow.fields.Integer', 'fields.Integer', ([], {'required': '(True)'}), '(required=True)\n', (3475, 3490), False, 'from marshmallow import Schema, fields\n'), ((2213, 2256), 'lambda_handlers.response.cors', 'cors', ([], {'origin': '"""localhost"""', 'credentials': '(False)'}), "(origin='localhost', credentials=False)\n", (2217, 2256), False, 'from lambda_handlers.response import cors\n')] |
# Copyright 2008 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from robot.utils.asserts import assert_equals, assert_none
from robotide.validators import ArgumentsValidator
class Test(unittest.TestCase):
validate = ArgumentsValidator()._validate
def test_valid_arguments_validation(self):
for arg in ["${arg}", "${arg}|${arg2}", "${arg}=", "${arg}=default val",
"${a} | ${b}=d | ${c}=\\| | ${d}=", "@{list}",
"${a} | ${b} | ${c}=1 | ${d}=2 | ${e}=3 | @{f}"]:
assert_none(self.validate(arg))
def test_invalid_arguments_validation(self):
for arg in ["arg", "@{list}=", "@{list}=fooness"]:
assert_equals(self.validate(arg),
"Invalid argument syntax '%s'" % arg)
for arg, err in [("|${a}", ""), ("${a} | ${a2} | invalid", "invalid")]:
assert_equals(self.validate(arg),
"Invalid argument syntax '%s'" % err)
def test_list_arg_not_last(self):
for arg in ["@{list} | ${arg}", "@{list} | ${arg} | @{list2}",
"@{list} | ${arg}=foo", "@{list} | @{list2}"]:
assert_equals(self.validate(arg),
"List variable allowed only as the last argument")
def test_req_arg_after_defaults(self):
for arg in ["${a}=default | ${a2}", "${a} | ${b}=default | ${c}"]:
assert_equals(self.validate(arg),
"Required arguments not allowed after arguments "
"with default values.")
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"robotide.validators.ArgumentsValidator"
] | [((2159, 2174), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2172, 2174), False, 'import unittest\n'), ((783, 803), 'robotide.validators.ArgumentsValidator', 'ArgumentsValidator', ([], {}), '()\n', (801, 803), False, 'from robotide.validators import ArgumentsValidator\n')] |
from socket import *
import json
import JsonParser
import DBHelper
conn = socket(AF_INET, SOCK_STREAM)
conn.bind(('',4687))
conn.listen(40)
while True:
new_conn, addr = conn.accept()
msg = new_conn.recv(1024).decode()
obj = json.loads(msg)
if obj['instruction'] == 'reg':
result = DBHelper.registry(obj['userid'], obj['passwd'], obj['nickname'])
new_conn.send(result.encode())
elif obj['instruction'] == 'add_note':
result = DBHelper.insert_note(obj['userid'], obj['passwd'], obj['noteid'], obj['title'], obj['folderid'], obj['value'])
new_conn.send(result.encode())
elif obj['instruction'] == 'del_note':
result = DBHelper.delete_note(obj['userid'], obj['passwd'], obj['noteid'])
new_conn.send(result.encode())
elif obj['instruction'] == 'sel_note':
result = DBHelper.select_single_note(obj['userid'], obj['passwd'], obj['noteid'])
if isinstance(result, str):
new_conn.send(result.encode())
else:
new_conn.send(JsonParser.dump_single_note(result).encode())
elif obj['instruction'] == 'up_note':
result = DBHelper.update_note(obj['userid'], obj['passwd'], obj['noteid'], obj['title'], obj['folderid'], obj['value'])
new_conn.send(result.encode())
elif obj['instruction'] == 'add_folder':
result = DBHelper.insert_folder(obj['userid'], obj['passwd'], obj['folderid'], obj['foldername'], obj['parentid'])
new_conn.send(result.encode())
elif obj['instruction'] == 'del_folder':
result = DBHelper.delete_folder(obj['userid'], obj['passwd'], obj['folderid'])
new_conn.send(result.encode())
elif obj['instruction'] == 'up_folder':
result = DBHelper.update_folder(obj['userid'], obj['passwd'], obj['folderid'], obj['foldername'], obj['parentid'])
new_conn.send(result.encode())
elif obj['instruction'] == 'sel_notes':
result = DBHelper.select_all_notes(obj['userid'], obj['passwd'])
if isinstance(result, str):
new_conn.send(result.encode())
else:
new_conn.send(JsonParser.dump_all_notes(result).encode())
elif obj['instruction'] == 'sel_folders':
result = DBHelper.select_all_folders(obj['userid'], obj['passwd'])
if isinstance(result, str):
new_conn.send(result.encode())
else:
new_conn.send(JsonParser.dump_all_folder(result).encode())
new_conn.close()
| [
"json.loads",
"DBHelper.update_folder",
"JsonParser.dump_single_note",
"DBHelper.select_all_notes",
"DBHelper.insert_folder",
"DBHelper.insert_note",
"DBHelper.select_single_note",
"DBHelper.select_all_folders",
"DBHelper.registry",
"DBHelper.update_note",
"DBHelper.delete_folder",
"JsonParser... | [((239, 254), 'json.loads', 'json.loads', (['msg'], {}), '(msg)\n', (249, 254), False, 'import json\n'), ((308, 372), 'DBHelper.registry', 'DBHelper.registry', (["obj['userid']", "obj['passwd']", "obj['nickname']"], {}), "(obj['userid'], obj['passwd'], obj['nickname'])\n", (325, 372), False, 'import DBHelper\n'), ((472, 587), 'DBHelper.insert_note', 'DBHelper.insert_note', (["obj['userid']", "obj['passwd']", "obj['noteid']", "obj['title']", "obj['folderid']", "obj['value']"], {}), "(obj['userid'], obj['passwd'], obj['noteid'], obj[\n 'title'], obj['folderid'], obj['value'])\n", (492, 587), False, 'import DBHelper\n'), ((682, 747), 'DBHelper.delete_note', 'DBHelper.delete_note', (["obj['userid']", "obj['passwd']", "obj['noteid']"], {}), "(obj['userid'], obj['passwd'], obj['noteid'])\n", (702, 747), False, 'import DBHelper\n'), ((847, 919), 'DBHelper.select_single_note', 'DBHelper.select_single_note', (["obj['userid']", "obj['passwd']", "obj['noteid']"], {}), "(obj['userid'], obj['passwd'], obj['noteid'])\n", (874, 919), False, 'import DBHelper\n'), ((1144, 1259), 'DBHelper.update_note', 'DBHelper.update_note', (["obj['userid']", "obj['passwd']", "obj['noteid']", "obj['title']", "obj['folderid']", "obj['value']"], {}), "(obj['userid'], obj['passwd'], obj['noteid'], obj[\n 'title'], obj['folderid'], obj['value'])\n", (1164, 1259), False, 'import DBHelper\n'), ((1356, 1466), 'DBHelper.insert_folder', 'DBHelper.insert_folder', (["obj['userid']", "obj['passwd']", "obj['folderid']", "obj['foldername']", "obj['parentid']"], {}), "(obj['userid'], obj['passwd'], obj['folderid'], obj[\n 'foldername'], obj['parentid'])\n", (1378, 1466), False, 'import DBHelper\n'), ((1563, 1632), 'DBHelper.delete_folder', 'DBHelper.delete_folder', (["obj['userid']", "obj['passwd']", "obj['folderid']"], {}), "(obj['userid'], obj['passwd'], obj['folderid'])\n", (1585, 1632), False, 'import DBHelper\n'), ((1039, 1074), 'JsonParser.dump_single_note', 'JsonParser.dump_single_note', 
(['result'], {}), '(result)\n', (1066, 1074), False, 'import JsonParser\n'), ((1733, 1843), 'DBHelper.update_folder', 'DBHelper.update_folder', (["obj['userid']", "obj['passwd']", "obj['folderid']", "obj['foldername']", "obj['parentid']"], {}), "(obj['userid'], obj['passwd'], obj['folderid'], obj[\n 'foldername'], obj['parentid'])\n", (1755, 1843), False, 'import DBHelper\n'), ((1939, 1994), 'DBHelper.select_all_notes', 'DBHelper.select_all_notes', (["obj['userid']", "obj['passwd']"], {}), "(obj['userid'], obj['passwd'])\n", (1964, 1994), False, 'import DBHelper\n'), ((2221, 2278), 'DBHelper.select_all_folders', 'DBHelper.select_all_folders', (["obj['userid']", "obj['passwd']"], {}), "(obj['userid'], obj['passwd'])\n", (2248, 2278), False, 'import DBHelper\n'), ((2114, 2147), 'JsonParser.dump_all_notes', 'JsonParser.dump_all_notes', (['result'], {}), '(result)\n', (2139, 2147), False, 'import JsonParser\n'), ((2398, 2432), 'JsonParser.dump_all_folder', 'JsonParser.dump_all_folder', (['result'], {}), '(result)\n', (2424, 2432), False, 'import JsonParser\n')] |
#!/usr/bin/env python3
"""
**
* @file convertFASTQ2SAM.py
* @brief Convert SimCT FASTQ to SAM
* @copyright © 2019 Novocraft Technologies Sdn Bhd. All rights reserved.
* @author <NAME>.
* @license This script is released under MIT License
* @date 26/04/2019
**
"""
import argparse
import gzip
def convert(_path2Reads1, _path2Reads2, _path2output):
"""
Parse input SimCT FASTQ files ling by line, and write output SAM
:param _path2Reads1: path to first mates FASTQ file
:param _path2Reads2: path to second mates FASTQ file
:param _path2output: path to output SAM file
:return: non
"""
inputFile1 = gzip.open(_path2Reads1, "r")
inputFile2 = gzip.open(_path2Reads2, "r")
outputFile = open(_path2output, "w")
while True:
read1 = [inputFile1.readline().decode('ascii'),
inputFile1.readline().decode('ascii'),
inputFile1.readline(),
inputFile1.readline().decode('ascii')]
read2 = [inputFile2.readline().decode('ascii'),
inputFile2.readline().decode('ascii'),
inputFile2.readline(),
inputFile2.readline().decode('ascii')]
if read1[0] == "":
break
fields = read1[0].split(':')
pairInfo = fields[1].split(';')
readOneInfo = pairInfo[0].split(',')
readTwoInfo = pairInfo[1].split(',')
chromsome1 = readOneInfo[0]
chromsome2 = readTwoInfo[0]
strand1 = readOneInfo[2]
strand2 = readTwoInfo[2]
startPos1 = int(readOneInfo[1]) + 1
startPos2 = int(readTwoInfo[1]) + 1
distance = startPos2 + len(read2[1]) - startPos1
CIGAR1 = readOneInfo[3].split(':')[0]
CIGAR2 = readTwoInfo[3]
flag1 = ""
flag2 = ""
if CIGAR2[len(CIGAR2) - 3] == '/':
CIGAR2 = CIGAR2[0:-3]
if strand1 == "+" and strand2 == "-":
flag1 = "99"
flag2 = "147"
elif strand1 == "-" and strand2 == "+":
flag1 = "83"
flag2 = "163"
else:
assert(True == False)
outputFile.write(read1[0][1:-1] +
"\t" +
flag1 +
"\t" +
chromsome1 +
"\t" +
str(startPos1) +
"\t60\t" +
CIGAR1 +
"\t=\t" +
str(startPos2) +
"\t" +
str(distance) +
"\t" +
read1[1][:-1] +
"\t" +
read1[3])
outputFile.write(read2[0][1:-1] +
"\t" +
flag2 +
"\t" +
chromsome2 +
"\t" +
str(startPos2) +
"\t60\t" +
CIGAR2 +
"\t=\t" +
str(startPos1) +
"\t" +
str(-distance) +
"\t" +
read2[1][:-1] +
"\t" +
read2[3])
inputFile2.close()
inputFile1.close()
outputFile.close()
parser = argparse.ArgumentParser(description='Convert SimCT FASTQ to SAM')
parser.add_argument('-1',
metavar='',
type=str,
# nargs=1,
dest="input1",
action="store",
required=True,
help="Path to first mates FASTQ file")
parser.add_argument('-2',
metavar='',
type=str,
# nargs=1,
dest="input2",
action="store",
required=True,
help="Path to second mates FASTQ file")
parser.add_argument('-o',
metavar='',
type=str,
# nargs=1,
dest="output",
action="store",
required=True,
help="Path to output file")
if __name__ == '__main__':
args = parser.parse_args()
convert(args.input1, args.input2, args.output) | [
"argparse.ArgumentParser",
"gzip.open"
] | [((3499, 3564), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Convert SimCT FASTQ to SAM"""'}), "(description='Convert SimCT FASTQ to SAM')\n", (3522, 3564), False, 'import argparse\n'), ((699, 727), 'gzip.open', 'gzip.open', (['_path2Reads1', '"""r"""'], {}), "(_path2Reads1, 'r')\n", (708, 727), False, 'import gzip\n'), ((745, 773), 'gzip.open', 'gzip.open', (['_path2Reads2', '"""r"""'], {}), "(_path2Reads2, 'r')\n", (754, 773), False, 'import gzip\n')] |
import json
import websocket
def get_uri(game=""):
uri = f"ws://127.0.0.1:8000/{game}/"
return uri
def connect(uri):
connection = websocket.WebSocketApp(uri, on_error=on_error, on_close=on_close)
return connection
def on_error(connection, error):
print(error)
def on_close(connection):
print(f"Closed connection")
def on_open(connection):
def run(*args):
pass
def move(connection, direction):
if direction:
connection.send(json.dumps({"direction": direction,},))
| [
"json.dumps",
"websocket.WebSocketApp"
] | [((146, 211), 'websocket.WebSocketApp', 'websocket.WebSocketApp', (['uri'], {'on_error': 'on_error', 'on_close': 'on_close'}), '(uri, on_error=on_error, on_close=on_close)\n', (168, 211), False, 'import websocket\n'), ((484, 520), 'json.dumps', 'json.dumps', (["{'direction': direction}"], {}), "({'direction': direction})\n", (494, 520), False, 'import json\n')] |
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
Changes made:
window is now 3D with size (1, 1, win_size, win_size, win_si`ze)
All convolutions for mean and variance comp. are 3D Conv
"""
import torch
from torch import nn
import torch.nn.functional as F
class SSIMLoss(nn.Module):
"""
SSIM loss module.
"""
def __init__(self, win_size: int = 7, k1: float = 0.01, k2: float = 0.03):
"""
Args:
win_size: Window size for SSIM calculation.
k1: k1 parameter for SSIM calculation.
k2: k2 parameter for SSIM calculation.
"""
super().__init__()
self.win_size = win_size
self.k1, self.k2 = k1, k2
NP = win_size**2
self.cov_norm = NP / (NP - 1)
def forward(self, X: torch.Tensor, Y: torch.Tensor, data_range: torch.Tensor):
input_dimension = X.ndim - 2
if input_dimension == 2:
conv = F.conv2d
window_size = (self.win_size, self.win_size)
elif input_dimension == 3:
conv = F.conv3d
window_size = (self.win_size, self.win_size, self.win_size)
else:
raise f"Unsupported dim {input_dimension} for provided input"
if not hasattr(self, "w"):
self.register_buffer("w", torch.ones(1, 1, *window_size) / self.win_size**2)
data_range = data_range[:, None, None, None]
C1 = (self.k1 * data_range)**2
C2 = (self.k2 * data_range)**2
ux = conv(X, self.w) # typing: ignore
uy = conv(Y, self.w) #
uxx = conv(X * X, self.w)
uyy = conv(Y * Y, self.w)
uxy = conv(X * Y, self.w)
vx = self.cov_norm * (uxx - ux * ux)
vy = self.cov_norm * (uyy - uy * uy)
vxy = self.cov_norm * (uxy - ux * uy)
A1, A2, B1, B2 = (
2 * ux * uy + C1,
2 * vxy + C2,
ux**2 + uy**2 + C1,
vx + vy + C2,
)
D = B1 * B2
S = (A1 * A2) / D
return 1 - S.mean()
if __name__ == "__main__":
import SimpleITK as sitk
import numpy as np
from torchvision.utils import save_image
from scipy.ndimage.filters import gaussian_filter as blur
MIN_B = -1000
MAX_B = 2000
img_a = sitk.ReadImage('/repos/Maastro/nki_cervix/train/21403922/CT/0/CT.nrrd')
img_b = sitk.ReadImage('/repos/Maastro/nki_cervix/train/21403922/CT/0/CT.nrrd')
array_a = np.clip(sitk.GetArrayFromImage(img_a), MIN_B, MAX_B) - MIN_B
array_b = np.clip(sitk.GetArrayFromImage(img_b), MIN_B, MAX_B) - MIN_B
# array_a = (array_a - MIN_B) / (MAX_B - MIN_B)
# array_b = (array_b - MIN_B) / (MAX_B - MIN_B)
tensor_a = torch.Tensor(array_a).unsqueeze(dim=0).unsqueeze(dim=0)
tensor_b = torch.Tensor(array_b).unsqueeze(dim=0).unsqueeze(dim=0)
print(f"Tensor A max: {tensor_a.max()} min: {tensor_a.min()}")
print(f"Tensor B max: {tensor_b.max()} min: {tensor_b.min()}")
ssim = SSIMLoss()
ssim_val = ssim(tensor_a, tensor_b, data_range=torch.full((1, 1, 1, 1), tensor_a.max()))
print(f"Calculated SSIM Value is : {ssim_val}")
| [
"SimpleITK.ReadImage",
"torch.Tensor",
"SimpleITK.GetArrayFromImage",
"torch.ones"
] | [((2382, 2453), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['"""/repos/Maastro/nki_cervix/train/21403922/CT/0/CT.nrrd"""'], {}), "('/repos/Maastro/nki_cervix/train/21403922/CT/0/CT.nrrd')\n", (2396, 2453), True, 'import SimpleITK as sitk\n'), ((2466, 2537), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['"""/repos/Maastro/nki_cervix/train/21403922/CT/0/CT.nrrd"""'], {}), "('/repos/Maastro/nki_cervix/train/21403922/CT/0/CT.nrrd')\n", (2480, 2537), True, 'import SimpleITK as sitk\n'), ((2561, 2590), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['img_a'], {}), '(img_a)\n', (2583, 2590), True, 'import SimpleITK as sitk\n'), ((2636, 2665), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['img_b'], {}), '(img_b)\n', (2658, 2665), True, 'import SimpleITK as sitk\n'), ((1418, 1448), 'torch.ones', 'torch.ones', (['(1)', '(1)', '*window_size'], {}), '(1, 1, *window_size)\n', (1428, 1448), False, 'import torch\n'), ((2810, 2831), 'torch.Tensor', 'torch.Tensor', (['array_a'], {}), '(array_a)\n', (2822, 2831), False, 'import torch\n'), ((2881, 2902), 'torch.Tensor', 'torch.Tensor', (['array_b'], {}), '(array_b)\n', (2893, 2902), False, 'import torch\n')] |
import os
import subprocess
from audio_length import escape_characters
import argparse
filetypes_to_convert=[".mp3",".m4a", ".webm"]
def convert(filename):
filename_extensionless, extension = os.path.splitext(filename)
new_filename = "".join([filename_extensionless, ".wav"])
if not os.path.exists(new_filename):
command = "ffmpeg -i \"{}\" -ac 1 \"{}\"".format(escape_characters(filename), escape_characters(new_filename))
subprocess.call(command, shell=True)
def walk_path(path):
for root, dirs, files in os.walk(path):
for sound_file in files:
_, extension = os.path.splitext(sound_file)
#print sound_file
if extension in filetypes_to_convert:
yield os.path.join(root, sound_file)
else:
continue
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--path', dest='path', help='Directory for the files to convert', required=True)
args = parser.parse_args()
for sound_file in walk_path(args.path):
convert(sound_file)
| [
"os.path.exists",
"argparse.ArgumentParser",
"os.path.splitext",
"os.path.join",
"audio_length.escape_characters",
"subprocess.call",
"os.walk"
] | [((196, 222), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (212, 222), False, 'import os\n'), ((527, 540), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (534, 540), False, 'import os\n'), ((803, 828), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (826, 828), False, 'import argparse\n'), ((291, 319), 'os.path.exists', 'os.path.exists', (['new_filename'], {}), '(new_filename)\n', (305, 319), False, 'import os\n'), ((440, 476), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (455, 476), False, 'import subprocess\n'), ((374, 401), 'audio_length.escape_characters', 'escape_characters', (['filename'], {}), '(filename)\n', (391, 401), False, 'from audio_length import escape_characters\n'), ((403, 434), 'audio_length.escape_characters', 'escape_characters', (['new_filename'], {}), '(new_filename)\n', (420, 434), False, 'from audio_length import escape_characters\n'), ((592, 620), 'os.path.splitext', 'os.path.splitext', (['sound_file'], {}), '(sound_file)\n', (608, 620), False, 'import os\n'), ((703, 733), 'os.path.join', 'os.path.join', (['root', 'sound_file'], {}), '(root, sound_file)\n', (715, 733), False, 'import os\n')] |
###------------------------------------------------------###
### Replay and Remember Memory Class ###
###------------------------------------------------------###
import numpy as np
from hyperparameters import *
# expand dimensions to (1, 84, 84, 5) from (84, 84, 5)
# normalize 0-255 -> 0-1 to reduce exploding gradient
def normalize_states(current_frame_history):
return current_frame_history.astype('float32') / 255.
class ReplayMemory:
def __init__(self, memory_size, state_size, action_size):
# set the state size, HEIGHT : default 84px
self.state_height = state_size[0]
# set the state size, WIDTH : default 84px
self.state_width = state_size[1]
# set the state size, DEPTH : default 4 for discrete frame sets, for 4 frames
# 5 with frame windows
self.state_depth = state_size[2]
# set the action size, 3 actions, minus the first no-op
self.action_size = action_size
# initial size
self.size = 0
# set the max size of the remember and replay memory
self.maxsize = memory_size
# default current index
self.current_index = 0
if hp['DISCRETE_FRAMING']:
# create the current states of the game (N, 64, 64, 4)
self.current_states = np.zeros([memory_size, self.state_height, self.state_width, self.state_depth], dtype=np.uint8)
# create the next states of the game (N, 64, 64, 5)
self.next_states = np.zeros([memory_size, self.state_height, self.state_width, self.state_depth], dtype=np.uint8)
# used if using frame sliding
else:
self.states = np.zeros([memory_size, self.state_height, self.state_width, self.state_depth], dtype=np.uint8)
# reward array (N)
self.reward = np.zeros([memory_size], dtype=np.uint8)
# integer action
self.action = [0]*memory_size
# Boolean (terminal transition?)
self.lost_life = [False]*memory_size
def remember_discrete(self, current_states, action, reward, next_states, lost_life):
# Stores a single memory item
self.current_states[self.current_index,:] = current_states
self.next_states[self.current_index,:] = next_states
# get the rest of the items
self.action[self.current_index] = action
self.reward[self.current_index] = reward
self.lost_life[self.current_index] = lost_life
# offset the current index
self.current_index = (self.current_index + 1) % self.maxsize
# increase the size
self.size = max(self.current_index,self.size)
def replay_discrete(self, model, target_model):
# Run replay!
# set the number of samples to train on
num_samples = hp['REPLAY_ITERATIONS']
# set the sample size out of the memory bank
sample_size = hp['BATCH_SIZE']
# discount rate
gamma = hp['GAMMA']
# show the learning fit
show_fit = hp['SHOW_FIT']
# Can't train if we don't yet have enough samples to begin with...
if self.size < sample_size:
return
# number of replays
for i in range(num_samples):
# Select sample_size memory indices from the whole set
current_sample = np.random.choice(self.size, sample_size, replace=False)
# Slice memory into training sample
# current state is frames [0, 1, 2, 3]
# and normalize states [0,1] instead of 0-255
current_states = normalize_states(self.current_states[current_sample, :, :, :])
# next_state is frames [1, 2, 3, 4]
# and normalize states [0,1] instead of 0-255
next_states = normalize_states(self.next_states[current_sample, :, :, :])
# get the rest of the items from memory
actions = [self.action[j] for j in current_sample]
reward = self.reward[current_sample]
lost_lives = [self.lost_life[j] for j in current_sample]
# Obtain model's current Q-values
model_targets = model.predict(current_states)
# Create targets from argmax(Q(s+1,a+1))
# Use the target model!
targets = reward + gamma * np.amax(target_model.predict(next_states), axis=1)
# Absorb the reward on terminal state-action transitions
targets[lost_lives] = reward[lost_lives]
# Update just the relevant parts of the model_target vector...
model_targets[range(sample_size), actions] = targets
# Current State: (32, 84, 84, 4)
# Model Targets: (32, 4)
# Update the weights accordingly
model.fit(current_states, model_targets,
epochs=1 ,verbose=show_fit, batch_size=sample_size)
def remember_frame_sliding(self, states, action, reward, lost_life):
# Stores a single memory item
self.states[self.current_index,:] = states
# get the rest of the items
self.action[self.current_index] = action
self.reward[self.current_index] = reward
self.lost_life[self.current_index] = lost_life
# offset the current index
self.current_index = (self.current_index + 1) % self.maxsize
# increase the size
self.size = max(self.current_index,self.size)
def replay_slidding(self, model, target_model):
# Run replay!
# set the number of samples to train on
num_samples = hp['REPLAY_ITERATIONS']
# set the sample size out of the memory bank
sample_size = hp['BATCH_SIZE']
# discount rate
gamma = hp['GAMMA']
# show the learning fit
show_fit = hp['SHOW_FIT']
# Can't train if we don't yet have enough samples to begin with...
if self.size < sample_size:
return
# number of replays
for i in range(num_samples):
# Select sample_size memory indices from the whole set
current_sample = np.random.choice(self.size, sample_size, replace=False)
# Slice memory into training sample
# current state is frames [0, 1, 2, 3]
# and normalize states [0,1] instead of 0-255
current_states = normalize_states(self.states[current_sample, :, :, :hp['FRAME_BATCH_SIZE']])
# next_state is frames [1, 2, 3, 4]
# and normalize states [0,1] instead of 0-255
next_states = normalize_states(self.states[current_sample, :, :, 1:])
# get the rest of the items from memory
actions = [self.action[j] for j in current_sample]
reward = self.reward[current_sample]
lost_lives = [self.lost_life[j] for j in current_sample]
# Obtain model's current Q-values
model_targets = model.predict(current_states)
# Create targets from argmax(Q(s+1,a+1))
# Use the target model
targets = reward + gamma * np.amax(target_model.predict(next_states), axis=1)
# Absorb the reward on terminal state-action transitions
targets[lost_lives] = reward[lost_lives]
# Update just the relevant parts of the model_target vector...
model_targets[range(sample_size), actions] = targets
# Current State: (32, 84, 84, 4)
# Model Targets: (32, 4)
# Update the weights accordingly
model.fit(current_states, model_targets,
epochs=1 ,verbose=show_fit, batch_size=sample_size)
| [
"numpy.random.choice",
"numpy.zeros"
] | [((1868, 1907), 'numpy.zeros', 'np.zeros', (['[memory_size]'], {'dtype': 'np.uint8'}), '([memory_size], dtype=np.uint8)\n', (1876, 1907), True, 'import numpy as np\n'), ((1338, 1437), 'numpy.zeros', 'np.zeros', (['[memory_size, self.state_height, self.state_width, self.state_depth]'], {'dtype': 'np.uint8'}), '([memory_size, self.state_height, self.state_width, self.\n state_depth], dtype=np.uint8)\n', (1346, 1437), True, 'import numpy as np\n'), ((1541, 1640), 'numpy.zeros', 'np.zeros', (['[memory_size, self.state_height, self.state_width, self.state_depth]'], {'dtype': 'np.uint8'}), '([memory_size, self.state_height, self.state_width, self.\n state_depth], dtype=np.uint8)\n', (1549, 1640), True, 'import numpy as np\n'), ((1723, 1822), 'numpy.zeros', 'np.zeros', (['[memory_size, self.state_height, self.state_width, self.state_depth]'], {'dtype': 'np.uint8'}), '([memory_size, self.state_height, self.state_width, self.\n state_depth], dtype=np.uint8)\n', (1731, 1822), True, 'import numpy as np\n'), ((3446, 3501), 'numpy.random.choice', 'np.random.choice', (['self.size', 'sample_size'], {'replace': '(False)'}), '(self.size, sample_size, replace=False)\n', (3462, 3501), True, 'import numpy as np\n'), ((6335, 6390), 'numpy.random.choice', 'np.random.choice', (['self.size', 'sample_size'], {'replace': '(False)'}), '(self.size, sample_size, replace=False)\n', (6351, 6390), True, 'import numpy as np\n')] |
import numpy as np
import scipy.stats as stat
from utils import dichotomic_search
""" Implementation of last particle variant """
def ImportanceSplittingLp(gen,kernel,h,tau=0,N=100,s=0.1,decay=0.9,T = 20, accept_ratio = 0.9,
alpha_est = 0.95, alpha_test=0.99,verbose=1, gain_thresh=0.01, check_every=3, p_c = 10**(-20),n_max = int(10**6),
reject_forget_rate =0, gain_forget_rate=0, reject_thresh=0.005):
    """
    Importance splitting "last particle" estimator, i.e. importance splitting with K=N-1.

    At each iteration the single worst-scoring particle is killed and replaced by a
    clone of a randomly chosen survivor, refreshed by T kernel moves that are only
    accepted while the score stays above the current threshold. The kernel strength
    `s` is adapted online from the observed rejection rate and from the threshold's
    relative progress. The loop runs at most m iterations, where m is chosen (by
    dichotomic search on a Gamma survival function) so that reaching iteration m
    without hitting `tau` certifies p < p_c at confidence level `alpha_test`.

    Args:
        gen: generator of iid samples X_i [fun]
        kernel: mixing kernel invariant to f_X [fun]
        h: score function from gaussian vector [fun]
        tau: threshold; the rare event is defined as h(X)>tau [1x1] (0)
        N: number of samples [1x1] (100)
        s: strength of the kernel [1x1] (0.1)
        decay: multiplicative decay (or 1/decay gain) applied to s [1x1] (0.9)
        T: number of repetitions of the mixing kernel [1x1] (20)
        accept_ratio: lower bound on the accept ratio before s is decayed [1x1] (0.9)
        alpha_est: confidence level of the returned interval [1x1] (0.95)
        alpha_test: confidence level of the "p<p_c" hypothesis test [1x1] (0.99)
        verbose: level of verbosity [1x1] (1)
        gain_thresh: minimal relative threshold progress before s is boosted (0.01)
        check_every: print diagnostics only every `check_every` iterations (3)
        p_c: probability level certified when tau is never reached (1e-20)
        n_max: max number of iterations [1x1] (1e6)
        reject_forget_rate: forgetting rate of the rejection-rate moving average (0)
        gain_forget_rate: forgetting rate of the gain moving average (0)
        reject_thresh: rejection rate below which s may be boosted (0.005)
    Returns:
        P_est: estimated probability (p_c when the level was never reached)
        s_out: a dictionary containing additional data
            -s_out['Var_est']: estimated variance (None when only certifying)
            -s_out['CI_est']: estimated confidence interval
            -s_out['Xrare']: examples of the rare event (None when only certifying)
            -s_out['Cert']: True when the run only certifies p<p_c
    """
    # Internals
    q = -stat.norm.ppf((1-alpha_est)/2) # gaussian quantile
    #d =gen(1).shape[-1] # dimension of the random vectors
    k = 1 # Number of iterations
    p = (N-1)/N
    # P(Gamma(a=y, scale=1/N) > -log(p_c)): chance of surviving y iterations
    confidence_level_m = lambda y :stat.gamma.sf(-np.log(p_c),a=y, scale =1/N)
    m, _ = dichotomic_search(f = confidence_level_m, a=100, b=n_max, thresh=alpha_test)
    m = int(m)+1
    if verbose:
        print(f"Starting Last Particle algorithm with {m}, to certify p<p_c={p_c}, with confidence level alpha ={1-alpha_test}.")
    if m>=n_max:
        raise AssertionError(f"Confidence level requires more than n_max={n_max} iterations... increase n_max ?")
    tau_j = -np.inf
    P_est = 0
    Var_est = 0
    CI_est = np.zeros((2))
    kernel_pass=0
    Count_accept = 0
    check=0
    ## Init
    # step A0: generate & compute scores
    X = gen(N) # generate N samples
    SX = h(X) # compute their scores
    Count_h = N # Number of calls to function h
    reject_rate = 0
    avg_gain=0
    #step B: find new threshold
    ## While
    while (k<=m):
        #find new threshold
        i_dead = np.argmin(SX,axis = None) # index of the worst (lowest-score) particle
        #print(SX[i_dead], tau_j )
        # relative progress of the threshold since the previous iteration
        if tau_j!=-np.inf:
            gain = np.abs((SX[i_dead]-tau_j)/tau_j)
        else:
            gain=0
        gamma = 1+gain_forget_rate*(k-1) # weight of the newest gain observation
        avg_gain = (1-gamma/k)*avg_gain + (gamma/k)*gain
        # progress stalled while almost every move is accepted: make the kernel bolder
        if k>1 and avg_gain<gain_thresh and reject_rate<reject_thresh:
            s = s/decay
            if verbose>=1 and check%check_every==0:
                print('Strength of kernel increased!')
                print(f's={s}')
        tau_j = SX[i_dead] # set the threshold to the last particule's score
        if tau_j>tau:
            P_est= p**(k-1)
            break #it is useless to compute new minimum if desired level has already been reached
        if verbose>=1 and check%check_every==0:
            print('Iter = ',k, ' tau_j = ', tau_j, " Calls = ", Count_h)
        check+=1
        # Refresh samples: clone a random survivor (any particle but the dead one)
        i_new = np.random.choice(list(set(range(N))-set([i_dead])))
        z0 = X[i_new,:]
        sz0 = SX[i_new]
        for t in range(T):
            w = kernel(z0,s)
            sw = h(w)
            if sw>=tau_j: # only accept moves that stay above the current threshold
                z0 = w
                sz0 = sw
                Count_accept+=1
        X[i_dead,:] = z0
        SX[i_dead] = sz0
        Count_h+=T
        # exponentially-weighted moving average of the rejection rate
        gamma = T+reject_forget_rate*kernel_pass
        reject_rate = (1-gamma/(kernel_pass+T))*reject_rate + gamma*(1-Count_accept/T)/(kernel_pass+T)
        if check%check_every==0 and verbose>=1:
            print(f'Accept ratio:{Count_accept/T}')
            print(f'Reject rate:{reject_rate}')
        kernel_pass+=T
        # too many rejected moves: make the kernel more conservative
        if reject_rate > (1-accept_ratio):
            s = s*decay
            if verbose>=1 and check%check_every==0:
                print('Strength of kernel diminished!')
                print(f's={s}')
        Count_accept = 0
        k += 1 # increase iteration number
    if tau_j>tau:
        # Level reached: last-particle estimate P = p^(k-1), with its log-normal CI
        Var_est = P_est**2*(P_est**(-1/N)-1)
        CI_est[0] = P_est*np.exp(-q/np.sqrt(N)*np.sqrt(-np.log(P_est)+(q**2)/4/N) - (q**2)/2/N)
        CI_est[1] = P_est*np.exp(q/np.sqrt(N)*np.sqrt(-np.log(P_est)+(q**2)/4/N) - (q**2)/2/N)
        s_out = {'Var_est':Var_est,'CI_est':CI_est,'Iter':k,'Calls':Count_h,'Sample size':N}
        s_out['Cert']=False
        s_out['Xrare'] = X
    else:
        # Level never reached within m iterations: only certify p < p_c
        s_out = {'Var_est':None, 'CI_est':[0,p_c],'Iter':k,'Calls':Count_h,'Sample size':N}
        P_est = p_c
        s_out['Cert']=True
        s_out['Xrare']= None
    return P_est, s_out
def ImportanceSplittingLpBatch(gen,kernel_b,h,h_big,nb_system=5,d=784,tau=0,N=100,s=0.1,decay=0.92,T = 20, accept_ratio = 0.9,
alpha_est = 0.95, alpha_test=0.99,verbose=1, gain_thresh=0.01, check_every=3, p_c = 10**(-20),n_max = int(10**6),
reject_forget_rate =0, gain_forget_rate=0, reject_thresh=0.005,fast_decay=True, fast_d=1):
    """
    Importance splitting last particle estimator (K=N-1) run on several
    independent particle systems in parallel.

    Each of the `nb_system` systems evolves its own population of N particles and
    its own kernel strength (`s_b`). Systems that reach the target level `tau`
    are removed from the batch; `real_indices` keeps track of their original
    indices and their final population is saved into `Xrare`.

    Args:
        gen: generator of iid samples X_i [fun]
        kernel_b: batched mixing kernel invariant to f_X; takes one particle per
            remaining system and the per-system strengths s_b [fun]
        h: batched score function, called as h(w, real_indices) [fun]
        h_big: score function applied to a flat (nb_system*N, d) batch [fun]
        nb_system: number of particle systems run in parallel [1x1] (5)
        d: dimension of the samples [1x1] (784)
        tau: threshold. The rare events are defined as h(X)>tau [1x1] (0)
        N: number of samples per system [1x1] (100)
        s: initial strength of the kernel [1x1] (0.1)
        decay: decay rate of the strength [1x1] (0.92)
        T: number of repetitions of the mixing kernel [1x1] (20)
        accept_ratio: lower bound of accept ratio [1x1] (0.9)
        alpha_est: level of confidence interval [1x1] (0.95)
        alpha_test: level of the "p<p_c" hypothesis test [1x1] (0.99)
        verbose: level of verbosity [1x1] (1)
        gain_thresh: minimal relative threshold progress before boosting s (0.01)
        check_every: print diagnostics only every `check_every` iterations (3)
        p_c: probability level certified when tau is never reached (1e-20)
        n_max: max number of iterations [1x1] (1e6)
        reject_forget_rate: forgetting rate of the rejection moving average (0)
        gain_forget_rate: forgetting rate of the gain moving average (0)
        reject_thresh: rejection rate under which s may be boosted (0.005)
        fast_decay: if True, extra-decay the strength of systems whose T moves
            were all rejected
        fast_d: exponent of that extra decay (1)
    Returns:
        P_est: per-system estimated probabilities
        s_out: a dictionary containing additional per-system data
            -s_out['Var_est']: estimated variances (-1 for unfinished systems)
            -s_out['CI_est']: estimated confidence intervals
            -s_out['Xrare']: examples of the rare event (None if no system finished)
            -s_out['Cert']: nonzero where the run only certifies p<p_c
    """
    q = -stat.norm.ppf((1-alpha_est)/2) # gaussian quantile
    s_b = s*np.ones(nb_system) # one kernel strength per system
    k = 1 # Number of iterations
    p = (N-1)/N
    # P(Gamma(a=y, scale=1/N) > -log(p_c)): chance of surviving y iterations
    confidence_level_m = lambda y :stat.gamma.sf(-np.log(p_c),a=y, scale =1/N)
    m, _ = dichotomic_search(f = confidence_level_m, a=100, b=n_max, thresh=alpha_test)
    m = int(m)+1
    if verbose:
        print(f"Starting Last Particle algorithm with {m}, to certify p<p_c={p_c}, with confidence level alpha ={1-alpha_test}.")
    if m>=n_max:
        raise AssertionError(f"Confidence level requires more than n_max={n_max} iterations... increase n_max ?")
    tau_j = np.array(nb_system*[-np.inf])
    is_done = np.zeros(nb_system)   # 1 where a system reached the level
    done_k = -np.ones(nb_system)    # iteration at which each system finished
    kernel_pass= 0
    Count_accept = np.zeros(nb_system)
    check=0
    X = gen(nb_system*N).reshape((nb_system,N,d)) # generate N*nb_system samples
    SX = h_big(X.reshape((nb_system*N,d))).reshape((nb_system,N)) # compute their scores
    Count_h = nb_system*N # Number of calls to function h
    reject_rate = np.zeros(nb_system)
    avg_gain= np.zeros(nb_system)
    Xrare = -np.ones((nb_system,N,d))
    nb_system_c = nb_system #current number, as systems can get deleted as algorithm goes
    real_indices = np.arange(nb_system) #keeping track of initial systems indices as systems gets deleted
    local_indices = np.arange(nb_system_c)
    while (k<=m):
        #find new threshold
        i_deads = np.argmin(SX,axis = 1) # per-system index of the worst (lowest-score) particle
        #we switch the 'last' particle in terms of score and the first particle as indices go, for simplicity
        tempXs, tempSs = np.array(X[:,0],copy=True), np.array(SX[:,0],copy=True)
        X[:,0], SX[:,0] = X[local_indices,i_deads],SX[local_indices,i_deads]
        X[local_indices,i_deads],SX[local_indices,i_deads] = tempXs, tempSs
        del tempSs, tempXs
        #print(SX[i_dead], tau_j )
        # per-system relative progress of the threshold since last iteration
        if k>1:
            gain = np.abs((SX[local_indices, i_deads]-tau_j[None])/tau_j[None])
        else:
            gain=np.zeros(nb_system_c)
        gamma = 1+gain_forget_rate*(k-1)
        avg_gain = (1-gamma/k)*avg_gain + (gamma/k)*gain
        if k>1:
            # boolean mask (as 0/1): systems whose progress stalled while rejections were rare
            is_too_low = (avg_gain<gain_thresh) * (reject_rate<reject_thresh)
            if is_too_low.sum()>0:
                s_b = s_b/decay*is_too_low+s_b*(1-is_too_low)
                s_b = s_b.reshape(-1)
                if verbose>=1 and check%check_every==0:
                    print('Strengths of kernels updated!')
                    print(f's_b={s_b}')
        tau_j = SX[:,0] # set the threshold to the last particles' scores
        if (tau_j>tau).sum()>0:
            is_over = np.where(tau_j>tau)[0]
            if verbose:
                print(f"System(s):{is_over} reached required level.")
            #we need to kill systems that have reached required level, while taking this into account for the real systems indices
            is_done[real_indices[is_over]],done_k[real_indices[is_over]]=1,k
            if is_done.sum()==nb_system:
                break #if all the systems have reached the final level we can stop the iterations there
            nb_system_c-=len(is_over)
            local_indices = np.arange(nb_system_c)
            Xrare[is_over] = X[is_over]
            # drop the finished systems from every per-system array
            X,SX = np.delete(X,is_over, axis=0),np.delete(SX,is_over, axis=0)
            gain, avg_gain,tau_j = np.delete(gain,is_over), np.delete(avg_gain,is_over), np.delete(tau_j,is_over)
            reject_rate, Count_accept = np.delete(reject_rate,is_over), np.delete(Count_accept,is_over)
            real_indices = np.delete(real_indices,is_over)
            s_b = np.delete(s_b ,is_over)
        if verbose>=1 and check%check_every==0:
            print('Iter = ',k, ' tau_j = ', tau_j, " Calls = ", Count_h)
        check+=1
        # Refresh samples: one random clone per system (index 0 now holds the dead particle)
        i_news = np.random.choice(range(1,N),size=nb_system_c)
        z0s = X[local_indices,i_news]
        sz0s = SX[local_indices,i_news]
        for _ in range(T):
            w = kernel_b(z0s,s_b) #kernel_b must take into account the number of systems and different strengths
            sw = h(w, real_indices)
            is_good_move = sw>=tau_j
            # vectorized accept/reject: keep w where accepted, old clone otherwise
            z0s,sz0s = z0s*(1-is_good_move)[:,None] + is_good_move[:,None]*w, sz0s *(1-is_good_move) + is_good_move*sw
            Count_accept = Count_accept + is_good_move
        X[:,0] = z0s
        SX[:,0] = sz0s
        del z0s, sz0s
        Count_h+=T*nb_system_c
        # exponentially-weighted moving average of the per-system rejection rate
        gamma = T+reject_forget_rate*kernel_pass
        reject_rate = (1-gamma/(kernel_pass+T))*reject_rate + gamma*(1-Count_accept/T)/(kernel_pass+T)
        if check%check_every==0 and verbose>=1:
            print(f'Accept ratios (local averages):{Count_accept/T}')
            print(f'Reject rates (moving averages):{reject_rate}')
        kernel_pass+=T
        is_zero_accept = Count_accept==0
        is_too_high = reject_rate > (1-accept_ratio)
        if is_too_high.sum()>0:
            # decay only the systems that reject too much
            s_b = s_b*decay*is_too_high+s_b*(1-is_too_high)
            s_b = s_b.reshape(-1)
        if fast_decay:
            # extra decay for systems where every one of the T moves was rejected
            s_b = s_b*decay**fast_d*is_zero_accept+(1-is_zero_accept)*s_b
        if verbose>=1 and check%check_every==0:
            print('Strengths of kernel updated!')
            print(f's_b={s_b}')
        Count_accept = np.zeros(nb_system_c)
        k += 1 # increase iteration number
    if is_done.sum()>0:
        # Finished systems get the last-particle estimate p^(done_k-1); the rest get p_c
        P_est = p**(done_k-1)*is_done+(1-is_done)*p_c
        Var_est = is_done*P_est**2*(P_est**(-1/N)-1)-(1-is_done)
        CI_est = np.zeros((nb_system,2))
        CI_est[:,0] = is_done*(P_est*np.exp(-q/np.sqrt(N)*np.sqrt(-np.log(P_est)+(q**2)/4/N) - (q**2)/2/N))
        CI_est[:,1] = is_done*(P_est*np.exp(q/np.sqrt(N)*np.sqrt(-np.log(P_est)+(q**2)/4/N) - (q**2)/2/N)) + (1-is_done)*p_c
        cert_ = 1-is_done
        s_out ={'Var_est':Var_est,'CI_est':CI_est,'Iter':k,'Calls':Count_h,'Sample size':N,'Cert':cert_}
        s_out['Xrare'] = Xrare
    else:
        # No system finished: only certify p < p_c for all of them
        s_out = {'Var_est': -np.ones(nb_system), 'CI_est':np.array(nb_system*[0,p_c]),'Iter':k,'Calls':Count_h,'Sample size':N}
        s_out['Cert']= np.array([True]*nb_system)
        s_out['Xrare']= None
        P_est = np.array(nb_system*[p_c])
    return P_est, s_out
def ImportanceSplitting(gen,kernel,h,tau,N=2000,K=1000,s=1,decay=0.99,T = 30,n_max = 300, alpha = 0.95,
verbose=1, track_rejection=False, rejection_ctrl = False, rej_threshold=0.9, gain_rate = 1.0001,
prog_thresh=0.01):
"""
Importance splitting estimator
Args:
gen: generator of iid samples X_i [fun]
kernel: mixing kernel invariant to f_X [fun]
h: score function [fun]
tau: threshold. The rare event is defined as h(X)>tau [1x1]
N: number of samples [1x1] (2000)
K: number of survivors [1x1] (1000)
s: strength of the the mixing kernel [1x1] (1)
decay: decay rate of the strength of the kernel [1x1] (0.9)
T: number of repetitions of the mixing kernel [1x1] (20)
n_max: max number of iterations [1x1] (200)
alpha: level of confidence interval [1x1] (0.95)
verbose: level of verbosity [1x1] (1)
Returns:
P_est: estimated probability
s_out: a dictionary containing additional data
-s_out['Var_est']: estimated variance
-s_out.['CI_est']: estimated confidence of interval
-s_out.['Xrare']: Examples of the rare event
"""
# Internals
q = -stat.norm.ppf((1-alpha)/2) # gaussian quantile
d =gen(1).shape[-1] # dimension of the random vectors
n = 1 # Number of iterations
## Init
# step A0: generate & compute scores
X = gen(N) # generate N samples
SX = h(X) # compute their scores
Count_h = N # Number of calls to function h
#step B: find new threshold
ind = np.argsort(SX,axis=None)[::-1] # sort in descending order
S_sort= SX[ind]
tau_j = S_sort[K] # set the threshold to (K+1)-th
h_mean = SX.mean()
if verbose>=1:
print('Iter = ',n, ' tau_j = ', tau_j, "h_mean",h_mean, " Calls = ", Count_h)
rejection_rate=0
kernel_pass=0
rejection_rates=[0]
## While
while (n<n_max) and (tau_j<tau):
n += 1 # increase iteration number
if n >=n_max:
raise RuntimeError('The estimator failed. Increase n_max?')
# step C: Keep K highest scores samples in Y
Y = X[ind[0:K],:]
SY = SX[ind[0:K]] # Keep their scores in SY
# step D: refresh samples
Z = np.zeros((N-K,d))
SZ = np.zeros((N-K,1))
for k in range(N-K):
u = np.random.choice(range(K),size=1,replace=False) # pick a sample at random in Y
z0 = Y[u,:]
accept_flag = False
for t in range(T):
w = kernel(z0,s) # propose a refreshed sample
kernel_pass+=1
sw = h(w) # compute its score
Count_h = Count_h + 1
if sw>tau_j: # accept if true
z0 = w
sz0 = sw
accept_flag = True # monitor if accepted
elif track_rejection:
rejection_rate=((kernel_pass-1.)/kernel_pass)*rejection_rate+(1/kernel_pass)
Z[k,:] = z0 # a fresh sample
SZ[k] = sz0 # its score
if rejection_ctrl and rejection_rate>=rej_threshold:
print('Strength of kernel diminished!')
s = s*decay
print(f's={s}')
if not accept_flag:
s = s * decay # decrease the strength of the mixing kernel
# step A: update set X and the scores
X[:K,:] = Y # copy paste the old samples of Y into X
SX[:K] = SY
X[K:N,:] = Z # copy paste the new samples of Z into X
SX[K:N] = SZ
# step B: Find new threshold
ind = np.argsort(SX,axis=None)[::-1] # sort in descending order
S_sort= SX[ind]
new_tau = S_sort[K]
if (new_tau-tau_j)/tau_j<prog_thresh:
s = s*gain_rate
print('Strength of kernel increased!')
print(f's={s}')
tau_j = S_sort[K] # set the threshold to (K+1)-th
h_mean = SX.mean()
if verbose>=1:
print('Iter = ',n, ' tau_j = ', tau_j, "h_mean",h_mean, " Calls = ", Count_h)
if track_rejection:
print(f'Rejection rate: {rejection_rate}')
rejection_rates+=[rejection_rate]
# step E: Last round
K_last = (SX>=tau).sum() # count the nb of score above the target threshold
#Estimation
p = K/N
p_last = K_last/N
P_est = (p**(n-1))*p_last
Var_est = (P_est**2)*((n-1)*(1-p)/p + (1-p_last)/p_last)/N
P_bias = P_est*n*(1-p)/p/N
CI_est = P_est*np.array([1,1]) + q*np.sqrt(Var_est)*np.array([-1,1])
Xrare = X[(SX>=tau).reshape(-1),:]
s_out = {"Var_est":Var_est,"CI_est": CI_est,"N":N,"K":K,"s":s,"decay":decay,"T":T,"Count_h":Count_h,
"P_bias":P_bias,"n":n,"Xrare":Xrare}
if track_rejection:
s_out["rejection_rates"]=np.array(rejection_rates)
s_out["Avg. rejection rate"]=rejection_rate
return P_est,s_out | [
"numpy.abs",
"numpy.sqrt",
"numpy.ones",
"numpy.where",
"numpy.delete",
"numpy.log",
"scipy.stats.norm.ppf",
"utils.dichotomic_search",
"numpy.array",
"numpy.zeros",
"numpy.argsort",
"numpy.argmin",
"numpy.arange"
] | [((2177, 2251), 'utils.dichotomic_search', 'dichotomic_search', ([], {'f': 'confidence_level_m', 'a': '(100)', 'b': 'n_max', 'thresh': 'alpha_test'}), '(f=confidence_level_m, a=100, b=n_max, thresh=alpha_test)\n', (2194, 2251), False, 'from utils import dichotomic_search\n'), ((2611, 2622), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (2619, 2622), True, 'import numpy as np\n'), ((7660, 7734), 'utils.dichotomic_search', 'dichotomic_search', ([], {'f': 'confidence_level_m', 'a': '(100)', 'b': 'n_max', 'thresh': 'alpha_test'}), '(f=confidence_level_m, a=100, b=n_max, thresh=alpha_test)\n', (7677, 7734), False, 'from utils import dichotomic_search\n'), ((8043, 8074), 'numpy.array', 'np.array', (['(nb_system * [-np.inf])'], {}), '(nb_system * [-np.inf])\n', (8051, 8074), True, 'import numpy as np\n'), ((8092, 8111), 'numpy.zeros', 'np.zeros', (['nb_system'], {}), '(nb_system)\n', (8100, 8111), True, 'import numpy as np\n'), ((8188, 8207), 'numpy.zeros', 'np.zeros', (['nb_system'], {}), '(nb_system)\n', (8196, 8207), True, 'import numpy as np\n'), ((8467, 8486), 'numpy.zeros', 'np.zeros', (['nb_system'], {}), '(nb_system)\n', (8475, 8486), True, 'import numpy as np\n'), ((8501, 8520), 'numpy.zeros', 'np.zeros', (['nb_system'], {}), '(nb_system)\n', (8509, 8520), True, 'import numpy as np\n'), ((8669, 8689), 'numpy.arange', 'np.arange', (['nb_system'], {}), '(nb_system)\n', (8678, 8689), True, 'import numpy as np\n'), ((8777, 8799), 'numpy.arange', 'np.arange', (['nb_system_c'], {}), '(nb_system_c)\n', (8786, 8799), True, 'import numpy as np\n'), ((1920, 1954), 'scipy.stats.norm.ppf', 'stat.norm.ppf', (['((1 - alpha_est) / 2)'], {}), '((1 - alpha_est) / 2)\n', (1933, 1954), True, 'import scipy.stats as stat\n'), ((2993, 3017), 'numpy.argmin', 'np.argmin', (['SX'], {'axis': 'None'}), '(SX, axis=None)\n', (3002, 3017), True, 'import numpy as np\n'), ((7426, 7460), 'scipy.stats.norm.ppf', 'stat.norm.ppf', (['((1 - alpha_est) / 2)'], {}), '((1 - alpha_est) / 2)\n', 
(7439, 7460), True, 'import scipy.stats as stat\n'), ((7489, 7507), 'numpy.ones', 'np.ones', (['nb_system'], {}), '(nb_system)\n', (7496, 7507), True, 'import numpy as np\n'), ((8126, 8144), 'numpy.ones', 'np.ones', (['nb_system'], {}), '(nb_system)\n', (8133, 8144), True, 'import numpy as np\n'), ((8534, 8560), 'numpy.ones', 'np.ones', (['(nb_system, N, d)'], {}), '((nb_system, N, d))\n', (8541, 8560), True, 'import numpy as np\n'), ((8873, 8894), 'numpy.argmin', 'np.argmin', (['SX'], {'axis': '(1)'}), '(SX, axis=1)\n', (8882, 8894), True, 'import numpy as np\n'), ((12769, 12790), 'numpy.zeros', 'np.zeros', (['nb_system_c'], {}), '(nb_system_c)\n', (12777, 12790), True, 'import numpy as np\n'), ((13009, 13033), 'numpy.zeros', 'np.zeros', (['(nb_system, 2)'], {}), '((nb_system, 2))\n', (13017, 13033), True, 'import numpy as np\n'), ((13591, 13619), 'numpy.array', 'np.array', (['([True] * nb_system)'], {}), '([True] * nb_system)\n', (13599, 13619), True, 'import numpy as np\n'), ((13663, 13690), 'numpy.array', 'np.array', (['(nb_system * [p_c])'], {}), '(nb_system * [p_c])\n', (13671, 13690), True, 'import numpy as np\n'), ((15217, 15247), 'scipy.stats.norm.ppf', 'stat.norm.ppf', (['((1 - alpha) / 2)'], {}), '((1 - alpha) / 2)\n', (15230, 15247), True, 'import scipy.stats as stat\n'), ((15577, 15602), 'numpy.argsort', 'np.argsort', (['SX'], {'axis': 'None'}), '(SX, axis=None)\n', (15587, 15602), True, 'import numpy as np\n'), ((16266, 16286), 'numpy.zeros', 'np.zeros', (['(N - K, d)'], {}), '((N - K, d))\n', (16274, 16286), True, 'import numpy as np\n'), ((16297, 16317), 'numpy.zeros', 'np.zeros', (['(N - K, 1)'], {}), '((N - K, 1))\n', (16305, 16317), True, 'import numpy as np\n'), ((18885, 18910), 'numpy.array', 'np.array', (['rejection_rates'], {}), '(rejection_rates)\n', (18893, 18910), True, 'import numpy as np\n'), ((3127, 3163), 'numpy.abs', 'np.abs', (['((SX[i_dead] - tau_j) / tau_j)'], {}), '((SX[i_dead] - tau_j) / tau_j)\n', (3133, 3163), True, 'import 
numpy as np\n'), ((9058, 9086), 'numpy.array', 'np.array', (['X[:, 0]'], {'copy': '(True)'}), '(X[:, 0], copy=True)\n', (9066, 9086), True, 'import numpy as np\n'), ((9086, 9115), 'numpy.array', 'np.array', (['SX[:, 0]'], {'copy': '(True)'}), '(SX[:, 0], copy=True)\n', (9094, 9115), True, 'import numpy as np\n'), ((9364, 9428), 'numpy.abs', 'np.abs', (['((SX[local_indices, i_deads] - tau_j[None]) / tau_j[None])'], {}), '((SX[local_indices, i_deads] - tau_j[None]) / tau_j[None])\n', (9370, 9428), True, 'import numpy as np\n'), ((9456, 9477), 'numpy.zeros', 'np.zeros', (['nb_system_c'], {}), '(nb_system_c)\n', (9464, 9477), True, 'import numpy as np\n'), ((10619, 10641), 'numpy.arange', 'np.arange', (['nb_system_c'], {}), '(nb_system_c)\n', (10628, 10641), True, 'import numpy as np\n'), ((11008, 11040), 'numpy.delete', 'np.delete', (['real_indices', 'is_over'], {}), '(real_indices, is_over)\n', (11017, 11040), True, 'import numpy as np\n'), ((11058, 11081), 'numpy.delete', 'np.delete', (['s_b', 'is_over'], {}), '(s_b, is_over)\n', (11067, 11081), True, 'import numpy as np\n'), ((13497, 13527), 'numpy.array', 'np.array', (['(nb_system * [0, p_c])'], {}), '(nb_system * [0, p_c])\n', (13505, 13527), True, 'import numpy as np\n'), ((17675, 17700), 'numpy.argsort', 'np.argsort', (['SX'], {'axis': 'None'}), '(SX, axis=None)\n', (17685, 17700), True, 'import numpy as np\n'), ((18589, 18605), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (18597, 18605), True, 'import numpy as np\n'), ((18626, 18643), 'numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (18634, 18643), True, 'import numpy as np\n'), ((2136, 2147), 'numpy.log', 'np.log', (['p_c'], {}), '(p_c)\n', (2142, 2147), True, 'import numpy as np\n'), ((7619, 7630), 'numpy.log', 'np.log', (['p_c'], {}), '(p_c)\n', (7625, 7630), True, 'import numpy as np\n'), ((10082, 10103), 'numpy.where', 'np.where', (['(tau_j > tau)'], {}), '(tau_j > tau)\n', (10090, 10103), True, 'import numpy as np\n'), ((10701, 
10730), 'numpy.delete', 'np.delete', (['X', 'is_over'], {'axis': '(0)'}), '(X, is_over, axis=0)\n', (10710, 10730), True, 'import numpy as np\n'), ((10730, 10760), 'numpy.delete', 'np.delete', (['SX', 'is_over'], {'axis': '(0)'}), '(SX, is_over, axis=0)\n', (10739, 10760), True, 'import numpy as np\n'), ((10796, 10820), 'numpy.delete', 'np.delete', (['gain', 'is_over'], {}), '(gain, is_over)\n', (10805, 10820), True, 'import numpy as np\n'), ((10821, 10849), 'numpy.delete', 'np.delete', (['avg_gain', 'is_over'], {}), '(avg_gain, is_over)\n', (10830, 10849), True, 'import numpy as np\n'), ((10850, 10875), 'numpy.delete', 'np.delete', (['tau_j', 'is_over'], {}), '(tau_j, is_over)\n', (10859, 10875), True, 'import numpy as np\n'), ((10917, 10948), 'numpy.delete', 'np.delete', (['reject_rate', 'is_over'], {}), '(reject_rate, is_over)\n', (10926, 10948), True, 'import numpy as np\n'), ((10949, 10981), 'numpy.delete', 'np.delete', (['Count_accept', 'is_over'], {}), '(Count_accept, is_over)\n', (10958, 10981), True, 'import numpy as np\n'), ((13468, 13486), 'numpy.ones', 'np.ones', (['nb_system'], {}), '(nb_system)\n', (13475, 13486), True, 'import numpy as np\n'), ((18609, 18625), 'numpy.sqrt', 'np.sqrt', (['Var_est'], {}), '(Var_est)\n', (18616, 18625), True, 'import numpy as np\n'), ((5049, 5059), 'numpy.sqrt', 'np.sqrt', (['N'], {}), '(N)\n', (5056, 5059), True, 'import numpy as np\n'), ((5144, 5154), 'numpy.sqrt', 'np.sqrt', (['N'], {}), '(N)\n', (5151, 5154), True, 'import numpy as np\n'), ((13080, 13090), 'numpy.sqrt', 'np.sqrt', (['N'], {}), '(N)\n', (13087, 13090), True, 'import numpy as np\n'), ((5069, 5082), 'numpy.log', 'np.log', (['P_est'], {}), '(P_est)\n', (5075, 5082), True, 'import numpy as np\n'), ((5164, 5177), 'numpy.log', 'np.log', (['P_est'], {}), '(P_est)\n', (5170, 5177), True, 'import numpy as np\n'), ((13187, 13197), 'numpy.sqrt', 'np.sqrt', (['N'], {}), '(N)\n', (13194, 13197), True, 'import numpy as np\n'), ((13100, 13113), 'numpy.log', 
'np.log', (['P_est'], {}), '(P_est)\n', (13106, 13113), True, 'import numpy as np\n'), ((13207, 13220), 'numpy.log', 'np.log', (['P_est'], {}), '(P_est)\n', (13213, 13220), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-

# --- Database connection settings --------------------------------------
DBN = 'mysql'
HOST = 'localhost'
DB = 'equinecl_db'
USER = 'equinecl_usr'
MODE = 'fastcgi'  # mod_python, fastcgi, mod_wsgi
ENV = 'production'  # production or development
PWD = '<PASSWORD>'

# --- Generic website settings -------------------------------------------
WEBSITE_URL = 'http://www.equineclinic.com.br'
WEBSITE_NAME = 'Empresa de Teste - Porto Alegre - RS'
ADMIN_VERSION = '001b'
AUTO_MODERATE = True  # If True auto approve (status = 'A') a record.
CONTROL_CONCURRENT_ACCESS = False  # If True implements a record lock / concurrent control access in a record.

# --- Active modules for the profiles ------------------------------------
from model import get_active_admin_modules, get_active_oper_modules, get_active_moderate_modules


def _module_choices(rows):
    """Map module rows to (class_name, utf-8 encoded title) tuples."""
    return [(str(row.class_name), row.titulo.encode('utf8')) for row in rows]


admin_modules = get_active_admin_modules()
oper_modules = get_active_oper_modules()
moderate_modules = get_active_moderate_modules()

ACTIVE_MODULES_ADMIN = _module_choices(admin_modules)
ACTIVE_MODULES_OPER = _module_choices(oper_modules)
ACTIVE_MODULES_MODER = _module_choices(moderate_modules)
| [
"model.get_active_oper_modules",
"model.get_active_admin_modules",
"model.get_active_moderate_modules"
] | [((724, 750), 'model.get_active_admin_modules', 'get_active_admin_modules', ([], {}), '()\n', (748, 750), False, 'from model import get_active_admin_modules, get_active_oper_modules, get_active_moderate_modules\n'), ((766, 791), 'model.get_active_oper_modules', 'get_active_oper_modules', ([], {}), '()\n', (789, 791), False, 'from model import get_active_admin_modules, get_active_oper_modules, get_active_moderate_modules\n'), ((811, 840), 'model.get_active_moderate_modules', 'get_active_moderate_modules', ([], {}), '()\n', (838, 840), False, 'from model import get_active_admin_modules, get_active_oper_modules, get_active_moderate_modules\n')] |
"""
this is a stripped down version of the SWHear class.
It's designed to hold only a single audio sample in memory.
check my github for a more complete version:
http://github.com/swharden
"""
import serial, os, pty
import time
import numpy as np
from threading import Thread
import random
class SpoofSerial:
    """Fake serial source that streams a sine wave into *x* and *y*.

    Useful for exercising the GUI without real hardware: a background
    thread appends one new sample every *period* seconds.
    """

    def __init__(self, freq=1, time_interval=2.0, period=0.016, generate_noise=False, snr=0.1):
        self.freq = freq
        self.time_interval = time_interval
        self.period = period
        self.generate_noise = generate_noise
        self.snr = snr
        # Seed the buffers with `time_interval` seconds of a clean sine.
        self.x = np.arange(0, time_interval, period)
        self.y = np.sin(2 * np.pi * self.x * freq)
        self.dont_touch_me = False  # True while the buffers are being mutated
        self.paused = False
        self.t = Thread(target=self.run_stream)

    def start(self):
        """Launch the streaming loop on its background thread."""
        self.t.start()

    def run_stream(self):
        """Append one (x, y) sample per *period* seconds until paused."""
        while not self.paused:
            time.sleep(self.period)
            self.dont_touch_me = True
            next_x = self.x[-1] + 1.0 * self.period
            self.x = np.append(self.x, [next_x])
            next_y = np.sin(2 * np.pi * next_x * self.freq)
            if self.generate_noise:
                next_y *= 1 + random.uniform(-self.snr, self.snr)
            self.y = np.append(self.y, [next_y])
            self.dont_touch_me = False

    def pause(self):
        """Temporarily stop updating the sine; existing values are kept."""
        self.paused = True

    def unpause(self):
        """Allow the streaming loop to run again."""
        self.paused = False
if __name__ == "__main__":
    # Minimal smoke test: start the stream and dump the growing buffers
    # until the user interrupts with Ctrl+C.
    print("Hi")
    try:
        thing = SpoofSerial(time_interval=1, period=0.5)
        print("hi")
        thing.start()
        while True:
            # Busy loop: prints as fast as possible while the worker thread
            # appends one sample every 0.5 s.
            print(thing.x, thing.y)
    except KeyboardInterrupt:
        print("Exiting...")
        exit()
| [
"random.uniform",
"time.sleep",
"numpy.append",
"numpy.sin",
"threading.Thread",
"numpy.arange"
] | [((714, 749), 'numpy.arange', 'np.arange', (['(0)', 'time_interval', 'period'], {}), '(0, time_interval, period)\n', (723, 749), True, 'import numpy as np\n'), ((768, 801), 'numpy.sin', 'np.sin', (['(2 * np.pi * self.x * freq)'], {}), '(2 * np.pi * self.x * freq)\n', (774, 801), True, 'import numpy as np\n'), ((889, 919), 'threading.Thread', 'Thread', ([], {'target': 'self.run_stream'}), '(target=self.run_stream)\n', (895, 919), False, 'from threading import Thread\n'), ((1151, 1174), 'time.sleep', 'time.sleep', (['self.period'], {}), '(self.period)\n', (1161, 1174), False, 'import time\n'), ((1296, 1326), 'numpy.append', 'np.append', (['self.x', '[new_x_val]'], {}), '(self.x, [new_x_val])\n', (1305, 1326), True, 'import numpy as np\n'), ((1354, 1395), 'numpy.sin', 'np.sin', (['(2 * np.pi * new_x_val * self.freq)'], {}), '(2 * np.pi * new_x_val * self.freq)\n', (1360, 1395), True, 'import numpy as np\n'), ((1527, 1557), 'numpy.append', 'np.append', (['self.y', '[new_y_val]'], {}), '(self.y, [new_y_val])\n', (1536, 1557), True, 'import numpy as np\n'), ((1467, 1502), 'random.uniform', 'random.uniform', (['(-self.snr)', 'self.snr'], {}), '(-self.snr, self.snr)\n', (1481, 1502), False, 'import random\n')] |
"""
Intialize the Pygate application
"""
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config.from_object("config")
db = SQLAlchemy(app)
from pygate import routes, models
| [
"flask_sqlalchemy.SQLAlchemy",
"flask.Flask"
] | [((113, 128), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (118, 128), False, 'from flask import Flask\n'), ((167, 182), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', (['app'], {}), '(app)\n', (177, 182), False, 'from flask_sqlalchemy import SQLAlchemy\n')] |
import pytest
from time import sleep
from datetime import (
date,
timedelta
)
from utilities import XLUtility
from pageObjects.common_functions.common_methods import CommonMethods
# This test checks the functionality of creating a session with fee discount
@pytest.mark.usefixtures("one_time_setup")
class Test_TC101_111_CreateSessionDiscount():
    """Mobile UI test: create a session with a discounted fee.

    Books a session for the upcoming Thursday at 9:00am, enters an amount
    lower than the service fee, and checks that the displayed discount
    percentage equals 1 - amount/fee. Test data is read from row 22 of the
    'session_mobile_data' sheet. The session is deleted again at the end.
    """
    @pytest.fixture(autouse=True)
    def classSetup(self, one_time_setup):
        # Runs automatically before each test in this class: log in first.
        self.logIn()
    def test_create_session_discount(self):
        common = CommonMethods(self.driver)
        self.log.info("starting test {}...".format(__name__))
        # Emulate a mobile viewport.
        self.driver.set_window_size(411, 823)
        today_date = date.today()
        current_weekday = today_date.weekday()
        # delete any existing session
        common.mobile_delete_existing_session()
        # delete any session from client side
        client_name = XLUtility.readData(self.path, 'session_mobile_data', 22, 3)
        sleep(1)
        self.login_page_obj.clk_navigation_btn()
        sleep(1)
        self.client_page_obj.clk_all_clients_mobile()
        sleep(1)
        self.client_page_obj.mobile_sel_client_name(client_name)
        sleep(1)
        self.client_page_obj.clk_view_client_mobile()
        sleep(1)
        self.notes_page_obj.clk_session_notes()
        sleep(1)
        common.delete_mobile_prior_session_note()
        sleep(4)
        # Click on Create Session button
        self.client_page_obj.clk_client_create_session_mobile()
        # Pick the next Thursday (weekday 3, Monday=0): same week if we are
        # before Thursday, otherwise next week's Thursday.
        if current_weekday < 3:
            N = 3 - current_weekday
            meeting_date = today_date + timedelta(days=N)
            self.date_time = str(meeting_date) + " 9:00am"
        if current_weekday >= 3:
            N = 10 - current_weekday
            meeting_date = today_date + timedelta(days=N)
            self.date_time = str(meeting_date) + " 9:00am"
        self.calendar_page_obj.txt_date_time(self.date_time)
        sleep(1)
        service = XLUtility.readData(self.path, 'session_mobile_data', 22, 5)
        self.calendar_page_obj.sel_service(service)
        sleep(1)
        service_fee = self.calendar_page_obj.get_service_fee()
        sleep(0.5)
        amount = XLUtility.readData(self.path, 'session_mobile_data', 22, 7)
        self.calendar_page_obj.input_amount(amount)
        sleep(1)
        self.calendar_page_obj.clk_create_session()
        # NOTE(review): this reassignment of today_date is never used afterwards.
        today_date = date.today()
        # NOTE(review): '%-d' (day without leading zero) is a POSIX strftime
        # extension -- it fails on Windows; confirm test runners are Linux/mac.
        day = meeting_date.strftime("%-d")
        day = int(day)
        # Ordinal suffix: 4th-20th and 24th-30th get 'th'; days 1-3, 21-23
        # and 31 fall through to st/nd/rd via day % 10.
        if 4 <= day <= 20 or 24 <= day <= 30:
            suffix = "th"
        else:
            suffix = ["st", "nd", "rd"][day % 10 - 1]
        # Build the session label as shown in the mobile calendar list, e.g.
        # "Thu, Aug 4th 2022 at 9:00am".
        date_1 = meeting_date.strftime("%a, %b %-d") + suffix + meeting_date.strftime(" %Y") + " at " + "9:00am"
        sleep(2)
        self.calendar_page_obj.sel_mobile_session(date_1)
        sleep(2)
        self.calendar_page_obj.clk_mobile_client_view_session()
        sleep(1)
        discount_percent = self.calendar_page_obj.get_discount_value()
        # Expected discount: the fraction of the service fee that was waived,
        # formatted to one decimal place to match the UI (e.g. '25.0%').
        exp_discount = (1 - int(amount) / int(service_fee))
        exp_discount_percent = "{:.1%}".format(exp_discount)
        # Clean up the session before asserting.
        self.calendar_page_obj.clk_delete_session()
        sleep(0.5)
        self.calendar_page_obj.clk_delete_session_warn()
        # exp_discount_percent = XLUtility.readData(self.path, 'session_mobile_data', 22, 8)
        if discount_percent == exp_discount_percent:
            self.log.info("{} passed!".format(__name__))
            assert True
        else:
            # Keep a screenshot of the failure state for debugging.
            self.driver.save_screenshot(
                self.pathScreenShot + "Test_TC101_111_CreateSessionDiscount" + self.dateFormat + ".png"
            )
            self.log.info("{} failed!".format(__name__))
            assert False
| [
"time.sleep",
"datetime.timedelta",
"pytest.mark.usefixtures",
"pageObjects.common_functions.common_methods.CommonMethods",
"pytest.fixture",
"datetime.date.today",
"utilities.XLUtility.readData"
] | [((262, 303), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""one_time_setup"""'], {}), "('one_time_setup')\n", (285, 303), False, 'import pytest\n'), ((353, 381), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (367, 381), False, 'import pytest\n'), ((489, 515), 'pageObjects.common_functions.common_methods.CommonMethods', 'CommonMethods', (['self.driver'], {}), '(self.driver)\n', (502, 515), False, 'from pageObjects.common_functions.common_methods import CommonMethods\n'), ((628, 640), 'datetime.date.today', 'date.today', ([], {}), '()\n', (638, 640), False, 'from datetime import date, timedelta\n'), ((814, 873), 'utilities.XLUtility.readData', 'XLUtility.readData', (['self.path', '"""session_mobile_data"""', '(22)', '(3)'], {}), "(self.path, 'session_mobile_data', 22, 3)\n", (832, 873), False, 'from utilities import XLUtility\n'), ((876, 884), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (881, 884), False, 'from time import sleep\n'), ((930, 938), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (935, 938), False, 'from time import sleep\n'), ((989, 997), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (994, 997), False, 'from time import sleep\n'), ((1059, 1067), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (1064, 1067), False, 'from time import sleep\n'), ((1118, 1126), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (1123, 1126), False, 'from time import sleep\n'), ((1171, 1179), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (1176, 1179), False, 'from time import sleep\n'), ((1226, 1234), 'time.sleep', 'sleep', (['(4)'], {}), '(4)\n', (1231, 1234), False, 'from time import sleep\n'), ((1695, 1703), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (1700, 1703), False, 'from time import sleep\n'), ((1716, 1775), 'utilities.XLUtility.readData', 'XLUtility.readData', (['self.path', '"""session_mobile_data"""', '(22)', '(5)'], {}), "(self.path, 'session_mobile_data', 22, 5)\n", (1734, 1775), False, 'from utilities 
import XLUtility\n'), ((1824, 1832), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (1829, 1832), False, 'from time import sleep\n'), ((1892, 1902), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (1897, 1902), False, 'from time import sleep\n'), ((1914, 1973), 'utilities.XLUtility.readData', 'XLUtility.readData', (['self.path', '"""session_mobile_data"""', '(22)', '(7)'], {}), "(self.path, 'session_mobile_data', 22, 7)\n", (1932, 1973), False, 'from utilities import XLUtility\n'), ((2022, 2030), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (2027, 2030), False, 'from time import sleep\n'), ((2093, 2105), 'datetime.date.today', 'date.today', ([], {}), '()\n', (2103, 2105), False, 'from datetime import date, timedelta\n'), ((2379, 2387), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (2384, 2387), False, 'from time import sleep\n'), ((2442, 2450), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (2447, 2450), False, 'from time import sleep\n'), ((2512, 2520), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (2517, 2520), False, 'from time import sleep\n'), ((2744, 2754), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (2749, 2754), False, 'from time import sleep\n'), ((1414, 1431), 'datetime.timedelta', 'timedelta', ([], {'days': 'N'}), '(days=N)\n', (1423, 1431), False, 'from datetime import date, timedelta\n'), ((1569, 1586), 'datetime.timedelta', 'timedelta', ([], {'days': 'N'}), '(days=N)\n', (1578, 1586), False, 'from datetime import date, timedelta\n')] |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib import animation
# thermal conductivity
c = 1.0
# define the discretization grid
xmin = -5.0 # left/bottom bound
xmax = 5.0 # right/top bound
dx = 0.1 # space increment (default 0.1)
nx = int((xmax-xmin)/dx) # number of points on xy grid
# compute timestep such that the scheme is stable
dt = 0.002
# set initial condition
u0 = np.zeros( (nx,nx) )
# step wave equation
def step_wave(t):
if t == 0:
print( 'stability:', c*dt/(dx**2) )
for i in range(0,10):
un = u0.copy()
# compute second x-derivative using central differences
L = (
u0[1:nx-1,0:nx-2] +
u0[2:nx,1:nx-1] - 4*u0[1:nx-1,1:nx-1] + u0[0:nx-2,1:nx-1] +
u0[1:nx-1,2:nx]
)
# apply second-order central differences in time
un[1:nx-1,1:nx-1] = u0[1:nx-1,1:nx-1] + c*dt/(dx**2) * L
# apply boundary conditions
un[0,0:nx+1] = 0
un[nx-1,0:nx+1] = 0
un[0:nx+1,0] = 0
un[0:nx+1,nx-1] = 0
# heater
un[40:-40,50:80] = 1.0
u0[:] = un
img.set_array(u0)
return img,
fig = plt.figure()
img = plt.imshow( u0,
vmax=1.0,
vmin=0.0,
extent=[xmin, xmax, xmin, xmax],
cmap=cm.YlOrRd )
anim = animation.FuncAnimation( fig, step_wave, 10000,
interval=1,
repeat=False,
blit=True)
plt.title( "2D Heat Equation" )
plt.xlim( xmin, xmax )
plt.ylim( xmin, xmax )
plt.show() | [
"matplotlib.pyplot.imshow",
"matplotlib.animation.FuncAnimation",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.show"
] | [((440, 458), 'numpy.zeros', 'np.zeros', (['(nx, nx)'], {}), '((nx, nx))\n', (448, 458), True, 'import numpy as np\n'), ((1263, 1275), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1273, 1275), True, 'import matplotlib.pyplot as plt\n'), ((1282, 1370), 'matplotlib.pyplot.imshow', 'plt.imshow', (['u0'], {'vmax': '(1.0)', 'vmin': '(0.0)', 'extent': '[xmin, xmax, xmin, xmax]', 'cmap': 'cm.YlOrRd'}), '(u0, vmax=1.0, vmin=0.0, extent=[xmin, xmax, xmin, xmax], cmap=cm\n .YlOrRd)\n', (1292, 1370), True, 'import matplotlib.pyplot as plt\n'), ((1452, 1539), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['fig', 'step_wave', '(10000)'], {'interval': '(1)', 'repeat': '(False)', 'blit': '(True)'}), '(fig, step_wave, 10000, interval=1, repeat=False,\n blit=True)\n', (1475, 1539), False, 'from matplotlib import animation\n'), ((1637, 1666), 'matplotlib.pyplot.title', 'plt.title', (['"""2D Heat Equation"""'], {}), "('2D Heat Equation')\n", (1646, 1666), True, 'import matplotlib.pyplot as plt\n'), ((1669, 1689), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xmin', 'xmax'], {}), '(xmin, xmax)\n', (1677, 1689), True, 'import matplotlib.pyplot as plt\n'), ((1692, 1712), 'matplotlib.pyplot.ylim', 'plt.ylim', (['xmin', 'xmax'], {}), '(xmin, xmax)\n', (1700, 1712), True, 'import matplotlib.pyplot as plt\n'), ((1715, 1725), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1723, 1725), True, 'import matplotlib.pyplot as plt\n')] |
import socket
host = '192.168.4.254'
port = 12345
addr = (host, port)
c = socket.socket(type=socket.SOCK_DGRAM)
c.sendto(b'hello world!\r\n', addr)
data = c.recvfrom(1024)[0]
print(data.decode(), end='')
c.close()
| [
"socket.socket"
] | [((76, 113), 'socket.socket', 'socket.socket', ([], {'type': 'socket.SOCK_DGRAM'}), '(type=socket.SOCK_DGRAM)\n', (89, 113), False, 'import socket\n')] |
import os, re, argparse
from dotenv import load_dotenv
from instabot import Bot
from pprint import pprint
def get_usernames(text):
# Code: Regex for Instagram Username and Hashtags
# https://blog.jstassen.com/2016/03/code-regex-for-instagram-username-and-hashtags/
pattern = re.compile('(?:@)([A-Za-z0-9_](?:(?:[A-Za-z0-9_]|(?:\.(?!\.))){0,28}(?:[A-Za-z0-9_]))?)')
return pattern.findall(text)
def is_user_exist(bot, username):
return bool(bot.get_user_id_from_username(username))
def exist_real_users_in_comment(bot, text):
return bool([username for username in get_usernames(text) if is_user_exist(bot, username)])
def get_users_match_requirements(bot, comments, media_likers, followers):
users_match_requirements = set()
for comment in comments:
if exist_real_users_in_comment(bot, comment["text"]):
tuple_comment_wrote_user = comment["user_id"], comment["user"]["username"]
if str(comment["user_id"]) in media_likers and str(comment["user_id"]) in followers:
users_match_requirements.add(tuple_comment_wrote_user)
return users_match_requirements
def get_action_participants(link, login, password, username):
bot = Bot()
bot.login(username=login, password=password)
media_id = bot.get_media_id_from_link(link)
followers = bot.get_user_followers(username)
media_likers = bot.get_media_likers(media_id)
comments = bot.get_media_comments(media_id)
winners = get_users_match_requirements(bot, comments, media_likers, followers)
bot.logout()
return winners
def main():
load_dotenv()
login = os.getenv("LOGIN")
password = os.getenv("PASSWORD")
parser = argparse.ArgumentParser(
description='''Поиск победителя конкурса в Инстаграм'''
)
parser.add_argument('username', help='Укажите имя пользователя разместившего пост')
parser.add_argument('link', help='Укажите ссылку на пост')
args = parser.parse_args()
username = args.username
link = args.link
pprint(get_action_participants(link=link, login=login, password=password, username=username))
if __name__ == "__main__":
main() | [
"argparse.ArgumentParser",
"os.getenv",
"re.compile",
"dotenv.load_dotenv",
"instabot.Bot"
] | [((292, 393), 're.compile', 're.compile', (['"""(?:@)([A-Za-z0-9_](?:(?:[A-Za-z0-9_]|(?:\\\\.(?!\\\\.))){0,28}(?:[A-Za-z0-9_]))?)"""'], {}), "(\n '(?:@)([A-Za-z0-9_](?:(?:[A-Za-z0-9_]|(?:\\\\.(?!\\\\.))){0,28}(?:[A-Za-z0-9_]))?)'\n )\n", (302, 393), False, 'import os, re, argparse\n'), ((1202, 1207), 'instabot.Bot', 'Bot', ([], {}), '()\n', (1205, 1207), False, 'from instabot import Bot\n'), ((1589, 1602), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1600, 1602), False, 'from dotenv import load_dotenv\n'), ((1616, 1634), 'os.getenv', 'os.getenv', (['"""LOGIN"""'], {}), "('LOGIN')\n", (1625, 1634), False, 'import os, re, argparse\n'), ((1649, 1670), 'os.getenv', 'os.getenv', (['"""PASSWORD"""'], {}), "('PASSWORD')\n", (1658, 1670), False, 'import os, re, argparse\n'), ((1685, 1761), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Поиск победителя конкурса в Инстаграм"""'}), "(description='Поиск победителя конкурса в Инстаграм')\n", (1708, 1761), False, 'import os, re, argparse\n')] |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
"""This is helper module that contains functions
to easeup communication with seaweed-fs
"""
import requests
from pyseaweed.version import __version__
class Connection(object):
def __init__(self, use_session=False):
if use_session:
self._conn = requests.Session()
else:
self._conn = requests
def _prepare_headers(self, additional_headers=None, **kwargs):
"""Prepare headers for http communication.
Return dict of header to be used in requests.
Args:
.. versionadded:: 0.3.2
**additional_headers**: (optional) Additional headers
to be used with request
Returns:
Headers dict. Key and values are string
"""
user_agent = "pyseaweed/{version}".format(version=__version__)
headers = {"User-Agent": user_agent}
if additional_headers is not None:
headers.update(additional_headers)
return headers
def head(self, url, *args, **kwargs):
"""Returns response to http HEAD
on provided url
"""
res = self._conn.head(url, headers=self._prepare_headers(**kwargs))
if res.status_code == 200:
return res
return None
def get_data(self, url, *args, **kwargs):
"""Gets data from url as text
Returns content under the provided url as text
Args:
**url**: address of the wanted data
.. versionadded:: 0.3.2
**additional_headers**: (optional) Additional headers
to be used with request
Returns:
string
"""
res = self._conn.get(url, headers=self._prepare_headers(**kwargs))
if res.status_code == 200:
return res.text
else:
return None
def get_raw_data(self, url, *args, **kwargs):
"""Gets data from url as bytes
Returns content under the provided url as bytes
ie. for binary data
Args:
**url**: address of the wanted data
.. versionadded:: 0.3.2
**additional_headers**: (optional) Additional headers
to be used with request
Returns:
bytes
"""
res = self._conn.get(url, headers=self._prepare_headers(**kwargs))
if res.status_code == 200:
return res.content
else:
return None
def post_file(self, url, filename, file_stream, *args, **kwargs):
"""Uploads file to provided url.
Returns contents as text
Args:
**url**: address where to upload file
**filename**: Name of the uploaded file
**file_stream**: file like object to upload
.. versionadded:: 0.3.2
**additional_headers**: (optional) Additional headers
to be used with request
Returns:
string
"""
res = self._conn.post(
url,
files={filename: file_stream},
headers=self._prepare_headers(**kwargs),
)
if res.status_code == 200 or res.status_code == 201:
return res.text
else:
return None
def delete_data(self, url, *args, **kwargs):
"""Deletes data under provided url
Returns status as boolean.
Args:
**url**: address of file to be deleted
.. versionadded:: 0.3.2
**additional_headers**: (optional) Additional headers
to be used with request
Returns:
Boolean. True if request was successful. False if not.
"""
res = self._conn.delete(url, headers=self._prepare_headers(**kwargs))
if res.status_code == 200 or res.status_code == 202:
return True
else:
return False
| [
"requests.Session"
] | [((326, 344), 'requests.Session', 'requests.Session', ([], {}), '()\n', (342, 344), False, 'import requests\n')] |
import networkx as nx
from nptyping import NDArray
from typing import List, Tuple
from .. import utils
from .Objective import Objective
class BudgetAllocation(Objective):
def __init__(self, G: nx.Graph, B: NDArray[int], B_range: Tuple[int, int]):
"""
Optimal budget allocation is a special case of the influence maximization
problem. It can be modeled as a bipartite graph (S, T; W), where S and T
are collections of advertising channels and customers, respectively. The
edge weight, p_st ∈ W, represents the influence probability of channel s
to customer t. The goal is to distribute the budget (e.g., time for a TV
advertisement, or space of an inline ad) among the source nodes, and to
maximize the expected influence on the potential customers.
The total influence of customer t from all channels can be modeled
by a proper monotone DR-submodular function I_t(x), where x is the
budget assignment among the advertising channels.
A concrete application is for search marketing advertiser bidding, in
which vendors bid for the right to appear alongside the results of
different search keywords.
https://arxiv.org/pdf/1606.05615.pdf (§6, Optimal budget allocation with
continuous assignments)
"""
V: List[int] = [n for n in G.nodes if G.nodes[n]['bipartite'] == 0]
T: List[int] = [m for m in G.nodes if G.nodes[m]['bipartite'] == 1]
super().__init__(V, B, B_range)
# W[s, t] is the influence probability of channel s to customer t.
W = nx.adjacency_matrix(G)
# collect the neighbors s \in S of each t \in T
neighbors: List[List[int]] = [[s for s in G.neighbors(t)] for t in T]
# keep track of (1 - p(s, t), s) for each neighbors s \in S of each t \in T
self.probs_exp_list: List[List[Tuple[float, int]]] = [
[(1 - W[s, t], s) for s in s_neighbors]
for s_neighbors, t in zip(neighbors, T)
]
def value(self, x: NDArray[int]) -> float:
"""
Value oracle for the Budget Allocation problem.
:param x: allotted budget.
:return: expected number of influenced people
"""
super().value(x)
return sum((
1 - utils.prod(
neg_p_st ** x[s]
for neg_p_st, s in probs_exp
) for probs_exp in self.probs_exp_list
))
| [
"networkx.adjacency_matrix"
] | [((1578, 1600), 'networkx.adjacency_matrix', 'nx.adjacency_matrix', (['G'], {}), '(G)\n', (1597, 1600), True, 'import networkx as nx\n')] |
from synthesis.synthesizer.decider import *
from synthesis.synthesizer.dplyr_to_pd.pd_result import *
from synthesis.search_structure import *
from utils.logger import get_logger
import pandas
import copy
from io import StringIO
logger = get_logger('synthesizer.decider')
class PDDecider(Decider):
def __init__(self, test_cases: List[TestCase], matching_apis: List[LibraryAPI]):
super().__init__(test_cases)
self.matching_apis = matching_apis
def error_message_understanding(self, raw_error_message: List[str], program: Program) -> (Constraint, List[str]):
pass
def analyze(self, program: Program) -> Result:
target_call = program.code[0]
# try to create layer
logger.debug(f'Evaluating... {target_call}')
# test cases
output = None
for test in self.test_cases:
success, output = self.pandas_eval(program, test.input['pandas'])
try:
if success:
if not isinstance(output, pandas.core.groupby.DataFrameGroupBy):
expect = test.output['pandas'].to_numpy()
result = output.reset_index()
other_result = copy.deepcopy(output).reset_index(drop=True)
if np.array_equal(result.to_numpy(), expect):
return PDResult(True, out=pandas.read_csv(StringIO(result.to_csv(index=False))))
elif np.array_equal(other_result.to_numpy(), expect):
return PDResult(True, out=pandas.read_csv(StringIO(other_result.to_csv(index=False))))
else:
tmp_result = output.size().to_frame('size').query('size > 0')
if np.array_equal(tmp_result.reset_index().to_numpy(), test.output['pandas_count'].to_numpy()):
return PDResult(True, out=output)
except Exception as e:
logger.error(e)
return PDResult(False)
def pandas_eval(self, program, df: pandas.DataFrame):
code = program.code[0]
try:
fn = eval(code)
df1 = copy.deepcopy(df)
result = fn(df1)
if not isinstance(result, pandas.DataFrame) and \
not isinstance(result, pandas.core.groupby.DataFrameGroupBy) and \
not isinstance(result, pandas.Series):
result = df1
return True, result
except:
return False, None
| [
"utils.logger.get_logger",
"copy.deepcopy"
] | [((239, 272), 'utils.logger.get_logger', 'get_logger', (['"""synthesizer.decider"""'], {}), "('synthesizer.decider')\n", (249, 272), False, 'from utils.logger import get_logger\n'), ((2181, 2198), 'copy.deepcopy', 'copy.deepcopy', (['df'], {}), '(df)\n', (2194, 2198), False, 'import copy\n'), ((1221, 1242), 'copy.deepcopy', 'copy.deepcopy', (['output'], {}), '(output)\n', (1234, 1242), False, 'import copy\n')] |
import pulpcore.client.pulp_rpm as pulp_rpm
import pulpcore.client.pulpcore as pulpcore
import yaml
import sys
import json
import colorama
from colorama import Fore
from colorama import Style
from time import sleep
from datetime import date, datetime
class RpmCherryPick:
"""Class used for cherry picking rpm packages and modules in pulpcore
"""
def __init__(self,data_file,host,username,password):
"""Initialize cherry picking class
:param data_file: the path to the data file with the packages
"""
self._data_file = data_file
self._read_data_file()
self._rpm_client_config = pulp_rpm.Configuration (
host=host,
username=username,
password=password,
)
self._rpm_api_client = pulp_rpm.ApiClient(configuration=self._rpm_client_config)
self._pulpcore_client_config = pulpcore.Configuration (
host=host,
username=username,
password=password
)
self._pulpcore_api_client = pulpcore.ApiClient(configuration=self._pulpcore_client_config)
def _read_data_file(self):
print('Reading data file', self._data_file)
try:
self._data = yaml.load(open(self._data_file, 'r'), Loader=yaml.FullLoader)
except:
print('Error: Data file', self._data_file, 'could not be opened.')
sys.exit(1)
def cherry_pick(self):
print('Starting cherry picking')
dest_repo_href=self.__get_repo_href(self._data['dest_repo'])
print(Fore.BLUE + Style.BRIGHT)
print("##################################################")
print("Cherry picking %s"%self._data['dest_repo'])
print("##################################################")
print(Style.RESET_ALL)
for repo in self._data['content']:
packages_copy_hrefs=set()
modules_copy_hrefs=set()
print(Fore.GREEN)
print("Cherry picking %s from repo %s"%(repo['desc'],repo['name']))
print(Style.RESET_ALL)
repository_version=self.__get_latest_repo_version_href(name=repo['name'])
print(repository_version)
if 'packages' in repo:
packages_copy_config=list()
print(Fore.RED)
print("Searching packages %s"%repo['desc'])
print(Style.RESET_ALL)
for package in repo['packages']:
packages_copy_hrefs.update(self.__search_package(**package, repository_version=repository_version))
packages_copy_config.append({
"source_repo_version": repository_version,
"dest_repo": dest_repo_href,
"content": list(packages_copy_hrefs),
})
self.__copy_content('packages', packages_copy_config)
if 'modules' in repo:
modules_copy_config=list()
print(Fore.RED)
print("Searching modules %s"%repo['desc'])
print(Style.RESET_ALL)
for module in repo['modules']:
modules_copy_hrefs.update(self.__search_modulemd(**module, repository_version=repository_version))
modules_copy_config.append({
"source_repo_version": repository_version,
"dest_repo": dest_repo_href,
"content": list(modules_copy_hrefs),
})
self.__copy_content('modules', modules_copy_config)
print('Cherry picking complete')
def __get_repo_href(self, name=None):
"""Get the href of an rpm repository
Returns the Pulp Href of an rpm repository
:param api_client: Pulp api client to use for the connection
:param name: Name of the repository
:return: String
Pulp_href of the repository
"""
api_instance=pulp_rpm.RepositoriesRpmApi(api_client=self._rpm_api_client)
response=api_instance.list(limit=1, name=name)
if response.count < 1:
sys.exit("There were no repositories found")
elif response.count >1:
sys.exit("There were more then 1 repositories found")
return response.results[0].pulp_href
def __get_latest_repo_version_href(self, name=None):
"""Get the latest version href of an rpm repository
Returns the Pulp Href of an rpm repository
:param api_client: Pulp api client to use for the connection
:param name: Name of the repository
:return: String
Pulp_latest_version_href of the repository
"""
api_instance=pulp_rpm.RepositoriesRpmApi(api_client=self._rpm_api_client)
response=api_instance.list(limit=1, name=name)
if response.count < 1:
sys.exit("There were no repositories found")
elif response.count >1:
sys.exit("There were more then 1 repositories found")
return response.results[0].latest_version_href
def __search_modulemd(self, name=None, stream=None, versions=1, repository_version=None):
"""Search modules
Search for modules in a specific repository_version of the whole of pulp
:param api_client: Pulp api client to use for the connection
:param name: Name of the modulemd
:param stream: stream of the modulemd
:param repository_version: repository version to look for modules
:return: set(String)
set with the pulp_href of the found modulemds
"""
api_instance = pulp_rpm.ContentModulemdsApi(api_client=self._rpm_api_client)
response=pulp_rpm.PaginatedrpmModulemdResponseList(next='foo')
modulemds=set()
response=api_instance.list(limit=versions, name=name, stream=stream, repository_version=repository_version)
for m in response.results:
print("Adding following module:")
print(json.dumps({"name": m.name, "stream": m.stream, "packages": m.packages}, sort_keys=False, indent=4))
modulemds.add(m.pulp_href)
return(modulemds)
def __search_package(self, name=None, version=None, release=None, arch=None, versions=1, repository_version=None):
"""Search packages
Search for packages in a specific repository_version of the whole of pulp
:param api_client: Pulp api client to use for the connection
:param name: Name of the packages
:param version: Version of the package
:param release: Release of the package
:param arch: architecture of the package
:param repository_version: repository version to look for modules
:return: set(String)
set with the pulp_href of the packages
"""
api_instance = pulp_rpm.ContentPackagesApi(api_client=self._rpm_api_client)
response=pulp_rpm.PaginatedrpmPackageResponseList(next='foo')
packages_response=list()
packages=set()
response = api_instance.list(limit=versions, name=name, version=version, release=release, arch=arch, repository_version=repository_version, fields='name,version,release,arch,pulp_href')
for p in response.results:
package = {
"name": p.name,
"version": p.version,
"release": p.release,
"arch": p.arch,
"pulp_href": p.pulp_href
}
packages_response.append(package)
print("Adding following package:")
print(json.dumps({"name": p.name, "version": p.version, "release": p.release, "arch": p.arch}, sort_keys=False, indent=4))
#print(json.dumps(packages_response, sort_keys=False, indent=4))
packages.add(p.pulp_href)
return (packages)
def __copy(self, config=None, dependency_solving=True):
"""Copy content
Search for modules in a specific repository_version of the whole of pulp
:param api_client: Pulp api client to use for the connection
:param config: copy config
:param dependency_solving: solve dependencies
:return: String
pulp_href of the generated task
"""
api_instance = pulp_rpm.RpmCopyApi(api_client=self._rpm_api_client)
copy = pulp_rpm.Copy(config=config, dependency_solving=dependency_solving)
return api_instance.copy_content(copy)
def __wait_until_task_has_finished(self, task_href=None):
""" Wait until pulp task is finished
Wait until pulp task is finished, if the task failed, the program will be exited
:param api_client: Pulp api client to use for the connection
:param task_href: The href of the pulp task
"""
api_instance = pulpcore.TasksApi(api_client=self._pulpcore_api_client)
task = api_instance.read(task_href)
print(json.dumps(task.to_dict(), cls=ComplexEncoder, sort_keys=False, indent=4))
print()
while task.state != 'completed':
sleep(2)
task = api_instance.read(task_href)
print(json.dumps(task.to_dict(), cls=ComplexEncoder, sort_keys=False, indent=4))
print()
if task.state == 'failed':
sys.exit("Task failed with following error %s"%json.dumps(task.error, sort_keys=False, indent=4))
def __copy_content(self, content_type, copy_config):
"""Copy content wrapper funtion
Wrapper function around copy that will print verbose output, wait until task has finished,
show task status etc ...
"""
print(Fore.BLUE + Style.BRIGHT)
print("##################################################")
print("Using following copy config for %s:"%content_type)
print("##################################################")
print(Style.RESET_ALL)
print(json.dumps(copy_config, sort_keys=False, indent=4))
response = self.__copy(config=copy_config)
print(Fore.BLUE + Style.BRIGHT)
print("##################################################")
print("Waiting until task %s has finished for %s"%(response.task,content_type))
print("##################################################")
print(Style.RESET_ALL)
self.__wait_until_task_has_finished(task_href=response.task)
class ComplexEncoder(json.JSONEncoder):
"""Class used for decoding datetimes in task outputs
"""
def default(self, obj):
if isinstance(obj, datetime):
return obj.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(obj, date):
return obj.strftime('%Y-%m-%d')
else:
return json.JSONEncoder.default(self, obj) | [
"pulpcore.client.pulp_rpm.PaginatedrpmModulemdResponseList",
"pulpcore.client.pulpcore.ApiClient",
"pulpcore.client.pulp_rpm.PaginatedrpmPackageResponseList",
"pulpcore.client.pulp_rpm.RepositoriesRpmApi",
"pulpcore.client.pulp_rpm.ApiClient",
"pulpcore.client.pulp_rpm.RpmCopyApi",
"json.JSONEncoder.def... | [((640, 711), 'pulpcore.client.pulp_rpm.Configuration', 'pulp_rpm.Configuration', ([], {'host': 'host', 'username': 'username', 'password': 'password'}), '(host=host, username=username, password=password)\n', (662, 711), True, 'import pulpcore.client.pulp_rpm as pulp_rpm\n'), ((791, 848), 'pulpcore.client.pulp_rpm.ApiClient', 'pulp_rpm.ApiClient', ([], {'configuration': 'self._rpm_client_config'}), '(configuration=self._rpm_client_config)\n', (809, 848), True, 'import pulpcore.client.pulp_rpm as pulp_rpm\n'), ((888, 959), 'pulpcore.client.pulpcore.Configuration', 'pulpcore.Configuration', ([], {'host': 'host', 'username': 'username', 'password': 'password'}), '(host=host, username=username, password=password)\n', (910, 959), True, 'import pulpcore.client.pulpcore as pulpcore\n'), ((1043, 1105), 'pulpcore.client.pulpcore.ApiClient', 'pulpcore.ApiClient', ([], {'configuration': 'self._pulpcore_client_config'}), '(configuration=self._pulpcore_client_config)\n', (1061, 1105), True, 'import pulpcore.client.pulpcore as pulpcore\n'), ((3969, 4029), 'pulpcore.client.pulp_rpm.RepositoriesRpmApi', 'pulp_rpm.RepositoriesRpmApi', ([], {'api_client': 'self._rpm_api_client'}), '(api_client=self._rpm_api_client)\n', (3996, 4029), True, 'import pulpcore.client.pulp_rpm as pulp_rpm\n'), ((4717, 4777), 'pulpcore.client.pulp_rpm.RepositoriesRpmApi', 'pulp_rpm.RepositoriesRpmApi', ([], {'api_client': 'self._rpm_api_client'}), '(api_client=self._rpm_api_client)\n', (4744, 4777), True, 'import pulpcore.client.pulp_rpm as pulp_rpm\n'), ((5636, 5697), 'pulpcore.client.pulp_rpm.ContentModulemdsApi', 'pulp_rpm.ContentModulemdsApi', ([], {'api_client': 'self._rpm_api_client'}), '(api_client=self._rpm_api_client)\n', (5664, 5697), True, 'import pulpcore.client.pulp_rpm as pulp_rpm\n'), ((5715, 5768), 'pulpcore.client.pulp_rpm.PaginatedrpmModulemdResponseList', 'pulp_rpm.PaginatedrpmModulemdResponseList', ([], {'next': '"""foo"""'}), "(next='foo')\n", (5756, 5768), 
True, 'import pulpcore.client.pulp_rpm as pulp_rpm\n'), ((6853, 6913), 'pulpcore.client.pulp_rpm.ContentPackagesApi', 'pulp_rpm.ContentPackagesApi', ([], {'api_client': 'self._rpm_api_client'}), '(api_client=self._rpm_api_client)\n', (6880, 6913), True, 'import pulpcore.client.pulp_rpm as pulp_rpm\n'), ((6931, 6983), 'pulpcore.client.pulp_rpm.PaginatedrpmPackageResponseList', 'pulp_rpm.PaginatedrpmPackageResponseList', ([], {'next': '"""foo"""'}), "(next='foo')\n", (6971, 6983), True, 'import pulpcore.client.pulp_rpm as pulp_rpm\n'), ((8291, 8343), 'pulpcore.client.pulp_rpm.RpmCopyApi', 'pulp_rpm.RpmCopyApi', ([], {'api_client': 'self._rpm_api_client'}), '(api_client=self._rpm_api_client)\n', (8310, 8343), True, 'import pulpcore.client.pulp_rpm as pulp_rpm\n'), ((8359, 8426), 'pulpcore.client.pulp_rpm.Copy', 'pulp_rpm.Copy', ([], {'config': 'config', 'dependency_solving': 'dependency_solving'}), '(config=config, dependency_solving=dependency_solving)\n', (8372, 8426), True, 'import pulpcore.client.pulp_rpm as pulp_rpm\n'), ((8829, 8884), 'pulpcore.client.pulpcore.TasksApi', 'pulpcore.TasksApi', ([], {'api_client': 'self._pulpcore_api_client'}), '(api_client=self._pulpcore_api_client)\n', (8846, 8884), True, 'import pulpcore.client.pulpcore as pulpcore\n'), ((4128, 4172), 'sys.exit', 'sys.exit', (['"""There were no repositories found"""'], {}), "('There were no repositories found')\n", (4136, 4172), False, 'import sys\n'), ((4876, 4920), 'sys.exit', 'sys.exit', (['"""There were no repositories found"""'], {}), "('There were no repositories found')\n", (4884, 4920), False, 'import sys\n'), ((9087, 9095), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (9092, 9095), False, 'from time import sleep\n'), ((9941, 9991), 'json.dumps', 'json.dumps', (['copy_config'], {'sort_keys': '(False)', 'indent': '(4)'}), '(copy_config, sort_keys=False, indent=4)\n', (9951, 9991), False, 'import json\n'), ((1397, 1408), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1405, 1408), 
False, 'import sys\n'), ((4217, 4270), 'sys.exit', 'sys.exit', (['"""There were more then 1 repositories found"""'], {}), "('There were more then 1 repositories found')\n", (4225, 4270), False, 'import sys\n'), ((4965, 5018), 'sys.exit', 'sys.exit', (['"""There were more then 1 repositories found"""'], {}), "('There were more then 1 repositories found')\n", (4973, 5018), False, 'import sys\n'), ((6008, 6111), 'json.dumps', 'json.dumps', (["{'name': m.name, 'stream': m.stream, 'packages': m.packages}"], {'sort_keys': '(False)', 'indent': '(4)'}), "({'name': m.name, 'stream': m.stream, 'packages': m.packages},\n sort_keys=False, indent=4)\n", (6018, 6111), False, 'import json\n'), ((7599, 7718), 'json.dumps', 'json.dumps', (["{'name': p.name, 'version': p.version, 'release': p.release, 'arch': p.arch}"], {'sort_keys': '(False)', 'indent': '(4)'}), "({'name': p.name, 'version': p.version, 'release': p.release,\n 'arch': p.arch}, sort_keys=False, indent=4)\n", (7609, 7718), False, 'import json\n'), ((10747, 10782), 'json.JSONEncoder.default', 'json.JSONEncoder.default', (['self', 'obj'], {}), '(self, obj)\n', (10771, 10782), False, 'import json\n'), ((9359, 9408), 'json.dumps', 'json.dumps', (['task.error'], {'sort_keys': '(False)', 'indent': '(4)'}), '(task.error, sort_keys=False, indent=4)\n', (9369, 9408), False, 'import json\n')] |
from distutils.core import setup
setup(name='VisualRecognitionTooling',
version='1.0',
description='Tooling Library for IBM Watson Visual Recognition',
author='<NAME>',
author_email='',
packages=['vrtool'],
) | [
"distutils.core.setup"
] | [((34, 216), 'distutils.core.setup', 'setup', ([], {'name': '"""VisualRecognitionTooling"""', 'version': '"""1.0"""', 'description': '"""Tooling Library for IBM Watson Visual Recognition"""', 'author': '"""<NAME>"""', 'author_email': '""""""', 'packages': "['vrtool']"}), "(name='VisualRecognitionTooling', version='1.0', description=\n 'Tooling Library for IBM Watson Visual Recognition', author='<NAME>',\n author_email='', packages=['vrtool'])\n", (39, 216), False, 'from distutils.core import setup\n')] |
import sys
import json
import time
import random
import datetime
from pytz import timezone
import requests
# Settings
url = 'https://hooks.slack.com/services/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'
exercises = {
'Plank': {'range': (20, 50), 'unit': 'second'},
'Wall Sit': {'range': (40, 90), 'unit': 'second'},
'Push Ups': {'range': (5, 15), 'unit': None},
'Calf Raises': {'range': (25, 50), 'unit': None},
'Crunches': {'range': (15, 25), 'unit': None},
'Stretch': {'range': (60, 120), 'unit': 'second'},
'Lunges': {'range': (10, 25), 'unit': None}
}
message_timezone = 'MST'
next = (50, 75)
night = 18
def generate_message(minutes_to_next=None):
"""
Using the list of exercises, this function generates a new exercise message. Optionally it
takes a minutes_to_next parameter which it uses to add an indication of when the next exercise
will take place.
"""
# Randomly select an exercise and a number of repetitions
exercise, data = random.choice(exercises.items())
repetitions = random.randint(*data['range'])
# Prepare the message string
unit_string = ' ' + data['unit'] if data['unit'] else ''
text = '{}{} {} RIGHT NOW!'.format(repetitions, prefix, exercise)
# Add the next exercise indication
if minutes_to_next is not None:
current_time = datetime.datetime.now(timezone('UTC'))
next_time = (now.astimezone(timezone(message_timezone)) +
datetime.timedelta(minutes=minutes_to_next))
next_text = 'NEXT EXERCISE AT {}'.format(time.strftime('%H:%M'))
text += '\n' + next_text
return text
def postMessage():
    """Pick a random exercise and post it to the Slack webhook.

    Returns the number of minutes to wait before the next post, drawn
    from the module-level ``next`` range.

    NOTE(review): written for Python 2 — on Python 3,
    random.choice(exercises.keys()) raises TypeError because keys() is a
    view, not a sequence.
    """
    exercise = random.choice(exercises.keys())
    properties = exercises[exercise]
    # Repetition count drawn from the exercise's configured range.
    number = random.randint(properties['range'][0], properties['range'][1])
    # Optional unit suffix, e.g. ' second'.
    prefix = '' if not properties['unit'] else ' {}'.format(properties['unit'])
    wait = random.randint(next[0], next[1])
    now = datetime.datetime.now(timezone('UTC'))
    # Scheduled time of the *next* post in MST.  NOTE(review): this local
    # name shadows the imported 'time' module inside this function.
    time = (now.astimezone(timezone('MST')) + datetime.timedelta(minutes=wait))
    text = '<!channel> {}{} {} RIGHT NOW!'.format(number, prefix, exercise)
    # Only advertise the next post if it falls before the nightly cutoff.
    if time.hour < night:
        text += '\nNEXT THING AT {}'.format(time.strftime('%H:%M'))
    #print "Posting {}".format(text)
    payload = {'text': text}
    r = requests.post(url, data=json.dumps(payload))
    #if r.status_code != 200:
        #print r.content
    return wait
def startLoop():
    """Post exercises in an endless loop, then exit after nightfall.

    Each iteration posts one message via postMessage(), sleeps one minute
    per waiting minute while writing a heartbeat to stdout, and — once the
    current MST hour reaches ``night`` — posts a goodbye message and
    terminates the process.

    NOTE(review): Python 2 only (uses xrange); 'wait > 5' is validated
    with assert, which is stripped under -O.
    """
    while True:
        # Post a new message
        wait = postMessage()
        assert wait > 5
        #Heartbeat every 60 seconds to prevent program from terminating
        for _ in xrange(wait):
            time.sleep(60)
            sys.stdout.write('/\\_')
        sys.stdout.write('\n')
        #Stop at Night
        now = datetime.datetime.now(timezone('UTC'))
        if now.astimezone(timezone('MST')).hour >= night:
            text = 'I\'m out. PEACE Y\'ALL'
            #print "Posting {}".format(text)
            payload = {'text': text}
            r = requests.post(url, data=json.dumps(payload))
            exit()
if __name__ == '__main__':
generate_message()
#startLoop()
| [
"pytz.timezone",
"time.strftime",
"json.dumps",
"time.sleep",
"datetime.timedelta",
"random.randint",
"sys.stdout.write"
] | [((1091, 1121), 'random.randint', 'random.randint', (["*data['range']"], {}), "(*data['range'])\n", (1105, 1121), False, 'import random\n'), ((1818, 1880), 'random.randint', 'random.randint', (["properties['range'][0]", "properties['range'][1]"], {}), "(properties['range'][0], properties['range'][1])\n", (1832, 1880), False, 'import random\n'), ((1974, 2006), 'random.randint', 'random.randint', (['next[0]', 'next[1]'], {}), '(next[0], next[1])\n', (1988, 2006), False, 'import random\n'), ((2040, 2055), 'pytz.timezone', 'timezone', (['"""UTC"""'], {}), "('UTC')\n", (2048, 2055), False, 'from pytz import timezone\n'), ((2104, 2136), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': 'wait'}), '(minutes=wait)\n', (2122, 2136), False, 'import datetime\n'), ((2827, 2849), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (2843, 2849), False, 'import sys\n'), ((1416, 1431), 'pytz.timezone', 'timezone', (['"""UTC"""'], {}), "('UTC')\n", (1424, 1431), False, 'from pytz import timezone\n'), ((1522, 1565), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': 'minutes_to_next'}), '(minutes=minutes_to_next)\n', (1540, 1565), False, 'import datetime\n'), ((1617, 1639), 'time.strftime', 'time.strftime', (['"""%H:%M"""'], {}), "('%H:%M')\n", (1630, 1639), False, 'import time\n'), ((2085, 2100), 'pytz.timezone', 'timezone', (['"""MST"""'], {}), "('MST')\n", (2093, 2100), False, 'from pytz import timezone\n'), ((2289, 2311), 'time.strftime', 'time.strftime', (['"""%H:%M"""'], {}), "('%H:%M')\n", (2302, 2311), False, 'import time\n'), ((2418, 2437), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (2428, 2437), False, 'import json\n'), ((2763, 2777), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (2773, 2777), False, 'import time\n'), ((2791, 2815), 'sys.stdout.write', 'sys.stdout.write', (['"""/\\\\_"""'], {}), "('/\\\\_')\n", (2807, 2815), False, 'import sys\n'), ((2913, 2928), 'pytz.timezone', 'timezone', 
(['"""UTC"""'], {}), "('UTC')\n", (2921, 2928), False, 'from pytz import timezone\n'), ((1470, 1496), 'pytz.timezone', 'timezone', (['message_timezone'], {}), '(message_timezone)\n', (1478, 1496), False, 'from pytz import timezone\n'), ((2957, 2972), 'pytz.timezone', 'timezone', (['"""MST"""'], {}), "('MST')\n", (2965, 2972), False, 'from pytz import timezone\n'), ((3161, 3180), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (3171, 3180), False, 'import json\n')] |
from ..utils import log
from .status import get_status_text
import json
class Response:
    """A minimal JSON HTTP response: status line, serialized body, headers."""

    def __init__(self, status=200, data=None):
        """
        Build the response.

        Parameters:
            status (int): HTTP status code; its text is looked up in
                get_status_text.
            data (dict): payload serialized to JSON. Defaults to {}.
        """
        # BUG FIX: 'data={}' was a shared mutable default argument;
        # use None as the sentinel instead (behavior is unchanged).
        if data is None:
            data = {}
        self.status_code = int(status)
        self.status_text = str(get_status_text[self.status_code])
        self.status_msg = str(self.status_code) + " " + self.status_text
        self.data = json.dumps(data)
        # Content-Length counts the serialized JSON string.
        self.headers = [
            ('Content-Type', 'application/json'),
            ('Content-Length', str(len(self.data)))
        ]

    def __str__(self):
        # Delegates formatting to the shared log helper.
        return log("RES", self.status_code, self.status_text)
if __name__ == '__main__':
test = Response(status=200)
print(test)
| [
"json.dumps"
] | [((333, 349), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (343, 349), False, 'import json\n')] |
""" This script is the scraping controller """
# Standard library imports
from datetime import date
import logging
from typing import List, Dict, Sequence, Type, Tuple, Union
# Third party imports
import click
from sqlalchemy.engine.base import Connection
# Local imports
from naccbis.Scraping import (
BaseScraper,
GameLogScraper,
IndividualOffenseScraper,
IndividualPitchingScraper,
TeamFieldingScraper,
TeamOffenseScraper,
TeamPitchingScraper,
)
from naccbis.Common import utils
from naccbis.Common.splits import GameLogSplit, Split
from naccbis.Common.settings import Settings
PARSER_EPILOG = """\b
Examples:
scrape.py final 2015:2017
scrape.py final 2017 -S 1,3 -s conference -o sql
scrape.py inseason
scrape.py inseason -S 6 -s overall -o csv
"""
FINAL_PARSER_DESCRIPTION = """
Scrape end of the year final stats.
\b
Stat Options
---------------------
1) Individual Offense
2) Individual Pitching
3) Team Offense
4) Team Pitching
5) Team Fielding
6) Game Logs
all) All
"""
INSEASON_PARSER_DESCRIPTION = """
Scrape stats during the season.
A column is added for the scrape date.
\b
Stat Options
---------------------
1) Individual Offense
2) Individual Pitching
3) Team Offense
4) Team Pitching
5) Team Fielding
6) Game Logs
all) All
"""
@click.group(help=__doc__, epilog=PARSER_EPILOG)
def cli():
pass
def run_scrapers(
    scraper_nums: List[int],
    year: str,
    splits: Sequence[Union[Split, GameLogSplit]],
    output: str,
    inseason: bool,
    verbose: bool,
    conn: Connection,
) -> None:
    """Run selected scrapers for a given year

    :param scraper_nums: List of integers that correspond to the scrapers to be run
    :param year: The integer representation of the year
    :param splits: List of splits
    :param output: Output type
    :param inseason: Scraping during the season?
    :param verbose: Print extra information to standard out?
    :param conn: Open database connection passed through to each scraper
    """
    # NOTE(review): 'year' is annotated str but documented as an integer
    # representation — confirm against utils.season_to_year.
    scrapers: Dict[int, Type[BaseScraper]]
    # Numbers 1-5 map to stat scrapers; 6 (game logs) is handled below.
    scrapers = {
        1: IndividualOffenseScraper,
        2: IndividualPitchingScraper,
        3: TeamOffenseScraper,
        4: TeamPitchingScraper,
        5: TeamFieldingScraper,
    }
    for split in splits:
        for num in scraper_nums:
            if num in scrapers.keys():
                runScraper = scrapers[num](
                    year, split, output, inseason, verbose, conn=conn
                )
                runScraper.info()
                runScraper.run()
                runScraper.export()

    # Game logs have special splits
    if 6 in scraper_nums:
        for split in list(GameLogSplit):
            gameLogScraper = GameLogScraper(
                year, split, output, inseason, verbose, conn=conn
            )
            gameLogScraper.info()
            gameLogScraper.run()
            gameLogScraper.export()
@cli.command(help=FINAL_PARSER_DESCRIPTION)
@click.argument("year", type=utils.parse_year)
@click.option(
    "-S",
    "--stat",
    type=click.IntRange(min=1, max=6),
    multiple=True,
    default=range(1, 7),
    help="Select stat scraper(s) to run. Provide list or omit argument for all scrapers",
)
@click.option(
    "-s",
    "--split",
    type=click.Choice(["overall", "conference", "all"]),
    default="all",
    show_default=True,
    help="Split choices",
)
@click.option(
    "-o",
    "--output",
    type=click.Choice(["csv", "sql"]),
    default="csv",
    show_default=True,
    help="Output choices",
)
@click.option(
    "-v", "--verbose", is_flag=True, help="Print extra information to standard out"
)
def final(
    year: List[int], stat: Tuple[int], split: str, output: str, verbose: bool
) -> None:
    """Scrape end of the year final stats

    :param year: Seasons to scrape (parsed by utils.parse_year)
    :param stat: Stat scraper numbers to run (1-6)
    :param split: Split choice: overall, conference, or all
    :param output: Output type: csv or sql
    :param verbose: Print extra information to standard out?
    """
    config = Settings(app_name="scrape")
    utils.init_logging(config.log_level)
    conn = utils.connect_db(config.get_db_url())
    logging.info("Initializing scraping controller script")
    # Each CLI season token is converted to the scraper's year format.
    years = [utils.season_to_year(x) for x in year]
    if split == "all":
        splits = list(Split)
    else:
        splits = [Split(split)]
    for year_ in years:
        print("\nScraping:", year_, "\n")
        run_scrapers(
            list(stat),
            year_,
            splits,
            output,
            inseason=False,
            verbose=verbose,
            conn=conn,
        )
    conn.close()
    logging.info("Scraping completed")
@cli.command(help=INSEASON_PARSER_DESCRIPTION)
@click.option(
    "-S",
    "--stat",
    type=click.IntRange(min=1, max=6),
    multiple=True,
    default=list(range(1, 7)),
    help="Select stat scraper(s) to run. Provide list or omit argument for all scrapers",
)
@click.option(
    "-s",
    "--split",
    type=click.Choice(["overall", "conference", "all"]),
    default="all",
    show_default=True,
    help="Split choices",
)
@click.option(
    "-o",
    "--output",
    type=click.Choice(["csv", "sql"]),
    default="csv",
    show_default=True,
    help="Output choices",
)
@click.option(
    "-v", "--verbose", is_flag=True, help="Print extra information to standard out"
)
def inseason(stat: Tuple[int], split: str, output: str, verbose: bool) -> None:
    """Run scrapers for the inseason subcommand

    :param stat: Stat scraper numbers to run (1-6)
    :param split: Split choice: overall, conference, or all
    :param output: Output type: csv or sql
    :param verbose: Print extra information to standard out?
    """
    config = Settings(app_name="scrape")
    utils.init_logging(config.log_level)
    conn = utils.connect_db(config.get_db_url())
    logging.info("Initializing scraping controller script")
    # The season is derived from today's date rather than a CLI argument.
    season = date.today().year
    year = utils.season_to_year(season)
    if split == "all":
        splits = list(Split)
    else:
        splits = [Split(split)]
    run_scrapers(
        list(stat), year, splits, output, inseason=True, verbose=verbose, conn=conn
    )
    conn.close()
    logging.info("Scraping completed")
if __name__ == "__main__":
cli() # pragma: no cover
| [
"click.Choice",
"click.argument",
"naccbis.Scraping.GameLogScraper",
"click.IntRange",
"click.group",
"click.option",
"naccbis.Common.splits.Split",
"naccbis.Common.settings.Settings",
"naccbis.Common.utils.season_to_year",
"naccbis.Common.utils.init_logging",
"datetime.date.today",
"logging.i... | [((1336, 1383), 'click.group', 'click.group', ([], {'help': '__doc__', 'epilog': 'PARSER_EPILOG'}), '(help=__doc__, epilog=PARSER_EPILOG)\n', (1347, 1383), False, 'import click\n'), ((2925, 2970), 'click.argument', 'click.argument', (['"""year"""'], {'type': 'utils.parse_year'}), "('year', type=utils.parse_year)\n", (2939, 2970), False, 'import click\n'), ((3504, 3602), 'click.option', 'click.option', (['"""-v"""', '"""--verbose"""'], {'is_flag': '(True)', 'help': '"""Print extra information to standard out"""'}), "('-v', '--verbose', is_flag=True, help=\n 'Print extra information to standard out')\n", (3516, 3602), False, 'import click\n'), ((5045, 5143), 'click.option', 'click.option', (['"""-v"""', '"""--verbose"""'], {'is_flag': '(True)', 'help': '"""Print extra information to standard out"""'}), "('-v', '--verbose', is_flag=True, help=\n 'Print extra information to standard out')\n", (5057, 5143), False, 'import click\n'), ((3812, 3839), 'naccbis.Common.settings.Settings', 'Settings', ([], {'app_name': '"""scrape"""'}), "(app_name='scrape')\n", (3820, 3839), False, 'from naccbis.Common.settings import Settings\n'), ((3844, 3880), 'naccbis.Common.utils.init_logging', 'utils.init_logging', (['config.log_level'], {}), '(config.log_level)\n', (3862, 3880), False, 'from naccbis.Common import utils\n'), ((3935, 3990), 'logging.info', 'logging.info', (['"""Initializing scraping controller script"""'], {}), "('Initializing scraping controller script')\n", (3947, 3990), False, 'import logging\n'), ((4422, 4456), 'logging.info', 'logging.info', (['"""Scraping completed"""'], {}), "('Scraping completed')\n", (4434, 4456), False, 'import logging\n'), ((5339, 5366), 'naccbis.Common.settings.Settings', 'Settings', ([], {'app_name': '"""scrape"""'}), "(app_name='scrape')\n", (5347, 5366), False, 'from naccbis.Common.settings import Settings\n'), ((5371, 5407), 'naccbis.Common.utils.init_logging', 'utils.init_logging', (['config.log_level'], {}), 
'(config.log_level)\n', (5389, 5407), False, 'from naccbis.Common import utils\n'), ((5462, 5517), 'logging.info', 'logging.info', (['"""Initializing scraping controller script"""'], {}), "('Initializing scraping controller script')\n", (5474, 5517), False, 'import logging\n'), ((5560, 5588), 'naccbis.Common.utils.season_to_year', 'utils.season_to_year', (['season'], {}), '(season)\n', (5580, 5588), False, 'from naccbis.Common import utils\n'), ((5814, 5848), 'logging.info', 'logging.info', (['"""Scraping completed"""'], {}), "('Scraping completed')\n", (5826, 5848), False, 'import logging\n'), ((4004, 4027), 'naccbis.Common.utils.season_to_year', 'utils.season_to_year', (['x'], {}), '(x)\n', (4024, 4027), False, 'from naccbis.Common import utils\n'), ((3019, 3047), 'click.IntRange', 'click.IntRange', ([], {'min': '(1)', 'max': '(6)'}), '(min=1, max=6)\n', (3033, 3047), False, 'import click\n'), ((3234, 3280), 'click.Choice', 'click.Choice', (["['overall', 'conference', 'all']"], {}), "(['overall', 'conference', 'all'])\n", (3246, 3280), False, 'import click\n'), ((3402, 3430), 'click.Choice', 'click.Choice', (["['csv', 'sql']"], {}), "(['csv', 'sql'])\n", (3414, 3430), False, 'import click\n'), ((5531, 5543), 'datetime.date.today', 'date.today', ([], {}), '()\n', (5541, 5543), False, 'from datetime import date\n'), ((4554, 4582), 'click.IntRange', 'click.IntRange', ([], {'min': '(1)', 'max': '(6)'}), '(min=1, max=6)\n', (4568, 4582), False, 'import click\n'), ((4775, 4821), 'click.Choice', 'click.Choice', (["['overall', 'conference', 'all']"], {}), "(['overall', 'conference', 'all'])\n", (4787, 4821), False, 'import click\n'), ((4943, 4971), 'click.Choice', 'click.Choice', (["['csv', 'sql']"], {}), "(['csv', 'sql'])\n", (4955, 4971), False, 'import click\n'), ((2679, 2744), 'naccbis.Scraping.GameLogScraper', 'GameLogScraper', (['year', 'split', 'output', 'inseason', 'verbose'], {'conn': 'conn'}), '(year, split, output, inseason, verbose, conn=conn)\n', (2693, 
2744), False, 'from naccbis.Scraping import BaseScraper, GameLogScraper, IndividualOffenseScraper, IndividualPitchingScraper, TeamFieldingScraper, TeamOffenseScraper, TeamPitchingScraper\n'), ((4124, 4136), 'naccbis.Common.splits.Split', 'Split', (['split'], {}), '(split)\n', (4129, 4136), False, 'from naccbis.Common.splits import GameLogSplit, Split\n'), ((5670, 5682), 'naccbis.Common.splits.Split', 'Split', (['split'], {}), '(split)\n', (5675, 5682), False, 'from naccbis.Common.splits import GameLogSplit, Split\n')] |
from django.conf.urls import url
from django.urls import path, include
from rest_framework_swagger.views import get_swagger_view
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework.authentication import BasicAuthentication
# drf-yasg schema view serving the OpenAPI description for the UIs below.
# The schema itself is public (AllowAny); clients may authenticate with
# JWT or HTTP Basic.
schema_view = get_schema_view(
    openapi.Info(
        title="MUCTODO API",
        default_version='v1',
        description="MUCTodo REST API",
    ),
    public=True,
    permission_classes=(permissions.AllowAny,),
    authentication_classes=(JSONWebTokenAuthentication, BasicAuthentication)
)
# URL table: Swagger UI, ReDoc UI, and the application's API routes.
# cache_timeout=0 disables schema caching (regenerated on every request).
urlpatterns = [
    path('swagger', schema_view.with_ui(
        'swagger', cache_timeout=0), name='schema-swagger-ui'),
    path('redoc', schema_view.with_ui(
        'redoc', cache_timeout=0), name='schema-redoc'),
    path('', include("todos.router")),
]
| [
"django.urls.include",
"drf_yasg.openapi.Info"
] | [((411, 503), 'drf_yasg.openapi.Info', 'openapi.Info', ([], {'title': '"""MUCTODO API"""', 'default_version': '"""v1"""', 'description': '"""MUCTodo REST API"""'}), "(title='MUCTODO API', default_version='v1', description=\n 'MUCTodo REST API')\n", (423, 503), False, 'from drf_yasg import openapi\n'), ((907, 930), 'django.urls.include', 'include', (['"""todos.router"""'], {}), "('todos.router')\n", (914, 930), False, 'from django.urls import path, include\n')] |
#!/usr/bin/env python
# coding: utf-8
"""
Copy-pasted from 'Informative git prompt for zsh' with small changes
Original: https://github.com/olivierverdier/zsh-git-prompt
"""
from __future__ import print_function
from subprocess import Popen, PIPE
def git_commit():
    """Return the abbreviated hash of the current git HEAD commit."""
    process = Popen(['git', 'rev-parse', '--short', 'HEAD'], stdout=PIPE)
    stdout, _ = process.communicate()
    # Drop git's trailing newline before decoding to text.
    return stdout[:-1].decode('utf-8')
def parse_git_branch(line):
    """
    Parse 'git status -b --porcelain' command branch info output.
    Possible strings:
    - simple: "## dev"
    - detached: "## HEAD (no branch)"
    - ahead/behind: "## master...origin/master [ahead 1, behind 2]"
    Ahead/behind format:
    - [ahead 1]
    - [behind 1]
    - [ahead 1, behind 1]

    Returns a (branch, remote_branch, ahead, behind) tuple; ahead and
    behind are always ints (BUG FIX: they were returned as strings when
    present but as int 0 otherwise).
    """
    branch = remote_branch = ''
    ahead = behind = 0
    if line == 'HEAD (no branch)':  # detached state
        branch = '#' + git_commit()
    elif '...' in line:  # ahead of or behind remote branch
        if ' ' in line:
            branches, ahead_behind = line.split(' ', 1)
        else:
            branches, ahead_behind = line, None
        branch, remote_branch = branches.split('...')
        if ahead_behind and ahead_behind[0] == '[' and ahead_behind[-1] == ']':
            ahead_behind = ahead_behind[1:-1]
            for state in ahead_behind.split(', '):
                if state.startswith('ahead '):
                    ahead = int(state[6:])   # strip 'ahead ' prefix
                elif state.startswith('behind '):
                    behind = int(state[7:])  # strip 'behind ' prefix
    else:
        branch = line
    return branch, remote_branch, ahead, behind
def git_status():
    """
    Get git status.

    Runs 'git status -b --porcelain' and tallies the per-file status
    codes.  Returns (branch, remote_branch, staged, changed, untracked,
    unmerged, ahead, behind).
    """
    git_cmd = ['git', 'status', '-b', '--porcelain']
    result, __ = Popen(git_cmd, stdout=PIPE).communicate()
    branch = remote_branch = ''
    staged = changed = untracked = unmerged = ahead = behind = 0
    for line in result.splitlines():
        line = line.decode('utf-8')
        # The first two characters are the porcelain XY status code;
        # the payload (file name or branch info) starts at column 3.
        prefix = line[0:2]
        line = line[3:]
        if prefix == '##':  # branch name + ahead & behind info
            branch, remote_branch, ahead, behind = parse_git_branch(line)
        elif prefix == '??':  # untracked file
            untracked += 1
        elif prefix in ('DD', 'AU', 'UD', 'UA', 'DU', 'AA', 'UU'):  # unmerged
            unmerged += 1
        else:
            if prefix[0] in ('M', 'A', 'D', 'R', 'C'):  # changes in index
                staged += 1
            if prefix[1] in ('M', 'D'):  # changes in work tree
                changed += 1
    return (branch, remote_branch, staged, changed, untracked, unmerged,
            ahead, behind)
if __name__ == '__main__':
print('\n'.join(str(param) for param in git_status()))
| [
"subprocess.Popen"
] | [((1753, 1780), 'subprocess.Popen', 'Popen', (['git_cmd'], {'stdout': 'PIPE'}), '(git_cmd, stdout=PIPE)\n', (1758, 1780), False, 'from subprocess import Popen, PIPE\n'), ((378, 405), 'subprocess.Popen', 'Popen', (['git_cmd'], {'stdout': 'PIPE'}), '(git_cmd, stdout=PIPE)\n', (383, 405), False, 'from subprocess import Popen, PIPE\n')] |
#!/usr/bin/env python3
from typing import List
from os import environ, path
from json import dump
import argparse
import logging
import logging.config
from . import CONFIG
from .ldap import LDAPConnector
from .process import Processor
LDAP: LDAPConnector = None
class EnvDefault(argparse.Action):
    """argparse action that falls back to an environment variable.

    If no explicit default is supplied and ``envvar`` is present in the
    process environment, its value becomes the option default; an option
    that obtained a default this way is no longer required.
    """

    def __init__(self, envvar, required=True, default=None, **kwargs):
        # Only consult the environment when no usable default was given.
        if not default and envvar:
            default = environ.get(envvar, default)
        # A default (explicit or from the environment) satisfies 'required'.
        if required and default:
            required = False
        super(EnvDefault, self).__init__(default=default, required=required,
                                         **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        # An explicitly supplied value simply lands on the namespace.
        setattr(namespace, self.dest, values)
def run(config_file: str, alias_files: List[str]) -> None:
    """Process the given alias files using the given config file.

    Reads the config, optionally configures logging from the file it
    references, binds the module-level LDAP connection, then processes
    all alias files and writes sender_aliases.json and
    recipient_aliases.json to the current directory.
    """
    config_file_abs = path.abspath(config_file)
    dir_path = path.dirname(config_file_abs)
    CONFIG.read(config_file_abs)
    # logging_conf is resolved relative to the config file's directory.
    if "main" in CONFIG and "logging_conf" in CONFIG["main"]:
        logging_config_path = path.join(dir_path, CONFIG["main"]["logging_conf"])
        logging.config.fileConfig(logging_config_path, disable_existing_loggers=False)
    logger = logging.getLogger("main")
    logger.info("Master log level: {}".format(logging.getLevelName(logging.root.level)))

    # Rebind the module-level LDAP handle used elsewhere in the package.
    global LDAP
    LDAP = LDAPConnector()
    processor = Processor()
    processor.load_files(alias_files)
    # Syntax-check mode stops after loading, before any processing/output.
    if CONFIG["main"].getboolean("check_syntax_only"):
        print("Done with syntax check. Not doing anything else.")
        return
    processor.process()
    with open("sender_aliases.json", 'w') as f:
        dump(processor.sender_aliases, f)
    with open("recipient_aliases.json", 'w') as f:
        dump(processor.recipient_aliases, f)
def main():
    """Run the script: parse CLI arguments and hand off to run()."""
    parser = argparse.ArgumentParser(description='Create our mail alias tables from alias definitions')
    # --config falls back to the MAC_CONFIG environment variable via EnvDefault.
    parser.add_argument('--config', '-c', metavar='file', action=EnvDefault, envvar='MAC_CONFIG', required=False, default="./mac.conf",
                        help='The config file to use. Defaults to "./mac.conf". Can also be specified via the environment variable MAC_CONFIG')
    parser.add_argument('alias_files', nargs='+',
                        help='The alias files to be used for generation. May contain folders, which should be recursed.')
    args = parser.parse_args()
    run(args.config, args.alias_files)
if __name__ == "__main__":
main()
| [
"logging.getLogger",
"argparse.ArgumentParser",
"os.path.join",
"os.path.dirname",
"logging.getLevelName",
"logging.config.fileConfig",
"os.path.abspath",
"json.dump"
] | [((992, 1017), 'os.path.abspath', 'path.abspath', (['config_file'], {}), '(config_file)\n', (1004, 1017), False, 'from os import environ, path\n'), ((1033, 1062), 'os.path.dirname', 'path.dirname', (['config_file_abs'], {}), '(config_file_abs)\n', (1045, 1062), False, 'from os import environ, path\n'), ((1340, 1365), 'logging.getLogger', 'logging.getLogger', (['"""main"""'], {}), "('main')\n", (1357, 1365), False, 'import logging\n'), ((1964, 2059), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create our mail alias tables from alias definitions"""'}), "(description=\n 'Create our mail alias tables from alias definitions')\n", (1987, 2059), False, 'import argparse\n'), ((1188, 1239), 'os.path.join', 'path.join', (['dir_path', "CONFIG['main']['logging_conf']"], {}), "(dir_path, CONFIG['main']['logging_conf'])\n", (1197, 1239), False, 'from os import environ, path\n'), ((1248, 1326), 'logging.config.fileConfig', 'logging.config.fileConfig', (['logging_config_path'], {'disable_existing_loggers': '(False)'}), '(logging_config_path, disable_existing_loggers=False)\n', (1273, 1326), False, 'import logging\n'), ((1781, 1814), 'json.dump', 'dump', (['processor.sender_aliases', 'f'], {}), '(processor.sender_aliases, f)\n', (1785, 1814), False, 'from json import dump\n'), ((1874, 1910), 'json.dump', 'dump', (['processor.recipient_aliases', 'f'], {}), '(processor.recipient_aliases, f)\n', (1878, 1910), False, 'from json import dump\n'), ((1412, 1452), 'logging.getLevelName', 'logging.getLevelName', (['logging.root.level'], {}), '(logging.root.level)\n', (1432, 1452), False, 'import logging\n')] |
# imports
from aficionado.route import Route
from aficionado.defaults import not_found_handler, internal_error_handler
class Router:
    """Minimal path-based request router with a configurable 404 handler."""

    def __init__(self):
        '''
        Constructor: start with no routes and the default not-found handler.
        '''
        # create route for not found
        not_found = Route(
            path=None,
            handler=not_found_handler,
            allowed_methods=['ALL']
        )
        self.routes = []
        self.not_found_route = not_found

    def add(self, route):
        '''
        Add a route to the router

        Parameters:
            self (Router): self object
            route (Route): route object

        Raises:
            Exception: if a route with the same path already exists
        '''
        # check that route is not already in routes
        for r in self.routes:
            if r.path == route.path:
                # BUG FIX: the original passed route.path positionally to
                # .format() with a named {path} placeholder, which raised
                # KeyError instead of the intended message.
                raise Exception('Path {path} already exists'.format(path=route.path))
        self.routes.append(route)

    def find_route(self, path):
        '''
        Find the current route

        Parameters:
            self (Router): self object
            path (str): search path

        Returns:
            route (Route): matching route, or the not-found route
        '''
        # find route in list
        for r in self.routes:
            if r.path == path:
                return r
        return self.not_found_route

    def not_found(self, handler):
        '''
        Set the not found handler

        Parameters:
            self (Router): self object
            handler (function): handler function
        '''
        not_found = Route(
            path=None,
            handler=handler,
            allowed_methods=['ALL']
        )
        self.not_found_route = not_found
| [
"aficionado.route.Route"
] | [((238, 306), 'aficionado.route.Route', 'Route', ([], {'path': 'None', 'handler': 'not_found_handler', 'allowed_methods': "['ALL']"}), "(path=None, handler=not_found_handler, allowed_methods=['ALL'])\n", (243, 306), False, 'from aficionado.route import Route\n'), ((1281, 1339), 'aficionado.route.Route', 'Route', ([], {'path': 'None', 'handler': 'handler', 'allowed_methods': "['ALL']"}), "(path=None, handler=handler, allowed_methods=['ALL'])\n", (1286, 1339), False, 'from aficionado.route import Route\n')] |
import logging
import torch
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import constant_init, kaiming_init, normal_init
from mmcv.runner import load_checkpoint
from torch.nn.modules.batchnorm import _BatchNorm
import torch.nn.functional as F
from mmdet.models.plugins import GeneralizedAttention
from mmdet.ops import ContextBlock, DeformConv, ModulatedDeformConv
from ..registry import BACKBONES
from ..utils import build_conv_layer, build_norm_layer, ConvModule
from ..builder import build_loss
import math
from IPython import embed
class BasicBlock(nn.Module):
    """Two-layer 3x3 residual block used by the shallow (18/34) ResNets."""

    expansion = 1

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None,
                 gcb=None,
                 gen_attention=None):
        super(BasicBlock, self).__init__()
        # None of the optional plugins are supported by the basic block.
        assert dcn is None, "Not implemented yet."
        assert gen_attention is None, "Not implemented yet."
        assert gcb is None, "Not implemented yet."

        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
        # First 3x3 conv carries the stride/dilation of the block.
        self.conv1 = build_conv_layer(
            conv_cfg,
            inplanes,
            planes,
            3,
            stride=stride,
            padding=dilation,
            dilation=dilation,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        self.conv2 = build_conv_layer(
            conv_cfg, planes, planes, 3, padding=1, bias=False)
        self.add_module(self.norm2_name, norm2)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.dilation = dilation
        # Gradient checkpointing is not implemented for the basic block.
        assert not with_cp

    @property
    def norm1(self):
        """First normalization layer registered on this module."""
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        """Second normalization layer registered on this module."""
        return getattr(self, self.norm2_name)

    def forward(self, x):
        """conv-norm-relu, conv-norm, add shortcut, relu."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.norm1(self.conv1(x)))
        out = self.norm2(self.conv2(out))
        out = out + shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block with optional DCN,
    global-context (gcb) and generalized-attention plugins."""
    expansion = 4

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 style='pytorch',
                 with_cp=False,
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 dcn=None,
                 gcb=None,
                 gen_attention=None):
        """Bottleneck block for ResNet.
        If style is "pytorch", the stride-two layer is the 3x3 conv layer,
        if it is "caffe", the stride-two layer is the first 1x1 conv layer.
        """
        super(Bottleneck, self).__init__()
        assert style in ['pytorch', 'caffe']
        assert dcn is None or isinstance(dcn, dict)
        assert gcb is None or isinstance(gcb, dict)
        assert gen_attention is None or isinstance(gen_attention, dict)

        self.inplanes = inplanes
        self.planes = planes
        self.stride = stride
        self.dilation = dilation
        self.style = style
        self.with_cp = with_cp
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.dcn = dcn
        self.with_dcn = dcn is not None
        self.gcb = gcb
        self.with_gcb = gcb is not None
        self.gen_attention = gen_attention
        self.with_gen_attention = gen_attention is not None

        # Which conv carries the stride depends on the framework style.
        if self.style == 'pytorch':
            self.conv1_stride = 1
            self.conv2_stride = stride
        else:
            self.conv1_stride = stride
            self.conv2_stride = 1

        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)
        self.norm3_name, norm3 = build_norm_layer(
            norm_cfg, planes * self.expansion, postfix=3)

        # 1x1 reduction conv.
        self.conv1 = build_conv_layer(
            conv_cfg,
            inplanes,
            planes,
            kernel_size=1,
            stride=self.conv1_stride,
            bias=False)
        self.add_module(self.norm1_name, norm1)
        fallback_on_stride = False
        self.with_modulated_dcn = False
        if self.with_dcn:
            fallback_on_stride = dcn.get('fallback_on_stride', False)
            self.with_modulated_dcn = dcn.get('modulated', False)
        # 3x3 conv: plain conv unless DCN is requested (and not falling
        # back to a regular conv because of the stride).
        if not self.with_dcn or fallback_on_stride:
            self.conv2 = build_conv_layer(
                conv_cfg,
                planes,
                planes,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=dilation,
                dilation=dilation,
                bias=False)
        else:
            assert conv_cfg is None, 'conv_cfg must be None for DCN'
            self.deformable_groups = dcn.get('deformable_groups', 1)
            # Offset channels: 2 coords (or 2 coords + 1 mask for the
            # modulated variant) per position of the 3x3 kernel.
            if not self.with_modulated_dcn:
                conv_op = DeformConv
                offset_channels = 18
            else:
                conv_op = ModulatedDeformConv
                offset_channels = 27
            # Plain conv that predicts the per-location offsets (and mask).
            self.conv2_offset = nn.Conv2d(
                planes,
                self.deformable_groups * offset_channels,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=dilation,
                dilation=dilation)
            self.conv2 = conv_op(
                planes,
                planes,
                kernel_size=3,
                stride=self.conv2_stride,
                padding=dilation,
                dilation=dilation,
                deformable_groups=self.deformable_groups,
                bias=False)
        self.add_module(self.norm2_name, norm2)
        # 1x1 expansion conv back to planes * expansion channels.
        self.conv3 = build_conv_layer(
            conv_cfg,
            planes,
            planes * self.expansion,
            kernel_size=1,
            bias=False)
        self.add_module(self.norm3_name, norm3)

        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample

        if self.with_gcb:
            gcb_inplanes = planes * self.expansion
            self.context_block = ContextBlock(inplanes=gcb_inplanes, **gcb)

        # gen_attention
        if self.with_gen_attention:
            self.gen_attention_block = GeneralizedAttention(
                planes, **gen_attention)

    @property
    def norm1(self):
        # Normalization layer after conv1 (registered under norm1_name).
        return getattr(self, self.norm1_name)

    @property
    def norm2(self):
        # Normalization layer after conv2 (registered under norm2_name).
        return getattr(self, self.norm2_name)

    @property
    def norm3(self):
        # Normalization layer after conv3 (registered under norm3_name).
        return getattr(self, self.norm3_name)

    def forward(self, x):

        def _inner_forward(x):
            # Residual branch; the shortcut is added before the final relu.
            identity = x

            out = self.conv1(x)
            out = self.norm1(out)
            out = self.relu(out)

            # 3x3 stage: plain conv, modulated DCN (offset + mask), or DCN.
            if not self.with_dcn:
                out = self.conv2(out)
            elif self.with_modulated_dcn:
                offset_mask = self.conv2_offset(out)
                # First 18*g channels are offsets, last 9*g are the mask.
                offset = offset_mask[:, :18 * self.deformable_groups, :, :]
                mask = offset_mask[:, -9 * self.deformable_groups:, :, :]
                mask = mask.sigmoid()
                out = self.conv2(out, offset, mask)
            else:
                offset = self.conv2_offset(out)
                out = self.conv2(out, offset)
            out = self.norm2(out)
            out = self.relu(out)

            if self.with_gen_attention:
                out = self.gen_attention_block(out)

            out = self.conv3(out)
            out = self.norm3(out)

            if self.with_gcb:
                out = self.context_block(out)

            if self.downsample is not None:
                identity = self.downsample(x)

            out += identity

            return out

        # Optionally trade compute for memory via gradient checkpointing.
        if self.with_cp and x.requires_grad:
            out = cp.checkpoint(_inner_forward, x)
        else:
            out = _inner_forward(x)

        out = self.relu(out)

        return out
def make_rests_layer(block,
                     inplanes,
                     planes,
                     blocks,
                     stride=1,
                     dilation=1,
                     style='pytorch',
                     with_cp=False,
                     conv_cfg=None,
                     norm_cfg=dict(type='BN'),
                     dcn=None,
                     gcb=None,
                     gen_attention=None,
                     gen_attention_blocks=None):
    """Stack ``blocks`` residual blocks of type ``block`` into one stage.

    Args:
        block: block class (BasicBlock or Bottleneck).
        inplanes (int): input channels of the first block.
        planes (int): base channels of every block in the stage.
        blocks (int): number of blocks to stack.
        stride (int): stride of the first block (later blocks use 1).
        dilation (int): dilation used by every block.
        gen_attention_blocks: indices of the blocks that receive the
            generalized-attention plugin; defaults to none.
            (BUG FIX: was a mutable default argument ``[]``.)

    Returns:
        nn.Sequential: the assembled stage.
    """
    if gen_attention_blocks is None:
        gen_attention_blocks = []
    downsample = None
    # A 1x1 projection shortcut is needed whenever the spatial size or
    # channel count changes across the stage boundary.
    if stride != 1 or inplanes != planes * block.expansion:
        downsample = nn.Sequential(
            build_conv_layer(
                conv_cfg,
                inplanes,
                planes * block.expansion,
                kernel_size=1,
                stride=stride,
                bias=False),
            build_norm_layer(norm_cfg, planes * block.expansion)[1],
        )

    layers = []
    # Only the first block downsamples and carries the projection shortcut.
    layers.append(
        block(
            inplanes=inplanes,
            planes=planes,
            stride=stride,
            dilation=dilation,
            downsample=downsample,
            style=style,
            with_cp=with_cp,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            dcn=dcn,
            gcb=gcb,
            gen_attention=gen_attention if
            (0 in gen_attention_blocks) else None))
    inplanes = planes * block.expansion
    for i in range(1, blocks):
        layers.append(
            block(
                inplanes=inplanes,
                planes=planes,
                stride=1,
                dilation=dilation,
                style=style,
                with_cp=with_cp,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                dcn=dcn,
                gcb=gcb,
                gen_attention=gen_attention if
                (i in gen_attention_blocks) else None))

    return nn.Sequential(*layers)
@BACKBONES.register_module
class ResTSNet(nn.Module):
    """ResNet-based teacher-student (TS) backbone for distillation.

    Two parallel ResNet branches are built from ``arch_settings``: a teacher
    (stages ``layer1..layer4`` chosen by ``depth``) and a channel-pruned
    student (stages ``s_layer1..s_layer4`` chosen by ``s_depth``, whose stage
    widths are ``64 * 2**i // t_s_ratio``).  ``forward`` runs both branches
    and, depending on the flags, additionally performs feature adaption
    (blending detached teacher features into the student stream) or kernel
    adaption (deriving student conv kernels from teacher kernels through
    channel-reducing adapters).

    Args:
        depth (int): teacher depth, one of {18, 34, 50, 101, 152}.
        s_depth (int): student depth, same choices as ``depth``.
        in_channels (int): number of input image channels, normally 3.
        t_s_ratio (int): teacher/student channel pruning ratio.
        spatial_ratio (int): spatial downscale factor of the student input.
        num_stages (int): number of residual stages, normally 4.
        strides (Sequence[int]): stride of the first block of each stage.
        dilations (Sequence[int]): dilation of each stage.
        out_indices (Sequence[int]): stages whose outputs are returned.
        style (str): 'pytorch' or 'caffe'; with "pytorch" the stride-two
            layer is the 3x3 conv, otherwise the first 1x1 conv.
        pyramid_hint_loss (dict): config of the hint loss built via
            ``build_loss`` for block-wise alignment.
        apply_block_wise_alignment (bool): also return (student, adapted
            teacher) feature pairs from ``forward``.
        freeze_teacher (bool): freeze all four teacher stages.
        good_initial (bool): seed the student from interpolated teacher
            weights after loading a checkpoint (see ``copy_backbone``).
        feature_adaption (bool): blend detached teacher features into the
            student during training.
        kernel_adaption (bool): derive student kernels from teacher kernels.
        conv_downsample (bool): use conv adapters for feature adaption.
        train_mode (bool): selects the train-time vs. inference-time
            adaption path inside ``forward``.
        frozen_stages (int): teacher stages to freeze; -1 freezes nothing.
        conv_cfg / norm_cfg (dict): conv / norm layer configs.
        norm_eval (bool): keep norm layers in eval mode during training so
            running stats are frozen.
        dcn, stage_with_dcn, gcb, stage_with_gcb, gen_attention,
            stage_with_gen_attention: optional per-stage plugin configs.
        with_cp (bool): use gradient checkpointing (saves memory, slower).
        zero_init_residual (bool): zero-init the last norm of each residual
            block so it starts as identity.
        rouse_student_point (int): stored training step for waking the
            student; not used in this module's visible code.
    """

    # depth -> (block class, number of blocks per stage)
    arch_settings = {
        18: (BasicBlock, (2, 2, 2, 2)),
        34: (BasicBlock, (3, 4, 6, 3)),
        50: (Bottleneck, (3, 4, 6, 3)),
        101: (Bottleneck, (3, 4, 23, 3)),
        152: (Bottleneck, (3, 8, 36, 3))
    }
def __init__(self,
depth,
s_depth,
in_channels=3,
t_s_ratio=1,
spatial_ratio=1,
num_stages=4,
strides=(1, 2, 2, 2),
dilations=(1, 1, 1, 1),
out_indices=(0, 1, 2, 3),
style='pytorch',
pyramid_hint_loss=dict(type='MSELoss', loss_weight=1),
apply_block_wise_alignment=False,
freeze_teacher=False,
good_initial=False,
feature_adaption=False,
kernel_adaption=False,
conv_downsample=False,
train_mode=True,
constant_term=False,
pure_student_term=False,
bn_topk_selection=False,
frozen_stages=-1,
conv_cfg=None,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
dcn=None,
stage_with_dcn=(False, False, False, False),
gcb=None,
stage_with_gcb=(False, False, False, False),
gen_attention=None,
stage_with_gen_attention=((), (), (), ()),
with_cp=False,
zero_init_residual=True,
rouse_student_point=0):
super(ResTSNet, self).__init__()
if depth not in self.arch_settings:
raise KeyError('invalid depth {} for resnet'.format(depth))
self.depth = depth
self.s_depth = s_depth
self.t_s_ratio = t_s_ratio
self.spatial_ratio = spatial_ratio
self.num_stages = num_stages
assert num_stages >= 1 and num_stages <= 4
self.strides = strides
self.dilations = dilations
assert len(strides) == len(dilations) == num_stages
self.out_indices = out_indices
assert max(out_indices) < num_stages
self.style = style
self.pyramid_hint_loss = build_loss(pyramid_hint_loss)
self.apply_block_wise_alignment = apply_block_wise_alignment
self.freeze_teacher = freeze_teacher
self.frozen_stages = frozen_stages
self.good_initial = good_initial
self.feature_adaption = feature_adaption
self.kernel_adaption = kernel_adaption
self.conv_downsample = conv_downsample
self.bn_topk_selection = bn_topk_selection
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.with_cp = with_cp
self.norm_eval = norm_eval
self.dcn = dcn
self.stage_with_dcn = stage_with_dcn
if dcn is not None:
assert len(stage_with_dcn) == num_stages
self.gen_attention = gen_attention
self.gcb = gcb
self.stage_with_gcb = stage_with_gcb
if gcb is not None:
assert len(stage_with_gcb) == num_stages
self.zero_init_residual = zero_init_residual
self.block, stage_blocks = self.arch_settings[depth]
self.stage_blocks = stage_blocks[:num_stages]
self.s_block, s_stage_blocks = self.arch_settings[s_depth]
self.s_stage_blocks = s_stage_blocks[:num_stages]
self.inplanes = 64
self.rouse_student_point = rouse_student_point
self.train_step = 0
self.train_mode = train_mode
self.constant_term = constant_term
self.pure_student_term = pure_student_term
self._make_stem_layer(in_channels)
self._make_s_stem_layer(in_channels)
self.res_layers = []
self.s_res_layers = []
self.align_layers = nn.ModuleList()
# teacher net
teacher_block_output_channel = []
for i, num_blocks in enumerate(self.stage_blocks):
stride = strides[i]
dilation = dilations[i]
dcn = self.dcn if self.stage_with_dcn[i] else None
gcb = self.gcb if self.stage_with_gcb[i] else None
planes = 64 * 2**i
res_layer = make_rests_layer(
self.block,
self.inplanes,
planes,
num_blocks,
stride=stride,
dilation=dilation,
style=self.style,
with_cp=with_cp,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
dcn=dcn,
gcb=gcb,
gen_attention=gen_attention,
gen_attention_blocks=stage_with_gen_attention[i])
teacher_block_output_channel.append(planes)
self.inplanes = planes * self.block.expansion
layer_name = 'layer{}'.format(i + 1)
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self._freeze_stages()
# student net
# TODO: rewrite student layers;
# current block1[0] layer input channel not fully pruned in same way
self.inplanes = 64 # // self.t_s_ratio
student_block_output_channel = []
for j, num_blocks in enumerate(self.s_stage_blocks):
stride = strides[j]
dilation = dilations[j]
dcn = self.dcn if self.stage_with_dcn[j] else None
gcb = self.gcb if self.stage_with_gcb[j] else None
planes = 64 * 2**j // self.t_s_ratio # Prune the channel
s_res_layer = make_rests_layer(
self.s_block,
self.inplanes,
planes,
num_blocks,
stride=stride,
dilation=dilation,
style=self.style,
with_cp=with_cp,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
dcn=dcn,
gcb=gcb,
gen_attention=gen_attention,
gen_attention_blocks=stage_with_gen_attention[j])
student_block_output_channel.append(planes)
self.inplanes = planes * self.s_block.expansion
s_layer_name = 's_layer{}'.format(j + 1)
self.add_module(s_layer_name, s_res_layer)
self.s_res_layers.append(s_layer_name)
self.feat_dim = self.s_block.expansion * 64 * 2**(
len(self.stage_blocks) - 1)
# hint knowlege, align teacher and student
self.inplanes = 64
# TODO: Add to config file
self.align_layers_output_channel_size = [256, 512, 1024, 2048]
if self.apply_block_wise_alignment:
for out_channel in self.align_layers_output_channel_size:
input_channel = out_channel // self.t_s_ratio
self.align_layers.append(
nn.Conv2d(input_channel, out_channel, 3, padding=1))
# print("self.inplanes:{}".format(self.inplanes))
if self.feature_adaption and self.conv_downsample:
self.adaption_channels = [256, 512, 1024, 2048]
self.adaption_layers = nn.ModuleList()
for adaption_channel in self.adaption_channels:
self.adaption_layers.append(
nn.Conv2d(
adaption_channel,
adaption_channel // self.t_s_ratio,
3,
padding=1))
'''
self.adaption_layers.append(
nn.Conv2d(
adaption_channel,
adaption_channel // self.t_s_ratio,
1,
padding=0))
'''
if self.kernel_adaption:
'''
self.adaption_channels = [[[64, 64, 64, 64], [256, 64, 64],
[256, 64, 64]],
[[256, 128, 128, 256], [512, 128, 128],
[512, 128, 128], [512, 128, 128]],
[[512, 256, 256, 512], [1024, 256, 256],
[1024, 256, 256], [1024, 256, 256],
[1024, 256, 256], [1024, 256, 256]],
[[1024, 512, 512, 1024],
[2048, 512, 512], [2048, 512, 512]]]
self.linear_channels = [[[64, 64, 256, 256], [64, 64, 256],
[64, 64, 256]],
[[128, 128, 512, 512], [128, 128, 512],
[128, 128, 512], [128, 128, 512]],
[[256, 256, 1024, 1024], [256, 256, 1024],
[256, 256, 1024], [256, 256, 1024],
[256, 256, 1024], [256, 256, 1024]],
[[512, 512, 2048, 2048], [512, 512, 2048],
[512, 512, 2048]]]
'''
'''
self.adaption_channels = [[[64, -1, -1, -1], [256, -1, -1], [256, -1, -1]],
[[256, -1, -1, -1], [512, -1, -1], [512, -1, -1], [512, -1, -1]],
[[512, -1, -1, -1], [1024, -1, -1], [1024, -1, -1], [1024, -1, -1], [1024, -1, -1],
[1024, -1, -1]], [[1024, -1, -1, -1], [2048, -1, -1], [2048, -1, -1]]]
self.linear_channels = [[[64, -1, -1, -1], [64, -1, -1], [64, -1, -1]],
[[128, -1, -1, -1], [128, -1, -1], [128, -1, -1], [128, -1, -1]],
[[256, -1, -1, -1], [256, -1, -1], [256, -1, -1], [256, -1, -1], [256, -1, -1], [256, -1, -1]],
[[512, -1, -1, -1], [512, -1, -1], [512, -1, -1]]]
'''
'''
# one block alignment
self.adaption_channels = [[[64, -1, -1, -1], [-1, -1, -1],
[-1, -1, -1]],
[[256, -1, -1, -1], [-1, -1, -1],
[-1, -1, -1], [-1, -1, -1]],
[[512, -1, -1, -1], [-1, -1, -1],
[-1, -1, -1], [-1, -1, -1],
[-1, -1, -1], [-1, -1, -1]],
[[1024, -1, -1, -1], [-1, -1, -1],
[-1, -1, -1]]]
self.linear_channels = [[[64, -1, -1, -1], [-1, -1, -1],
[-1, -1, -1]],
[[128, -1, -1, -1], [-1, -1, -1],
[-1, -1, -1], [-1, -1, -1]],
[[256, -1, -1, -1], [-1, -1, -1],
[-1, -1, -1], [-1, -1, -1], [-1, -1, -1],
[-1, -1, -1]],
[[512, -1, -1, -1], [-1, -1, -1],
[-1, -1, -1]]]
'''
'''
# two block alignment
self.adaption_channels = [[[64, -1, -1, -1], [256, -1, -1],
[-1, -1, -1]],
[[256, -1, -1, -1], [512, -1, -1],
[-1, -1, -1], [-1, -1, -1]],
[[512, -1, -1, -1], [1024, -1, -1],
[-1, -1, -1], [-1, -1, -1],
[-1, -1, -1], [-1, -1, -1]],
[[1024, -1, -1, -1], [2048, -1, -1],
[-1, -1, -1]]]
self.linear_channels = [[[64, -1, -1, -1], [64, -1, -1],
[-1, -1, -1]],
[[128, -1, -1, -1], [128, -1, -1],
[-1, -1, -1], [-1, -1, -1]],
[[256, -1, -1, -1], [256, -1, -1],
[-1, -1, -1], [-1, -1, -1], [-1, -1, -1],
[-1, -1, -1]],
[[512, -1, -1, -1], [512, -1, -1],
[-1, -1, -1]]]
'''
'''
# middle block alignment
self.adaption_channels = [[[-1, 64, -1, -1], [-1, 64, -1],
[-1, 64, -1]],
[[-1, 128, -1, -1], [-1, 128, -1],
[-1, 128, -1], [-1, 128, -1]],
[[-1, 256, -1, -1], [-1, 256, -1],
[-1, 256, -1], [-1, 256, -1],
[-1, 256, -1], [-1, 256, -1]],
[[-1, 512, -1, -1],
[-1, 512, -1], [-1, 512, -1]]]
self.linear_channels = [[[-1, 64, -1, -1], [-1, 64, -1],
[-1, 64, -1]],
[[-1, 128, -1, -1], [-1, 128, -1],
[-1, 128, -1], [-1, 128, -1]],
[[-1, 256, -1, -1], [-1, 256, -1],
[-1, 256, -1], [-1, 256, -1],
[-1, 256, -1], [-1, 256, -1]],
[[-1, 512, -1, -1], [-1, 512, -1],
[-1, 512, -1]]]
'''
'''
# three block alignment
self.adaption_channels = [[[64, -1, -1, -1], [256, -1, -1], [256, -1, -1]],
[[256, -1, -1, -1], [512, -1, -1], [512, -1, -1], [-1, -1, -1]],
[[512, -1, -1, -1], [1024, -1, -1], [1024, -1, -1], [-1, -1, -1], [-1, -1, -1],
[-1, -1, -1]], [[1024, -1, -1, -1], [2048, -1, -1], [2048, -1, -1]]]
self.linear_channels = [[[64, -1, -1, -1], [64, -1, -1], [64, -1, -1]],
[[128, -1, -1, -1], [128, -1, -1], [128, -1, -1], [-1, -1, -1]],
[[256, -1, -1, -1], [256, -1, -1], [256, -1, -1], [-1, -1, -1], [-1, -1, -1], [-1, -1, -1]],
[[512, -1, -1, -1], [512, -1, -1], [512, -1, -1]]]
'''
# all block alignment
'''
self.adaption_channels = [[[64, -1, -1, -1], [256, -1, -1], [256, -1, -1]],
[[256, -1, -1, -1], [512, -1, -1], [512, -1, -1], [512, -1, -1]],
[[512, -1, -1, -1], [1024, -1, -1], [1024, -1, -1], [1024, -1, -1], [1024, -1, -1],
[1024, -1, -1]], [[1024, -1, -1, -1], [2048, -1, -1], [2048, -1, -1]]]
self.linear_channels = [[[64, -1, -1, -1], [64, -1, -1], [64, -1, -1]],
[[128, -1, -1, -1], [128, -1, -1], [128, -1, -1], [128, -1, -1]],
[[256, -1, -1, -1], [256, -1, -1], [256, -1, -1], [256, -1, -1], [256, -1, -1], [256, -1, -1]],
[[512, -1, -1, -1], [512, -1, -1], [512, -1, -1]]]
'''
'''
# shallow block2
self.adaption_channels = [[[64, 64, -1, -1], [256, 64, -1],
[256, 64, -1]],
[[256, 128, -1, -1], [512, 128, -1],
[512, 128, -1], [512, 128, -1]],
[[512, 256, -1, -1], [1024, 256, -1],
[1024, 256, -1], [1024, 256, -1],
[1024, 256, -1], [1024, 256, -1]],
[[1024, 512, -1, -1], [2048, 512, -1],
[2048, 512, -1]]]
self.linear_channels = [[[64, 64, -1, -1], [64, 64, -1],
[64, 64, -1]],
[[128, 128, -1, -1], [128, 128, -1],
[128, 128, -1], [128, 128, -1]],
[[256, 256, -1, -1], [256, 256, -1],
[256, 256, -1], [256, 256, -1],
[256, 256, -1], [256, 256, -1]],
[[512, 512, -1, -1], [512, 512, -1],
[512, 512, -1]]]
'''
# deep block1
self.adaption_channels = [[[-1, -1, -1, -1], [-1, -1, -1],
[-1, -1, 64]],
[[-1, -1, -1, -1], [-1, -1, -1],
[-1, -1, -1], [-1, -1, 128]],
[[-1, -1, -1, -1], [-1, -1, -1],
[-1, -1, -1], [-1, -1, -1],
[-1, -1, -1], [-1, -1, 256]],
[[-1, -1, -1, -1],
[-1, -1, -1], [-1, -1, 512]]]
self.linear_channels = [[[-1, -1, -1, -1], [-1, -1, -1],
[-1, -1, 256]],
[[-1, -1, -1, -1], [-1, -1, -1],
[-1, -1, -1], [-1, -1, 512]],
[[-1, -1, -1, -1], [-1, -1, -1],
[-1, -1, -1], [-1, -1, -1],
[-1, -1, -1], [-1, -1, 1024]],
[[-1, -1, -1, -1], [-1, -1, -1],
[-1, -1, 2048]]]
'''
# deep block2
self.adaption_channels = [[[-1, -1, -1, -1], [-1, -1, -1],
[-1, 64, 64]],
[[-1, -1, -1, -1], [-1, -1, -1],
[-1, -1, -1], [-1, 128, 128]],
[[-1, -1, -1, -1], [-1, -1, -1],
[-1, -1, -1], [-1, -1, -1],
[-1, -1, -1], [-1, 256, 256]],
[[-1, -1, -1, -1], [-1, -1, -1],
[-1, 512, 512]]]
self.linear_channels = [[[-1, -1, -1, -1], [-1, -1, -1],
[-1, 64, 256]],
[[-1, -1, -1, -1], [-1, -1, -1],
[-1, -1, -1], [-1, 128, 512]],
[[-1, -1, -1, -1], [-1, -1, -1],
[-1, -1, -1], [-1, -1, -1], [-1, -1, -1],
[-1, 256, 1024]],
[[-1, -1, -1, -1], [-1, -1, -1],
[-1, 512, 2048]]]
'''
'''
# deep block3
self.adaption_channels = [[[-1, -1, -1, -1], [-1, -1, -1],
[256, 64, 64]],
[[-1, -1, -1, -1], [-1, -1, -1],
[-1, -1, -1], [512, 128, 128]],
[[-1, -1, -1, -1], [-1, -1, -1],
[-1, -1, -1], [-1, -1, -1],
[-1, -1, -1], [1024, 256, 256]],
[[-1, -1, -1, -1],
[-1, -1, -1], [2048, 512, 512]]]
self.linear_channels = [[[-1, -1, -1, -1], [-1, -1, -1],
[64, 64, 256]],
[[-1, -1, -1, -1], [-1, -1, -1],
[-1, -1, -1], [128, 128, 512]],
[[-1, -1, -1, -1], [-1, -1, -1],
[-1, -1, -1], [-1, -1, -1],
[-1, -1, -1], [256, 256, 1024]],
[[-1, -1, -1, -1], [-1, -1, -1],
[512, 512, 2048]]]
'''
self.downsample_layers_group = nn.ModuleList()
self.adaption_layers_group = nn.ModuleList()
self.linear_layers_group = nn.ModuleList()
self.conv1_adaption_3d = nn.Conv3d(
64,
64 // self.t_s_ratio,
kernel_size=(3 // self.t_s_ratio, 1, 1),
padding=0)
'''
self.conv1_linear = nn.Linear(64, 32, bias=False)
for i in range(len(self.adaption_channels)):
adaption_blocks = nn.ModuleList()
linear_blocks = nn.ModuleList()
for j in range(len(self.adaption_channels[i])):
adaption_layers = nn.ModuleList()
for adaption_channel in self.adaption_channels[i][j]:
adaption_layers.append(
nn.Conv2d(
adaption_channel,
adaption_channel // self.t_s_ratio,
1,
padding=0))
adaption_blocks.append(adaption_layers)
self.adaption_layers_group.append(adaption_blocks)
for k in range(len(self.linear_channels[i])):
linear_layers = nn.ModuleList()
for linear_channel in self.linear_channels[i][k]:
linear_layers.append(
nn.Linear(
linear_channel,
linear_channel // self.t_s_ratio,
bias=False))
linear_blocks.append(linear_layers)
self.linear_layers_group.append(linear_blocks)
'''
# 3d conv
for i in range(len(self.adaption_channels)):
downsample_blocks = nn.ModuleList()
adaption_blocks = nn.ModuleList()
for j in range(len(self.adaption_channels[i])):
downsample_layers = nn.ModuleList()
adaption_layers = nn.ModuleList()
for linear_channel, adaption_channel in zip(
self.linear_channels[i][j],
self.adaption_channels[i][j]):
if linear_channel != -1 and adaption_channel != -1:
downsample_layers.append(
nn.Conv2d(
adaption_channel,
adaption_channel // self.t_s_ratio,
# adaption_channel * 3,
kernel_size=1,
padding=0))
adaption_layers.append(
nn.Conv3d(
linear_channel,
linear_channel // self.t_s_ratio,
kernel_size=(3, 1, 1),
# kernel_size=(1, 1,
# 1), #kernel_size=(3, 3, 3),
padding=(1, 0, 0))) # padding=(1, 1, 1)))
'''
adaption_layers.append(
nn.Conv3d(
linear_channel,
linear_channel // self.t_s_ratio,
kernel_size=(
adaption_channel // self.t_s_ratio + 1,
3, 3),
padding=(0, 1, 1)))
'''
else:
downsample_layers.append(nn.ModuleList())
adaption_layers.append(nn.ModuleList())
downsample_blocks.append(downsample_layers)
adaption_blocks.append(adaption_layers)
self.downsample_layers_group.append(downsample_blocks)
self.adaption_layers_group.append(adaption_blocks)
    @property
    def norm1(self):
        """nn.Module: the teacher stem's norm layer, registered under the
        generated name ``self.norm1_name`` by ``_make_stem_layer``."""
        return getattr(self, self.norm1_name)
    @property
    def s_norm1(self):
        """nn.Module: the student stem's norm layer, registered under the
        generated name ``self.s_norm1_name`` by ``_make_s_stem_layer``."""
        return getattr(self, self.s_norm1_name)
    def _make_stem_layer(self, in_channels):
        """Build the teacher stem: 7x7/s2 conv -> norm -> ReLU -> 3x3/s2 maxpool."""
        self.conv1 = build_conv_layer(
            self.conv_cfg,
            in_channels,
            64,
            kernel_size=7,
            stride=2,
            padding=3,
            bias=False)
        # Register the norm under its generated name so the ``norm1``
        # property can retrieve it.
        self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1)
        self.add_module(self.norm1_name, norm1)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    def _make_s_stem_layer(self, in_channels):
        """Build the student stem; output channels pruned by ``t_s_ratio``."""
        self.s_conv1 = build_conv_layer(
            self.conv_cfg,
            in_channels,
            64 // self.t_s_ratio,
            kernel_size=7,
            stride=2,
            padding=3,
            bias=False)
        # postfix=2 keeps the student norm's registered name distinct from
        # the teacher's (retrieved via the ``s_norm1`` property).
        self.s_norm1_name, s_norm1 = build_norm_layer(
            self.norm_cfg, 64 // self.t_s_ratio, postfix=2)
        self.add_module(self.s_norm1_name, s_norm1)
        self.s_relu = nn.ReLU(inplace=True)
        self.s_maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    def _freeze_stages(self):
        """Freeze the teacher stem and the first ``frozen_stages`` stages.

        Only two configurations are supported by the asserts below: a fully
        frozen teacher (``freeze_teacher`` with ``frozen_stages == 4``) or
        freezing just the stem plus stage 1 (``frozen_stages == 1``).
        Frozen modules are put in eval mode (fixes BN running stats) and
        their parameters stop receiving gradients.
        """
        if self.freeze_teacher:
            assert self.frozen_stages == 4
        else:
            assert self.frozen_stages == 1
        if self.frozen_stages >= 0:
            # Stem: keep BN statistics fixed and stop gradients.
            self.norm1.eval()
            for m in [self.conv1, self.norm1]:
                for param in m.parameters():
                    param.requires_grad = False
            for i in range(1, self.frozen_stages + 1):
                m = getattr(self, 'layer{}'.format(i))
                m.eval()
                for param in m.parameters():
                    param.requires_grad = False
    def adapt_kernel_train(self, s_x, j, l, t_layer, s_layer):
        """Run one student bottleneck using kernels adapted from the teacher.

        Args:
            s_x (Tensor): student input feature map for this block.
            j (int): stage index (0-based).
            l (int): block index within the stage.
            t_layer: the corresponding teacher bottleneck.
            s_layer: the corresponding student bottleneck.

        For each conv slot that has a registered adapter pair (non-empty
        entries in ``downsample_layers_group``/``adaption_layers_group``),
        the teacher kernel is channel-reduced by the 1x1 adapter and then
        bilinearly resized to the student kernel's in/out channel shape; the
        student path is evaluated with ``F.conv2d`` using that derived
        kernel, while the student's own BN layers are still applied.  Slots
        without adapters fall back to the student's own convs.

        Returns:
            Tensor: the block output after residual addition and ReLU.
        """
        identity = s_x
        '''
        linear_layers = self.linear_layers_group[j][l]
        '''
        # conv
        t_layer_conv1_data = t_layer.conv1.weight  # .detach()
        t_layer_conv2_data = t_layer.conv2.weight  # .detach()
        t_layer_conv3_data = t_layer.conv3.weight  # .detach()
        downsamples_layers = self.downsample_layers_group[j][l]
        adaption_layers = self.adaption_layers_group[j][l]
        if adaption_layers[0] and downsamples_layers[0]:
            t_layer_conv1_data = downsamples_layers[0](t_layer_conv1_data)
            # match the adaption kernel size for adaption
            t_layer_conv1_data = F.interpolate(
                t_layer_conv1_data.permute(2, 3, 0, 1).detach(),
                size=s_layer.conv1.weight.shape[:2],
                mode='bilinear').permute(2, 3, 0, 1)
            '''
            t_layer_conv1_data = torch.squeeze(
                adaption_layers[0](torch.unsqueeze(t_layer_conv1_data,
                                                 axis=0)), 0)
            '''
            # NOTE: Manually apply convolution on student features
            # TODO: F.relu cannot be inplace, figure out why
            s_out = F.conv2d(s_x, t_layer_conv1_data, stride=(1, 1))
        else:
            s_out = s_layer.conv1(s_x)
        s_out = s_layer.bn1(s_out)
        s_out = F.relu(s_out)
        if adaption_layers[1] and downsamples_layers[1]:
            t_layer_conv2_data = downsamples_layers[1](t_layer_conv2_data)
            # match the adaption kernel size for adaption
            t_layer_conv2_data = F.interpolate(
                t_layer_conv2_data.permute(2, 3, 0, 1).detach(),
                size=s_layer.conv2.weight.shape[:2],
                mode='bilinear').permute(2, 3, 0, 1)
            '''
            t_layer_conv2_data = torch.squeeze(
                adaption_layers[1](torch.unsqueeze(t_layer_conv2_data,
                                                 axis=0)), 0)
            '''
            # NOTE: downsample first block from second bottleneck
            if j >= 1 and l == 0:
                s_out = F.conv2d(
                    s_out, t_layer_conv2_data, stride=(2, 2), padding=(1, 1))
            else:
                s_out = F.conv2d(
                    s_out, t_layer_conv2_data, stride=(1, 1), padding=(1, 1))
        else:
            s_out = s_layer.conv2(s_out)
        s_out = s_layer.bn2(s_out)
        s_out = F.relu(s_out)
        if adaption_layers[2] and downsamples_layers[2]:
            t_layer_conv3_data = downsamples_layers[2](t_layer_conv3_data)
            # match the adaption kernel size for adaption
            t_layer_conv3_data = F.interpolate(
                t_layer_conv3_data.permute(2, 3, 0, 1).detach(),
                size=s_layer.conv3.weight.shape[:2],
                mode='bilinear').permute(2, 3, 0, 1)
            '''
            t_layer_conv3_data = torch.squeeze(
                adaption_layers[2](torch.unsqueeze(t_layer_conv3_data,
                                                 axis=0)), 0)
            '''
            s_out = F.conv2d(s_out, t_layer_conv3_data, stride=(1, 1))
        else:
            s_out = s_layer.conv3(s_out)
        s_out = s_layer.bn3(s_out)
        if t_layer.downsample is not None:
            t_layer_downsample_conv_data = t_layer.downsample[0].weight.data
            if adaption_layers[3] and downsamples_layers[3]:
                t_layer_downsample_conv_data = downsamples_layers[3](
                    t_layer_downsample_conv_data)
                # match the adaption kernel size for adaption
                t_layer_downsample_conv_data = F.interpolate(
                    t_layer_downsample_conv_data.permute(2, 3, 0, 1).detach(),
                    size=s_layer.downsample[0].weight.shape[:2],
                    mode='bilinear').permute(2, 3, 0, 1)
                '''
                t_layer_downsample_conv_data = torch.squeeze(
                    adaption_layers[3](torch.unsqueeze(
                        t_layer_downsample_conv_data, axis=0)), 0)
                '''
                # Stage boundaries (j >= 1) halve the spatial resolution.
                if j >= 1:
                    identity = F.conv2d(
                        s_x, t_layer_downsample_conv_data, stride=(2, 2))
                else:
                    identity = F.conv2d(
                        s_x, t_layer_downsample_conv_data, stride=(1, 1))
            else:
                identity = s_layer.downsample[0](s_x)
                identity = s_layer.downsample[1](identity)
        s_out += identity
        s_out = F.relu(s_out)
        return s_out
    def adapt_kernel_inference(self, j, l, t_layer, s_layer):
        """Overwrite student block kernels with adapted teacher kernels.

        Unlike ``adapt_kernel_train`` this does not run the block: it
        permanently replaces the student's conv (and downsample) weights with
        detached teacher kernels passed through the 1x1 channel reducers and
        the 3-D conv adapters, so the student branch can then run normally.
        Also re-derives the student stem kernel from ``self.conv1`` via
        ``conv1_adaption_3d`` on every call.
        """
        '''
        linear_layers = self.linear_layers_group[j][l]
        '''
        # conv
        t_layer_conv1_data = t_layer.conv1.weight.detach()
        t_layer_conv2_data = t_layer.conv2.weight.detach()
        t_layer_conv3_data = t_layer.conv3.weight.detach()
        # Stem kernel: adapt the teacher conv1 weight with the 3-D conv.
        s_conv1_weight = torch.squeeze(
            self.conv1_adaption_3d(torch.unsqueeze(self.conv1.weight.data, 0)),
            0)
        self.s_conv1.weight = torch.nn.Parameter(s_conv1_weight)
        downsamples_layers = self.downsample_layers_group[j][l]
        adaption_layers = self.adaption_layers_group[j][l]
        if adaption_layers[0] and downsamples_layers[0]:
            t_layer_conv1_data = downsamples_layers[0](t_layer_conv1_data)
            # match the adaption kernel size for adaption
            t_layer_conv1_data = torch.squeeze(
                adaption_layers[0](torch.unsqueeze(t_layer_conv1_data,
                                                 axis=0)), 0)
            s_layer.conv1.weight = torch.nn.Parameter(t_layer_conv1_data)
        if adaption_layers[1] and downsamples_layers[1]:
            t_layer_conv2_data = downsamples_layers[1](t_layer_conv2_data)
            # match the adaption kernel size for adaption
            t_layer_conv2_data = torch.squeeze(
                adaption_layers[1](torch.unsqueeze(t_layer_conv2_data,
                                                 axis=0)), 0)
            s_layer.conv2.weight = torch.nn.Parameter(t_layer_conv2_data)
        if adaption_layers[2] and downsamples_layers[2]:
            t_layer_conv3_data = downsamples_layers[2](t_layer_conv3_data)
            # match the adaption kernel size for adaption
            t_layer_conv3_data = torch.squeeze(
                adaption_layers[2](torch.unsqueeze(t_layer_conv3_data,
                                                 axis=0)), 0)
            s_layer.conv3.weight = torch.nn.Parameter(t_layer_conv3_data)
        if s_layer.downsample is not None:
            t_layer_downsample_conv_data = t_layer.downsample[0].weight.data
            if adaption_layers[3] and downsamples_layers[3]:
                t_layer_downsample_conv_data = downsamples_layers[3](
                    t_layer_downsample_conv_data)
                # match the adaption kernel size for adaption
                t_layer_downsample_conv_data = torch.squeeze(
                    adaption_layers[3](torch.unsqueeze(
                        t_layer_downsample_conv_data, axis=0)), 0)
                s_layer.downsample[0].weight = torch.nn.Parameter(
                    t_layer_downsample_conv_data)
    def copy_backbone(self):
        """Blend teacher weights into the student branch for initialization.

        Each student conv weight becomes ``factor * student +
        (1 - factor) * resized_teacher`` with ``factor = 0.5``, where the
        teacher kernel is bilinearly interpolated down to the student
        kernel's in/out channel shape (the ``permute(2, 3, 0, 1)`` moves the
        channel dims into the spatial positions that ``F.interpolate``
        resizes).  Applied to the stem and to every bottleneck conv and
        downsample projection.
        """
        factor = 0.5
        # stem layer
        self.s_conv1.weight.data = factor * self.s_conv1.weight.data + (
            1 - factor) * F.interpolate(
                self.conv1.weight.data.permute(2, 3, 0, 1).detach(),
                size=self.s_conv1.weight.shape[:2],
                mode='bilinear').permute(2, 3, 0, 1)
        # ``hasattr(m, 's_layer1')`` matches this backbone module itself.
        for m in self.modules():
            if hasattr(m, 's_layer1'):
                t_bottleneck_list = [m.layer1, m.layer2, m.layer3, m.layer4]
                s_bottleneck_list = [
                    m.s_layer1, m.s_layer2, m.s_layer3, m.s_layer4
                ]
                # t_bottleneck_list = [t_layers1]
                # s_bottleneck_list = [s_layers1]
                for t_layers, s_layers in zip(t_bottleneck_list,
                                              s_bottleneck_list):
                    for t_layer, s_layer in zip(t_layers, s_layers):
                        # conv
                        t_layer_conv1_data = t_layer.conv1.weight.data.permute(
                            2, 3, 0, 1).detach()
                        s_layer.conv1.weight.data = factor * s_layer.conv1.weight.data + (
                            1 - factor) * F.interpolate(
                                t_layer_conv1_data,
                                size=s_layer.conv1.weight.shape[:2],
                                mode='bilinear').permute(2, 3, 0, 1)
                        t_layer_conv2_data = t_layer.conv2.weight.data.permute(
                            2, 3, 0, 1).detach()
                        s_layer.conv2.weight.data = factor * s_layer.conv2.weight.data + (
                            1 - factor) * F.interpolate(
                                t_layer_conv2_data,
                                size=s_layer.conv2.weight.shape[:2],
                                mode='bilinear').permute(2, 3, 0, 1)
                        t_layer_conv3_data = t_layer.conv3.weight.data.permute(
                            2, 3, 0, 1).detach()
                        s_layer.conv3.weight.data = factor * s_layer.conv3.weight.data + (
                            1 - factor) * F.interpolate(
                                t_layer_conv3_data,
                                size=s_layer.conv3.weight.shape[:2],
                                mode='bilinear').permute(2, 3, 0, 1)
                        if t_layer.downsample is not None:
                            # downsample
                            t_layer_downsample_conv_data = t_layer.downsample[
                                0].weight.data.permute(2, 3, 0, 1)
                            s_layer.downsample[
                                0].weight.data = factor * s_layer.downsample[
                                    0].weight.data + (
                                        1 - factor) * F.interpolate(
                                            t_layer_downsample_conv_data,
                                            size=s_layer.downsample[0].weight.
                                            shape[:2],
                                            mode='bilinear').permute(
                                                2, 3, 0, 1)
    def init_weights(self, pretrained=None):
        """Initialize teacher/student weights.

        Args:
            pretrained (str | None): path to a pretrained checkpoint.  When
                given, it is loaded non-strictly and, if ``good_initial`` is
                set, the student is seeded from the teacher via
                ``copy_backbone``.  When None, convs get Kaiming init and
                norms constant init, with the usual DCN-offset and
                zero-residual special cases.

        Raises:
            TypeError: if ``pretrained`` is neither str nor None.
        """
        if isinstance(pretrained, str):
            logger = logging.getLogger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
            if self.good_initial:
                self.copy_backbone()
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
                    constant_init(m, 1)
            if self.dcn is not None:
                for m in self.modules():
                    if isinstance(m, Bottleneck) and hasattr(
                            m, 'conv2_offset'):
                        constant_init(m.conv2_offset, 0)
            # Zero-init the last norm of each block so it starts as identity.
            if self.zero_init_residual:
                for m in self.modules():
                    if isinstance(m, Bottleneck):
                        constant_init(m.norm3, 0)
                    elif isinstance(m, BasicBlock):
                        constant_init(m.norm2, 0)
        else:
            raise TypeError('pretrained must be a str or None')
        if self.feature_adaption and self.conv_downsample:
            for m in self.adaption_layers:
                normal_init(m, std=0.01)
    def forward(self, x):
        """Run the teacher and student branches.

        Returns (each a tuple indexed by ``out_indices``):
            * ``(teacher_outs, student_outs)`` by default;
            * plus ``block_distill_pairs`` (``[student, adapted_teacher]``
              feature pairs) when ``apply_block_wise_alignment`` is set;
            * plus pure-student outputs when ``pure_student_term`` is set.
        """
        # Adapt kernel once at inference
        if not self.train_mode and self.kernel_adaption and self.train_step == 0:
            for j, s_layer_name in enumerate(self.s_res_layers):
                s_layer_name = self.s_res_layers[j]
                t_layer_name = self.res_layers[j]
                s_bottlenecks = getattr(self, s_layer_name)
                t_bottlenecks = getattr(self, t_layer_name)
                for l, (t_layer, s_layer) in enumerate(
                        zip(t_bottlenecks, s_bottlenecks)):
                    self.adapt_kernel_inference(j, l, t_layer, s_layer)
        self.train_step += 1
        '''
        if self.spatial_ratio != 1:
            s_x = F.interpolate(x, scale_factor=1 / self.spatial_ratio)
        else:
            s_x = x
        '''
        # Teacher stem.
        x = self.conv1(x)
        x = self.norm1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        '''
        if self.kernel_adaption and self.train_mode:
            # s_conv1_weight = self.conv1_linear(
            #     self.conv1.weight.data.permute(1, 2, 3,
            #                                    0)).permute(3, 0, 1, 2)
            s_conv1_weight = torch.squeeze(
                self.conv1_adaption_3d(
                    torch.unsqueeze(self.conv1.weight.data, 0)), 0)
            s_x = F.conv2d(s_x, s_conv1_weight, stride=(2, 2), padding=(3, 3))
        else:
            s_x = self.s_conv1(s_x)
        s_x = self.s_norm1(s_x)
        s_x = self.s_relu(s_x)
        s_x = self.s_maxpool(s_x)
        '''
        # Student stream starts from the (optionally downscaled) teacher
        # stem output — the student stem above is currently disabled.
        if self.spatial_ratio != 1:
            s_x = F.interpolate(x, scale_factor=1 / self.spatial_ratio)
        else:
            s_x = x
        if self.pure_student_term:
            pure_s_x = s_x
        s_pure_outs = []
        inputs = []
        outs = []
        s_outs = []
        # hint_losses = []
        block_distill_pairs = []
        # Teacher stages.
        for i, layer_name in enumerate(self.res_layers):
            res_layer = getattr(self, layer_name)
            '''
            if self.feature_adaption:
                inputs.append(x)
            '''
            x = res_layer(x)
            if i in self.out_indices:
                outs.append(x)
        # student net
        for j, s_layer_name in enumerate(self.s_res_layers):
            s_res_layer = getattr(self, s_layer_name)
            # FIXME: no gradient update in adaption layers
            if self.kernel_adaption and self.train_mode:
                # no 'copy' bn yet
                # with torch.no_grad():
                s_layer_name = self.s_res_layers[j]
                t_layer_name = self.res_layers[j]
                s_bottlenecks = getattr(self, s_layer_name)
                t_bottlenecks = getattr(self, t_layer_name)
                for l, (t_layer, s_layer) in enumerate(
                        zip(t_bottlenecks, s_bottlenecks)):
                    s_x = self.adapt_kernel_train(s_x, j, l, t_layer, s_layer)
            else:
                s_x = s_res_layer(s_x)
            if self.feature_adaption and self.train_mode:
                # adaption_factor = 0.5
                # adaption_factor = self.train_step / (7330 * 12)
                # self.train_step = 7330 * 7 + 8
                # NOTE(review): 7330 looks like iterations-per-epoch for a
                # specific dataset — hard-coded schedule; confirm for reuse.
                # Factor ramps 0 -> 1 per stage, deepest stage (j == 3)
                # first.
                ep = 2  # 4
                if int(self.train_step / (7330 * ep)) > 3 - j:
                    adaption_factor = 1
                elif int(self.train_step / (7330 * ep)) == 3 - j:
                    adaption_factor = (self.train_step %
                                       (7330 * ep)) / (7330 * ep)
                else:
                    adaption_factor = 0
                if self.pure_student_term:
                    pure_s_x = s_res_layer(pure_s_x)
                if self.conv_downsample:
                    # x_detached = inputs[j].detach()
                    x_detached = outs[j].detach()
                    x_detached_adapted = self.adaption_layers[j](x_detached)
                    '''
                    # TODO: Try autoencoder structure
                    '''
                    '''
                    _, x_detached_batch, x_detached_w, x_detached_h = x_detached_adapted.shape
                    rand_list = torch.randperm(
                        x_detached_w * x_detached_h)[:int(adaption_factor *
                                                          x_detached_w *
                                                          x_detached_h)]
                    adaption_weights = torch.zeros_like(
                        x_detached_adapted).view(-1, x_detached_batch,
                                                 x_detached_w * x_detached_h)
                    adaption_weights[:, :, rand_list] = 1
                    adaption_weights = adaption_weights.reshape(
                        -1, x_detached_batch, x_detached_w, x_detached_h)
                    adaption_weights[adaption_weights != 1] = 0.5
                    s_x = adaption_weights * s_x + (
                        1 - adaption_weights) * x_detached_adapted
                    '''
                    # align to teacher network and get the loss
                    # '''
                    s_x = adaption_factor * s_x + (
                        1 - adaption_factor) * x_detached_adapted
                    # '''
                    if self.apply_block_wise_alignment:
                        block_distill_pairs.append([s_x, x_detached_adapted])
                else:
                    x_detached = inputs[j].permute(2, 3, 0, 1).detach()
                    s_x = adaption_factor * s_x + (
                        1 - adaption_factor) * F.interpolate(
                            x_detached, size=s_x.shape[:2],
                            mode='bilinear').permute(2, 3, 0, 1)
            if j in self.out_indices:
                s_outs.append(s_x)
                if self.pure_student_term:
                    s_pure_outs.append(pure_s_x)
        if self.apply_block_wise_alignment:
            return tuple(outs), tuple(s_outs), tuple(block_distill_pairs)
        elif self.pure_student_term:
            return tuple(outs), tuple(s_outs), tuple(s_pure_outs)
        else:
            return tuple(outs), tuple(s_outs)
def train(self, mode=True):
super(ResTSNet, self).train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
# trick: eval have effect on BatchNorm only
if isinstance(m, _BatchNorm):
m.eval()
| [
"logging.getLogger",
"torch.nn.functional.conv2d",
"torch.nn.ReLU",
"mmcv.cnn.kaiming_init",
"torch.nn.Sequential",
"torch.nn.functional.interpolate",
"mmcv.cnn.constant_init",
"torch.nn.ModuleList",
"torch.unsqueeze",
"mmcv.runner.load_checkpoint",
"torch.nn.Conv3d",
"mmdet.ops.ContextBlock",... | [((10271, 10293), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (10284, 10293), True, 'import torch.nn as nn\n'), ((1830, 1851), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1837, 1851), True, 'import torch.nn as nn\n'), ((6405, 6426), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (6412, 6426), True, 'import torch.nn as nn\n'), ((15931, 15946), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (15944, 15946), True, 'import torch.nn as nn\n'), ((37367, 37388), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (37374, 37388), True, 'import torch.nn as nn\n'), ((37412, 37460), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)'}), '(kernel_size=3, stride=2, padding=1)\n', (37424, 37460), True, 'import torch.nn as nn\n'), ((37921, 37942), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (37928, 37942), True, 'import torch.nn as nn\n'), ((37968, 38016), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)'}), '(kernel_size=3, stride=2, padding=1)\n', (37980, 38016), True, 'import torch.nn as nn\n'), ((40017, 40030), 'torch.nn.functional.relu', 'F.relu', (['s_out'], {}), '(s_out)\n', (40023, 40030), True, 'import torch.nn.functional as F\n'), ((41105, 41118), 'torch.nn.functional.relu', 'F.relu', (['s_out'], {}), '(s_out)\n', (41111, 41118), True, 'import torch.nn.functional as F\n'), ((43208, 43221), 'torch.nn.functional.relu', 'F.relu', (['s_out'], {}), '(s_out)\n', (43214, 43221), True, 'import torch.nn.functional as F\n'), ((43744, 43778), 'torch.nn.Parameter', 'torch.nn.Parameter', (['s_conv1_weight'], {}), '(s_conv1_weight)\n', (43762, 43778), False, 'import torch\n'), ((5574, 5715), 'torch.nn.Conv2d', 'nn.Conv2d', (['planes', '(self.deformable_groups * offset_channels)'], {'kernel_size': 
'(3)', 'stride': 'self.conv2_stride', 'padding': 'dilation', 'dilation': 'dilation'}), '(planes, self.deformable_groups * offset_channels, kernel_size=3,\n stride=self.conv2_stride, padding=dilation, dilation=dilation)\n', (5583, 5715), True, 'import torch.nn as nn\n'), ((6575, 6617), 'mmdet.ops.ContextBlock', 'ContextBlock', ([], {'inplanes': 'gcb_inplanes'}), '(inplanes=gcb_inplanes, **gcb)\n', (6587, 6617), False, 'from mmdet.ops import ContextBlock, DeformConv, ModulatedDeformConv\n'), ((6718, 6763), 'mmdet.models.plugins.GeneralizedAttention', 'GeneralizedAttention', (['planes'], {}), '(planes, **gen_attention)\n', (6738, 6763), False, 'from mmdet.models.plugins import GeneralizedAttention\n'), ((8244, 8276), 'torch.utils.checkpoint.checkpoint', 'cp.checkpoint', (['_inner_forward', 'x'], {}), '(_inner_forward, x)\n', (8257, 8276), True, 'import torch.utils.checkpoint as cp\n'), ((19249, 19264), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (19262, 19264), True, 'import torch.nn as nn\n'), ((32650, 32665), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (32663, 32665), True, 'import torch.nn as nn\n'), ((32707, 32722), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (32720, 32722), True, 'import torch.nn as nn\n'), ((32762, 32777), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (32775, 32777), True, 'import torch.nn as nn\n'), ((32815, 32906), 'torch.nn.Conv3d', 'nn.Conv3d', (['(64)', '(64 // self.t_s_ratio)'], {'kernel_size': '(3 // self.t_s_ratio, 1, 1)', 'padding': '(0)'}), '(64, 64 // self.t_s_ratio, kernel_size=(3 // self.t_s_ratio, 1, 1),\n padding=0)\n', (32824, 32906), True, 'import torch.nn as nn\n'), ((39863, 39911), 'torch.nn.functional.conv2d', 'F.conv2d', (['s_x', 't_layer_conv1_data'], {'stride': '(1, 1)'}), '(s_x, t_layer_conv1_data, stride=(1, 1))\n', (39871, 39911), True, 'import torch.nn.functional as F\n'), ((41764, 41814), 'torch.nn.functional.conv2d', 'F.conv2d', (['s_out', 't_layer_conv3_data'], 
{'stride': '(1, 1)'}), '(s_out, t_layer_conv3_data, stride=(1, 1))\n', (41772, 41814), True, 'import torch.nn.functional as F\n'), ((44311, 44349), 'torch.nn.Parameter', 'torch.nn.Parameter', (['t_layer_conv1_data'], {}), '(t_layer_conv1_data)\n', (44329, 44349), False, 'import torch\n'), ((44759, 44797), 'torch.nn.Parameter', 'torch.nn.Parameter', (['t_layer_conv2_data'], {}), '(t_layer_conv2_data)\n', (44777, 44797), False, 'import torch\n'), ((45207, 45245), 'torch.nn.Parameter', 'torch.nn.Parameter', (['t_layer_conv3_data'], {}), '(t_layer_conv3_data)\n', (45225, 45245), False, 'import torch\n'), ((49220, 49239), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (49237, 49239), False, 'import logging\n'), ((49252, 49314), 'mmcv.runner.load_checkpoint', 'load_checkpoint', (['self', 'pretrained'], {'strict': '(False)', 'logger': 'logger'}), '(self, pretrained, strict=False, logger=logger)\n', (49267, 49314), False, 'from mmcv.runner import load_checkpoint\n'), ((52015, 52068), 'torch.nn.functional.interpolate', 'F.interpolate', (['x'], {'scale_factor': '(1 / self.spatial_ratio)'}), '(x, scale_factor=1 / self.spatial_ratio)\n', (52028, 52068), True, 'import torch.nn.functional as F\n'), ((34484, 34499), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (34497, 34499), True, 'import torch.nn as nn\n'), ((34534, 34549), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (34547, 34549), True, 'import torch.nn as nn\n'), ((40780, 40846), 'torch.nn.functional.conv2d', 'F.conv2d', (['s_out', 't_layer_conv2_data'], {'stride': '(2, 2)', 'padding': '(1, 1)'}), '(s_out, t_layer_conv2_data, stride=(2, 2), padding=(1, 1))\n', (40788, 40846), True, 'import torch.nn.functional as F\n'), ((40910, 40976), 'torch.nn.functional.conv2d', 'F.conv2d', (['s_out', 't_layer_conv2_data'], {'stride': '(1, 1)', 'padding': '(1, 1)'}), '(s_out, t_layer_conv2_data, stride=(1, 1), padding=(1, 1))\n', (40918, 40976), True, 'import torch.nn.functional as F\n'), ((43654, 
43696), 'torch.unsqueeze', 'torch.unsqueeze', (['self.conv1.weight.data', '(0)'], {}), '(self.conv1.weight.data, 0)\n', (43669, 43696), False, 'import torch\n'), ((45843, 45891), 'torch.nn.Parameter', 'torch.nn.Parameter', (['t_layer_downsample_conv_data'], {}), '(t_layer_downsample_conv_data)\n', (45861, 45891), False, 'import torch\n'), ((50369, 50393), 'mmcv.cnn.normal_init', 'normal_init', (['m'], {'std': '(0.01)'}), '(m, std=0.01)\n', (50380, 50393), False, 'from mmcv.cnn import constant_init, kaiming_init, normal_init\n'), ((18976, 19027), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_channel', 'out_channel', '(3)'], {'padding': '(1)'}), '(input_channel, out_channel, 3, padding=1)\n', (18985, 19027), True, 'import torch.nn as nn\n'), ((19391, 19468), 'torch.nn.Conv2d', 'nn.Conv2d', (['adaption_channel', '(adaption_channel // self.t_s_ratio)', '(3)'], {'padding': '(1)'}), '(adaption_channel, adaption_channel // self.t_s_ratio, 3, padding=1)\n', (19400, 19468), True, 'import torch.nn as nn\n'), ((34655, 34670), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (34668, 34670), True, 'import torch.nn as nn\n'), ((34709, 34724), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (34722, 34724), True, 'import torch.nn as nn\n'), ((42816, 42874), 'torch.nn.functional.conv2d', 'F.conv2d', (['s_x', 't_layer_downsample_conv_data'], {'stride': '(2, 2)'}), '(s_x, t_layer_downsample_conv_data, stride=(2, 2))\n', (42824, 42874), True, 'import torch.nn.functional as F\n'), ((42953, 43011), 'torch.nn.functional.conv2d', 'F.conv2d', (['s_x', 't_layer_downsample_conv_data'], {'stride': '(1, 1)'}), '(s_x, t_layer_downsample_conv_data, stride=(1, 1))\n', (42961, 43011), True, 'import torch.nn.functional as F\n'), ((44176, 44219), 'torch.unsqueeze', 'torch.unsqueeze', (['t_layer_conv1_data'], {'axis': '(0)'}), '(t_layer_conv1_data, axis=0)\n', (44191, 44219), False, 'import torch\n'), ((44624, 44667), 'torch.unsqueeze', 'torch.unsqueeze', (['t_layer_conv2_data'], 
{'axis': '(0)'}), '(t_layer_conv2_data, axis=0)\n', (44639, 44667), False, 'import torch\n'), ((45072, 45115), 'torch.unsqueeze', 'torch.unsqueeze', (['t_layer_conv3_data'], {'axis': '(0)'}), '(t_layer_conv3_data, axis=0)\n', (45087, 45115), False, 'import torch\n'), ((45712, 45765), 'torch.unsqueeze', 'torch.unsqueeze', (['t_layer_downsample_conv_data'], {'axis': '(0)'}), '(t_layer_downsample_conv_data, axis=0)\n', (45727, 45765), False, 'import torch\n'), ((49522, 49537), 'mmcv.cnn.kaiming_init', 'kaiming_init', (['m'], {}), '(m)\n', (49534, 49537), False, 'from mmcv.cnn import constant_init, kaiming_init, normal_init\n'), ((49622, 49641), 'mmcv.cnn.constant_init', 'constant_init', (['m', '(1)'], {}), '(m, 1)\n', (49635, 49641), False, 'from mmcv.cnn import constant_init, kaiming_init, normal_init\n'), ((49855, 49887), 'mmcv.cnn.constant_init', 'constant_init', (['m.conv2_offset', '(0)'], {}), '(m.conv2_offset, 0)\n', (49868, 49887), False, 'from mmcv.cnn import constant_init, kaiming_init, normal_init\n'), ((50044, 50069), 'mmcv.cnn.constant_init', 'constant_init', (['m.norm3', '(0)'], {}), '(m.norm3, 0)\n', (50057, 50069), False, 'from mmcv.cnn import constant_init, kaiming_init, normal_init\n'), ((35068, 35162), 'torch.nn.Conv2d', 'nn.Conv2d', (['adaption_channel', '(adaption_channel // self.t_s_ratio)'], {'kernel_size': '(1)', 'padding': '(0)'}), '(adaption_channel, adaption_channel // self.t_s_ratio, kernel_size\n =1, padding=0)\n', (35077, 35162), True, 'import torch.nn as nn\n'), ((35448, 35553), 'torch.nn.Conv3d', 'nn.Conv3d', (['linear_channel', '(linear_channel // self.t_s_ratio)'], {'kernel_size': '(3, 1, 1)', 'padding': '(1, 0, 0)'}), '(linear_channel, linear_channel // self.t_s_ratio, kernel_size=(3,\n 1, 1), padding=(1, 0, 0))\n', (35457, 35553), True, 'import torch.nn as nn\n'), ((36453, 36468), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (36466, 36468), True, 'import torch.nn as nn\n'), ((36521, 36536), 'torch.nn.ModuleList', 
'nn.ModuleList', ([], {}), '()\n', (36534, 36536), True, 'import torch.nn as nn\n'), ((50146, 50171), 'mmcv.cnn.constant_init', 'constant_init', (['m.norm2', '(0)'], {}), '(m.norm2, 0)\n', (50159, 50171), False, 'from mmcv.cnn import constant_init, kaiming_init, normal_init\n'), ((56007, 56069), 'torch.nn.functional.interpolate', 'F.interpolate', (['x_detached'], {'size': 's_x.shape[:2]', 'mode': '"""bilinear"""'}), "(x_detached, size=s_x.shape[:2], mode='bilinear')\n", (56020, 56069), True, 'import torch.nn.functional as F\n'), ((47139, 47231), 'torch.nn.functional.interpolate', 'F.interpolate', (['t_layer_conv1_data'], {'size': 's_layer.conv1.weight.shape[:2]', 'mode': '"""bilinear"""'}), "(t_layer_conv1_data, size=s_layer.conv1.weight.shape[:2], mode\n ='bilinear')\n", (47152, 47231), True, 'import torch.nn.functional as F\n'), ((47606, 47698), 'torch.nn.functional.interpolate', 'F.interpolate', (['t_layer_conv2_data'], {'size': 's_layer.conv2.weight.shape[:2]', 'mode': '"""bilinear"""'}), "(t_layer_conv2_data, size=s_layer.conv2.weight.shape[:2], mode\n ='bilinear')\n", (47619, 47698), True, 'import torch.nn.functional as F\n'), ((48073, 48165), 'torch.nn.functional.interpolate', 'F.interpolate', (['t_layer_conv3_data'], {'size': 's_layer.conv3.weight.shape[:2]', 'mode': '"""bilinear"""'}), "(t_layer_conv3_data, size=s_layer.conv3.weight.shape[:2], mode\n ='bilinear')\n", (48086, 48165), True, 'import torch.nn.functional as F\n'), ((48760, 48870), 'torch.nn.functional.interpolate', 'F.interpolate', (['t_layer_downsample_conv_data'], {'size': 's_layer.downsample[0].weight.shape[:2]', 'mode': '"""bilinear"""'}), "(t_layer_downsample_conv_data, size=s_layer.downsample[0].\n weight.shape[:2], mode='bilinear')\n", (48773, 48870), True, 'import torch.nn.functional as F\n')] |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from yandex.cloud.mdb.clickhouse.v1 import versions_service_pb2 as yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_versions__service__pb2
class VersionsServiceStub(object):
    """A set of methods for managing ClickHouse versions.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # One callable attribute per RPC; (de)serializers come from the
        # generated versions_service_pb2 module.
        self.List = channel.unary_unary(
                '/yandex.cloud.mdb.clickhouse.v1.VersionsService/List',
                request_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_versions__service__pb2.ListVersionsRequest.SerializeToString,
                response_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_versions__service__pb2.ListVersionsResponse.FromString,
                )
class VersionsServiceServicer(object):
    """A set of methods for managing ClickHouse versions.

    Base servicer: subclass, override List, and register with
    add_VersionsServiceServicer_to_server.
    """

    def List(self, request, context):
        """Returns list of available ClickHouse versions.
        """
        # Default stub implementation: report UNIMPLEMENTED to the client.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_VersionsServiceServicer_to_server(servicer, server):
    # Register the servicer's RPC handlers on the given grpc.Server under the
    # fully-qualified service name.
    rpc_method_handlers = {
            'List': grpc.unary_unary_rpc_method_handler(
                    servicer.List,
                    request_deserializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_versions__service__pb2.ListVersionsRequest.FromString,
                    response_serializer=yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_versions__service__pb2.ListVersionsResponse.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'yandex.cloud.mdb.clickhouse.v1.VersionsService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class VersionsService(object):
    """A set of methods for managing ClickHouse versions.
    """

    @staticmethod
    def List(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # One-shot convenience invocation: connects to *target* and performs
        # the unary-unary List call without constructing a stub.
        return grpc.experimental.unary_unary(request, target, '/yandex.cloud.mdb.clickhouse.v1.VersionsService/List',
            yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_versions__service__pb2.ListVersionsRequest.SerializeToString,
            yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_versions__service__pb2.ListVersionsResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| [
"grpc.method_handlers_generic_handler",
"grpc.experimental.unary_unary",
"grpc.unary_unary_rpc_method_handler"
] | [((1864, 1976), 'grpc.method_handlers_generic_handler', 'grpc.method_handlers_generic_handler', (['"""yandex.cloud.mdb.clickhouse.v1.VersionsService"""', 'rpc_method_handlers'], {}), "(\n 'yandex.cloud.mdb.clickhouse.v1.VersionsService', rpc_method_handlers)\n", (1900, 1976), False, 'import grpc\n'), ((1448, 1779), 'grpc.unary_unary_rpc_method_handler', 'grpc.unary_unary_rpc_method_handler', (['servicer.List'], {'request_deserializer': 'yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_versions__service__pb2.ListVersionsRequest.FromString', 'response_serializer': 'yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_versions__service__pb2.ListVersionsResponse.SerializeToString'}), '(servicer.List, request_deserializer=\n yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_versions__service__pb2\n .ListVersionsRequest.FromString, response_serializer=\n yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_versions__service__pb2\n .ListVersionsResponse.SerializeToString)\n', (1483, 1779), False, 'import grpc\n'), ((2504, 2958), 'grpc.experimental.unary_unary', 'grpc.experimental.unary_unary', (['request', 'target', '"""/yandex.cloud.mdb.clickhouse.v1.VersionsService/List"""', 'yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_versions__service__pb2.ListVersionsRequest.SerializeToString', 'yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_versions__service__pb2.ListVersionsResponse.FromString', 'options', 'channel_credentials', 'insecure', 'call_credentials', 'compression', 'wait_for_ready', 'timeout', 'metadata'], {}), "(request, target,\n '/yandex.cloud.mdb.clickhouse.v1.VersionsService/List',\n yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_versions__service__pb2\n .ListVersionsRequest.SerializeToString,\n yandex_dot_cloud_dot_mdb_dot_clickhouse_dot_v1_dot_versions__service__pb2\n .ListVersionsResponse.FromString, options, channel_credentials,\n insecure, call_credentials, compression, wait_for_ready, timeout, metadata)\n", (2533, 2958), False, 'import 
grpc\n')] |
from .sql_io import SQLio
from sql_tools.utils import refetch_filter, listify, kwgs, bin2str
from .keymap import (
ADD_USER,
DROP_USER,
LOCK_USER,
UNLOCK_USER,
GRANT_POWER,
REVOKE_POWER,
USER_GRANTS
)
# Create the shared connection handle once at import time; fall back to None
# so the module stays importable when no MySQL server is reachable.
# (Narrowed from a bare `except:` which would also swallow SystemExit /
# KeyboardInterrupt raised during import.)
try:
    SQ = SQLio('mysql')
except Exception:
    SQ = None
##@@ USERS
def create_user(user, host, password):
    """
    Create a user account on the mysql server configuration
    """
    query = ADD_USER.format(user, host, password)
    SQ.execute_only(query)
def remove_user(user, host):
    """
    Drop a user account from the mysql server configuration
    """
    query = DROP_USER.format(user, host)
    SQ.execute_only(query)
def users_list(filter_by=None):
    """
    Return the mysql.user rows restricted to the *filter_by* columns.

    Binary column values (as returned by some drivers) are decoded with
    bin2str; string values are passed through unchanged.

    Args:
        filter_by: Column names to select. Defaults to
            ['User', 'Host', 'account_locked'].

    Returns:
        list: Rows as tuples; returned as-is when the driver already
        yields plain strings. Empty list when there are no rows.
    """
    from .queries import select_elements
    # Avoid the mutable-default-argument pitfall of the original signature.
    if filter_by is None:
        filter_by = ['User', 'Host', 'account_locked']
    selection = listify(filter_by)
    res = select_elements('mysql', 'user', selection=selection)
    # Guard against an empty result set before peeking at res[0][0].
    if res and isinstance(res[0][0], str):
        return res
    return [
        tuple(i if isinstance(i, str) else bin2str(i) for i in elements)
        for elements in res
    ]
##@@ LOCKS
def lock_user(user, host):
    """
    Lock the user's account on the mysql server
    """
    SQ.execute_only(LOCK_USER.format(user, host))
def unlock_user(user, host):
    """
    Unlock the user's account on the mysql server
    """
    SQ.execute_only(UNLOCK_USER.format(user, host))
##@@ GRANTS
def set_user_grants(user, host, grants=None, database=None, table=None):
    """
    Grant the given privileges to a user on a database/table.
    """
    grants_s, db_s, table_s = kwgs(grants, database, table)
    query = GRANT_POWER.format(grants_s, db_s, table_s, user, host)
    SQ.execute_only(query)
def revoke_user_grants(user, host, grants=None, database=None, table=None):
    """
    Revoke privileges from a user on a database/table.
    """
    grants_s, db_s, table_s = kwgs(grants, database, table)
    query = REVOKE_POWER.format(grants_s, db_s, table_s, user, host)
    SQ.execute_only(query)
@refetch_filter([0])
def user_grants(user, host):
    """
    Return the grants assigned to a user.
    """
    query = USER_GRANTS.format(user, host)
    return SQ.execute_and_fetch(query)
| [
"sql_tools.utils.listify",
"sql_tools.utils.kwgs",
"sql_tools.utils.bin2str",
"sql_tools.utils.refetch_filter"
] | [((1869, 1888), 'sql_tools.utils.refetch_filter', 'refetch_filter', (['[0]'], {}), '([0])\n', (1883, 1888), False, 'from sql_tools.utils import refetch_filter, listify, kwgs, bin2str\n'), ((782, 800), 'sql_tools.utils.listify', 'listify', (['filter_by'], {}), '(filter_by)\n', (789, 800), False, 'from sql_tools.utils import refetch_filter, listify, kwgs, bin2str\n'), ((1557, 1586), 'sql_tools.utils.kwgs', 'kwgs', (['grants', 'database', 'table'], {}), '(grants, database, table)\n', (1561, 1586), False, 'from sql_tools.utils import refetch_filter, listify, kwgs, bin2str\n'), ((1774, 1803), 'sql_tools.utils.kwgs', 'kwgs', (['grants', 'database', 'table'], {}), '(grants, database, table)\n', (1778, 1803), False, 'from sql_tools.utils import refetch_filter, listify, kwgs, bin2str\n'), ((1112, 1122), 'sql_tools.utils.bin2str', 'bin2str', (['i'], {}), '(i)\n', (1119, 1122), False, 'from sql_tools.utils import refetch_filter, listify, kwgs, bin2str\n')] |
import discord
from discord.ext import commands
class Admin(commands.Cog):
    """Owner-only commands for loading, unloading and reloading cog extensions."""

    def __init__(self, bot):
        self.bot = bot

    @commands.is_owner()
    @commands.command()
    async def reload(self, ctx, extension):
        """Reload the cog ``cogs.<extension>`` in place."""
        # Restored the error handling that was commented out, so a failing
        # reload reports back instead of raising inside the command.
        try:
            self.bot.reload_extension(f"cogs.{extension}")
            await ctx.send(f"{extension} was succesfully reloaded")
        except Exception:
            await ctx.send("A error occured while reloading the cog")

    @commands.is_owner()
    @commands.command()
    async def load(self, ctx, extension):
        """Load the cog ``cogs.<extension>``."""
        try:
            self.bot.load_extension(f"cogs.{extension}")
            await ctx.send(f"{extension} was succesfully loaded")
        except Exception:  # narrowed from a bare except
            await ctx.send("A error occured while loading the cog")

    @commands.is_owner()
    @commands.command()
    async def unload(self, ctx, extension):
        """Unload the cog ``cogs.<extension>``; the admin cog itself is protected."""
        try:
            if extension == "admin":
                return await ctx.send("You can't unload this cog as its the admin cog")
            self.bot.unload_extension(f"cogs.{extension}")
            await ctx.send(f"{extension} was succesfully unloaded")
        except Exception:  # narrowed from a bare except
            await ctx.send("A error occured while unloading the cog")
def setup(bot):
    # discord.py extension entry point: called by bot.load_extension("cogs.admin").
    bot.add_cog(Admin(bot))
| [
"discord.ext.commands.command",
"discord.ext.commands.is_owner"
] | [((141, 160), 'discord.ext.commands.is_owner', 'commands.is_owner', ([], {}), '()\n', (158, 160), False, 'from discord.ext import commands\n'), ((167, 185), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (183, 185), False, 'from discord.ext import commands\n'), ((465, 484), 'discord.ext.commands.is_owner', 'commands.is_owner', ([], {}), '()\n', (482, 484), False, 'from discord.ext import commands\n'), ((491, 509), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (507, 509), False, 'from discord.ext import commands\n'), ((786, 805), 'discord.ext.commands.is_owner', 'commands.is_owner', ([], {}), '()\n', (803, 805), False, 'from discord.ext import commands\n'), ((812, 830), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (828, 830), False, 'from discord.ext import commands\n')] |
# Copyright 2010 <NAME> ('kripken'). All rights reserved.
# This file is part of Syntensity/the Intensity Engine, an open source project. See COPYING.txt for licensing.
"""
Provides a thread-safe queue of actions.
A SafeActionQueue is a queue of actions that multiple threads
can add actions to. The queued actions can then be carried out
by the appropriate thread - either a thread dedicated to this purpose
alone, or a thread that executes all the queued actions as part of
its main loop.
"""
from __future__ import with_statement
import threading, time, thread
class SafeActionQueue:
    """Thread-safe FIFO queue of zero-argument callables.

    Multiple threads may add_action(); the owning thread drains the queue
    either via execute_all() in its own loop or by running main_loop().
    """

    def __init__(self):
        self.action_queue = []
        self.lock = threading.Lock()
        self.action_needed = threading.Event() # Either a new action, or to quit
        self.should_quit = False
        self.has_quit = False

    def add_action(self, action):
        """Queue *action* (a zero-argument callable) for later execution."""
        with self.lock:
            self.action_queue.append(action)
            self.action_needed.set()

    ## Runs all queued actions, in order, and returns
    def execute_all(self):
        # Under the lock, detach the current queue, then execute outside it.
        # Executing from the detached copy lets actions affect the queue
        # (e.g. enqueue new actions) without deadlocking.
        with self.lock:
            to_do = self.action_queue
            self.action_queue = []
            if not self.should_quit: # If we should quit, leave the event set
                self.action_needed.clear()
        for action in to_do:  # direct iteration instead of range(len(...))
            action()

    ## Internal main loop function
    def _actual_main_loop(self):
        while not self.should_quit:
            self.action_needed.wait()
            if self.should_quit: # If quitting, quit without running the actions
                break
            self.execute_all()
        self.has_quit = True

    ## Runs as a 'main loop': starts a thread that waits for actions to
    ## appear in the queue and executes them as they arrive
    def main_loop(self):
        # Local renamed from 'thread' to stop shadowing the imported module.
        worker = threading.Thread(target=self._actual_main_loop)
        worker.setDaemon(True) # Main program should not be kept alive by this thread
        worker.start()

    ## Calling this will tell a running main_loop to quit
    def quit(self):
        with self.lock:
            self.should_quit = True
            self.action_needed.set()
## A class that runs CModule.render_progress() every now and then. This is done in the
## main thread (which is allowed to access the OpenGL context), and meanwhile other
## threads can do other stuff. This class continues until it is told by the other
## threads that it can stop
class KeepAliver:
    """Renders a looping progress animation on the main thread until another
    thread calls quit() (see the module comment: the main thread is the one
    allowed to touch the OpenGL context)."""

    def __init__(self, message, delay=0.02, cancellable=False): # Poll 50fps by default
        self.message = message
        self.delay = delay
        self.should_quit = False
        self.cancellable = cancellable

    def wait(self):
        """Block, repeatedly rendering progress, until quit() is called."""
        start_time = time.time()
        # Done at this late time, because otherwise loops
        import intensity.c_module
        CModule = intensity.c_module.CModule.holder
        while not self.should_quit:
            # Negative fraction cycling over a 3-second period; presumably
            # the renderer treats negative progress as indeterminate -- TODO confirm.
            CModule.render_progress( -((time.time() - start_time)%3.0)/3.0, self.message )
            if Global.CLIENT:
                if not self.cancellable:
                    # Non-cancellable: swallow key input while waiting.
                    CModule.intercept_key(0)
                elif CModule.intercept_key(CModule.get_escape()):
                    # Escape pressed: interrupt the main thread and stop waiting.
                    thread.interrupt_main()
                    break
            time.sleep(self.delay)

    def quit(self):
        # Called from another thread to end wait().
        self.should_quit = True

    @staticmethod
    def do(func, message):
        '''
        Run *func* on the side-action thread while animating *message*,
        then return its result.

        E.g.:
        KeepAliver.do(
            lambda: some_func(),
            "Decompressing JPEG2000 image..."
        )
        '''
        from intensity.base import side_actionqueue
        keep_aliver = KeepAliver(message)
        class Result: pass
        def side_operations():
            # Runs on the side-action thread; store the result, stop the spinner.
            Result.output = func()
            keep_aliver.quit()
        side_actionqueue.add_action(side_operations)
        keep_aliver.wait()  # Animate here until side_operations completes
        return Result.output
from intensity.base import *
| [
"threading.Lock",
"thread.start",
"thread.interrupt_main",
"intensity.base.side_actionqueue.add_action",
"time.sleep",
"thread.setDaemon",
"threading.Event",
"threading.Thread",
"time.time"
] | [((668, 684), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (682, 684), False, 'import threading, time, thread\n'), ((714, 731), 'threading.Event', 'threading.Event', ([], {}), '()\n', (729, 731), False, 'import threading, time, thread\n'), ((1993, 2040), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._actual_main_loop'}), '(target=self._actual_main_loop)\n', (2009, 2040), False, 'import threading, time, thread\n'), ((2049, 2071), 'thread.setDaemon', 'thread.setDaemon', (['(True)'], {}), '(True)\n', (2065, 2071), False, 'import threading, time, thread\n'), ((2141, 2155), 'thread.start', 'thread.start', ([], {}), '()\n', (2153, 2155), False, 'import threading, time, thread\n'), ((2893, 2904), 'time.time', 'time.time', ([], {}), '()\n', (2902, 2904), False, 'import threading, time, thread\n'), ((3941, 3985), 'intensity.base.side_actionqueue.add_action', 'side_actionqueue.add_action', (['side_operations'], {}), '(side_operations)\n', (3968, 3985), False, 'from intensity.base import side_actionqueue\n'), ((3443, 3465), 'time.sleep', 'time.sleep', (['self.delay'], {}), '(self.delay)\n', (3453, 3465), False, 'import threading, time, thread\n'), ((3380, 3403), 'thread.interrupt_main', 'thread.interrupt_main', ([], {}), '()\n', (3401, 3403), False, 'import threading, time, thread\n'), ((3127, 3138), 'time.time', 'time.time', ([], {}), '()\n', (3136, 3138), False, 'import threading, time, thread\n')] |
import ast
import os
import warnings
def set_optimization_level(level, bits=8, recalc_interval=100, **unused):
    """Apply a named optimization preset by mutating the module-level ``config``.

    Args:
        level (str): Preset name (case-insensitive): 'exact', 'swap',
            'actnn-l0' .. 'actnn-l5', or one of the four 'acgc-*' variants.
        bits (int): Bit width for the fixed-bit 'acgc-quant'/'acgc-quantz'
            presets; ignored by the adaptive ones.
        recalc_interval (int): Recalculation interval stored on the config
            for the 'acgc-*' presets.
        **unused: Extra keyword arguments are accepted and ignored.

    Raises:
        ValueError: If ``level`` does not name a known preset.
    """
    level = level.lower()
    if level == 'exact':
        pass  # Keep the config defaults (no compression changes).
    elif level == 'swap':
        config.swap = True
        config.compress_activation = False
    elif level == 'actnn-l0':  # Do nothing
        config.compress_activation = False
        config.adaptive_conv_scheme = config.adaptive_bn_scheme = False
    elif level == 'actnn-l1':  # 4-bit conv + 32-bit bn
        config.activation_compression_bits = [4]
        config.adaptive_conv_scheme = config.adaptive_bn_scheme = False
        config.enable_quantized_bn = False
    elif level == 'actnn-l2':  # 4-bit
        config.activation_compression_bits = [4]
        config.adaptive_conv_scheme = config.adaptive_bn_scheme = False
    elif level == 'actnn-l3':  # 2-bit (the defaults)
        pass
    elif level == 'actnn-l3.1':  # 2-bit + light system optimization
        # (dead leading `pass` removed)
        config.cudnn_benchmark_conv2d = False
        config.empty_cache_threshold = 0.2
        config.pipeline_threshold = 3 * 1024**3
    elif level == 'actnn-l4':  # 2-bit + swap
        config.swap = True
    elif level == 'actnn-l5':  # 2-bit + swap + defragmentation
        config.swap = True
        os.environ['PYTORCH_CACHE_THRESHOLD'] = '256000000'
        warnings.warn("The defragmentation at L5 requires modification of the c++ "
                      "code of PyTorch. You need to compile this special fork of "
                      "PyTorch: https://github.com/merrymercy/pytorch/tree/actnn_exp")
    elif level == 'acgc-quant':
        _configure_acgc([int(bits)], adaptive=False, zvc=False,
                        recalc_interval=recalc_interval)
    elif level == 'acgc-quantz':
        _configure_acgc([int(bits)], adaptive=False, zvc=True,
                        recalc_interval=recalc_interval)
    elif level == 'acgc-aquant':
        _configure_acgc([None], adaptive=True, zvc=False,
                        recalc_interval=recalc_interval)
    elif level == 'acgc-aquantz':
        _configure_acgc([None], adaptive=True, zvc=True,
                        recalc_interval=recalc_interval)
    else:
        raise ValueError("Invalid level: " + level)


def _configure_acgc(bits_list, adaptive, zvc, recalc_interval):
    # Shared setup for the four 'acgc-*' presets; only the bit list, the
    # adaptive flag and the zero-value-compression flag differ between them.
    config.compress_activation = True
    config.activation_compression_bits = bits_list
    config.enable_quantized_bn = True
    config.adaptive_conv_scheme = config.adaptive_bn_scheme = adaptive
    config.acgc = True
    config.acgc_quant = True
    config.recalc_interval = int(recalc_interval)
    config.zvc = zvc
    config.cudnn_benchmark_conv2d = False
    config.empty_cache_threshold = 0.2
    config.pipeline_threshold = 3 * 1024**3
class QuantizationConfig:
    """Mutable bag of global settings for activation compression.

    A single shared instance (``config``, created at module bottom) is read
    throughout the package and rewritten by ``set_optimization_level``.
    """

    def __init__(self):
        # --- Quantization behaviour ---
        self.compress_activation = True
        # Bit widths used for activation compression; presets overwrite this
        # (e.g. [4], or [None] for the adaptive acgc variants).
        self.activation_compression_bits = [2, 8, 8]
        self.pergroup = True
        self.perlayer = True
        self.initial_bits = 8
        self.stochastic = True
        self.training = True
        self.group_size = 256  # per-group quantization group size
        self.use_gradient = False
        # Adaptive schemes for conv / batch-norm activations.
        self.adaptive_conv_scheme = True
        self.adaptive_bn_scheme = True
        self.simulate = False
        self.enable_quantized_bn = True

        # Memory management flag
        self.empty_cache_threshold = None
        self.pipeline_threshold = None
        self.cudnn_benchmark_conv2d = True
        self.swap = False

        # AC-GC related flags
        self.acgc = False
        self.acgc_quant = False
        self.zvc = False
        self.recalc_interval = 100

        # Debug related flag (parsed from the DEBUG_MEM / DEBUG_SPEED env vars)
        self.debug_memory_model = ast.literal_eval(os.environ.get('DEBUG_MEM', "False"))
        self.debug_speed = ast.literal_eval(os.environ.get('DEBUG_SPEED', "False"))
        self.debug_memory_op_forward = False
        self.debug_memory_op_backward = False
        self.debug_remove_bn = False
        self.debug_remove_relu = False
        self.debug_nan = False
        self.debug_acgc = False
config = QuantizationConfig()
| [
"warnings.warn",
"os.environ.get"
] | [((4807, 4843), 'os.environ.get', 'os.environ.get', (['"""DEBUG_MEM"""', '"""False"""'], {}), "('DEBUG_MEM', 'False')\n", (4821, 4843), False, 'import os\n'), ((4889, 4927), 'os.environ.get', 'os.environ.get', (['"""DEBUG_SPEED"""', '"""False"""'], {}), "('DEBUG_SPEED', 'False')\n", (4903, 4927), False, 'import os\n'), ((1349, 1554), 'warnings.warn', 'warnings.warn', (['"""The defragmentation at L5 requires modification of the c++ code of PyTorch. You need to compile this special fork of PyTorch: https://github.com/merrymercy/pytorch/tree/actnn_exp"""'], {}), "(\n 'The defragmentation at L5 requires modification of the c++ code of PyTorch. You need to compile this special fork of PyTorch: https://github.com/merrymercy/pytorch/tree/actnn_exp'\n )\n", (1362, 1554), False, 'import warnings\n')] |
# Generated by Django 2.0.3 on 2018-09-30 23:09
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds the 'compression' and 'inline'
    # boolean flags to ArtifactMetaModel and redefines its 'mime' char field
    # (default '', max 32 chars).

    dependencies = [
        ('perftracker', '0037_auto_20181001_0118'),
    ]

    operations = [
        migrations.AddField(
            model_name='artifactmetamodel',
            name='compression',
            field=models.BooleanField(default=False, help_text='Decompress on download/view'),
        ),
        migrations.AddField(
            model_name='artifactmetamodel',
            name='inline',
            field=models.BooleanField(default=False, help_text='View document in browser (do not download)'),
        ),
        migrations.AlterField(
            model_name='artifactmetamodel',
            name='mime',
            field=models.CharField(default='', help_text='Artifact file mime type', max_length=32),
        ),
    ]
| [
"django.db.models.CharField",
"django.db.models.BooleanField"
] | [((354, 429), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'help_text': '"""Decompress on download/view"""'}), "(default=False, help_text='Decompress on download/view')\n", (373, 429), False, 'from django.db import migrations, models\n'), ((560, 655), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'help_text': '"""View document in browser (do not download)"""'}), "(default=False, help_text=\n 'View document in browser (do not download)')\n", (579, 655), False, 'from django.db import migrations, models\n'), ((781, 866), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'help_text': '"""Artifact file mime type"""', 'max_length': '(32)'}), "(default='', help_text='Artifact file mime type', max_length=32\n )\n", (797, 866), False, 'from django.db import migrations, models\n')] |
"""Module for adding visual media in Jupyter Notebook."""
def load_image_from_url(url):
    """Fetch an image over HTTP and return it as a PIL Image.

    Args:
        url (str): URL of the image.

    Returns:
        Image object, or None if the download/decoding fails
        (the error is printed).
    """
    from PIL import Image
    import requests
    from io import BytesIO

    try:
        payload = requests.get(url).content
        return Image.open(BytesIO(payload))
    except Exception as e:
        print(e)
def display_youtube(id="h0pz3S6Tvx0"):
    """Embed a YouTube video player inside a Jupyter notebook.

    Args:
        id (str, optional): Video ID, or a URL whose last path segment is
            the ID. Defaults to "h0pz3S6Tvx0".
    """
    from IPython.display import YouTubeVideo, display
    import ipywidgets

    if "/" in id:
        # Accept full URLs by keeping only the trailing segment.
        id = id.rsplit("/", 1)[-1]
    try:
        container = ipywidgets.Output(layout={"width": "815px"})
        container.clear_output(wait=True)
        display(container)
        with container:
            video = YouTubeVideo(id, width=800, height=450)
            display(video)
    except Exception as e:
        print(e)
"IPython.display.display",
"ipywidgets.Output",
"io.BytesIO",
"requests.get",
"IPython.display.YouTubeVideo"
] | [((383, 400), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (395, 400), False, 'import requests\n'), ((867, 911), 'ipywidgets.Output', 'ipywidgets.Output', ([], {'layout': "{'width': '815px'}"}), "(layout={'width': '815px'})\n", (884, 911), False, 'import ipywidgets\n'), ((1022, 1034), 'IPython.display.display', 'display', (['out'], {}), '(out)\n', (1029, 1034), False, 'from IPython.display import YouTubeVideo, display\n'), ((426, 451), 'io.BytesIO', 'BytesIO', (['response.content'], {}), '(response.content)\n', (433, 451), False, 'from io import BytesIO\n'), ((1073, 1112), 'IPython.display.YouTubeVideo', 'YouTubeVideo', (['id'], {'width': '(800)', 'height': '(450)'}), '(id, width=800, height=450)\n', (1085, 1112), False, 'from IPython.display import YouTubeVideo, display\n')] |
import json
from bokeh.client import push_session
from bokeh.driving import repeat
from bokeh.io import curdoc
from bokeh.models import GeoJSONDataSource
from bokeh.plotting import figure
from bokeh.sampledata.sample_geojson import geojson as original
# Single-feature FeatureCollection used as the alternate payload.
updated = json.dumps({
    'type': 'FeatureCollection',
    'features': [{
        "type": "Feature",
        "geometry": {
            "type": "Point",
            "coordinates": [-2.1208465099334717, 51.4613151550293]
        },
        "properties": {"OrganisationCode": "Q64"}
    }]
})

# Data source starts from the bundled sample GeoJSON.
source = GeoJSONDataSource(geojson=original)

p = figure(tools='box_select', x_range=(-5, 1), y_range=(49, 56),
           title="geojson updating on and off")
p.circle(x='x', y='y', size=10, line_color=None, fill_alpha=0.8, source=source)

@repeat([0,1])
def update(i):
    # alternate between original/updated
    source.geojson = [original, updated][i]

document = curdoc()
document.add_root(p)
# Flip the GeoJSON payload every 300 ms.
document.add_periodic_callback(update, 300)

if __name__ == "__main__":
    print("\npress ctrl-C to exit")
    session = push_session(document)
    session.show()
    session.loop_until_closed()
| [
"bokeh.plotting.figure",
"bokeh.client.push_session",
"json.dumps",
"bokeh.io.curdoc",
"bokeh.models.GeoJSONDataSource",
"bokeh.driving.repeat"
] | [((264, 479), 'json.dumps', 'json.dumps', (["{'type': 'FeatureCollection', 'features': [{'type': 'Feature', 'geometry':\n {'type': 'Point', 'coordinates': [-2.1208465099334717, 51.4613151550293\n ]}, 'properties': {'OrganisationCode': 'Q64'}}]}"], {}), "({'type': 'FeatureCollection', 'features': [{'type': 'Feature',\n 'geometry': {'type': 'Point', 'coordinates': [-2.1208465099334717, \n 51.4613151550293]}, 'properties': {'OrganisationCode': 'Q64'}}]})\n", (274, 479), False, 'import json\n'), ((555, 590), 'bokeh.models.GeoJSONDataSource', 'GeoJSONDataSource', ([], {'geojson': 'original'}), '(geojson=original)\n', (572, 590), False, 'from bokeh.models import GeoJSONDataSource\n'), ((596, 699), 'bokeh.plotting.figure', 'figure', ([], {'tools': '"""box_select"""', 'x_range': '(-5, 1)', 'y_range': '(49, 56)', 'title': '"""geojson updating on and off"""'}), "(tools='box_select', x_range=(-5, 1), y_range=(49, 56), title=\n 'geojson updating on and off')\n", (602, 699), False, 'from bokeh.plotting import figure\n'), ((788, 802), 'bokeh.driving.repeat', 'repeat', (['[0, 1]'], {}), '([0, 1])\n', (794, 802), False, 'from bokeh.driving import repeat\n'), ((914, 922), 'bokeh.io.curdoc', 'curdoc', ([], {}), '()\n', (920, 922), False, 'from bokeh.io import curdoc\n'), ((1066, 1088), 'bokeh.client.push_session', 'push_session', (['document'], {}), '(document)\n', (1078, 1088), False, 'from bokeh.client import push_session\n')] |
import torch
# Draw a random permutation of the integers 0..159825 on the CPU.
# NOTE(review): the result is discarded — presumably a smoke test or
# micro-benchmark for torch.randperm; confirm the intent.
torch.randperm(159826, device='cpu')
| [
"torch.randperm"
] | [((13, 49), 'torch.randperm', 'torch.randperm', (['(159826)'], {'device': '"""cpu"""'}), "(159826, device='cpu')\n", (27, 49), False, 'import torch\n')] |
########################################################################################################################
# |||||||||||||||||||||||||||||||||||||||||||||||||| AQUITANIA ||||||||||||||||||||||||||||||||||||||||||||||||||||||| #
# |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| #
# |||| To be a thinker means to go by the factual evidence of a case, not by the judgment of others |||||||||||||||||| #
# |||| As there is no group stomach to digest collectively, there is no group mind to think collectively. |||||||||||| #
# |||| Each man must accept responsibility for his own life, each must be sovereign by his own judgment. ||||||||||||| #
# |||| If a man believes a claim to be true, then he must hold to this belief even though society opposes him. ||||||| #
# |||| Not only know what you want, but be willing to break all established conventions to accomplish it. |||||||||||| #
# |||| The merit of a design is the only credential that you require. |||||||||||||||||||||||||||||||||||||||||||||||| #
# |||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| #
########################################################################################################################
"""
.. moduleauthor:: <NAME>
"""
from collections import deque
from aquitania.indicator.abstract.indicator_output_abc import AbstractIndicatorOutput
class RSI(AbstractIndicatorOutput):
    """Relative Strength Index (RSI) momentum oscillator.

    RSI measures the speed and change of price movements and oscillates
    between 0 and 100.  Following Wilder, readings above 70 are commonly
    treated as overbought and readings below 30 as oversold.
    """

    def __init__(self, obs_id, period):
        """Create the indicator.

        :param obs_id: (str) indicator name
        :param period: (int) number of periods to evaluate
        """
        # Single unnamed output column, not an 'open' indicator, and an
        # initial invalid output of -1.0.
        super().__init__(obs_id, [''], False, (-1.0,))
        self.period = period
        # Rolling windows of gains and losses, each capped at `period`.
        self.high = deque(maxlen=period)
        self.low = deque(maxlen=period)
        self.last_close = None

    def indicator_logic(self, candle):
        """Process one candle and return a 1-element tuple with the RSI.

        Returns (-1.0,) until both the gain and the loss windows are full.
        """
        current_close = candle.close[1]

        # Price change since the previous candle (0 on the very first one).
        if self.last_close is None:
            delta = 0
        else:
            delta = current_close - self.last_close

        # Gains feed the 'high' window, losses feed the 'low' window.
        if delta > 0:
            self.high.append(delta)
        elif delta < 0:
            self.low.append(-delta)

        if len(self.high) == len(self.low) == self.period:
            relative_strength = sum(self.high) / sum(self.low)
            rsi = 100 - 100 / (1 + relative_strength)
        else:
            # Not enough samples yet for a valid RSI.
            rsi = -1.0

        self.last_close = current_close
        # Indicators must return a tuple.
        return (rsi,)
| [
"collections.deque"
] | [((2415, 2435), 'collections.deque', 'deque', ([], {'maxlen': 'period'}), '(maxlen=period)\n', (2420, 2435), False, 'from collections import deque\n'), ((2455, 2475), 'collections.deque', 'deque', ([], {'maxlen': 'period'}), '(maxlen=period)\n', (2460, 2475), False, 'from collections import deque\n')] |
from PyQt5 import QtCore, QtGui, QtWidgets
from hwentry import Ui_MainWindow7
import sqlite3
class Ui_MainWindow5(object):
    """Qt-Designer-style staff options window (hand-edited generated code).

    Shows four buttons — add grades, add homework, update attendance and
    staff details; the "Add Homework" button opens the homework entry
    window defined in ``hwentry``.
    """

    def hw_entry(self):
        """Open the homework entry window in a new top-level window."""
        self.window = QtWidgets.QMainWindow()
        self.ui = Ui_MainWindow7()
        self.ui.setupUi(self.window)
        self.window.show()

    def setupUi(self, MainWindow):
        """Create and lay out all widgets on *MainWindow* (generated code)."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(732, 269)
        MainWindow.setStyleSheet("background-color: qlineargradient(spread:pad, x1:0, y1:0.489, x2:0.971591, y2:0.494, stop:0.0738636 rgba(0, 117, 232, 255), stop:1 rgba(255, 255, 255, 255));")
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Header label across the top of the window.
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(-30, 0, 791, 51))
        font = QtGui.QFont()
        font.setPointSize(14)
        font.setBold(True)
        font.setWeight(75)
        self.label.setFont(font)
        self.label.setAlignment(QtCore.Qt.AlignCenter)
        self.label.setObjectName("label")
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setGeometry(QtCore.QRect(90, 80, 221, 51))
        self.pushButton.setObjectName("pushButton")
        self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_2.setGeometry(QtCore.QRect(90, 160, 221, 51))
        self.pushButton_2.setObjectName("pushButton_2")
        # "Add Homework" is the only button wired to a handler here.
        self.pushButton_2.clicked.connect(self.hw_entry)
        self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_3.setGeometry(QtCore.QRect(440, 80, 221, 51))
        self.pushButton_3.setObjectName("pushButton_3")
        self.pushButton_4 = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_4.setGeometry(QtCore.QRect(440, 160, 221, 51))
        self.pushButton_4.setObjectName("pushButton_4")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 732, 21))
        self.menubar.setObjectName("menubar")
        self.menuHelp = QtWidgets.QMenu(self.menubar)
        self.menuHelp.setObjectName("menuHelp")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.menubar.addAction(self.menuHelp.menuAction())
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Set all user-visible texts and status tips (generated code)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Staff Options"))
        self.label.setText(_translate("MainWindow", "Staff Options"))
        self.pushButton.setStatusTip(_translate("MainWindow", "Add Student Grades"))
        self.pushButton.setText(_translate("MainWindow", "Add Grades"))
        self.pushButton_2.setStatusTip(_translate("MainWindow", "Add Division-wise Homework"))
        self.pushButton_2.setText(_translate("MainWindow", "Add Homework"))
        self.pushButton_3.setStatusTip(_translate("MainWindow", "Update Student Attendance."))
        self.pushButton_3.setText(_translate("MainWindow", "Update Attendance"))
        self.pushButton_4.setStatusTip(_translate("MainWindow", "Personal Staff Details"))
        self.pushButton_4.setText(_translate("MainWindow", "Details"))
        self.menuHelp.setTitle(_translate("MainWindow", "About"))
if __name__ == "__main__":
    # Stand-alone entry point: create the Qt application, show the staff
    # options window and run the event loop until the window is closed.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow5 = QtWidgets.QMainWindow()
    ui = Ui_MainWindow5()
    ui.setupUi(MainWindow5)
    MainWindow5.show()
    sys.exit(app.exec_())
| [
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtWidgets.QMainWindow",
"PyQt5.QtWidgets.QMenu",
"PyQt5.QtGui.QFont",
"hwentry.Ui_MainWindow7",
"PyQt5.QtCore.QMetaObject.connectSlotsByName",
"PyQt5.QtCore.QRect",
"PyQt5.QtWidgets.QStatusBar",
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QApplication",
"PyQt5.Qt... | [((3703, 3735), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (3725, 3735), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3755, 3778), 'PyQt5.QtWidgets.QMainWindow', 'QtWidgets.QMainWindow', ([], {}), '()\n', (3776, 3778), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((182, 205), 'PyQt5.QtWidgets.QMainWindow', 'QtWidgets.QMainWindow', ([], {}), '()\n', (203, 205), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((225, 241), 'hwentry.Ui_MainWindow7', 'Ui_MainWindow7', ([], {}), '()\n', (239, 241), False, 'from hwentry import Ui_MainWindow7\n'), ((656, 685), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['MainWindow'], {}), '(MainWindow)\n', (673, 685), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((767, 803), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (783, 803), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((883, 896), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (894, 896), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1144, 1185), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1165, 1185), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1336, 1377), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1357, 1377), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1593, 1634), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1614, 1634), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1792, 1833), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1813, 1833), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2044, 2074), 'PyQt5.QtWidgets.QMenuBar', 'QtWidgets.QMenuBar', 
(['MainWindow'], {}), '(MainWindow)\n', (2062, 2074), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2210, 2239), 'PyQt5.QtWidgets.QMenu', 'QtWidgets.QMenu', (['self.menubar'], {}), '(self.menubar)\n', (2225, 2239), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2360, 2392), 'PyQt5.QtWidgets.QStatusBar', 'QtWidgets.QStatusBar', (['MainWindow'], {}), '(MainWindow)\n', (2380, 2392), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2604, 2653), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['MainWindow'], {}), '(MainWindow)\n', (2641, 2653), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((836, 865), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(-30)', '(0)', '(791)', '(51)'], {}), '(-30, 0, 791, 51)\n', (848, 865), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1223, 1252), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(90)', '(80)', '(221)', '(51)'], {}), '(90, 80, 221, 51)\n', (1235, 1252), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1417, 1447), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(90)', '(160)', '(221)', '(51)'], {}), '(90, 160, 221, 51)\n', (1429, 1447), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1674, 1704), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(440)', '(80)', '(221)', '(51)'], {}), '(440, 80, 221, 51)\n', (1686, 1704), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1873, 1904), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(440)', '(160)', '(221)', '(51)'], {}), '(440, 160, 221, 51)\n', (1885, 1904), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2109, 2136), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(0)', '(0)', '(732)', '(21)'], {}), '(0, 0, 732, 21)\n', (2121, 2136), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n')] |
import unittest
import CheckName
class Check(unittest.TestCase):
    """Verify that ``CheckName.get_name`` returns the expected full name."""

    def test_check_name_function(self):
        name = CheckName.get_name()
        self.assertEqual(name, '<NAME>')
| [
"CheckName.get_name"
] | [((133, 153), 'CheckName.get_name', 'CheckName.get_name', ([], {}), '()\n', (151, 153), False, 'import CheckName\n')] |
import os
import json
def translate(key, lang='fr'):
    """Translate the keyword *key* into language *lang*.

    Looks *key* up in the ``langs.json`` file in the current working
    directory.  Returns *key* unchanged when the file, the key or the
    requested language is missing.

    :param key: keyword to translate
    :param lang: target language code; falsy values fall back to 'fr'
    :return: the translated string, or *key* if no translation was found
    """
    if not lang:
        # A None/empty language falls back to the default.
        lang = 'fr'
    if not os.path.isfile("langs.json"):
        print("Attention, fichier langs.json n'existe pas")
        return key
    # Explicit encoding so the lookup does not depend on the locale.
    with open("langs.json", encoding='utf-8') as fichier:
        trans = json.load(fichier)
    entry = trans.get(key)
    if entry:
        # Single lookup instead of calling .get(lang) twice.
        translated = entry.get(lang)
        if translated:
            return translated
    return key
| [
"os.path.isfile",
"json.load"
] | [((201, 229), 'os.path.isfile', 'os.path.isfile', (['"""langs.json"""'], {}), "('langs.json')\n", (215, 229), False, 'import os\n'), ((367, 385), 'json.load', 'json.load', (['fichier'], {}), '(fichier)\n', (376, 385), False, 'import json\n')] |
import requests
class Authentication_service:
    """Thin client that forwards a message to the authentication service."""

    def sendMessage(Adress, body):
        """Send *body* via HTTP GET to the service endpoint *Adress*.

        The base URL of the service is read from the first line of the
        ``./Authentication.setup`` file.

        NOTE(review): this method has no ``self`` parameter, so it is only
        callable as ``Authentication_service.sendMessage(...)``.

        :param Adress: path appended to the configured base URL
        :param body: payload passed as the GET request body
        :return: the ``requests`` response object
        """
        # Fix: ``readlines`` is a method and must be called — the original
        # ``file.readlines[0]`` raised TypeError.  Also close the file and
        # strip the trailing newline so the composed URL stays valid.
        with open("./Authentication.setup") as setup_file:
            base_url = setup_file.readlines()[0].strip()
        final_adress = base_url + Adress
        return requests.get(final_adress, data=body)
| [
"requests.get"
] | [((197, 233), 'requests.get', 'requests.get', (['FinalAdress'], {'data': 'body'}), '(FinalAdress, data=body)\n', (209, 233), False, 'import requests\n')] |
import os
from nltk.parse.corenlp import CoreNLPServer
# The server needs to know the location of the following files:
# - stanford-corenlp-X.X.X.jar
# - stanford-corenlp-X.X.X-models.jar
# Root of the CoreNLP distribution; the server needs both the code jar
# (stanford-corenlp-X.X.X.jar) and the models jar.
STANFORD = os.path.join("models", "stanford-corenlp-4.2.0")

corenlp_jar = os.path.join(STANFORD, "stanford-corenlp-4.2.0.jar")
corenlp_models_jar = os.path.join(STANFORD, "stanford-corenlp-4.2.0-models.jar")

# Create the server from the two jars.
server = CoreNLPServer(corenlp_jar, corenlp_models_jar)

# Start the server in the background.
server.start()
| [
"os.path.join"
] | [((204, 252), 'os.path.join', 'os.path.join', (['"""models"""', '"""stanford-corenlp-4.2.0"""'], {}), "('models', 'stanford-corenlp-4.2.0')\n", (216, 252), False, 'import os\n'), ((301, 353), 'os.path.join', 'os.path.join', (['STANFORD', '"""stanford-corenlp-4.2.0.jar"""'], {}), "(STANFORD, 'stanford-corenlp-4.2.0.jar')\n", (313, 353), False, 'import os\n'), ((358, 417), 'os.path.join', 'os.path.join', (['STANFORD', '"""stanford-corenlp-4.2.0-models.jar"""'], {}), "(STANFORD, 'stanford-corenlp-4.2.0-models.jar')\n", (370, 417), False, 'import os\n')] |
import numpy as np
class DataLoader(object):
    """Loads a parallel (source/target) corpus and serves padded batches."""

    def __init__(self, fpath1, fpath2, maxlen1, maxlen2, vocab_fpath):
        """
        :param fpath1: source file path
        :param fpath2: target file path
        :param maxlen1: maximum source sentence length (incl. </s>)
        :param maxlen2: maximum target sentence length (incl. <s> and </s>)
        :param vocab_fpath: vocabulary file path
        """
        self.sents1, self.sents2 = self.load_data(fpath1, fpath2, maxlen1, maxlen2)
        self.token2idx, self.idx2token = self.load_vocab(vocab_fpath)
        self.maxlen1 = maxlen1
        self.maxlen2 = maxlen2

    def load_vocab(self, vocab_fpath):
        '''Loads vocabulary file and returns idx<->token maps
        vocab_fpath: string. vocabulary file path.
        Note that these are reserved
        0: <pad>, 1: <unk>, 2: <s>, 3: </s>
        Returns
        two dictionaries.
        '''
        # Fix: close the vocabulary file (the original leaked the handle).
        with open(vocab_fpath, 'r', encoding='utf-8') as fin:
            vocab = [line.split()[0] for line in fin.read().splitlines()]
        token2idx = {token: idx for idx, token in enumerate(vocab)}
        idx2token = {idx: token for idx, token in enumerate(vocab)}
        return token2idx, idx2token

    def load_data(self, fpath1, fpath2, maxlen1, maxlen2):
        '''Loads source and target data and filters out too lengthy samples.
        fpath1: source file path. string.
        fpath2: target file path. string.
        maxlen1: source sent maximum length. scalar.
        maxlen2: target sent maximum length. scalar.
        Returns
        sents1: list of source sents
        sents2: list of target sents
        '''
        sents1, sents2 = [], []
        with open(fpath1, 'r', encoding='utf-8') as f1, open(fpath2, 'r', encoding='utf-8') as f2:
            for sent1, sent2 in zip(f1, f2):
                # Skip the pair if either side exceeds its maximum length
                # (the +1 accounts for the appended </s> token).
                if len(sent1.split()) + 1 > maxlen1: continue  # 1: </s>
                if len(sent2.split()) + 1 > maxlen2: continue  # 1: </s>
                sents1.append(sent1.strip())
                sents2.append(sent2.strip())
        return sents1, sents2

    def encode(self, inp, type, dict):
        '''Converts string to number. Used for `generator_fn`.
        inp: 1d byte array.
        type: "x" (source side) or "y" (target side)
        dict: token2idx dictionary
        Returns
        list of numbers
        '''
        # NOTE: parameter names `type` and `dict` shadow builtins but are
        # kept for backward compatibility with keyword-argument callers.
        if type == "x":
            tokens = inp.split() + ["</s>"]
        else:
            tokens = ["<s>"] + inp.split() + ["</s>"]
        return [dict.get(t, dict["<unk>"]) for t in tokens]

    def make_epoch_data(self, batch_size, shuffle=False):
        """Pre-computes the batches for one epoch.

        Bug fix: the previous implementation shuffled the source and target
        lists *independently*, destroying the source/target pairing of the
        parallel corpus.  The pairs are now zipped and shuffled together.
        """
        pairs = list(zip(self.sents1, self.sents2))
        if shuffle:
            import random
            random.shuffle(pairs)
        new_sents1 = [pair[0] for pair in pairs]
        new_sents2 = [pair[1] for pair in pairs]
        xs = [self.encode(sent1, "x", self.token2idx) for sent1 in new_sents1]
        ys = [self.encode(sent2, "y", self.token2idx) for sent2 in new_sents2]
        batch_xs = []
        batch_ys = []
        for i in range(0, len(xs), batch_size):
            batch_xs.append(xs[i:i + batch_size])
            batch_ys.append(ys[i:i + batch_size])
        # Drop a trailing batch smaller than batch_size (guard against an
        # empty corpus, which used to raise IndexError here).
        if batch_xs and len(batch_xs[-1]) != batch_size:
            batch_xs = batch_xs[:-1]
            batch_ys = batch_ys[:-1]
        self.cur_xs = batch_xs
        self.cur_ys = batch_ys
        self.batch_num = len(batch_xs)
        self.idx = 0

    def get_batch(self, fill_maxlen=True):
        """Return the next batch as ((xs, xlen), (ys, ylen)) float32 arrays.

        :param fill_maxlen: pad to the configured maxlen instead of the
            longest sequence in the current batch
        """
        assert self.idx < self.batch_num, "epoch exhausted - call make_epoch_data again"
        cur_batch_x = self.cur_xs[self.idx]
        cur_batch_y = self.cur_ys[self.idx]
        self.idx += 1
        if fill_maxlen:
            cur_largest_len_x = self.maxlen1
            cur_largest_len_y = self.maxlen2
        else:
            cur_largest_len_x = max(len(x) for x in cur_batch_x)
            cur_largest_len_y = max(len(y) for y in cur_batch_y)
        cur_batch_x = np.array([self.align(x, cur_largest_len_x) for x in cur_batch_x]).astype(np.float32)
        cur_batch_y = np.array([self.align(y, cur_largest_len_y) for y in cur_batch_y]).astype(np.float32)
        return (cur_batch_x, cur_largest_len_x), (cur_batch_y, cur_largest_len_y)

    def align(self, arr, length):
        """Zero-pad *arr* (or truncate it) to exactly *length* entries."""
        ori_len = len(arr)
        if length > ori_len:
            return arr + [0] * (length - ori_len)
        else:
            return arr[:length]

    def get_pad(self):
        """Return the index of the <pad> token."""
        return self.token2idx["<pad>"]
| [
"random.shuffle",
"copy.deepcopy"
] | [((2367, 2393), 'copy.deepcopy', 'copy.deepcopy', (['self.sents1'], {}), '(self.sents1)\n', (2380, 2393), False, 'import copy\n'), ((2415, 2441), 'copy.deepcopy', 'copy.deepcopy', (['self.sents2'], {}), '(self.sents2)\n', (2428, 2441), False, 'import copy\n'), ((2500, 2526), 'random.shuffle', 'random.shuffle', (['new_sents1'], {}), '(new_sents1)\n', (2514, 2526), False, 'import random\n'), ((2539, 2565), 'random.shuffle', 'random.shuffle', (['new_sents2'], {}), '(new_sents2)\n', (2553, 2565), False, 'import random\n')] |
import os
import sys
from mininet.node import RemoteController
from mininet.net import Mininet
import dc_gym.utils as dc_utils
import logging
# Module-level logger for this file.
log = logging.getLogger(__name__)
# Current working directory (apparently unused in this chunk; kept as-is).
cwd = os.getcwd()
# Absolute directory of this file; prepended to sys.path so sibling
# resources (e.g. tcp_pcc.ko below) resolve regardless of the cwd.
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, FILE_DIR)
def get_congestion_control():
    """Return the host's active TCP congestion control algorithm.

    Reads ``net.ipv4.tcp_congestion_control`` via sysctl.  The returned
    string includes the trailing newline emitted by sysctl.
    """
    # Fix: use a context manager so the pipe is closed and the child
    # process is reaped (the original leaked the popen handle).
    with os.popen("sysctl -n net.ipv4.tcp_congestion_control") as proc:
        return proc.read()
def load_congestion_control(tcp_policy):
    """Load kernel support for the requested TCP congestion control.

    :param tcp_policy: one of "dctcp", "tcp_nv" or "pcc"; anything else is
        a no-op (the kernel default remains active).
    """
    if tcp_policy == "dctcp":
        dc_utils.exec_process("modprobe tcp_dctcp")
        # DCTCP requires ECN marking to be enabled system-wide.
        dc_utils.exec_process("sysctl -w net.ipv4.tcp_ecn=1")
    elif tcp_policy == "tcp_nv":
        dc_utils.exec_process("modprobe tcp_nv")
    elif tcp_policy == "pcc":
        # Only insert the out-of-tree module if it is not already loaded.
        # Fix: close the lsmod pipe (the original leaked the popen handle).
        with os.popen("lsmod | grep pcc") as lsmod:
            pcc_missing = lsmod.read() == ""
        if pcc_missing:
            dc_utils.exec_process("insmod %s/tcp_pcc.ko" % FILE_DIR)
def calc_ecn(max_throughput, avg_pkt_size):
    """Compute the ECN marking threshold as a fraction of the BDP.

    :param max_throughput: link bandwidth in bits per second
    :param avg_pkt_size: average packet size in bytes
    :return: the marking threshold
    """
    # Bandwidth-delay product (assumes a 100 microsecond delay factor —
    # see the original derivation; TODO confirm the units).
    bdp = max_throughput * 100 * 1e-6
    threshold = bdp * 0.17
    if threshold < avg_pkt_size:
        # Too small to mark reliably: use roughly two packets instead.
        threshold = avg_pkt_size * 2
    elif threshold > 50e3:
        # Otherwise cap the threshold at 50 KB.
        threshold = 50e3
    return threshold
class NetworkManager():
    """Builds and manages a Mininet network for a dc_gym topology.

    On construction the manager records the host's current congestion
    control algorithm, loads the requested TCP policy, starts the Mininet
    network and configures switches, hosts and the controller.
    """

    def __init__(self, topo, tcp_policy="tcp"):
        """
        :param topo: topology object to instantiate in Mininet
        :param tcp_policy: TCP congestion control to load on the hosts
            ("tcp", "dctcp", "tcp_nv" or "pcc")
        """
        self.topo = topo
        self.net = None
        self.net_stopped = False
        # Maps switch-facing interface names to controller interface names.
        self.host_ctrl_map = {}
        self.tcp_policy = tcp_policy
        # Remember the previous congestion control so stop_network() can
        # restore it.
        self.prev_cc = get_congestion_control()
        load_congestion_control(tcp_policy)
        self.start_network()

    def _apply_qdisc(self, port):
        """ Here be dragons... """
        # Configure an HTB root qdisc that rate limits the port to the
        # topology's maximum bandwidth.  (Alternative qdisc experiments —
        # hfsc, netem, codel, sfq, choke, fq_codel — were removed as dead
        # commented-out code; see version control history if needed.)
        limit = int(self.topo.max_queue)
        avg_pkt_size = 1500  # MTU packet size
        tc_cmd = "tc qdisc add dev %s " % (port)
        cmd = "root handle 1: htb default 10 "
        cmd += " direct_qlen %d " % (limit / avg_pkt_size)
        log.debug(tc_cmd + cmd)
        dc_utils.exec_process(tc_cmd + cmd)
        tc_cmd = "tc class add dev %s " % (port)
        cmd = "parent 1: classid 1:10 htb rate %dbit burst %d" % (
            self.topo.max_bps, self.topo.max_bps)
        log.debug(tc_cmd + cmd)
        dc_utils.exec_process(tc_cmd + cmd)
        if self.tcp_policy == "dctcp":
            marking_threshold = calc_ecn(self.topo.max_bps, avg_pkt_size)
            # Apply aggressive RED to mark excess packets in the queue
            max_q = limit / 4
            min_q = int(marking_threshold)
            tc_cmd = "tc qdisc add dev %s " % (port)
            cmd = "parent 1:10 handle 20:1 red "
            cmd += "limit %d " % (limit)
            cmd += "bandwidth %dbit " % self.topo.max_bps
            cmd += "avpkt %d " % avg_pkt_size
            cmd += "min %d " % min_q
            cmd += "max %d " % max_q
            # Ballpark burst hard limit...
            burst = (min_q + min_q + max_q) / (3 * avg_pkt_size)
            cmd += "burst %d " % burst
            cmd += "probability 0.1"
            cmd += " ecn "
            log.debug(tc_cmd + cmd)
            dc_utils.exec_process(tc_cmd + cmd)
        else:
            # Plain byte-limited FIFO for non-DCTCP policies.
            tc_cmd = "tc qdisc add dev %s " % (port)
            cmd = "parent 1:10 handle 20:1 bfifo "
            cmd += " limit %d" % limit
            dc_utils.exec_process(tc_cmd + cmd)
        dc_utils.exec_process("ip link set %s txqueuelen %d" %
                              (port, limit / avg_pkt_size))
        dc_utils.exec_process("ip link set %s mtu 1500" % port)

    def _connect_controller(self, net):
        """Attach a remote controller and link every host to it."""
        controller = RemoteController(self.topo.switch_id + "_c")
        net.addController(controller)
        for i, host in enumerate(self.topo.host_list):
            # Configure host
            net.addLink(controller, host)
            # Configure controller
            ctrl_iface = "%s_c-eth%d" % (self.topo.switch_id, i)
            for index, switch in self.topo.ports[host].items():
                switch_iface = switch[0] + "-eth" + str(switch[1])
                self.host_ctrl_map[switch_iface] = ctrl_iface

    def _config_links(self, net):
        """Apply the qdisc configuration to every non-loopback switch port."""
        for switch in net.switches:
            for port in switch.intfList():
                if port.name != "lo":
                    self._apply_qdisc(port)

    def _config_hosts(self, net):
        """Apply TCP-related sysctl settings on every host."""
        for host in net.hosts:
            dc_utils.exec_process(
                "sysctl -w net.ipv4.tcp_window_scaling=1", host)
            dc_utils.exec_process(
                "sysctl -w net.ipv4.tcp_timestamps=1", host)
            dc_utils.exec_process("sysctl -w net.ipv4.tcp_sack=1", host)
            dc_utils.exec_process(
                "sysctl -w net.ipv4.tcp_syn_retries=10", host)
            if self.tcp_policy == "dctcp":
                dc_utils.exec_process(
                    "sysctl -w net.ipv4.tcp_congestion_control=dctcp", host)
                dc_utils.exec_process("sysctl -w net.ipv4.tcp_ecn=1", host)
                dc_utils.exec_process(
                    "sysctl -w net.ipv4.tcp_ecn_fallback=0", host)
            elif self.tcp_policy == "tcp_nv":
                dc_utils.exec_process(
                    "sysctl -w net.ipv4.tcp_congestion_control=nv", host)
            elif self.tcp_policy == "pcc":
                dc_utils.exec_process(
                    "sysctl -w net.ipv4.tcp_congestion_control=pcc", host)

    def _config_network(self, net):
        """Configure topology, links, hosts and controller."""
        self.topo._config_topo()
        self._config_links(net)
        self._config_hosts(net)
        self._connect_controller(net)

    def get_net(self):
        """Return the underlying Mininet object."""
        return self.net

    def get_topo(self):
        """Return the topology object."""
        return self.topo

    def get_sw_ports(self):
        """Return the names of all non-loopback switch interfaces."""
        switches = self.net.switches
        sw_intfs = []
        for switch in switches:
            for intf in switch.intfNames():
                # Fixed: the original used `intf is not "lo"`, which compares
                # object identity instead of string equality.
                if intf != "lo":
                    sw_intfs.append(intf)
        return sw_intfs

    def get_host_ports(self):
        """Return the switch-facing interface names of all hosts."""
        return self.host_ctrl_map.keys()

    def get_num_sw_ports(self):
        """Return the total number of switch ports in the topology."""
        return self.topo.get_num_sw_ports()

    def get_num_hosts(self):
        """Return the number of hosts in the topology."""
        return self.topo.get_num_hosts()

    def start_network(self):
        """Start Mininet and configure the freshly created network."""
        self.net = Mininet(topo=self.topo, controller=None, autoSetMacs=True)
        self.net.start()
        self._config_network(self.net)
        self.net_stopped = False

    def stop_network(self):
        """Tear the network down and restore the host's TCP settings."""
        if not self.net_stopped:
            self.net_stopped = True
            log.info("Removing interfaces and restoring all network state.")
            if self.tcp_policy == "dctcp":
                dc_utils.exec_process("sysctl -w net.ipv4.tcp_ecn=0")
            # Reset the active host congestion control to the previous value.
            cmd = "sysctl -w net.ipv4.tcp_congestion_control=%s" % self.prev_cc
            dc_utils.exec_process(cmd)
            log.info("Deleting the virtual network")
            self.net.stop()
            log.info("Successfully deleted the virtual network")
| [
"logging.getLogger",
"dc_gym.utils.exec_process",
"sys.path.insert",
"os.getcwd",
"os.popen",
"os.path.abspath",
"mininet.net.Mininet",
"mininet.node.RemoteController"
] | [((149, 176), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (166, 176), False, 'import logging\n'), ((184, 195), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (193, 195), False, 'import os\n'), ((250, 278), 'sys.path.insert', 'sys.path.insert', (['(0)', 'FILE_DIR'], {}), '(0, FILE_DIR)\n', (265, 278), False, 'import sys\n'), ((223, 248), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (238, 248), False, 'import os\n'), ((486, 529), 'dc_gym.utils.exec_process', 'dc_utils.exec_process', (['"""modprobe tcp_dctcp"""'], {}), "('modprobe tcp_dctcp')\n", (507, 529), True, 'import dc_gym.utils as dc_utils\n'), ((538, 591), 'dc_gym.utils.exec_process', 'dc_utils.exec_process', (['"""sysctl -w net.ipv4.tcp_ecn=1"""'], {}), "('sysctl -w net.ipv4.tcp_ecn=1')\n", (559, 591), True, 'import dc_gym.utils as dc_utils\n'), ((2559, 2594), 'dc_gym.utils.exec_process', 'dc_utils.exec_process', (['(tc_cmd + cmd)'], {}), '(tc_cmd + cmd)\n', (2580, 2594), True, 'import dc_gym.utils as dc_utils\n'), ((2801, 2836), 'dc_gym.utils.exec_process', 'dc_utils.exec_process', (['(tc_cmd + cmd)'], {}), '(tc_cmd + cmd)\n', (2822, 2836), True, 'import dc_gym.utils as dc_utils\n'), ((5984, 6072), 'dc_gym.utils.exec_process', 'dc_utils.exec_process', (["('ip link set %s txqueuelen %d' % (port, limit / avg_pkt_size))"], {}), "('ip link set %s txqueuelen %d' % (port, limit /\n avg_pkt_size))\n", (6005, 6072), True, 'import dc_gym.utils as dc_utils\n'), ((6107, 6162), 'dc_gym.utils.exec_process', 'dc_utils.exec_process', (["('ip link set %s mtu 1500' % port)"], {}), "('ip link set %s mtu 1500' % port)\n", (6128, 6162), True, 'import dc_gym.utils as dc_utils\n'), ((6225, 6269), 'mininet.node.RemoteController', 'RemoteController', (["(self.topo.switch_id + '_c')"], {}), "(self.topo.switch_id + '_c')\n", (6241, 6269), False, 'from mininet.node import RemoteController\n'), ((9328, 9386), 'mininet.net.Mininet', 'Mininet', ([], {'topo': 'self.topo', 
'controller': 'None', 'autoSetMacs': '(True)'}), '(topo=self.topo, controller=None, autoSetMacs=True)\n', (9335, 9386), False, 'from mininet.net import Mininet\n'), ((325, 378), 'os.popen', 'os.popen', (['"""sysctl -n net.ipv4.tcp_congestion_control"""'], {}), "('sysctl -n net.ipv4.tcp_congestion_control')\n", (333, 378), False, 'import os\n'), ((633, 673), 'dc_gym.utils.exec_process', 'dc_utils.exec_process', (['"""modprobe tcp_nv"""'], {}), "('modprobe tcp_nv')\n", (654, 673), True, 'import dc_gym.utils as dc_utils\n'), ((3676, 3711), 'dc_gym.utils.exec_process', 'dc_utils.exec_process', (['(tc_cmd + cmd)'], {}), '(tc_cmd + cmd)\n', (3697, 3711), True, 'import dc_gym.utils as dc_utils\n'), ((3881, 3916), 'dc_gym.utils.exec_process', 'dc_utils.exec_process', (['(tc_cmd + cmd)'], {}), '(tc_cmd + cmd)\n', (3902, 3916), True, 'import dc_gym.utils as dc_utils\n'), ((7130, 7200), 'dc_gym.utils.exec_process', 'dc_utils.exec_process', (['"""sysctl -w net.ipv4.tcp_window_scaling=1"""', 'host'], {}), "('sysctl -w net.ipv4.tcp_window_scaling=1', host)\n", (7151, 7200), True, 'import dc_gym.utils as dc_utils\n'), ((7230, 7296), 'dc_gym.utils.exec_process', 'dc_utils.exec_process', (['"""sysctl -w net.ipv4.tcp_timestamps=1"""', 'host'], {}), "('sysctl -w net.ipv4.tcp_timestamps=1', host)\n", (7251, 7296), True, 'import dc_gym.utils as dc_utils\n'), ((7326, 7386), 'dc_gym.utils.exec_process', 'dc_utils.exec_process', (['"""sysctl -w net.ipv4.tcp_sack=1"""', 'host'], {}), "('sysctl -w net.ipv4.tcp_sack=1', host)\n", (7347, 7386), True, 'import dc_gym.utils as dc_utils\n'), ((7399, 7467), 'dc_gym.utils.exec_process', 'dc_utils.exec_process', (['"""sysctl -w net.ipv4.tcp_syn_retries=10"""', 'host'], {}), "('sysctl -w net.ipv4.tcp_syn_retries=10', host)\n", (7420, 7467), True, 'import dc_gym.utils as dc_utils\n'), ((9941, 9967), 'dc_gym.utils.exec_process', 'dc_utils.exec_process', (['cmd'], {}), '(cmd)\n', (9962, 9967), True, 'import dc_gym.utils as dc_utils\n'), ((7724, 7802), 
'dc_gym.utils.exec_process', 'dc_utils.exec_process', (['"""sysctl -w net.ipv4.tcp_congestion_control=dctcp"""', 'host'], {}), "('sysctl -w net.ipv4.tcp_congestion_control=dctcp', host)\n", (7745, 7802), True, 'import dc_gym.utils as dc_utils\n'), ((7840, 7899), 'dc_gym.utils.exec_process', 'dc_utils.exec_process', (['"""sysctl -w net.ipv4.tcp_ecn=1"""', 'host'], {}), "('sysctl -w net.ipv4.tcp_ecn=1', host)\n", (7861, 7899), True, 'import dc_gym.utils as dc_utils\n'), ((7916, 7984), 'dc_gym.utils.exec_process', 'dc_utils.exec_process', (['"""sysctl -w net.ipv4.tcp_ecn_fallback=0"""', 'host'], {}), "('sysctl -w net.ipv4.tcp_ecn_fallback=0', host)\n", (7937, 7984), True, 'import dc_gym.utils as dc_utils\n'), ((9718, 9771), 'dc_gym.utils.exec_process', 'dc_utils.exec_process', (['"""sysctl -w net.ipv4.tcp_ecn=0"""'], {}), "('sysctl -w net.ipv4.tcp_ecn=0')\n", (9739, 9771), True, 'import dc_gym.utils as dc_utils\n'), ((772, 828), 'dc_gym.utils.exec_process', 'dc_utils.exec_process', (["('insmod %s/tcp_pcc.ko' % FILE_DIR)"], {}), "('insmod %s/tcp_pcc.ko' % FILE_DIR)\n", (793, 828), True, 'import dc_gym.utils as dc_utils\n'), ((8068, 8143), 'dc_gym.utils.exec_process', 'dc_utils.exec_process', (['"""sysctl -w net.ipv4.tcp_congestion_control=nv"""', 'host'], {}), "('sysctl -w net.ipv4.tcp_congestion_control=nv', host)\n", (8089, 8143), True, 'import dc_gym.utils as dc_utils\n'), ((8224, 8300), 'dc_gym.utils.exec_process', 'dc_utils.exec_process', (['"""sysctl -w net.ipv4.tcp_congestion_control=pcc"""', 'host'], {}), "('sysctl -w net.ipv4.tcp_congestion_control=pcc', host)\n", (8245, 8300), True, 'import dc_gym.utils as dc_utils\n'), ((716, 744), 'os.popen', 'os.popen', (['"""lsmod | grep pcc"""'], {}), "('lsmod | grep pcc')\n", (724, 744), False, 'import os\n')] |
from django.shortcuts import render
from django.http import HttpResponse
from django.http import Http404
from django.views.decorators.csrf import csrf_exempt
from .models import Player
from django.core.paginator import Paginator
@csrf_exempt
def basic_view(request):
    """Render the player search page.

    On POST, filters the Player table by any combination of the submitted
    search fields; on GET, renders the empty search form.
    """
    if request.method != 'POST':
        return render(request, 'players.html')

    players = Player.objects.all()
    # (POST key, ORM lookup, skip when submitted empty, numeric field whose
    #  ValueError on a non-numeric value is silently ignored) — replaces
    #  eleven near-identical try/except blocks while preserving behavior.
    filter_specs = [
        ('PlayerName', 'Name', True, False),
        ('Age', 'Age', False, True),
        ('Nationality', 'Nationality', True, False),
        ('Club', 'Club', True, False),
        ('Position', 'Position', True, False),
        ('Overall', 'Overall__gte', False, True),
        ('Potential', 'Potential', False, True),
        ('WeakFoot', 'WeakFoot', False, True),
        ('SkillMoves', 'SkillMoves', False, True),
        ('PreferredFoot', 'PreferredFoot', True, False),
        ('InternationalReputation', 'InternationalReputation', False, True),
    ]
    for post_key, lookup, skip_empty, numeric in filter_specs:
        # Missing keys are always ignored; for numeric fields a bad value
        # is ignored too (matching the original per-field exception sets).
        ignored = (KeyError, ValueError) if numeric else (KeyError,)
        try:
            value = request.POST[post_key]
            if skip_empty and value == '':
                continue
            players = players.filter(**{lookup: value})
        except ignored:
            pass

    # Generating output
    player_list = list(players)
    print(player_list)  # debug output kept from the original implementation
    context = {
        'results': 'yes',
        'some_list': player_list,
    }
    return render(request, 'players.html', context)
| [
"django.shortcuts.render"
] | [((2983, 3023), 'django.shortcuts.render', 'render', (['request', '"""players.html"""', 'context'], {}), "(request, 'players.html', context)\n", (2989, 3023), False, 'from django.shortcuts import render\n'), ((3051, 3082), 'django.shortcuts.render', 'render', (['request', '"""players.html"""'], {}), "(request, 'players.html')\n", (3057, 3082), False, 'from django.shortcuts import render\n')] |
# -*- coding: utf-8 -*-
import logging
import os
import time
import click
import requests
from unsync import unsync
@unsync
def setup_repos(git_url: str):
    """Clone *git_url* as a background ``unsync`` task.

    Returns 1 when ``git clone`` exits successfully and 0 otherwise, so
    the caller can sum the task results to count successful clones.
    (The original returned 1 unconditionally, over-counting failures:
    ``os.system`` reports the command's exit status via its return
    value, it does not raise on a non-zero exit.)
    """
    import shlex  # local import: only needed for shell-safe quoting here
    try:
        # Quote the URL so shell metacharacters in API-supplied data
        # cannot alter the command.
        exit_code = os.system(f"git clone {shlex.quote(git_url)}")
        return 1 if exit_code == 0 else 0
    except Exception as e:
        logging.warning(e)
        return 0
@click.command()
@click.option("--github_profile", prompt="github profile name", help="e.g. devsetgo")
@click.option(
    "--max_repos", prompt="maximum number of public repos to retrieve", help="e.g. 50"
)
def call_api(github_profile, max_repos):
    """Fetch a user's most recently updated public repos and clone them.

    The clones run concurrently via the ``@unsync`` tasks returned by
    ``setup_repos``; the successful-clone count and the elapsed time
    are printed at the end.
    """
    try:
        max_repos = int(max_repos)
    except ValueError:
        # NOTE: the original message used a backslash continuation inside
        # the string literal, which embedded the indentation whitespace.
        logging.warning(
            f"Maximum repos must be an integer and not a {type(max_repos)}"
        )
        exit()
    # Fix: the original built this URL with a backslash continuation
    # *inside* the f-string, embedding the next line's indentation into
    # the query string ("sort=    updated"), which GitHub rejects as a
    # sort value.  Adjacent literals concatenate without stray spaces.
    url = (
        f"https://api.github.com/users/{github_profile}/repos"
        f"?sort=updated&per_page={max_repos}"
    )
    t0 = time.time()
    r = requests.get(url)
    logging.info(f"Fetching Repos for {github_profile}")
    data = r.json()
    tasks = [setup_repos(d["clone_url"]) for d in data]
    results = [task.result() for task in tasks]
    count = sum(results)
    elapsed = time.time() - t0
    print(f"{count} repos cloned in {elapsed:.2f} seconds.")
if __name__ == "__main__":
    # click handles argument parsing/prompting when run as a script.
    call_api()
| [
"click.option",
"logging.warning",
"logging.info",
"requests.get",
"os.system",
"click.command",
"time.time"
] | [((380, 395), 'click.command', 'click.command', ([], {}), '()\n', (393, 395), False, 'import click\n'), ((397, 486), 'click.option', 'click.option', (['"""--github_profile"""'], {'prompt': '"""github profile name"""', 'help': '"""e.g. devsetgo"""'}), "('--github_profile', prompt='github profile name', help=\n 'e.g. devsetgo')\n", (409, 486), False, 'import click\n'), ((483, 584), 'click.option', 'click.option', (['"""--max_repos"""'], {'prompt': '"""maximum number of public repos to retrieve"""', 'help': '"""e.g. 50"""'}), "('--max_repos', prompt=\n 'maximum number of public repos to retrieve', help='e.g. 50')\n", (495, 584), False, 'import click\n'), ((1060, 1071), 'time.time', 'time.time', ([], {}), '()\n', (1069, 1071), False, 'import time\n'), ((1080, 1097), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (1092, 1097), False, 'import requests\n'), ((1102, 1154), 'logging.info', 'logging.info', (['f"""Fetching Repos for {github_profile}"""'], {}), "(f'Fetching Repos for {github_profile}')\n", (1114, 1154), False, 'import logging\n'), ((226, 259), 'os.system', 'os.system', (['f"""git clone {git_url}"""'], {}), "(f'git clone {git_url}')\n", (235, 259), False, 'import os\n'), ((1316, 1327), 'time.time', 'time.time', ([], {}), '()\n', (1325, 1327), False, 'import time\n'), ((341, 359), 'logging.warning', 'logging.warning', (['e'], {}), '(e)\n', (356, 359), False, 'import logging\n')] |
import requests #type: ignore
class WittyFlowSms:
def __init__(self, app_id, app_secret):
self.app_id = app_id
self.app_secret = app_secret
def app_id(self):
return self.app_id
def app_secret(self):
return self.app_secret
def send_sms(self, phone, to, message, is_flash=False):
body_to_send = {
"from": f"{to}",
"to": f"233{phone[1:]}",
"type": 1,
"message": f"{message}",
"app_id": f"{self.app_id}",
"app_secret" : f"{self.app_secret}",
}
if is_flash:
body_to_send["type"] = 0
response = requests.post('https://api.wittyflow.com/v1/messages/send', data=body_to_send)
return response.json()
def get_account_balance(self):
response = requests.get(f'https://api.wittyflow.com/v1/account/balance?app_id={self.app_id}&app_secret={self.app_secret}')
return response.json()
def check_sms_status(self, sms_id):
response = requests.get(f'https://api.wittyflow.com/v1/messages/{sms_id}/retrieve?app_id={self.app_id}&app_secret={self.app_secret}')
return response.json() | [
"requests.post",
"requests.get"
] | [((674, 752), 'requests.post', 'requests.post', (['"""https://api.wittyflow.com/v1/messages/send"""'], {'data': 'body_to_send'}), "('https://api.wittyflow.com/v1/messages/send', data=body_to_send)\n", (687, 752), False, 'import requests\n'), ((839, 960), 'requests.get', 'requests.get', (['f"""https://api.wittyflow.com/v1/account/balance?app_id={self.app_id}&app_secret={self.app_secret}"""'], {}), "(\n f'https://api.wittyflow.com/v1/account/balance?app_id={self.app_id}&app_secret={self.app_secret}'\n )\n", (851, 960), False, 'import requests\n'), ((1042, 1174), 'requests.get', 'requests.get', (['f"""https://api.wittyflow.com/v1/messages/{sms_id}/retrieve?app_id={self.app_id}&app_secret={self.app_secret}"""'], {}), "(\n f'https://api.wittyflow.com/v1/messages/{sms_id}/retrieve?app_id={self.app_id}&app_secret={self.app_secret}'\n )\n", (1054, 1174), False, 'import requests\n')] |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import better.marketdata.globaldatamatrix as gdm
import numpy as np
import pandas as pd
import logging
from better.tools.configprocess import parse_time
from better.tools.data import get_volume_forward, get_type_list
import better.marketdata.replaybuffer as rb
MIN_NUM_PERIOD = 3
class DataMatrices:
    """Holds the global price panel and serves rolling-window training /
    test samples plus the portfolio-vector memory (PVM) for the agent."""
    def __init__(self, start, end, period, batch_size=50, volume_average_days=30, buffur_bias_ratio=0,
                 market="poloniex", coin_filter=1, window_size=50, feature_number=3, test_portion=0.15,
                 portion_reversed=False, online=False, is_permed=False):
        """
        :param start: Unix time
        :param end: Unix time
        :param access_period: the data access period of the input matrix.
        :param trade_period: the trading period of the agent.
        :param global_period: the data access period of the global price matrix.
                              if it is not equal to the access period, there will be inserted observations
        :param coin_filter: number of coins that would be selected
        :param window_size: periods of input data
        :param train_portion: portion of training set
        :param is_permed: if False, the sample inside a mini-batch is in order
        :param validation_portion: portion of cross-validation set
        :param test_portion: portion of test set
        :param portion_reversed: if False, the order to sets are [train, validation, test]
        else the order is [test, validation, train]
        """
        start = int(start)
        self.__end = int(end)
        # assert window_size >= MIN_NUM_PERIOD
        self.__coin_no = coin_filter
        type_list = get_type_list(feature_number)
        self.__features = type_list
        self.feature_number = feature_number
        volume_forward = get_volume_forward(self.__end-start, test_portion, portion_reversed)
        self.__history_manager = gdm.HistoryManager(coin_number=coin_filter, end=self.__end,
                                                     volume_average_days=volume_average_days,
                                                     volume_forward=volume_forward, online=online)
        if market == "poloniex":
            self.__global_data = self.__history_manager.get_global_panel(start,
                                                                         self.__end,
                                                                         period=period,
                                                                         features=type_list)
        else:
            raise ValueError("market {} is not valid".format(market))
        self.__period_length = period
        # portfolio vector memory, [time, assets]
        self.__PVM = pd.DataFrame(index=self.__global_data.minor_axis,
                                  columns=self.__global_data.major_axis)
        self.__PVM = self.__PVM.fillna(1.0 / self.__coin_no)
        self._window_size = window_size
        self._num_periods = len(self.__global_data.minor_axis)
        self.__divide_data(test_portion, portion_reversed)
        self._portion_reversed = portion_reversed
        self.__is_permed = is_permed
        self.__batch_size = batch_size
        self.__replay_buffer = None
        self.__delta = 0 # the count of global increased
        end_index = self._train_ind[-1]
        self.__replay_buffer = rb.ReplayBuffer(start_index=self._train_ind[0],
                                             end_index=end_index,
                                             sample_bias=buffur_bias_ratio,
                                             batch_size=self.__batch_size,
                                             coin_number=self.__coin_no,
                                             is_permed=self.__is_permed)
        logging.info("the number of training examples is %s"
                     ", of test examples is %s" % (self._num_train_samples, self._num_test_samples))
        logging.debug("the training set is from %s to %s" % (min(self._train_ind), max(self._train_ind)))
        logging.debug("the test set is from %s to %s" % (min(self._test_ind), max(self._test_ind)))
    @property
    def global_weights(self):
        # Full portfolio vector memory, shape [time, assets].
        return self.__PVM
    @staticmethod
    def create_from_config(config):
        """main method to create the DataMatrices in this project
        @:param config: config dictionary
        @:return: a DataMatrices object
        """
        config = config.copy()
        input_config = config["input"]
        train_config = config["training"]
        start = parse_time(input_config["start_date"])
        end = parse_time(input_config["end_date"])
        return DataMatrices(start=start,
                            end=end,
                            market=input_config["market"],
                            feature_number=input_config["feature_number"],
                            window_size=input_config["window_size"],
                            online=input_config["online"],
                            period=input_config["global_period"],
                            coin_filter=input_config["coin_number"],
                            is_permed=input_config["is_permed"],
                            buffur_bias_ratio=train_config["buffer_biased"],
                            batch_size=train_config["batch_size"],
                            volume_average_days=input_config["volume_average_days"],
                            test_portion=input_config["test_portion"],
                            portion_reversed=input_config["portion_reversed"],
                            )
    @property
    def global_matrix(self):
        return self.__global_data
    @property
    def coin_list(self):
        return self.__history_manager.coins
    @property
    def num_train_samples(self):
        return self._num_train_samples
    @property
    def test_indices(self):
        # Drop trailing indices that cannot form a full window plus target.
        return self._test_ind[:-(self._window_size+1):]
    @property
    def num_test_samples(self):
        return self._num_test_samples
    def append_experience(self, online_w=None):
        """
        :param online_w: (number of assets + 1, ) numpy array
        Let it be None if in the backtest case.
        """
        self.__delta += 1
        self._train_ind.append(self._train_ind[-1]+1)
        appended_index = self._train_ind[-1]
        self.__replay_buffer.append_experience(appended_index)
    def get_test_set(self):
        # Pack every usable test index into a single sample batch.
        return self.__pack_samples(self.test_indices)
    def get_training_set(self):
        # Pack every training index that still has a full window ahead of it.
        return self.__pack_samples(self._train_ind[:-self._window_size])
    def next_batch(self):
        """
        @:return: the next batch of training sample. The sample is a dictionary
        with key "X"(input data); "y"(future relative price); "last_w" a numpy array
        with shape [batch_size, assets]; "w" a list of numpy arrays list length is
        batch_size
        """
        batch = self.__pack_samples([exp.state_index for exp in self.__replay_buffer.next_experience_batch()])
        return batch
    def __pack_samples(self, indexs):
        # Build the network inputs/targets for the given time indices.
        indexs = np.array(indexs)
        last_w = self.__PVM.values[indexs-1, :]
        def setw(w):
            # Callback: write freshly produced portfolio weights back to the PVM.
            self.__PVM.iloc[indexs, :] = w
        M = [self.get_submatrix(index) for index in indexs]
        M = np.array(M)
        X = M[:, :, :, :-1]
        y = M[:, :, :, -1] / M[:, 0, None, :, -2]
        return {"X": X, "y": y, "last_w": last_w, "setw": setw}
    # volume in y is the volume in next access period
    def get_submatrix(self, ind):
        # One rolling window of window_size+1 periods starting at `ind`.
        return self.__global_data.values[:, :, ind:ind+self._window_size+1]
    def __divide_data(self, test_portion, portion_reversed):
        # Split the timeline indices into train/test partitions.
        train_portion = 1 - test_portion
        s = float(train_portion + test_portion)
        if portion_reversed:
            portions = np.array([test_portion]) / s
            portion_split = (portions * self._num_periods).astype(int)
            indices = np.arange(self._num_periods)
            self._test_ind, self._train_ind = np.split(indices, portion_split)
        else:
            portions = np.array([train_portion]) / s
            portion_split = (portions * self._num_periods).astype(int)
            indices = np.arange(self._num_periods)
            self._train_ind, self._test_ind = np.split(indices, portion_split)
        self._train_ind = self._train_ind[:-(self._window_size + 1)]
        # NOTE(zhengyao): change the logic here in order to fit both
        # reversed and normal version
        self._train_ind = list(self._train_ind)
        self._num_train_samples = len(self._train_ind)
        self._num_test_samples = len(self.test_indices)
| [
"better.tools.data.get_volume_forward",
"better.tools.data.get_type_list",
"numpy.arange",
"numpy.array",
"numpy.split",
"better.marketdata.replaybuffer.ReplayBuffer",
"pandas.DataFrame",
"better.marketdata.globaldatamatrix.HistoryManager",
"logging.info",
"better.tools.configprocess.parse_time"
] | [((1773, 1802), 'better.tools.data.get_type_list', 'get_type_list', (['feature_number'], {}), '(feature_number)\n', (1786, 1802), False, 'from better.tools.data import get_volume_forward, get_type_list\n'), ((1909, 1979), 'better.tools.data.get_volume_forward', 'get_volume_forward', (['(self.__end - start)', 'test_portion', 'portion_reversed'], {}), '(self.__end - start, test_portion, portion_reversed)\n', (1927, 1979), False, 'from better.tools.data import get_volume_forward, get_type_list\n'), ((2011, 2165), 'better.marketdata.globaldatamatrix.HistoryManager', 'gdm.HistoryManager', ([], {'coin_number': 'coin_filter', 'end': 'self.__end', 'volume_average_days': 'volume_average_days', 'volume_forward': 'volume_forward', 'online': 'online'}), '(coin_number=coin_filter, end=self.__end,\n volume_average_days=volume_average_days, volume_forward=volume_forward,\n online=online)\n', (2029, 2165), True, 'import better.marketdata.globaldatamatrix as gdm\n'), ((2834, 2927), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'self.__global_data.minor_axis', 'columns': 'self.__global_data.major_axis'}), '(index=self.__global_data.minor_axis, columns=self.\n __global_data.major_axis)\n', (2846, 2927), True, 'import pandas as pd\n'), ((3474, 3667), 'better.marketdata.replaybuffer.ReplayBuffer', 'rb.ReplayBuffer', ([], {'start_index': 'self._train_ind[0]', 'end_index': 'end_index', 'sample_bias': 'buffur_bias_ratio', 'batch_size': 'self.__batch_size', 'coin_number': 'self.__coin_no', 'is_permed': 'self.__is_permed'}), '(start_index=self._train_ind[0], end_index=end_index,\n sample_bias=buffur_bias_ratio, batch_size=self.__batch_size,\n coin_number=self.__coin_no, is_permed=self.__is_permed)\n', (3489, 3667), True, 'import better.marketdata.replaybuffer as rb\n'), ((3904, 4043), 'logging.info', 'logging.info', (["('the number of training examples is %s, of test examples is %s' % (self.\n _num_train_samples, self._num_test_samples))"], {}), "(\n 'the number of training 
examples is %s, of test examples is %s' % (self\n ._num_train_samples, self._num_test_samples))\n", (3916, 4043), False, 'import logging\n'), ((4678, 4716), 'better.tools.configprocess.parse_time', 'parse_time', (["input_config['start_date']"], {}), "(input_config['start_date'])\n", (4688, 4716), False, 'from better.tools.configprocess import parse_time\n'), ((4731, 4767), 'better.tools.configprocess.parse_time', 'parse_time', (["input_config['end_date']"], {}), "(input_config['end_date'])\n", (4741, 4767), False, 'from better.tools.configprocess import parse_time\n'), ((7216, 7232), 'numpy.array', 'np.array', (['indexs'], {}), '(indexs)\n', (7224, 7232), True, 'import numpy as np\n'), ((7418, 7429), 'numpy.array', 'np.array', (['M'], {}), '(M)\n', (7426, 7429), True, 'import numpy as np\n'), ((8062, 8090), 'numpy.arange', 'np.arange', (['self._num_periods'], {}), '(self._num_periods)\n', (8071, 8090), True, 'import numpy as np\n'), ((8137, 8169), 'numpy.split', 'np.split', (['indices', 'portion_split'], {}), '(indices, portion_split)\n', (8145, 8169), True, 'import numpy as np\n'), ((8330, 8358), 'numpy.arange', 'np.arange', (['self._num_periods'], {}), '(self._num_periods)\n', (8339, 8358), True, 'import numpy as np\n'), ((8405, 8437), 'numpy.split', 'np.split', (['indices', 'portion_split'], {}), '(indices, portion_split)\n', (8413, 8437), True, 'import numpy as np\n'), ((7940, 7964), 'numpy.array', 'np.array', (['[test_portion]'], {}), '([test_portion])\n', (7948, 7964), True, 'import numpy as np\n'), ((8207, 8232), 'numpy.array', 'np.array', (['[train_portion]'], {}), '([train_portion])\n', (8215, 8232), True, 'import numpy as np\n')] |
# How to Do Linear Regression using Gradient Descent - Live session from 3/29/17
# https://www.youtube.com/watch?v=XdM6ER7zTLk
# https://github.com/llSourcell/linear_regression_live
# My modification, that uses Numpy to the full extent, which can be faster.
import numpy as np
def computeErrorForGivenPoints(m, b, points):
    """Mean squared error of the line y = m*x + b over *points* (N x 2)."""
    xs = points[:, 0]
    ys = points[:, 1]
    residuals = ys - (m * xs + b)
    return np.mean(residuals ** 2)
def step_gradient(mCurrent, bCurrent, points, learningRate):
    """One gradient-descent step on the MSE of y = m*x + b.

    Returns the updated (m, b) pair.
    """
    xs, ys = points[:, 0], points[:, 1]
    # Residuals of the current fit; their means give the partial
    # derivatives of the mean squared error w.r.t. b and m.
    residuals = (mCurrent * xs + bCurrent) - ys
    grad_m = 2. * (xs * residuals).mean()
    grad_b = 2. * residuals.mean()
    return (mCurrent - learningRate * grad_m,
            bCurrent - learningRate * grad_b)
def gradient_descent_runner(points, startingM, startingB, learningRate, numIterations):
    """Run *numIterations* gradient-descent steps from the given start."""
    m, b = startingM, startingB
    for _ in range(numIterations):
        m, b = step_gradient(m, b, points, learningRate)
    return m, b
def run():
    """Fit y = m*x + b to data.csv with plain gradient descent and report."""
    points = np.genfromtxt('data.csv', delimiter=',')
    # Hyperparameter(s)
    learning_rate = .0001
    # y = mx + b (slope formula)
    initial_m = 0.
    initial_b = 0.
    num_iterations = 1000
    start_error = computeErrorForGivenPoints(initial_m, initial_b, points)
    print('Starting gradient descent at m = {}, b = {}, error = {}'.format(initial_m, initial_b, start_error))  # error = 5565.1078
    print('Running...')
    m, b = gradient_descent_runner(points, initial_m, initial_b, learning_rate, num_iterations)
    print('After {} iterations:'.format(num_iterations))
    print('m =', m)  # 1.4777
    print('b =', b)  # 0.0889
    print('error = ', computeErrorForGivenPoints(m, b, points))  # 112.6148
# Run the demo only when executed as a script, not when imported.
if __name__ == "__main__":
    run()
"numpy.genfromtxt",
"numpy.square"
] | [((391, 417), 'numpy.square', 'np.square', (['(y - (m * x + b))'], {}), '(y - (m * x + b))\n', (400, 417), True, 'import numpy as np\n'), ((1139, 1179), 'numpy.genfromtxt', 'np.genfromtxt', (['"""data.csv"""'], {'delimiter': '""","""'}), "('data.csv', delimiter=',')\n", (1152, 1179), True, 'import numpy as np\n')] |
import yaml
def read_config(filename):
    """Parse the YAML file at *filename* and return the resulting object."""
    # NOTE(review): yaml.Loader can instantiate arbitrary Python objects;
    # consider yaml.safe_load if the file may come from untrusted sources.
    with open(filename) as stream:
        return yaml.load(stream, yaml.Loader)
| [
"yaml.load"
] | [((86, 111), 'yaml.load', 'yaml.load', (['f', 'yaml.Loader'], {}), '(f, yaml.Loader)\n', (95, 111), False, 'import yaml\n')] |
import copy
import pytest
from icupy import U_ICU_VERSION_MAJOR_NUM
# TimeZoneNames requires ICU 50+; skip this whole module on older ICU.
if U_ICU_VERSION_MAJOR_NUM < 50:
    pytest.skip("ICU4C<50", allow_module_level=True)
from icupy import (
Locale, StringEnumeration, TimeZoneNames, UTimeZoneNameType,
UnicodeString,
)
def test_api():
    """Exercise the TimeZoneNames factory, comparison operators and the
    name/id lookup methods with both UnicodeString and str arguments."""
    # static TimeZoneNames *icu::TimeZoneNames::createInstance(
    #       const Locale &locale,
    #       UErrorCode &status
    # )
    tzn1 = TimeZoneNames.create_instance(Locale("en"))
    assert isinstance(tzn1, TimeZoneNames)
    tzn2 = TimeZoneNames.create_instance("en")
    assert isinstance(tzn2, TimeZoneNames)
    tzn3 = TimeZoneNames.create_instance("ja")
    # UBool icu::TimeZoneNames::operator!=(const TimeZoneNames &other)
    assert not (tzn1 != tzn2)
    assert tzn1 != tzn3
    assert tzn2 != tzn3
    # UBool icu::TimeZoneNames::operator==(const TimeZoneNames &other)
    assert tzn1 == tzn2
    assert not (tzn1 == tzn3)
    assert not (tzn2 == tzn3)
    # [1]
    # StringEnumeration *icu::TimeZoneNames::getAvailableMetaZoneIDs(
    #       const UnicodeString &tzID,
    #       UErrorCode &status
    # )
    it1 = tzn1.get_available_meta_zone_ids(
        UnicodeString("America/Los_Angeles"))
    assert isinstance(it1, StringEnumeration)
    assert len(it1) > 0
    assert "America_Pacific" in it1
    it2 = tzn1.get_available_meta_zone_ids(
        "America/Los_Angeles")
    assert isinstance(it2, StringEnumeration)
    assert it2 == it1
    # [2]
    # StringEnumeration *icu::TimeZoneNames::getAvailableMetaZoneIDs(
    #       UErrorCode &status
    # )
    it3 = tzn1.get_available_meta_zone_ids()
    assert isinstance(it3, StringEnumeration)
    assert len(it3) > 0
    assert "America_Pacific" in it3
    date = 1358208000000.0  # 2013-01-15T00:00:00Z
    name = UnicodeString()
    # Each lookup below writes into `name` in place and returns it, so the
    # tests assert both identity (same object) and the expected value.
    # UnicodeString &icu::TimeZoneNames::getDisplayName(
    #       const UnicodeString &tzID,
    #       UTimeZoneNameType type,
    #       UDate date,
    #       UnicodeString &name
    # )
    result = tzn1.get_display_name(
        UnicodeString("America/Los_Angeles"),
        UTimeZoneNameType.UTZNM_LONG_STANDARD,
        date,
        name)
    assert isinstance(result, UnicodeString)
    assert id(result) == id(name)
    assert result == "Pacific Standard Time"
    result = tzn1.get_display_name(
        "America/Los_Angeles",
        UTimeZoneNameType.UTZNM_LONG_DAYLIGHT,
        date,
        name)
    assert isinstance(result, UnicodeString)
    assert id(result) == id(name)
    assert result == "Pacific Daylight Time"
    # UnicodeString &icu::TimeZoneNames::getExemplarLocationName(
    #       const UnicodeString &tzID,
    #       UnicodeString &name
    # )
    result = tzn1.get_exemplar_location_name(
        UnicodeString("America/Los_Angeles"),
        name)
    assert isinstance(result, UnicodeString)
    assert id(result) == id(name)
    assert result == "Los Angeles"
    result = tzn1.get_exemplar_location_name(
        "Asia/Tokyo",
        name)
    assert isinstance(result, UnicodeString)
    assert id(result) == id(name)
    assert result == "Tokyo"
    # UnicodeString &icu::TimeZoneNames::getMetaZoneDisplayName(
    #       const UnicodeString &mzID,
    #       UTimeZoneNameType type,
    #       UnicodeString &name
    # )
    result = tzn1.get_meta_zone_display_name(
        UnicodeString("America_Pacific"),
        UTimeZoneNameType.UTZNM_LONG_STANDARD,
        name)
    assert isinstance(result, UnicodeString)
    assert id(result) == id(name)
    assert result == "Pacific Standard Time"
    result = tzn1.get_meta_zone_display_name(
        "America_Pacific",
        UTimeZoneNameType.UTZNM_LONG_DAYLIGHT,
        name)
    assert isinstance(result, UnicodeString)
    assert id(result) == id(name)
    assert result == "Pacific Daylight Time"
    mz_id = UnicodeString()
    # UnicodeString &icu::TimeZoneNames::getMetaZoneID(
    #       const UnicodeString &tzID,
    #       UDate date,
    #       UnicodeString &mzID
    # )
    result = tzn1.get_meta_zone_id(
        UnicodeString("America/Los_Angeles"),
        date,
        mz_id)
    assert isinstance(result, UnicodeString)
    assert id(result) == id(mz_id)
    assert result == "America_Pacific"
    result = tzn1.get_meta_zone_id(
        "America/Los_Angeles",
        date,
        mz_id)
    assert isinstance(result, UnicodeString)
    assert id(result) == id(mz_id)
    assert result == "America_Pacific"
    tz_id = UnicodeString()
    # UnicodeString &icu::TimeZoneNames::getReferenceZoneID(
    #       const UnicodeString &mzID,
    #       const char *region,
    #       UnicodeString &tzID
    # )
    result = tzn1.get_reference_zone_id(
        UnicodeString("America_Pacific"),
        "001",
        tz_id)
    assert isinstance(result, UnicodeString)
    assert id(result) == id(tz_id)
    assert result == "America/Los_Angeles"
    result = tzn1.get_reference_zone_id(
        "America_Pacific",
        "001",
        tz_id)
    assert isinstance(result, UnicodeString)
    assert id(result) == id(tz_id)
    assert result == "America/Los_Angeles"
    # UnicodeString &icu::TimeZoneNames::getTimeZoneDisplayName(
    #       const UnicodeString &tzID,
    #       UTimeZoneNameType type,
    #       UnicodeString &name
    # )
    # The base class has no per-zone names, so a bogus string is expected.
    result = tzn1.get_time_zone_display_name(
        UnicodeString("America/Los_Angeles"),
        UTimeZoneNameType.UTZNM_LONG_STANDARD,
        name)
    assert isinstance(result, UnicodeString)
    assert id(result) == id(name)
    assert result.is_bogus()
    result = tzn1.get_time_zone_display_name(
        "America/Los_Angeles",
        UTimeZoneNameType.UTZNM_LONG_STANDARD,
        name)
    assert isinstance(result, UnicodeString)
    assert id(result) == id(name)
    assert result.is_bogus()
def test_clone():
    """clone(), copy.copy() and copy.deepcopy() all yield equal objects."""
    source = TimeZoneNames.create_instance(Locale("en"))
    # TimeZoneNames *icu::TimeZoneNames::clone()
    cloned = source.clone()
    assert isinstance(cloned, TimeZoneNames)
    assert cloned == source
    shallow = copy.copy(source)
    assert shallow == source
    deep = copy.deepcopy(source)
    assert deep == source
@pytest.mark.skipif(U_ICU_VERSION_MAJOR_NUM < 54, reason="ICU4C<54")
def test_create_tzdb_instance():
    """createTZDBInstance() accepts both Locale and str arguments."""
    # static TimeZoneNames *icu::TimeZoneNames::createTZDBInstance(
    #       const Locale &locale,
    #       UErrorCode &status
    # )
    tzn1 = TimeZoneNames.create_tzdb_instance(Locale("en"))
    assert isinstance(tzn1, TimeZoneNames)
    tzn2 = TimeZoneNames.create_tzdb_instance("en")
    assert isinstance(tzn2, TimeZoneNames)
    # UBool icu::TZDBTimeZoneNames::operator==(...) is not implemented (ICU 69)
    # assert tzn1 == tzn2
| [
"icupy.Locale",
"copy.deepcopy",
"icupy.UnicodeString",
"icupy.TimeZoneNames.create_instance",
"copy.copy",
"icupy.TimeZoneNames.create_tzdb_instance",
"pytest.mark.skipif",
"pytest.skip"
] | [((6136, 6203), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(U_ICU_VERSION_MAJOR_NUM < 54)'], {'reason': '"""ICU4C<54"""'}), "(U_ICU_VERSION_MAJOR_NUM < 54, reason='ICU4C<54')\n", (6154, 6203), False, 'import pytest\n'), ((107, 155), 'pytest.skip', 'pytest.skip', (['"""ICU4C<50"""'], {'allow_module_level': '(True)'}), "('ICU4C<50', allow_module_level=True)\n", (118, 155), False, 'import pytest\n'), ((528, 563), 'icupy.TimeZoneNames.create_instance', 'TimeZoneNames.create_instance', (['"""en"""'], {}), "('en')\n", (557, 563), False, 'from icupy import Locale, StringEnumeration, TimeZoneNames, UTimeZoneNameType, UnicodeString\n'), ((619, 654), 'icupy.TimeZoneNames.create_instance', 'TimeZoneNames.create_instance', (['"""ja"""'], {}), "('ja')\n", (648, 654), False, 'from icupy import Locale, StringEnumeration, TimeZoneNames, UTimeZoneNameType, UnicodeString\n'), ((1794, 1809), 'icupy.UnicodeString', 'UnicodeString', ([], {}), '()\n', (1807, 1809), False, 'from icupy import Locale, StringEnumeration, TimeZoneNames, UTimeZoneNameType, UnicodeString\n'), ((3838, 3853), 'icupy.UnicodeString', 'UnicodeString', ([], {}), '()\n', (3851, 3853), False, 'from icupy import Locale, StringEnumeration, TimeZoneNames, UTimeZoneNameType, UnicodeString\n'), ((4473, 4488), 'icupy.UnicodeString', 'UnicodeString', ([], {}), '()\n', (4486, 4488), False, 'from icupy import Locale, StringEnumeration, TimeZoneNames, UTimeZoneNameType, UnicodeString\n'), ((6037, 6052), 'copy.copy', 'copy.copy', (['tzn1'], {}), '(tzn1)\n', (6046, 6052), False, 'import copy\n'), ((6089, 6108), 'copy.deepcopy', 'copy.deepcopy', (['tzn1'], {}), '(tzn1)\n', (6102, 6108), False, 'import copy\n'), ((6493, 6533), 'icupy.TimeZoneNames.create_tzdb_instance', 'TimeZoneNames.create_tzdb_instance', (['"""en"""'], {}), "('en')\n", (6527, 6533), False, 'from icupy import Locale, StringEnumeration, TimeZoneNames, UTimeZoneNameType, UnicodeString\n'), ((459, 471), 'icupy.Locale', 'Locale', (['"""en"""'], {}), 
"('en')\n", (465, 471), False, 'from icupy import Locale, StringEnumeration, TimeZoneNames, UTimeZoneNameType, UnicodeString\n'), ((1172, 1208), 'icupy.UnicodeString', 'UnicodeString', (['"""America/Los_Angeles"""'], {}), "('America/Los_Angeles')\n", (1185, 1208), False, 'from icupy import Locale, StringEnumeration, TimeZoneNames, UTimeZoneNameType, UnicodeString\n'), ((2051, 2087), 'icupy.UnicodeString', 'UnicodeString', (['"""America/Los_Angeles"""'], {}), "('America/Los_Angeles')\n", (2064, 2087), False, 'from icupy import Locale, StringEnumeration, TimeZoneNames, UTimeZoneNameType, UnicodeString\n'), ((2755, 2791), 'icupy.UnicodeString', 'UnicodeString', (['"""America/Los_Angeles"""'], {}), "('America/Los_Angeles')\n", (2768, 2791), False, 'from icupy import Locale, StringEnumeration, TimeZoneNames, UTimeZoneNameType, UnicodeString\n'), ((3347, 3379), 'icupy.UnicodeString', 'UnicodeString', (['"""America_Pacific"""'], {}), "('America_Pacific')\n", (3360, 3379), False, 'from icupy import Locale, StringEnumeration, TimeZoneNames, UTimeZoneNameType, UnicodeString\n'), ((4058, 4094), 'icupy.UnicodeString', 'UnicodeString', (['"""America/Los_Angeles"""'], {}), "('America/Los_Angeles')\n", (4071, 4094), False, 'from icupy import Locale, StringEnumeration, TimeZoneNames, UTimeZoneNameType, UnicodeString\n'), ((4711, 4743), 'icupy.UnicodeString', 'UnicodeString', (['"""America_Pacific"""'], {}), "('America_Pacific')\n", (4724, 4743), False, 'from icupy import Locale, StringEnumeration, TimeZoneNames, UTimeZoneNameType, UnicodeString\n'), ((5355, 5391), 'icupy.UnicodeString', 'UnicodeString', (['"""America/Los_Angeles"""'], {}), "('America/Los_Angeles')\n", (5368, 5391), False, 'from icupy import Locale, StringEnumeration, TimeZoneNames, UTimeZoneNameType, UnicodeString\n'), ((5870, 5882), 'icupy.Locale', 'Locale', (['"""en"""'], {}), "('en')\n", (5876, 5882), False, 'from icupy import Locale, StringEnumeration, TimeZoneNames, UTimeZoneNameType, UnicodeString\n'), 
((6424, 6436), 'icupy.Locale', 'Locale', (['"""en"""'], {}), "('en')\n", (6430, 6436), False, 'from icupy import Locale, StringEnumeration, TimeZoneNames, UTimeZoneNameType, UnicodeString\n')] |
from __future__ import unicode_literals
from future.builtins import int
from collections import defaultdict
from django.core.urlresolvers import reverse
from django.template.defaultfilters import linebreaksbr, urlize
from mezzanine import template
from mezzanine.conf import settings
from mezzanine.generic.forms import ThreadedCommentForm
from mezzanine.generic.models import ThreadedComment
from mezzanine.utils.importing import import_dotted_path
from mezzanine.pages.models import Page, RichTextPage
# Template-tag registry that Django/Mezzanine discovers for this module.
register = template.Library()
@register.assignment_tag
def allpages():
    """Return every ``RichTextPage`` as a dict of selected fields.

    Intended for ``{% allpages as pages %}``; the result has the shape
    ``{'pages': [<field dict>, ...]}``.  (Leftover pdb debug code and
    the manual accumulation loops from the original were removed; the
    ``u''`` prefixes were redundant under ``unicode_literals``.)
    """
    page_fields = (
        'content', 'created', 'description', 'expiry_date',
        'gen_description', 'id', 'keywords', 'keywords_string',
        'publish_date', 'short_url', 'slug', 'status', 'title',
        'titles', 'updated',
    )
    pages = [
        {field: getattr(page, field) for field in page_fields}
        for page in RichTextPage.objects.all()
    ]
    return {'pages': pages}
@register.filter()
def remove_slash(value):
    """Turn a slash-wrapped path like ``/about/`` into an anchor ``#about``."""
    trimmed = value[1:-1]
    return '#' + trimmed
@register.filter()
def lower(value):
    """Template filter: return *value* lower-cased.

    (Leftover commented-out pdb debug code removed.)
    """
    return value.lower()
"mezzanine.pages.models.RichTextPage.objects.all",
"mezzanine.template.Library"
] | [((519, 537), 'mezzanine.template.Library', 'template.Library', ([], {}), '()\n', (535, 537), False, 'from mezzanine import template\n'), ((853, 879), 'mezzanine.pages.models.RichTextPage.objects.all', 'RichTextPage.objects.all', ([], {}), '()\n', (877, 879), False, 'from mezzanine.pages.models import Page, RichTextPage\n')] |
from __future__ import unicode_literals
from SourceFile import SourceFile
class ProjectSizeCheckerBlock:
    """Pipeline stage that forwards only sufficiently large projects.

    Loads the project's source files from the database; when at least
    ``minClasses`` files exist, attaches them to the project as
    ``SourceFile`` objects and hands the project to the next block.
    """

    def __init__(self, cursor, minClasses, nextBlock):
        self.cursor = cursor          # DB-API cursor yielding dict-style rows
        self.minClasses = minClasses  # minimum file count to pass through
        self.nextBlock = nextBlock    # next stage; must expose .process()

    def process(self, project):
        sql = "SELECT * FROM source_files WHERE project_id=%s"
        # DB-API 2.0 requires query parameters as a sequence, not a bare
        # scalar (pymysql tolerates a scalar; other drivers may not).
        self.cursor.execute(sql, (project.projectId,))
        filesInProject = self.cursor.fetchall()
        if len(filesInProject) >= self.minClasses:
            # `row` instead of `file` — avoid shadowing the builtin.
            for row in filesInProject:
                project.files.add(SourceFile(row['id'], row['name']))
            self.nextBlock.process(project)
| [
"SourceFile.SourceFile"
] | [((593, 629), 'SourceFile.SourceFile', 'SourceFile', (["file['id']", "file['name']"], {}), "(file['id'], file['name'])\n", (603, 629), False, 'from SourceFile import SourceFile\n')] |
import unittest
from descriptastorus import MolFileIndex
import os, shutil
import logging
import datahook
TEST_DIR = "test1"
class TestCase(unittest.TestCase):
    """Tests for MolFileIndex.MakeSmilesIndex over the bundled test1.smi file."""

    def setUp(self):
        # shutil.rmtree(..., ignore_errors=True) already tolerates a missing
        # directory, so the original os.path.exists() guard was redundant.
        shutil.rmtree(TEST_DIR, ignore_errors=True)
        self.index = MolFileIndex.MakeSmilesIndex(
            os.path.join(datahook.datadir, "../data/test1.smi"), TEST_DIR, hasHeader=True,
            smilesColumn="smiles", nameColumn="name")

    def tearDown(self):
        # Remove the on-disk index created by setUp.
        shutil.rmtree(TEST_DIR, ignore_errors=True)

    def testIndexing(self):
        """Spot-check row count and record retrieval of the SMILES index."""
        logging.info("Running index test")
        self.assertEqual(self.index.N, 14)
        self.assertEqual(self.index.getMol(12), 'c1ccccc1CCCCCCCCCCCC')
        self.assertEqual(self.index.getName(12), '13')
        # Index 13 is out of range / unparsable, so no RDKit mol is returned.
        self.assertEqual(self.index.getRDMol(13), None)
if __name__ == '__main__':
    # Raise the root logger to INFO so the test's logging.info() output shows.
    logging.getLogger().setLevel(logging.INFO)
    # Discover and run the TestCase defined above.
    unittest.main()
| [
"logging.getLogger",
"os.path.exists",
"os.path.join",
"shutil.rmtree",
"unittest.main",
"logging.info"
] | [((999, 1014), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1012, 1014), False, 'import unittest\n'), ((195, 219), 'os.path.exists', 'os.path.exists', (['TEST_DIR'], {}), '(TEST_DIR)\n', (209, 219), False, 'import os, shutil\n'), ((525, 549), 'os.path.exists', 'os.path.exists', (['TEST_DIR'], {}), '(TEST_DIR)\n', (539, 549), False, 'import os, shutil\n'), ((657, 691), 'logging.info', 'logging.info', (['"""Running index test"""'], {}), "('Running index test')\n", (669, 691), False, 'import logging\n'), ((233, 276), 'shutil.rmtree', 'shutil.rmtree', (['TEST_DIR'], {'ignore_errors': '(True)'}), '(TEST_DIR, ignore_errors=True)\n', (246, 276), False, 'import os, shutil\n'), ((348, 399), 'os.path.join', 'os.path.join', (['datahook.datadir', '"""../data/test1.smi"""'], {}), "(datahook.datadir, '../data/test1.smi')\n", (360, 399), False, 'import os, shutil\n'), ((563, 606), 'shutil.rmtree', 'shutil.rmtree', (['TEST_DIR'], {'ignore_errors': '(True)'}), '(TEST_DIR, ignore_errors=True)\n', (576, 606), False, 'import os, shutil\n'), ((952, 971), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (969, 971), False, 'import logging\n')] |
from django.shortcuts import render
from .forms import YouTubeLinks
import pytube,sys
from pytube import YouTube
# Create your views here.
def indexView(request):
    """Render the download page; on a valid POST, fetch YouTube metadata.

    Builds a pytube ``YouTube`` object from the submitted link and passes the
    video's title, length, thumbnail and id to the template.
    """
    # Defaults so the template context is always well defined. The original
    # assigned video_length/video_thumbnail/video_id only inside the
    # form.is_valid() branch, raising NameError on a plain GET request.
    video_url = ""
    video_title = ""
    video_length = ""
    video_thumbnail = ""
    video_id = ""
    form = YouTubeLinks()
    if request.method == "POST":
        form = YouTubeLinks(request.POST)
        if form.is_valid():
            video_url = form.cleaned_data['link']
            # creating YouTube object from the link
            video = YouTube(video_url)
            video_title = video.title
            # length of the video in seconds
            video_length = video.length
            video_thumbnail = video.thumbnail_url
            video_id = video.video_id
    context = {
        "type": "mp4",
        "form": form,
        "video_title": video_title,
        "video_length": video_length,
        # key spelling ("thumbail") kept as-is for template compatibility
        "video_thumbail": video_thumbnail,
        "video_id": video_id,
    }
    return render(request, 'index.html', context)
| [
"django.shortcuts.render",
"pytube.YouTube"
] | [((483, 501), 'pytube.YouTube', 'YouTube', (['video_url'], {}), '(video_url)\n', (490, 501), False, 'from pytube import YouTube\n'), ((936, 974), 'django.shortcuts.render', 'render', (['request', '"""index.html"""', 'context'], {}), "(request, 'index.html', context)\n", (942, 974), False, 'from django.shortcuts import render\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 24 14:38:33 2020
@author: Administrator
"""
"""
数字1-1000放在一个含有1001个元素的数组中,其中只有唯一的一个元素值重复,其他数字均只出现过一次.
设计一个算法,将重复元素找出来,要求每个元素只能访问一次.如果不使用辅助存储空间,能否设计一个算法实现.
"""
import copy
def Method1(array):
    """Find the single repeated element by tracking values already seen.

    Returns the first value that occurs twice, -1 for ``None`` input, or
    ``None`` when nothing repeats. Generalized from the original fixed
    1..1000 bookkeeping dict to any hashable elements, using a set
    (O(n) time, O(n) extra space). Also fixes the ``== None`` comparison
    to the idiomatic ``is None``.
    """
    if array is None:
        return -1
    seen = set()
    for item in array:
        if item in seen:
            return item
        seen.add(item)
def Method2(array):
    """Find the repeated element via XOR cancellation, O(1) extra space.

    Assumes ``array`` holds the values 1..n plus exactly one duplicate
    (so ``len(array) == n + 1``). XOR-ing all elements with all values
    1..n cancels every value that appears an even number of times in the
    combined stream, leaving only the duplicate.

    The original hard-coded n = 1000; n is now derived from the input
    length, and ``None`` input returns -1 for consistency with Method1/3.
    """
    if array is None:
        return -1
    result = 0
    for item in array:
        result ^= item
    # XOR in each expected value 1..n once.
    for value in range(1, len(array)):
        result ^= value
    return result
def Method3(array):
    """Find the duplicate in-place by negating visited slots.

    Treats each value as the next index to visit; the first slot reached
    twice is already negative, and its index is the duplicate value.
    NOTE: mutates ``array``. Returns -1 for ``None`` input.
    """
    if array is None:
        return -1
    cursor = 0
    while array[cursor] > 0:
        value = array[cursor]
        # Mark this slot as visited by flipping its sign.
        array[cursor] = -value
        cursor = value
    return cursor
if __name__ == "__main__":
array = [(i + 1) for i in range(1000)]
array.append(125)
replicated = Method1(array)
print("Based on method1, The replicated element is : " , replicated)
replicated = Method2(array)
print("Based on method2, The replicated element is : " , replicated)
array_copy = copy.deepcopy(array)
replicated = Method3(array_copy)
print("Based on method3, The replicated element is : " , replicated) | [
"copy.deepcopy"
] | [((1283, 1303), 'copy.deepcopy', 'copy.deepcopy', (['array'], {}), '(array)\n', (1296, 1303), False, 'import copy\n')] |
import math

import numpy as np
import scipy.signal
from pyNN.random import NumpyRNG
from sklearn.linear_model import LinearRegression
def gaussian_convolution(spikes, dt):
    """Smooth a spike train with a Gaussian kernel and return its mean rate.

    Works only after the simulation has run (offline, not online).

    Args:
        spikes: 1-D array-like spike train (e.g. binned spike counts).
        dt: simulation time step, used to scale counts into a rate.

    Returns:
        Mean of the Gaussian-smoothed rate signal.
    """
    kernel_size = 10
    # FIX: the original called signal.gaussian() but `signal` was never
    # imported (NameError at runtime). Use the current SciPy API.
    gaussian_kernel = scipy.signal.windows.gaussian(kernel_size, std=2)
    scaling_factor = 1 / np.sum(gaussian_kernel) * 1 / dt
    gauss_rate = np.convolve(spikes, gaussian_kernel, mode='same') * scaling_factor
    mean_rate = np.mean(gauss_rate)
    return mean_rate
return mean_rate
def spike_mean_rate(spikes, sim_time):
    """Average firing rate: spike count divided by the simulation duration."""
    n_spikes = len(spikes)
    return n_spikes / sim_time
def generate_testImage(direction):
    """Return a flattened 3x3 input image lit in one column.

    'left', 'middle' and 'right' light the corresponding column with a
    fixed potential; any other value yields an all-dark image.
    """
    potential = 100
    if direction == "left":
        return [potential, 0, 0, potential, 0, 0, potential, 0, 0]
    elif direction == 'middle':
        return [0, potential, 0, 0, potential, 0, 0, potential, 0]
    elif direction == 'right':
        return [0, 0, potential, 0, 0, potential, 0, 0, potential]
    else:
        return [0, 0, 0, 0, 0, 0, 0, 0, 0]


# Labeled image has the form (image, label)
# Label is a list [on1, on2], on# being the correct value for
# the output neurons
def generate_labeledImages(nr):
    """Return ``nr`` labeled images, one third per direction.

    Each round appends (right, [0, 10]), (middle, [0, 0]), (left, [10, 0]).
    Uses floor division so the code also runs under Python 3, where the
    original ``range(nr / 3)`` would raise TypeError (float argument).
    """
    labeledImages = []
    for _ in range(nr // 3):
        labeledImages.append((generate_testImage("right"), [0, 10]))
        labeledImages.append((generate_testImage("middle"), [0, 0]))
        labeledImages.append((generate_testImage("left"), [10, 0]))
    return labeledImages
# title: title of result
# strains: spiking trains
def print_mean_spike_rate(strains):
    """Print and return the mean rates of the (left, right) readout trains."""
    left_rate = spike_mean_rate(strains[0], param.simulation_time)
    right_rate = spike_mean_rate(strains[1], param.simulation_time)
    print('Mean rate readout neurons (left, right)')
    print('(' + str(left_rate) + ',' + str(right_rate) + ')')
    return (left_rate, right_rate)
def compute_linear_weights(X, rout_left, rout_right):
    """Fit one linear model per readout neuron and interleave their weights.

    Returns [w1[0], w2[0], w1[1], w2[1], ...] matching the
    reservoir -> readout connection ordering.
    """
    print('size of X', np.size(X))
    left_model = LinearRegression()
    left_model.fit(X, rout_left)
    w1 = left_model.coef_
    right_model = LinearRegression()
    right_model.fit(X, rout_right)
    w2 = right_model.coef_
    weights = []
    for i in range(param.reservoir_nr):
        weights.extend((w1[i], w2[i]))
    return weights
def compute_weights(X, rout_left, rout_right):
    """Ridge-regularised least-squares readout weights.

    Solves (X^T X + 0.1 I) w = X^T r for each readout target, prints both
    solutions, and returns them interleaved per reservoir neuron.
    """
    gram = X.T.dot(X) + 0.1 * np.identity(param.reservoir_nr)
    w1 = np.linalg.lstsq(gram, X.T.dot(rout_left))[0].tolist()
    print('Weights w1 reservoir - readout neuron left')
    print(w1)
    w2 = np.linalg.lstsq(gram, X.T.dot(rout_right))[0].tolist()
    print('Weights w2 reservoir - readout neuron right')
    print(w2)
    # Interleave as [w1[0], w2[0], w1[1], w2[1], ...] to match the
    # reservoir -> readout connection list ordering.
    interleaved = []
    for i in range(param.reservoir_nr):
        interleaved.append(w1[i])
        interleaved.append(w2[i])
    return interleaved
def compute_weights_exc_inh(X, rout_left, rout_right):
    """Least-squares readout weights for an excitatory/inhibitory reservoir.

    Solves the ridge-regularised normal equations (X^T X + 0.1 I) w = X^T r
    for each readout target, prints both solutions, and returns
    ``(w_exc, w_inh)``: weights interleaved per neuron ([w1[i], w2[i], ...])
    for the first ``res_exc_nr`` neurons and the remaining ``res_inh_nr``
    neurons respectively.

    The original also assembled a combined interleaved list ``w`` that was
    never used; that dead code has been removed.
    """
    n_total = param.res_exc_nr + param.res_inh_nr
    gram = X.T.dot(X) + 0.1 * np.identity(n_total)
    w1 = np.linalg.lstsq(gram, X.T.dot(rout_left))[0].tolist()
    print('Weights w1 reservoir - readout neuron left')
    print(w1)
    w2 = np.linalg.lstsq(gram, X.T.dot(rout_right))[0].tolist()
    print('Weights w2 reservoir - readout neuron right')
    print(w2)
    w_exc = []
    for i in range(param.res_exc_nr):
        w_exc.append(w1[i])
        w_exc.append(w2[i])
    w_inh = []
    for i in range(param.res_inh_nr):
        w_inh.append(w1[param.res_exc_nr + i])
        w_inh.append(w2[param.res_exc_nr + i])
    return (w_exc, w_inh)
class param:
    """Global experiment parameters (a class used as a plain namespace)."""
    seed = 8658764              # Seed for reproduction of random numbers
    # FIX: the seed was declared "for reproduction" but never passed to
    # NumpyRNG(); pass it so runs are actually reproducible.
    rng = NumpyRNG(seed=seed)
    input_nr = 9                # Number of input neurons
    readout_nr = 2              # Number of readout neurons
    reservoir_nr = 50           # Number of reservoir neurons
    simulation_time = 19.0      # Simulation time for each input
    dt = 1                      # Timestep in simulation
    res_pconn = 0.1             # sparse connection probability for reservoir
    images_train_nr = 9         # Number of training images; must be a factor of 3
    images_test_nr = 9          # Number of test images
    images_train = generate_labeledImages(images_train_nr)
    images_test = generate_labeledImages(images_test_nr)
    # 80/20 excitatory/inhibitory split when the network distinguishes them
    res_exc_nr = int(math.ceil(reservoir_nr * 0.8))    # Number of excitatory neurons
    res_inh_nr = int(math.floor(reservoir_nr * 0.2))   # Number of inhibitory neurons
    print('exc:', res_exc_nr)
| [
"numpy.identity",
"numpy.mean",
"pyNN.random.NumpyRNG",
"numpy.convolve",
"math.ceil",
"math.floor",
"numpy.size",
"numpy.sum",
"sklearn.linear_model.LinearRegression"
] | [((558, 577), 'numpy.mean', 'np.mean', (['gauss_rate'], {}), '(gauss_rate)\n', (565, 577), True, 'import numpy as np\n'), ((1862, 1880), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1878, 1880), False, 'from sklearn.linear_model import LinearRegression\n'), ((1975, 1993), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1991, 1993), False, 'from sklearn.linear_model import LinearRegression\n'), ((3902, 3912), 'pyNN.random.NumpyRNG', 'NumpyRNG', ([], {}), '()\n', (3910, 3912), False, 'from pyNN.random import NumpyRNG\n'), ((479, 528), 'numpy.convolve', 'np.convolve', (['spikes', 'gaussian_kernel'], {'mode': '"""same"""'}), "(spikes, gaussian_kernel, mode='same')\n", (490, 528), True, 'import numpy as np\n'), ((1841, 1851), 'numpy.size', 'np.size', (['X'], {}), '(X)\n', (1848, 1851), True, 'import numpy as np\n'), ((4561, 4590), 'math.ceil', 'math.ceil', (['(reservoir_nr * 0.8)'], {}), '(reservoir_nr * 0.8)\n', (4570, 4590), False, 'import math\n'), ((4640, 4670), 'math.floor', 'math.floor', (['(reservoir_nr * 0.2)'], {}), '(reservoir_nr * 0.2)\n', (4650, 4670), False, 'import math\n'), ((433, 456), 'numpy.sum', 'np.sum', (['gaussian_kernel'], {}), '(gaussian_kernel)\n', (439, 456), True, 'import numpy as np\n'), ((2341, 2372), 'numpy.identity', 'np.identity', (['param.reservoir_nr'], {}), '(param.reservoir_nr)\n', (2352, 2372), True, 'import numpy as np\n'), ((2531, 2562), 'numpy.identity', 'np.identity', (['param.reservoir_nr'], {}), '(param.reservoir_nr)\n', (2542, 2562), True, 'import numpy as np\n'), ((3029, 3077), 'numpy.identity', 'np.identity', (['(param.res_exc_nr + param.res_inh_nr)'], {}), '(param.res_exc_nr + param.res_inh_nr)\n', (3040, 3077), True, 'import numpy as np\n'), ((3234, 3282), 'numpy.identity', 'np.identity', (['(param.res_exc_nr + param.res_inh_nr)'], {}), '(param.res_exc_nr + param.res_inh_nr)\n', (3245, 3282), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Flask app models."""
import os.path
import datetime
from flask import url_for
from flask_login import UserMixin
from sqlalchemy.sql.expression import and_
from werkzeug.security import generate_password_hash, check_password_hash
import hashlib
import qrcode
from app import db, login, whooshee
@whooshee.register_model('username', 'first_name', 'last_name', 'nickname')
class User(UserMixin, db.Model):
    """User model."""

    id = db.Column(db.Integer, primary_key=True)

    # Login info
    username = db.Column(db.String(64), index=True, unique=True,
                         nullable=False)
    email = db.Column(db.String(120), index=True, unique=True, nullable=False)
    password_hash = db.Column(db.String(128), nullable=False)
    qrcode_hash = db.Column(db.String(128), nullable=False)

    # Account type
    is_observer = db.Column(db.Boolean, default=False, nullable=False)
    is_customer = db.Column(db.Boolean, default=False, nullable=False)
    is_bartender = db.Column(db.Boolean, default=False, nullable=False)
    is_admin = db.Column(db.Boolean, default=False, nullable=False)

    # Personal info
    first_name = db.Column(db.String(64), index=True, nullable=False)
    last_name = db.Column(db.String(64), index=True, nullable=False)
    nickname = db.Column(db.String(64), index=True)
    # FIX: pass the callable itself, not its result. The original used
    # default=datetime.datetime.today(), which is evaluated once at import
    # time, freezing the default birthdate at server start.
    birthdate = db.Column(db.Date, default=datetime.date.today,
                          nullable=False)
    grad_class = db.Column(db.Integer, index=True, default=0, nullable=False)

    # Account info
    balance = db.Column(db.Float, default=0.0, nullable=False)
    last_drink = db.Column(db.DateTime, default=None, nullable=True)
    transactions = db.relationship('Transaction', backref='client',
                                   lazy='dynamic')
    deposit = db.Column(db.Boolean, default=False)

    def __repr__(self):
        """Print user's username when printing an user object."""
        return '<User {}>'.format(self.username)

    def set_password(self, password):
        """Set user password hash."""
        self.password_hash = generate_password_hash(password)

    def set_qrcode(self):
        """Set user QR code hash and (re)generate the QR code image file."""
        # Hash the current timestamp so each generated code is unique.
        self.qrcode_hash = \
            generate_password_hash(str(datetime.datetime.utcnow()))

        # Create QR code object
        qr = qrcode.QRCode(
            version=1,
            error_correction=qrcode.constants.ERROR_CORRECT_H,
            box_size=10,
            border=0,
        )
        qr.add_data(self.qrcode_hash)
        qr.make(fit=True)

        # md5 encode qrcode_hash to get jpg filename
        qrcode_name = hashlib.md5()
        qrcode_name.update(self.qrcode_hash.encode('utf-8'))

        # Create QR code image and save it to static folder
        img = qr.make_image()
        img = img.resize((160, 160))
        img.save('app/static/img/qr/'+qrcode_name.hexdigest()+'.jpg')

    def check_password(self, password):
        """Check password against stored hash."""
        return check_password_hash(self.password_hash, password)

    def avatar(self):
        """Return url for avatar file, or a placeholder when none exists."""
        avatar_path = 'img/avatar/'+str(self.grad_class)+'/'+self.username
        if os.path.isfile(os.path.join('app', 'static', avatar_path+'.jpg')):
            avatar_filename = url_for('static', filename=avatar_path+'.jpg')
            return avatar_filename
        else:
            return url_for('static',
                           filename='img/avatar/avatar_placeholder.png')

    def qr(self):
        """Return url for qr code file, or None if it has not been generated."""
        # md5 encode qrcode_hash to get jpg filename
        qrcode_name = hashlib.md5()
        qrcode_name.update(self.qrcode_hash.encode('utf-8'))
        qr_path = os.path.join('img', 'qr',
                               qrcode_name.hexdigest()+'.jpg')
        if os.path.isfile(os.path.join('app', 'static', qr_path)):
            qr_filename = url_for('static', filename=qr_path)
            return qr_filename
        return None

    def can_buy(self, item):
        """Return True if the user may buy ``item``, else a reason string.

        Checks, in order: deposit given, item selected, stock left, legal
        drinking age, daily alcoholic-drink limit, sufficient balance.
        """
        if not self.deposit:
            return "{} {} hasn't given a deposit.".\
                format(self.first_name, self.last_name)
        if not item:
            return 'No item selected.'
        if (item.is_quantifiable and item.quantity <= 0):
            return 'No {} left.'.format(item.name)

        # Get current day start: a "day" runs from 06:00 to 06:00 so that
        # late-night purchases count towards the previous calendar day.
        today = datetime.datetime.today()
        yesterday = today - datetime.timedelta(days=1)
        if today.hour < 6:
            current_day_start = datetime.\
                datetime(year=yesterday.year, month=yesterday.month,
                         day=yesterday.day, hour=6)
        else:
            current_day_start = datetime.\
                datetime(year=today.year, month=today.month,
                         day=today.day, hour=6)

        # Get global app settings
        minimum_legal_age = \
            int(GlobalSetting.query.
                filter_by(key='MINIMUM_LEGAL_AGE').first().value)
        max_alcoholic_drinks_per_day = \
            int(GlobalSetting.query.
                filter_by(key='MAX_DAILY_ALCOHOLIC_DRINKS_PER_USER').
                first().value)

        # Get user age (the boolean term subtracts one year when this
        # year's birthday has not yet been reached)
        age = today.year - self.birthdate.year - \
            ((today.month, today.day) <
             (self.birthdate.month, self.birthdate.day))

        # Get user daily alcoholic drinks (reverted transactions excluded)
        nb_alcoholic_drinks = self.transactions.\
            filter_by(is_reverted=False).\
            filter(and_(Transaction.item.has(is_alcohol=True),
                        Transaction.date > current_day_start)).count()

        if item.is_alcohol and age < minimum_legal_age:
            return "{} {} isn't old enough, the minimum legal age being {}.".\
                format(self.first_name, self.last_name,
                       minimum_legal_age)
        elif item.is_alcohol and nb_alcoholic_drinks >= \
                max_alcoholic_drinks_per_day:
            return '{} {} has reached the limit of {} drinks per night.'.\
                format(self.first_name, self.last_name,
                       max_alcoholic_drinks_per_day)
        elif self.balance < item.price:
            return "{} {} doesn't have enough funds to buy {}.".\
                format(self.first_name, self.last_name, item.name)
        else:
            return True
@login.user_loader
def load_user(id):
    """Return user from id."""
    # The id arrives as a string from the session, hence the int() cast.
    return User.query.get(int(id))
class Item(db.Model):
    """Item model: a product that can be bought through a Transaction."""

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), index=True, unique=True, nullable=False)
    # True if the item contains alcohol (age and daily-limit checks apply,
    # see User.can_buy).
    is_alcohol = db.Column(db.Boolean)
    price = db.Column(db.Float, nullable=False)
    # True if stock is tracked through 'quantity'.
    is_quantifiable = db.Column(db.Boolean)
    quantity = db.Column(db.Integer, default=0)
    # NOTE(review): presumably highlights the item in the UI -- confirm
    # against the templates.
    is_favorite = db.Column(db.Boolean, default=False)
    transactions = db.relationship('Transaction', backref='item',
                                   lazy='dynamic')

    def __repr__(self):
        """Print item's name when printing an item object."""
        return '<Item {}>'.format(self.name)
class Transaction(db.Model):
    """Transaction model: a top-up, purchase, or revert of another entry."""

    id = db.Column(db.Integer, primary_key=True)
    # True if the transaction has been reverted. In this case, won't ever go
    # back to False
    is_reverted = db.Column(db.Boolean, default=False)
    # Creation time; datetime.datetime.utcnow is passed as a callable so it
    # is evaluated per row at insert time.
    date = db.Column(db.DateTime, index=True, default=datetime.datetime.utcnow,
                     nullable=False)
    # The barman who made the transaction
    barman = db.Column(db.String(64), index=True, nullable=False)
    # Not NULL if type is 'Pay <Item>' or 'Top up'
    client_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    # Not NULL if type is 'Pay <Item>'
    item_id = db.Column(db.Integer, db.ForeignKey('item.id'))
    # type can be 'Top up', 'Pay <Item>' or 'Revert #<id>'
    type = db.Column(db.String(64), index=True, nullable=False)
    # Signed amount by which the client's balance changed.
    balance_change = db.Column(db.Float)

    def __repr__(self):
        """Print transaction's date when printing a transaction object."""
        return '<Transaction {}>'.format(self.date)
class GlobalSetting(db.Model):
    """App global settings model (key/value pairs, e.g. MINIMUM_LEGAL_AGE)."""

    id = db.Column(db.Integer, primary_key=True)
    # Human-readable description of the setting.
    name = db.Column(db.String(128), nullable=False)
    # Lookup key used in queries (see User.can_buy).
    key = db.Column(db.String(64), nullable=False)
    value = db.Column(db.Integer, default=0)

    def __repr__(self):
        """Print setting's key when printing a global setting object."""
        return '<Setting {}>'.format(self.key)
| [
"datetime.datetime",
"qrcode.QRCode",
"hashlib.md5",
"datetime.datetime.utcnow",
"app.db.String",
"flask.url_for",
"datetime.timedelta",
"werkzeug.security.generate_password_hash",
"app.db.Column",
"datetime.datetime.today",
"app.whooshee.register_model",
"app.db.ForeignKey",
"app.db.relatio... | [((324, 398), 'app.whooshee.register_model', 'whooshee.register_model', (['"""username"""', '"""first_name"""', '"""last_name"""', '"""nickname"""'], {}), "('username', 'first_name', 'last_name', 'nickname')\n", (347, 398), False, 'from app import db, login, whooshee\n'), ((464, 503), 'app.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (473, 503), False, 'from app import db, login, whooshee\n'), ((867, 919), 'app.db.Column', 'db.Column', (['db.Boolean'], {'default': '(False)', 'nullable': '(False)'}), '(db.Boolean, default=False, nullable=False)\n', (876, 919), False, 'from app import db, login, whooshee\n'), ((938, 990), 'app.db.Column', 'db.Column', (['db.Boolean'], {'default': '(False)', 'nullable': '(False)'}), '(db.Boolean, default=False, nullable=False)\n', (947, 990), False, 'from app import db, login, whooshee\n'), ((1010, 1062), 'app.db.Column', 'db.Column', (['db.Boolean'], {'default': '(False)', 'nullable': '(False)'}), '(db.Boolean, default=False, nullable=False)\n', (1019, 1062), False, 'from app import db, login, whooshee\n'), ((1078, 1130), 'app.db.Column', 'db.Column', (['db.Boolean'], {'default': '(False)', 'nullable': '(False)'}), '(db.Boolean, default=False, nullable=False)\n', (1087, 1130), False, 'from app import db, login, whooshee\n'), ((1472, 1532), 'app.db.Column', 'db.Column', (['db.Integer'], {'index': '(True)', 'default': '(0)', 'nullable': '(False)'}), '(db.Integer, index=True, default=0, nullable=False)\n', (1481, 1532), False, 'from app import db, login, whooshee\n'), ((1567, 1615), 'app.db.Column', 'db.Column', (['db.Float'], {'default': '(0.0)', 'nullable': '(False)'}), '(db.Float, default=0.0, nullable=False)\n', (1576, 1615), False, 'from app import db, login, whooshee\n'), ((1633, 1684), 'app.db.Column', 'db.Column', (['db.DateTime'], {'default': 'None', 'nullable': '(True)'}), '(db.DateTime, default=None, nullable=True)\n', (1642, 1684), False, 'from 
app import db, login, whooshee\n'), ((1704, 1768), 'app.db.relationship', 'db.relationship', (['"""Transaction"""'], {'backref': '"""client"""', 'lazy': '"""dynamic"""'}), "('Transaction', backref='client', lazy='dynamic')\n", (1719, 1768), False, 'from app import db, login, whooshee\n'), ((1818, 1854), 'app.db.Column', 'db.Column', (['db.Boolean'], {'default': '(False)'}), '(db.Boolean, default=False)\n', (1827, 1854), False, 'from app import db, login, whooshee\n'), ((6586, 6625), 'app.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (6595, 6625), False, 'from app import db, login, whooshee\n'), ((6722, 6743), 'app.db.Column', 'db.Column', (['db.Boolean'], {}), '(db.Boolean)\n', (6731, 6743), False, 'from app import db, login, whooshee\n'), ((6757, 6792), 'app.db.Column', 'db.Column', (['db.Float'], {'nullable': '(False)'}), '(db.Float, nullable=False)\n', (6766, 6792), False, 'from app import db, login, whooshee\n'), ((6816, 6837), 'app.db.Column', 'db.Column', (['db.Boolean'], {}), '(db.Boolean)\n', (6825, 6837), False, 'from app import db, login, whooshee\n'), ((6853, 6885), 'app.db.Column', 'db.Column', (['db.Integer'], {'default': '(0)'}), '(db.Integer, default=0)\n', (6862, 6885), False, 'from app import db, login, whooshee\n'), ((6905, 6941), 'app.db.Column', 'db.Column', (['db.Boolean'], {'default': '(False)'}), '(db.Boolean, default=False)\n', (6914, 6941), False, 'from app import db, login, whooshee\n'), ((6962, 7024), 'app.db.relationship', 'db.relationship', (['"""Transaction"""'], {'backref': '"""item"""', 'lazy': '"""dynamic"""'}), "('Transaction', backref='item', lazy='dynamic')\n", (6977, 7024), False, 'from app import db, login, whooshee\n'), ((7262, 7301), 'app.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (7271, 7301), False, 'from app import db, login, whooshee\n'), ((7418, 7454), 'app.db.Column', 'db.Column', (['db.Boolean'], 
{'default': '(False)'}), '(db.Boolean, default=False)\n', (7427, 7454), False, 'from app import db, login, whooshee\n'), ((7467, 7555), 'app.db.Column', 'db.Column', (['db.DateTime'], {'index': '(True)', 'default': 'datetime.datetime.utcnow', 'nullable': '(False)'}), '(db.DateTime, index=True, default=datetime.datetime.utcnow,\n nullable=False)\n', (7476, 7555), False, 'from app import db, login, whooshee\n'), ((8045, 8064), 'app.db.Column', 'db.Column', (['db.Float'], {}), '(db.Float)\n', (8054, 8064), False, 'from app import db, login, whooshee\n'), ((8297, 8336), 'app.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (8306, 8336), False, 'from app import db, login, whooshee\n'), ((8454, 8486), 'app.db.Column', 'db.Column', (['db.Integer'], {'default': '(0)'}), '(db.Integer, default=0)\n', (8463, 8486), False, 'from app import db, login, whooshee\n'), ((547, 560), 'app.db.String', 'db.String', (['(64)'], {}), '(64)\n', (556, 560), False, 'from app import db, login, whooshee\n'), ((650, 664), 'app.db.String', 'db.String', (['(120)'], {}), '(120)\n', (659, 664), False, 'from app import db, login, whooshee\n'), ((737, 751), 'app.db.String', 'db.String', (['(128)'], {}), '(128)\n', (746, 751), False, 'from app import db, login, whooshee\n'), ((797, 811), 'app.db.String', 'db.String', (['(128)'], {}), '(128)\n', (806, 811), False, 'from app import db, login, whooshee\n'), ((1179, 1192), 'app.db.String', 'db.String', (['(64)'], {}), '(64)\n', (1188, 1192), False, 'from app import db, login, whooshee\n'), ((1248, 1261), 'app.db.String', 'db.String', (['(64)'], {}), '(64)\n', (1257, 1261), False, 'from app import db, login, whooshee\n'), ((1316, 1329), 'app.db.String', 'db.String', (['(64)'], {}), '(64)\n', (1325, 1329), False, 'from app import db, login, whooshee\n'), ((2101, 2133), 'werkzeug.security.generate_password_hash', 'generate_password_hash', (['password'], {}), '(password)\n', (2123, 2133), False, 'from 
werkzeug.security import generate_password_hash, check_password_hash\n'), ((2341, 2443), 'qrcode.QRCode', 'qrcode.QRCode', ([], {'version': '(1)', 'error_correction': 'qrcode.constants.ERROR_CORRECT_H', 'box_size': '(10)', 'border': '(0)'}), '(version=1, error_correction=qrcode.constants.ERROR_CORRECT_H,\n box_size=10, border=0)\n', (2354, 2443), False, 'import qrcode\n'), ((2639, 2652), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (2650, 2652), False, 'import hashlib\n'), ((3018, 3067), 'werkzeug.security.check_password_hash', 'check_password_hash', (['self.password_hash', 'password'], {}), '(self.password_hash, password)\n', (3037, 3067), False, 'from werkzeug.security import generate_password_hash, check_password_hash\n'), ((3659, 3672), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (3670, 3672), False, 'import hashlib\n'), ((4462, 4487), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (4485, 4487), False, 'import datetime\n'), ((6648, 6661), 'app.db.String', 'db.String', (['(64)'], {}), '(64)\n', (6657, 6661), False, 'from app import db, login, whooshee\n'), ((7639, 7652), 'app.db.String', 'db.String', (['(64)'], {}), '(64)\n', (7648, 7652), False, 'from app import db, login, whooshee\n'), ((7772, 7796), 'app.db.ForeignKey', 'db.ForeignKey', (['"""user.id"""'], {}), "('user.id')\n", (7785, 7796), False, 'from app import db, login, whooshee\n'), ((7874, 7898), 'app.db.ForeignKey', 'db.ForeignKey', (['"""item.id"""'], {}), "('item.id')\n", (7887, 7898), False, 'from app import db, login, whooshee\n'), ((7981, 7994), 'app.db.String', 'db.String', (['(64)'], {}), '(64)\n', (7990, 7994), False, 'from app import db, login, whooshee\n'), ((8359, 8373), 'app.db.String', 'db.String', (['(128)'], {}), '(128)\n', (8368, 8373), False, 'from app import db, login, whooshee\n'), ((8411, 8424), 'app.db.String', 'db.String', (['(64)'], {}), '(64)\n', (8420, 8424), False, 'from app import db, login, whooshee\n'), ((1386, 1411), 
'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (1409, 1411), False, 'import datetime\n'), ((3316, 3364), 'flask.url_for', 'url_for', (['"""static"""'], {'filename': "(avatar_path + '.jpg')"}), "('static', filename=avatar_path + '.jpg')\n", (3323, 3364), False, 'from flask import url_for\n'), ((3431, 3494), 'flask.url_for', 'url_for', (['"""static"""'], {'filename': '"""img/avatar/avatar_placeholder.png"""'}), "('static', filename='img/avatar/avatar_placeholder.png')\n", (3438, 3494), False, 'from flask import url_for\n'), ((3934, 3969), 'flask.url_for', 'url_for', (['"""static"""'], {'filename': 'qr_path'}), "('static', filename=qr_path)\n", (3941, 3969), False, 'from flask import url_for\n'), ((4516, 4542), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (4534, 4542), False, 'import datetime\n'), ((4602, 4695), 'datetime.datetime', 'datetime.datetime', ([], {'year': 'yesterday.year', 'month': 'yesterday.month', 'day': 'yesterday.day', 'hour': '(6)'}), '(year=yesterday.year, month=yesterday.month, day=yesterday\n .day, hour=6)\n', (4619, 4695), False, 'import datetime\n'), ((4780, 4856), 'datetime.datetime', 'datetime.datetime', ([], {'year': 'today.year', 'month': 'today.month', 'day': 'today.day', 'hour': '(6)'}), '(year=today.year, month=today.month, day=today.day, hour=6)\n', (4797, 4856), False, 'import datetime\n'), ((2266, 2292), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (2290, 2292), False, 'import datetime\n')] |
#
"""
Base class for encoders.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from texar.module_base import ModuleBase
__all__ = [
"EncoderBase"
]
class EncoderBase(ModuleBase):
    """Common ancestor of every encoder class."""

    def __init__(self, hparams=None):
        ModuleBase.__init__(self, hparams)

    @staticmethod
    def default_hparams():
        """Return the default hyperparameter dictionary for encoders."""
        defaults = {"name": "encoder"}
        return defaults

    def _build(self, inputs, *args, **kwargs):
        """Encode ``inputs`` and return the encoding results.

        Subclasses must override this method.

        Args:
            inputs: data to encode.
            *args: positional extras forwarded by the caller.
            **kwargs: keyword extras forwarded by the caller.

        Returns:
            Encoding results.

        Raises:
            NotImplementedError: always, in this base class.
        """
        raise NotImplementedError
| [
"texar.module_base.ModuleBase.__init__"
] | [((359, 393), 'texar.module_base.ModuleBase.__init__', 'ModuleBase.__init__', (['self', 'hparams'], {}), '(self, hparams)\n', (378, 393), False, 'from texar.module_base import ModuleBase\n')] |
""" NOTICE: A Custom Dataset SHOULD BE PROVIDED
Created: May 02,2019 - <NAME>
Revised: May 07,2019 - <NAME>
"""
import os
import numpy as np
from PIL import Image
import torchvision.transforms as transforms
import inception_preprocessing
from torch.utils.data import Dataset
__all__ = ['CustomDataset']
config = {
    # Root directory that contains the image splits; e.g. the train/val/test
    # set should be located in os.path.join(config['datapath'], 'train'/'val'/'test')
    'datapath': '/data/shaozl/WS-DAN.PyTorch/dataset',
}
class My_transform(object):
    """Callable wrapper so the [-1, 1] rescaling step can sit inside a
    ``transforms.Compose`` pipeline."""

    def __call__(self, img):
        return add_and_mul(img)


def add_and_mul(image):
    """Map values from [0, 1] to [-1, 1]: (x - 0.5) * 2."""
    return (image - 0.5) * 2.0
class CustomDataset(Dataset):
    """
    # Description:
        Basic class for retrieving images and labels.
        Train phase applies random crop / flip / color-jitter augmentation;
        val and test phases use a deterministic resize + center crop.
        Tensors are rescaled from [0, 1] to [-1, 1] by My_transform.

    # Member Functions:
        __init__(self, phase, shape): initializes a dataset
            phase: a string in ['train', 'val', 'test']
            shape: output shape/size of an image
        __getitem__(self, item): returns (image, label) for train/val,
            or (image, filename) for test
            item: the index of image in the whole dataset
        __len__(self): returns the length of dataset
    """
    def __init__(self, phase='train', shape=(512, 512)):
        self.create_lable_map()
        assert phase in ['train', 'val', 'test']
        self.phase = phase
        self.data_path = os.path.join(config['datapath'], phase)
        self.data_list = os.listdir(self.data_path)
        self.shape = shape
        self.config = config
        if self.phase=='train':
            self.transform = transforms.Compose([
                # Resize to crop_size / 0.875 before cropping (Inception recipe).
                transforms.Resize(size=(int(self.shape[0]*1.0/0.875), int(self.shape[1]*1.0/0.875))),
                transforms.RandomCrop((self.shape[0], self.shape[1])),
                transforms.RandomHorizontalFlip(p=0.5),
                transforms.ColorJitter(brightness=0.125, contrast=0.5),
                transforms.ToTensor(),
                My_transform()
                # transforms.ToTensor(),
                # transforms.Normalize(mean=[0.4729, 0.4871, 0.4217], std=[0.1589, 0.1598, 0.1681])
                # tensor([0.4856, 0.4994, 0.4324]) tensor([0.1784, 0.1778, 0.1895]) mean and std without augmentation
                # tensor([0.4729, 0.4871, 0.4217]) tensor([0.1589, 0.1598, 0.1681]) mean and std with augmentation
            ])
        else:
            self.transform = transforms.Compose([
                # transforms.Resize(size=(self.shape[0], self.shape[1])),
                transforms.Resize(size=(int(self.shape[0] * 1.0 / 0.875), int(self.shape[1] * 1.0 / 0.875))),
                transforms.CenterCrop((self.shape[0], self.shape[1])),
                transforms.ToTensor(),
                My_transform()
                # transforms.Normalize(mean=[0.4862, 0.4998, 0.4311], std=[0.1796, 0.1781, 0.1904])
                # tensor([0.4862, 0.4998, 0.4311]) tensor([0.1796, 0.1781, 0.1904]) mean and std without augmentation
            ])
    def __getitem__(self, item):
        image = Image.open(os.path.join(self.data_path, self.data_list[item])).convert('RGB')  # (C, H, W)
        image = self.transform(image)
        assert image.size(1) == self.shape[0] and image.size(2) == self.shape[1]
        if self.phase != 'test':
            # filename of image should have 'id_label.jpg/png' form;
            # the label index is the position of the filename's class prefix
            # in the class-name list built by create_lable_map().
            label = int(self.class_name.index(self.data_list[item].rsplit('_',2)[0].lower()))  # label
            return image, label
        else:
            # filename of image should have 'id.jpg/png' form, and simply return filename in case of 'test'
            return image, self.data_list[item]
    def __len__(self):
        return len(self.data_list)
    def create_lable_map(self):
        # NOTE(review): hard-coded absolute path to the CUB-200-2011 class
        # list -- consider moving it into `config`; confirm it exists on
        # the deployment machine.
        with open('/data/shaozl/WS-DAN.PyTorch/CUB_200_2011/classes.txt') as f:
            class_index_and_name = f.readlines()
        # Split each line on the first space into (index, dotted name), then
        # drop the numeric 'NNN.' prefix from the name.
        class_index_and_name = [i.strip().lower() for i in class_index_and_name]
        self.class_index = [i.split(' ', 1)[0] for i in class_index_and_name]
        self.class_name = [i.split(' ', 1)[1].split('.',1)[1] for i in class_index_and_name]
| [
"torchvision.transforms.CenterCrop",
"os.listdir",
"os.path.join",
"torchvision.transforms.RandomHorizontalFlip",
"torchvision.transforms.RandomCrop",
"torchvision.transforms.ColorJitter",
"torchvision.transforms.ToTensor"
] | [((1405, 1444), 'os.path.join', 'os.path.join', (["config['datapath']", 'phase'], {}), "(config['datapath'], phase)\n", (1417, 1444), False, 'import os\n'), ((1470, 1496), 'os.listdir', 'os.listdir', (['self.data_path'], {}), '(self.data_path)\n', (1480, 1496), False, 'import os\n'), ((1755, 1808), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(self.shape[0], self.shape[1])'], {}), '((self.shape[0], self.shape[1]))\n', (1776, 1808), True, 'import torchvision.transforms as transforms\n'), ((1826, 1864), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {'p': '(0.5)'}), '(p=0.5)\n', (1857, 1864), True, 'import torchvision.transforms as transforms\n'), ((1882, 1936), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', ([], {'brightness': '(0.125)', 'contrast': '(0.5)'}), '(brightness=0.125, contrast=0.5)\n', (1904, 1936), True, 'import torchvision.transforms as transforms\n'), ((1954, 1975), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1973, 1975), True, 'import torchvision.transforms as transforms\n'), ((2623, 2676), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(self.shape[0], self.shape[1])'], {}), '((self.shape[0], self.shape[1]))\n', (2644, 2676), True, 'import torchvision.transforms as transforms\n'), ((2694, 2715), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2713, 2715), True, 'import torchvision.transforms as transforms\n'), ((3022, 3072), 'os.path.join', 'os.path.join', (['self.data_path', 'self.data_list[item]'], {}), '(self.data_path, self.data_list[item])\n', (3034, 3072), False, 'import os\n')] |
import os
import sys
import argparse
import time
parser = argparse.ArgumentParser()
parser.add_argument('-gpu', default='0', type=str)
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
import torchvision
from torchvision import datasets, transforms
#import videotransforms
from torchsummary import summary
import numpy as np
import pkbar
from apmeter import APMeter
import x3d_fine
from charades_fine import Charades
from charades_fine import mt_collate_fn as collate_fn
from transforms.spatial_transforms import Compose, Normalize, RandomHorizontalFlip, MultiScaleRandomCrop, MultiScaleRandomCropMultigrid, ToTensor, CenterCrop, CenterCropScaled
from transforms.temporal_transforms import TemporalRandomCrop
from transforms.target_transforms import ClassLabel
import warnings
warnings.filterwarnings("ignore")
BS = 1
BS_UPSCALE = 1
INIT_LR = 0.02 * BS_UPSCALE
X3D_VERSION = 'M'
CHARADES_MEAN = [0.413, 0.368, 0.338]
CHARADES_STD = [0.131, 0.125, 0.132]
CHARADES_TR_SIZE = 7900
CHARADES_VAL_SIZE = 1850
CHARADES_ROOT = '/data/add_disk0/kumarak/Charades_v1_rgb'
CHARADES_ANNO = 'data/charades.json'
FINE_SAVE_DIR = '/nfs/bigcornea/add_disk0/kumarak/fine_spatial7x7'
# pre-extract fine features and save here, to reduce compute req
# MAKE DIRS FINE_SAVE_DIR/['layer1', 'layer2', 'layer3', 'layer4', 'conv5']
feat_keys = ['layer1', 'layer2', 'layer3', 'layer4', 'conv5']
for k in feat_keys:
if not os.path.exists(os.path.join(FINE_SAVE_DIR,k)):
os.makedirs(os.path.join(FINE_SAVE_DIR,k))
# 0.00125 * BS_UPSCALE --> 80 epochs warmup 2000
def run(init_lr=INIT_LR, warmup_steps=0, max_epochs=100, root=CHARADES_ROOT,
train_split=CHARADES_ANNO, batch_size=BS*BS_UPSCALE, frames=80, save_dir= FINE_SAVE_DIR):
crop_size = {'S':160, 'M':224, 'XL':312}[X3D_VERSION]
resize_size = {'S':[180.,225.], 'M':[256.,256.], 'XL':[360.,450.]}[X3D_VERSION] #[256.,320.]
gamma_tau = {'S':6, 'M':5*1, 'XL':5}[X3D_VERSION] # 5
load_steps = st_steps = steps = 0
epochs = 0
num_steps_per_update = 1
cur_iterations = steps * num_steps_per_update
iterations_per_epoch = CHARADES_TR_SIZE//(batch_size*1)
val_iterations_per_epoch = CHARADES_VAL_SIZE//(batch_size)
max_steps = iterations_per_epoch * max_epochs
val_spatial_transforms = Compose([CenterCropScaled(crop_size),
ToTensor(255),
Normalize(CHARADES_MEAN, CHARADES_STD)])
# SET 'TESTING' FOR BOTH, TO EXTRACT
dataset = Charades(train_split, 'testing', root, val_spatial_transforms,
task='loc', frames=frames, gamma_tau=gamma_tau, crops=1, extract_feat=True)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True,
num_workers=8, pin_memory=True, collate_fn=collate_fn)
val_dataset = Charades(train_split, 'testing', root, val_spatial_transforms,
task='loc', frames=frames, gamma_tau=gamma_tau, crops=1, extract_feat=True)
val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=False,
num_workers=8, pin_memory=True, collate_fn=collate_fn)
dataloaders = {'train': dataloader, 'val': val_dataloader}
datasets = {'train': dataset, 'val': val_dataset}
print('train',len(datasets['train']),'val',len(datasets['val']))
print('Total iterations:', max_steps, 'Total epochs:', max_epochs)
print('datasets created')
fine_net = x3d_fine.generate_model(x3d_version=X3D_VERSION, n_classes=400, n_input_channels=3, task='loc',
dropout=0.5, base_bn_splits=1, global_tower=True)
fine_net.replace_logits(157)
load_ckpt = torch.load('models/fine_charades_039000_SAVE.pt')
state = fine_net.state_dict()
state.update(load_ckpt['model_state_dict'])
fine_net.load_state_dict(state)
fine_net.cuda()
fine_net = nn.DataParallel(fine_net)
print('model loaded')
lr = init_lr
print ('LR:%f'%lr)
optimizer = optim.SGD(fine_net.parameters(), lr=lr, momentum=0.9, weight_decay=1e-5)
lr_sched = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', patience=3, factor=0.1, verbose=True)
if steps>0:
optimizer.load_state_dict(load_ckpt['optimizer_state_dict'])
lr_sched.load_state_dict(load_ckpt['scheduler_state_dict'])
criterion = nn.BCEWithLogitsLoss()
val_apm = APMeter()
tr_apm = APMeter()
while epochs < max_epochs:
print ('Step {} Epoch {}'.format(steps, epochs))
print ('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train']+['val']:
bar_st = iterations_per_epoch if phase == 'train' else val_iterations_per_epoch
bar = pkbar.Pbar(name='update: ', target=bar_st)
fine_net.train(False) # Set model to evaluate mode
# FOR EVAL AGGREGATE BN STATS
_ = fine_net.module.aggregate_sub_bn_stats()
torch.autograd.set_grad_enabled(False)
tot_loss = 0.0
tot_loc_loss = 0.0
tot_cls_loss = 0.0
tot_dis_loss = 0.0
tot_acc = 0.0
tot_corr = 0.0
tot_dat = 0.0
num_iter = 0
optimizer.zero_grad()
# Iterate over data.
print(phase)
for i,data in enumerate(dataloaders[phase]):
#for data in dataloaders[phase]:
num_iter += 1
bar.update(i)
inputs, labels, masks, name = data
b,n,c,t,h,w = inputs.shape
inputs = inputs.view(b*n,c,t,h,w)
inputs = inputs.cuda() # B 3 T W H
tl = labels.size(2)
labels = labels.cuda() # B C TL
masks = masks.cuda() # B TL
valid_t = torch.sum(masks, dim=1).int()
feat,_ = fine_net([inputs, masks]) # N C T 1 1
keys = list(feat.keys())
print(i, name[0], feat[keys[0]].cpu().numpy().shape, feat[keys[1]].cpu().numpy().shape,
feat[keys[2]].cpu().numpy().shape, feat[keys[3]].cpu().numpy().shape, feat[keys[4]].cpu().numpy().shape)
for k in feat:
torch.save(feat[k].data.cpu(), os.path.join(save_dir, k, name[0]))
break
if __name__ == '__main__':
run()
| [
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"pkbar.Pbar",
"argparse.ArgumentParser",
"transforms.spatial_transforms.Normalize",
"torch.load",
"os.path.join",
"torch.nn.DataParallel",
"torch.autograd.set_grad_enabled",
"apmeter.APMeter",
"charades_fine.Charades",
"torch.sum",
"torch.utils.dat... | [((59, 84), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (82, 84), False, 'import argparse\n'), ((974, 1007), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (997, 1007), False, 'import warnings\n'), ((2699, 2841), 'charades_fine.Charades', 'Charades', (['train_split', '"""testing"""', 'root', 'val_spatial_transforms'], {'task': '"""loc"""', 'frames': 'frames', 'gamma_tau': 'gamma_tau', 'crops': '(1)', 'extract_feat': '(True)'}), "(train_split, 'testing', root, val_spatial_transforms, task='loc',\n frames=frames, gamma_tau=gamma_tau, crops=1, extract_feat=True)\n", (2707, 2841), False, 'from charades_fine import Charades\n'), ((2887, 3019), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(8)', 'pin_memory': '(True)', 'collate_fn': 'collate_fn'}), '(dataset, batch_size=batch_size, shuffle=True,\n num_workers=8, pin_memory=True, collate_fn=collate_fn)\n', (2914, 3019), False, 'import torch\n'), ((3067, 3209), 'charades_fine.Charades', 'Charades', (['train_split', '"""testing"""', 'root', 'val_spatial_transforms'], {'task': '"""loc"""', 'frames': 'frames', 'gamma_tau': 'gamma_tau', 'crops': '(1)', 'extract_feat': '(True)'}), "(train_split, 'testing', root, val_spatial_transforms, task='loc',\n frames=frames, gamma_tau=gamma_tau, crops=1, extract_feat=True)\n", (3075, 3209), False, 'from charades_fine import Charades\n'), ((3259, 3397), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['val_dataset'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': '(8)', 'pin_memory': '(True)', 'collate_fn': 'collate_fn'}), '(val_dataset, batch_size=batch_size, shuffle=\n False, num_workers=8, pin_memory=True, collate_fn=collate_fn)\n', (3286, 3397), False, 'import torch\n'), ((3730, 3883), 'x3d_fine.generate_model', 'x3d_fine.generate_model', ([], {'x3d_version': 
'X3D_VERSION', 'n_classes': '(400)', 'n_input_channels': '(3)', 'task': '"""loc"""', 'dropout': '(0.5)', 'base_bn_splits': '(1)', 'global_tower': '(True)'}), "(x3d_version=X3D_VERSION, n_classes=400,\n n_input_channels=3, task='loc', dropout=0.5, base_bn_splits=1,\n global_tower=True)\n", (3753, 3883), False, 'import x3d_fine\n'), ((3963, 4012), 'torch.load', 'torch.load', (['"""models/fine_charades_039000_SAVE.pt"""'], {}), "('models/fine_charades_039000_SAVE.pt')\n", (3973, 4012), False, 'import torch\n'), ((4167, 4192), 'torch.nn.DataParallel', 'nn.DataParallel', (['fine_net'], {}), '(fine_net)\n', (4182, 4192), True, 'import torch.nn as nn\n'), ((4366, 4467), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'optim.lr_scheduler.ReduceLROnPlateau', (['optimizer'], {'mode': '"""min"""', 'patience': '(3)', 'factor': '(0.1)', 'verbose': '(True)'}), "(optimizer, mode='min', patience=3,\n factor=0.1, verbose=True)\n", (4402, 4467), True, 'import torch.optim as optim\n'), ((4634, 4656), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (4654, 4656), True, 'import torch.nn as nn\n'), ((4672, 4681), 'apmeter.APMeter', 'APMeter', ([], {}), '()\n', (4679, 4681), False, 'from apmeter import APMeter\n'), ((4695, 4704), 'apmeter.APMeter', 'APMeter', ([], {}), '()\n', (4702, 4704), False, 'from apmeter import APMeter\n'), ((1613, 1643), 'os.path.join', 'os.path.join', (['FINE_SAVE_DIR', 'k'], {}), '(FINE_SAVE_DIR, k)\n', (1625, 1643), False, 'import os\n'), ((1665, 1695), 'os.path.join', 'os.path.join', (['FINE_SAVE_DIR', 'k'], {}), '(FINE_SAVE_DIR, k)\n', (1677, 1695), False, 'import os\n'), ((2478, 2505), 'transforms.spatial_transforms.CenterCropScaled', 'CenterCropScaled', (['crop_size'], {}), '(crop_size)\n', (2494, 2505), False, 'from transforms.spatial_transforms import Compose, Normalize, RandomHorizontalFlip, MultiScaleRandomCrop, MultiScaleRandomCropMultigrid, ToTensor, CenterCrop, CenterCropScaled\n'), ((2547, 2560), 
'transforms.spatial_transforms.ToTensor', 'ToTensor', (['(255)'], {}), '(255)\n', (2555, 2560), False, 'from transforms.spatial_transforms import Compose, Normalize, RandomHorizontalFlip, MultiScaleRandomCrop, MultiScaleRandomCropMultigrid, ToTensor, CenterCrop, CenterCropScaled\n'), ((2602, 2640), 'transforms.spatial_transforms.Normalize', 'Normalize', (['CHARADES_MEAN', 'CHARADES_STD'], {}), '(CHARADES_MEAN, CHARADES_STD)\n', (2611, 2640), False, 'from transforms.spatial_transforms import Compose, Normalize, RandomHorizontalFlip, MultiScaleRandomCrop, MultiScaleRandomCropMultigrid, ToTensor, CenterCrop, CenterCropScaled\n'), ((5027, 5069), 'pkbar.Pbar', 'pkbar.Pbar', ([], {'name': '"""update: """', 'target': 'bar_st'}), "(name='update: ', target=bar_st)\n", (5037, 5069), False, 'import pkbar\n'), ((5246, 5284), 'torch.autograd.set_grad_enabled', 'torch.autograd.set_grad_enabled', (['(False)'], {}), '(False)\n', (5277, 5284), False, 'import torch\n'), ((6120, 6143), 'torch.sum', 'torch.sum', (['masks'], {'dim': '(1)'}), '(masks, dim=1)\n', (6129, 6143), False, 'import torch\n'), ((6570, 6604), 'os.path.join', 'os.path.join', (['save_dir', 'k', 'name[0]'], {}), '(save_dir, k, name[0])\n', (6582, 6604), False, 'import os\n')] |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'MainWindow.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(330, 210)
MainWindow.setMinimumSize(QtCore.QSize(330, 210))
MainWindow.setMaximumSize(QtCore.QSize(330, 210))
font = QtGui.QFont()
font.setFamily("微软雅黑")
MainWindow.setFont(font)
MainWindow.setToolButtonStyle(QtCore.Qt.ToolButtonIconOnly)
MainWindow.setAnimated(True)
MainWindow.setTabShape(QtWidgets.QTabWidget.Rounded)
MainWindow.setDockOptions(QtWidgets.QMainWindow.AllowTabbedDocks|QtWidgets.QMainWindow.AnimatedDocks)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit.setGeometry(QtCore.QRect(90, 40, 221, 21))
font = QtGui.QFont()
font.setFamily("微软雅黑")
font.setPointSize(9)
self.lineEdit.setFont(font)
self.lineEdit.setText("")
self.lineEdit.setObjectName("lineEdit")
self.lineEdit_2 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_2.setGeometry(QtCore.QRect(90, 70, 221, 20))
font = QtGui.QFont()
font.setFamily("微软雅黑")
font.setPointSize(10)
self.lineEdit_2.setFont(font)
self.lineEdit_2.setText("")
self.lineEdit_2.setEchoMode(QtWidgets.QLineEdit.Password)
self.lineEdit_2.setObjectName("lineEdit_2")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(10, 40, 71, 20))
font = QtGui.QFont()
font.setFamily("微软雅黑")
font.setPointSize(10)
self.label.setFont(font)
self.label.setAlignment(QtCore.Qt.AlignCenter)
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(10, 70, 71, 20))
font = QtGui.QFont()
font.setFamily("微软雅黑")
font.setPointSize(10)
self.label_2.setFont(font)
self.label_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_2.setObjectName("label_2")
self.checkBox = QtWidgets.QCheckBox(self.centralwidget)
self.checkBox.setGeometry(QtCore.QRect(60, 100, 71, 20))
font = QtGui.QFont()
font.setFamily("微软雅黑")
font.setPointSize(9)
self.checkBox.setFont(font)
self.checkBox.setObjectName("checkBox")
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(50, 130, 81, 25))
font = QtGui.QFont()
font.setFamily("微软雅黑")
font.setPointSize(9)
self.pushButton.setFont(font)
self.pushButton.setObjectName("pushButton")
self.checkBox_2 = QtWidgets.QCheckBox(self.centralwidget)
self.checkBox_2.setGeometry(QtCore.QRect(190, 100, 91, 20))
font = QtGui.QFont()
font.setFamily("微软雅黑")
font.setPointSize(9)
self.checkBox_2.setFont(font)
self.checkBox_2.setObjectName("checkBox_2")
self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_2.setGeometry(QtCore.QRect(200, 130, 81, 25))
font = QtGui.QFont()
font.setFamily("微软雅黑")
font.setPointSize(9)
self.pushButton_2.setFont(font)
self.pushButton_2.setObjectName("pushButton_2")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setGeometry(QtCore.QRect(10, 10, 71, 21))
font = QtGui.QFont()
font.setFamily("微软雅黑")
font.setPointSize(10)
self.label_3.setFont(font)
self.label_3.setAlignment(QtCore.Qt.AlignCenter)
self.label_3.setObjectName("label_3")
self.label_4 = QtWidgets.QLabel(self.centralwidget)
self.label_4.setGeometry(QtCore.QRect(160, 10, 141, 20))
font = QtGui.QFont()
font.setFamily("微软雅黑")
self.label_4.setFont(font)
self.label_4.setObjectName("label_4")
self.comboBox = QtWidgets.QComboBox(self.centralwidget)
self.comboBox.setGeometry(QtCore.QRect(90, 10, 61, 20))
self.comboBox.setEditable(False)
self.comboBox.setCurrentText("")
self.comboBox.setObjectName("comboBox")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 330, 23))
self.menubar.setObjectName("menubar")
self.menu = QtWidgets.QMenu(self.menubar)
self.menu.setObjectName("menu")
MainWindow.setMenuBar(self.menubar)
self.statusBar = QtWidgets.QStatusBar(MainWindow)
font = QtGui.QFont()
font.setPointSize(9)
self.statusBar.setFont(font)
self.statusBar.setObjectName("statusBar")
MainWindow.setStatusBar(self.statusBar)
self.menubar.addAction(self.menu.menuAction())
self.retranslateUi(MainWindow)
self.comboBox.setCurrentIndex(-1)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Tsinghua Auth"))
self.label.setText(_translate("MainWindow", "用户名"))
self.label_2.setText(_translate("MainWindow", "密码"))
self.checkBox.setText(_translate("MainWindow", "保存密码"))
self.pushButton.setText(_translate("MainWindow", "登录"))
self.checkBox_2.setText(_translate("MainWindow", "自动断线重连"))
self.pushButton_2.setText(_translate("MainWindow", "断开"))
self.label_3.setText(_translate("MainWindow", "认证服务器"))
self.label_4.setText(_translate("MainWindow", ".tsinghua.edu.cn"))
self.menu.setTitle(_translate("MainWindow", "关于"))
import icon_rc
| [
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtWidgets.QMenu",
"PyQt5.QtGui.QFont",
"PyQt5.QtWidgets.QComboBox",
"PyQt5.QtCore.QMetaObject.connectSlotsByName",
"PyQt5.QtCore.QRect",
"PyQt5.QtWidgets.QStatusBar",
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.QtCore.QSize",
"PyQt5.QtWidgets... | [((521, 534), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (532, 534), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((904, 933), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['MainWindow'], {}), '(MainWindow)\n', (921, 933), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1016, 1055), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1035, 1055), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1136, 1149), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (1147, 1149), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1354, 1393), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1373, 1393), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1476, 1489), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (1487, 1489), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1764, 1800), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1780, 1800), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1877, 1890), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (1888, 1890), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2105, 2141), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (2121, 2141), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2220, 2233), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (2231, 2233), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2457, 2496), 'PyQt5.QtWidgets.QCheckBox', 'QtWidgets.QCheckBox', (['self.centralwidget'], {}), '(self.centralwidget)\n', (2476, 2496), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2577, 2590), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (2588, 2590), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2761, 2802), 
'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (2782, 2802), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2885, 2898), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (2896, 2898), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3075, 3114), 'PyQt5.QtWidgets.QCheckBox', 'QtWidgets.QCheckBox', (['self.centralwidget'], {}), '(self.centralwidget)\n', (3094, 3114), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3198, 3211), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (3209, 3211), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3390, 3431), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (3411, 3431), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3517, 3530), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (3528, 3530), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3710, 3746), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (3726, 3746), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3825, 3838), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (3836, 3838), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4061, 4097), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (4077, 4097), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4178, 4191), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (4189, 4191), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4328, 4367), 'PyQt5.QtWidgets.QComboBox', 'QtWidgets.QComboBox', (['self.centralwidget'], {}), '(self.centralwidget)\n', (4347, 4367), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4641, 4671), 'PyQt5.QtWidgets.QMenuBar', 'QtWidgets.QMenuBar', (['MainWindow'], {}), '(MainWindow)\n', (4659, 4671), False, 'from PyQt5 import QtCore, QtGui, 
QtWidgets\n'), ((4800, 4829), 'PyQt5.QtWidgets.QMenu', 'QtWidgets.QMenu', (['self.menubar'], {}), '(self.menubar)\n', (4815, 4829), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4939, 4971), 'PyQt5.QtWidgets.QStatusBar', 'QtWidgets.QStatusBar', (['MainWindow'], {}), '(MainWindow)\n', (4959, 4971), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4987, 5000), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (4998, 5000), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5310, 5359), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['MainWindow'], {}), '(MainWindow)\n', (5347, 5359), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((424, 446), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(330)', '(210)'], {}), '(330, 210)\n', (436, 446), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((482, 504), 'PyQt5.QtCore.QSize', 'QtCore.QSize', (['(330)', '(210)'], {}), '(330, 210)\n', (494, 504), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1090, 1119), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(90)', '(40)', '(221)', '(21)'], {}), '(90, 40, 221, 21)\n', (1102, 1119), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1430, 1459), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(90)', '(70)', '(221)', '(20)'], {}), '(90, 70, 221, 20)\n', (1442, 1459), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1832, 1860), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(10)', '(40)', '(71)', '(20)'], {}), '(10, 40, 71, 20)\n', (1844, 1860), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2175, 2203), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(10)', '(70)', '(71)', '(20)'], {}), '(10, 70, 71, 20)\n', (2187, 2203), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2531, 2560), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(60)', '(100)', '(71)', '(20)'], {}), '(60, 100, 71, 20)\n', (2543, 2560), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2839, 2868), 
'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(50)', '(130)', '(81)', '(25)'], {}), '(50, 130, 81, 25)\n', (2851, 2868), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3151, 3181), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(190)', '(100)', '(91)', '(20)'], {}), '(190, 100, 91, 20)\n', (3163, 3181), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3470, 3500), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(200)', '(130)', '(81)', '(25)'], {}), '(200, 130, 81, 25)\n', (3482, 3500), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3780, 3808), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(10)', '(10)', '(71)', '(21)'], {}), '(10, 10, 71, 21)\n', (3792, 3808), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4131, 4161), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(160)', '(10)', '(141)', '(20)'], {}), '(160, 10, 141, 20)\n', (4143, 4161), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4402, 4430), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(90)', '(10)', '(61)', '(20)'], {}), '(90, 10, 61, 20)\n', (4414, 4430), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4705, 4732), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(0)', '(0)', '(330)', '(23)'], {}), '(0, 0, 330, 23)\n', (4717, 4732), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n')] |
"""
This file contains code to measure the performance of the system
(c) 2015 Massachusetts Institute of Technology
"""
# Native
import logging
LOGGER = logging.getLogger(__name__)
import time
import re
# CATAN
import catan.db
import catan.globals as G
def log_database_counts(node_dict):
"""
This function writes the number of persons in the database that
originated from each node to a log file
Primarily used for testing synchronization of the node databases
"""
with open(G.METRICS_DBSYNC_LOG_FILENAME, 'a') as log:
log.write("Time = %d, Nodes = %s\n" % ( int(round(time.time())),
str(node_dict) ) )
def log_person_update(node_message):
"""
This function takes a node message containing a person update and
logs the communication delay based on the time encoded in the message
called from catan_services
"""
db_obj = catan.db.CatanDatabaseObject(node_message.data)
diff_time = None
if db_obj.person_description:
if db_obj.person_description.person_description:
description = db_obj.person_description.person_description
diff_time = extract_time_diff(description)
msg_type = "Person"
elif db_obj.person_message:
if db_obj.person_message.person_message:
message = db_obj.person_message.person_message
diff_time = extract_time_diff(message)
msg_type = "Message"
if diff_time:
with open(G.METRICS_UPDATE_LOG_FILENAME, 'a') as log:
log.write("Time = %d, Delay = %d, Node = %d, Type = %s\n"
% (int(round(time.time())), diff_time,
node_message.source, msg_type ) )
def extract_time_diff(text):
"""
Returns the difference between the current time and the time
in the embedded << >> tag (encoded in POSIX time)
or None if a matching tag cannot be found
"""
time_list = re.findall('<<(.*)>>', text)
if time_list:
update_time = float(time_list[0])
diff_time = round(time.time() - update_time)
return diff_time
else:
return None
| [
"logging.getLogger",
"re.findall",
"time.time"
] | [((168, 195), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (185, 195), False, 'import logging\n'), ((2068, 2096), 're.findall', 're.findall', (['"""<<(.*)>>"""', 'text'], {}), "('<<(.*)>>', text)\n", (2078, 2096), False, 'import re\n'), ((2183, 2194), 'time.time', 'time.time', ([], {}), '()\n', (2192, 2194), False, 'import time\n'), ((642, 653), 'time.time', 'time.time', ([], {}), '()\n', (651, 653), False, 'import time\n'), ((1730, 1741), 'time.time', 'time.time', ([], {}), '()\n', (1739, 1741), False, 'import time\n')] |
from django.views.generic import TemplateView
from dejaviewer.models import Programme, Course, CourseTeacher
class CourseView(TemplateView):
template_name = 'course.html'
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
user = self.request.user
programmes = Programme.objects.all()
programme = Programme.objects.get(code=kwargs['programme']) if 'programme' in kwargs else None
courses = programme.course_set.all()
course = Course.objects.get(pk=kwargs['course'])
teachers = CourseTeacher.objects.filter(course=course)
if False and user.is_superuser:
can_edit = True
elif user.is_authenticated:
try:
ct = CourseTeacher.objects.get(course=course, teacher__user=user)
can_edit = ct.coordinator
except CourseTeacher.DoesNotExist:
can_edit = False
ctx.update(**locals())
return ctx
| [
"dejaviewer.models.Programme.objects.get",
"dejaviewer.models.CourseTeacher.objects.filter",
"dejaviewer.models.Programme.objects.all",
"dejaviewer.models.Course.objects.get",
"dejaviewer.models.CourseTeacher.objects.get"
] | [((325, 348), 'dejaviewer.models.Programme.objects.all', 'Programme.objects.all', ([], {}), '()\n', (346, 348), False, 'from dejaviewer.models import Programme, Course, CourseTeacher\n'), ((515, 554), 'dejaviewer.models.Course.objects.get', 'Course.objects.get', ([], {'pk': "kwargs['course']"}), "(pk=kwargs['course'])\n", (533, 554), False, 'from dejaviewer.models import Programme, Course, CourseTeacher\n'), ((574, 617), 'dejaviewer.models.CourseTeacher.objects.filter', 'CourseTeacher.objects.filter', ([], {'course': 'course'}), '(course=course)\n', (602, 617), False, 'from dejaviewer.models import Programme, Course, CourseTeacher\n'), ((369, 416), 'dejaviewer.models.Programme.objects.get', 'Programme.objects.get', ([], {'code': "kwargs['programme']"}), "(code=kwargs['programme'])\n", (390, 416), False, 'from dejaviewer.models import Programme, Course, CourseTeacher\n'), ((761, 821), 'dejaviewer.models.CourseTeacher.objects.get', 'CourseTeacher.objects.get', ([], {'course': 'course', 'teacher__user': 'user'}), '(course=course, teacher__user=user)\n', (786, 821), False, 'from dejaviewer.models import Programme, Course, CourseTeacher\n')] |
import os
from py.path import local
import pypy
from pypy.tool.udir import udir
from pypy.translator.c.test.test_genc import compile
from pypy.rpython import extregistry
import errno
import sys
import py
def getllimpl(fn):
    """Look up *fn* in the extregistry and return the registered
    low-level (lltype) implementation for it."""
    entry = extregistry.lookup(fn)
    return entry.lltypeimpl
def test_access():
    """The ll implementation of os.access must agree with CPython's
    for several mode combinations on a freshly created empty file."""
    path = str(udir.join('test_access.txt'))
    # create an empty file to probe
    file(path, 'w').close()

    ll_access = getllimpl(os.access)
    modes = [os.R_OK, os.W_OK, os.X_OK, os.R_OK | os.W_OK | os.X_OK]
    for mode in modes:
        assert ll_access(path, mode) == os.access(path, mode)
def test_times():
    """
    posix.times must compile as an RPython function; the compiled function
    must return a five-tuple of floats (the four struct tms fields plus the
    elapsed-time return value, all expressed in seconds).
    """
    def entry_point():
        return os.times()

    compiled = compile(entry_point, ())
    result = compiled()
    assert isinstance(result, tuple)
    assert len(result) == 5
    for item in result:
        assert isinstance(item, float)
def test__getfullpathname():
    """Windows-only: the ll _getfullpathname must match the nt module's
    result, and must not reproduce the old ntpath.py failure where the
    drive-relative input leaked into the end of the expanded path."""
    if os.name != 'nt':
        py.test.skip('nt specific function')
    nt_module = __import__(os.name)
    drive = os.getenv('SystemDrive', 'C:')
    drive_relative = drive + 'stuff'
    expanded = getllimpl(nt_module._getfullpathname)(drive_relative)
    assert expanded == nt_module._getfullpathname(drive_relative)
    # the most intriguing failure of ntpath.py should not repeat, here:
    assert not expanded.endswith(drive_relative)
def test_getcwd():
    """The RPython implementation of os.getcwd must agree with CPython's."""
    impl = getllimpl(os.getcwd)
    assert impl() == os.getcwd()
def test_strerror():
    """The RPython implementation of os.strerror must agree with CPython's."""
    impl = getllimpl(os.strerror)
    assert impl(2) == os.strerror(2)
def test_system():
    """getllimpl(os.system) must execute the shell command and return status 0."""
    filename = str(udir.join('test_system.txt'))
    # Python 2 child process ('print 1+1' statement) redirected into the file.
    arg = 'python -c "print 1+1" > %s' % filename
    data = getllimpl(os.system)(arg)
    assert data == 0
    # The redirected output proves the command really ran.
    assert file(filename).read().strip() == '2'
    os.unlink(filename)
# Environment passed to the execve_tests.py helper; echoed back and checked below.
EXECVE_ENV = {"foo": "bar", "baz": "quux"}
# Helper script living next to this test file; the child process exec's it.
execve_tests = str(local(__file__).dirpath().join('execve_tests.py'))
def test_execve():
    """Exercise getllimpl(os.execve): exit status, environment and error paths."""
    if os.name != 'posix':
        py.test.skip('posix specific function')
    # Command prefix: <python> <execve_tests.py> <pypy root dir> <mode...>
    base = " ".join([
        sys.executable,
        execve_tests,
        str(local(pypy.__file__).join('..', '..')),
        ''])
    # Test exit status and code
    result = os.system(base + "execve_true")
    assert os.WIFEXITED(result)
    assert os.WEXITSTATUS(result) == 0
    result = os.system(base + "execve_false")
    assert os.WIFEXITED(result)
    assert os.WEXITSTATUS(result) == 1
    # Test environment
    result = os.popen(base + "execve_env").read()
    assert dict([line.split('=') for line in result.splitlines()]) == EXECVE_ENV
    # These won't actually execute anything, so they don't need a child process
    # helper.
    execve = getllimpl(os.execve)
    # If the target does not exist, an OSError should result
    info = py.test.raises(
        OSError, execve, execve_tests + "-non-existent", [], {})
    assert info.value.errno == errno.ENOENT
    # If the target is not executable, an OSError should result
    info = py.test.raises(
        OSError, execve, execve_tests, [], {})
    assert info.value.errno == errno.EACCES
class ExpectTestOs:
    """Tests requiring a terminal; skipped where os.ttyname is unavailable."""
    def setup_class(cls):
        if not hasattr(os, 'ttyname'):
            py.test.skip("no ttyname")
    def test_ttyname(self):
        """os.ttyname must behave the same interpreted via llinterp as natively."""
        import os
        import py
        from pypy.rpython.test.test_llinterp import interpret
        def ll_to_string(s):
            # Convert a low-level RPython string (char array) back to a str.
            return ''.join(s.chars)
        def f(num):
            try:
                return os.ttyname(num)
            except OSError:
                return ''
        assert ll_to_string(interpret(f, [0])) == f(0)
        # fd 338 is presumed not to be a tty, so the call must yield ''.
        assert ll_to_string(interpret(f, [338])) == ''
| [
"py.test.raises",
"os.getenv",
"os.times",
"os.access",
"os.ttyname",
"py.path.local",
"os.getcwd",
"pypy.tool.udir.udir.join",
"pypy.rpython.extregistry.lookup",
"pypy.rpython.test.test_llinterp.interpret",
"os.WEXITSTATUS",
"os.WIFEXITED",
"os.unlink",
"os.popen",
"os.system",
"os.st... | [((1132, 1162), 'os.getenv', 'os.getenv', (['"""SystemDrive"""', '"""C:"""'], {}), "('SystemDrive', 'C:')\n", (1141, 1162), False, 'import os\n'), ((1812, 1831), 'os.unlink', 'os.unlink', (['filename'], {}), '(filename)\n', (1821, 1831), False, 'import os\n'), ((2218, 2249), 'os.system', 'os.system', (["(base + 'execve_true')"], {}), "(base + 'execve_true')\n", (2227, 2249), False, 'import os\n'), ((2261, 2281), 'os.WIFEXITED', 'os.WIFEXITED', (['result'], {}), '(result)\n', (2273, 2281), False, 'import os\n'), ((2334, 2366), 'os.system', 'os.system', (["(base + 'execve_false')"], {}), "(base + 'execve_false')\n", (2343, 2366), False, 'import os\n'), ((2378, 2398), 'os.WIFEXITED', 'os.WIFEXITED', (['result'], {}), '(result)\n', (2390, 2398), False, 'import os\n'), ((2795, 2866), 'py.test.raises', 'py.test.raises', (['OSError', 'execve', "(execve_tests + '-non-existent')", '[]', '{}'], {}), "(OSError, execve, execve_tests + '-non-existent', [], {})\n", (2809, 2866), False, 'import py\n'), ((2996, 3049), 'py.test.raises', 'py.test.raises', (['OSError', 'execve', 'execve_tests', '[]', '{}'], {}), '(OSError, execve, execve_tests, [], {})\n', (3010, 3049), False, 'import py\n'), ((237, 259), 'pypy.rpython.extregistry.lookup', 'extregistry.lookup', (['fn'], {}), '(fn)\n', (255, 259), False, 'from pypy.rpython import extregistry\n'), ((310, 338), 'pypy.tool.udir.udir.join', 'udir.join', (['"""test_access.txt"""'], {}), "('test_access.txt')\n", (319, 338), False, 'from pypy.tool.udir import udir\n'), ((1050, 1086), 'py.test.skip', 'py.test.skip', (['"""nt specific function"""'], {}), "('nt specific function')\n", (1062, 1086), False, 'import py\n'), ((1478, 1489), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1487, 1489), False, 'import os\n'), ((1568, 1582), 'os.strerror', 'os.strerror', (['(2)'], {}), '(2)\n', (1579, 1582), False, 'import os\n'), ((1622, 1650), 'pypy.tool.udir.udir.join', 'udir.join', (['"""test_system.txt"""'], {}), "('test_system.txt')\n", 
(1631, 1650), False, 'from pypy.tool.udir import udir\n'), ((2002, 2041), 'py.test.skip', 'py.test.skip', (['"""posix specific function"""'], {}), "('posix specific function')\n", (2014, 2041), False, 'import py\n'), ((2293, 2315), 'os.WEXITSTATUS', 'os.WEXITSTATUS', (['result'], {}), '(result)\n', (2307, 2315), False, 'import os\n'), ((2410, 2432), 'os.WEXITSTATUS', 'os.WEXITSTATUS', (['result'], {}), '(result)\n', (2424, 2432), False, 'import os\n'), ((536, 561), 'os.access', 'os.access', (['filename', 'mode'], {}), '(filename, mode)\n', (545, 561), False, 'import os\n'), ((2475, 2504), 'os.popen', 'os.popen', (["(base + 'execve_env')"], {}), "(base + 'execve_env')\n", (2483, 2504), False, 'import os\n'), ((3203, 3229), 'py.test.skip', 'py.test.skip', (['"""no ttyname"""'], {}), "('no ttyname')\n", (3215, 3229), False, 'import py\n'), ((843, 853), 'os.times', 'os.times', ([], {}), '()\n', (851, 853), False, 'import os\n'), ((3496, 3511), 'os.ttyname', 'os.ttyname', (['num'], {}), '(num)\n', (3506, 3511), False, 'import os\n'), ((3595, 3612), 'pypy.rpython.test.test_llinterp.interpret', 'interpret', (['f', '[0]'], {}), '(f, [0])\n', (3604, 3612), False, 'from pypy.rpython.test.test_llinterp import interpret\n'), ((3650, 3669), 'pypy.rpython.test.test_llinterp.interpret', 'interpret', (['f', '[338]'], {}), '(f, [338])\n', (3659, 3669), False, 'from pypy.rpython.test.test_llinterp import interpret\n'), ((1896, 1911), 'py.path.local', 'local', (['__file__'], {}), '(__file__)\n', (1901, 1911), False, 'from py.path import local\n'), ((2120, 2140), 'py.path.local', 'local', (['pypy.__file__'], {}), '(pypy.__file__)\n', (2125, 2140), False, 'from py.path import local\n')] |
import numpy as np
from PIL import Image
from scipy.ndimage import gaussian_filter, sobel
from scipy.ndimage.filters import laplace
def calc_gradients_test(test_dir):
    """Run calc_gradients on each of the 24 numbered case dirs under *test_dir*."""
    for case in range(24):
        case_dir = test_dir + '/test{}'.format(case)
        calc_gradients(case_dir)
def calc_gradients(dir):
    """Load the g/p noisy PNGs from *dir*, derive their gradient images, save them."""
    g_path = dir + '/g_noisy.png'
    p_path = dir + '/p_noisy.png'
    # Load both buffers up front, then compute and write the gradient images.
    g_buf = np.asarray(Image.open(g_path))
    p_buf = np.asarray(Image.open(p_path))
    g_grad = gradients(g_buf)
    p_grad = gradients(p_buf)
    Image.fromarray(g_grad).save(dir + '/g_noisy_grad.png')
    Image.fromarray(p_grad).save(dir + '/p_noisy_grad.png')
def gradients(img):
    """Compute a Laplacian-of-Gaussian edge map of the input buffer.

    Note: despite the name, this does not return raw x/y derivatives.  The
    input is smoothed with a Gaussian (sigma=10) and the Laplacian of the
    smoothed image is returned, highlighting edges/blobs at that scale.
    (The previous docstring wrongly described an [h, w, 2*c] dx/dy output.)

    Args:
        img (np.array): input image-like tensor.

    Returns:
        np.array: Laplacian of the Gaussian-smoothed input, same shape as img.
    """
    return laplace(gaussian_filter(img, 10))
# calc_gradients('test/kpcn_decomp_mask_2/test5')
calc_gradients_test('test/kpcn_decomp_mask_2') | [
"PIL.Image.fromarray",
"PIL.Image.open",
"numpy.asarray",
"scipy.ndimage.gaussian_filter"
] | [((365, 388), 'PIL.Image.open', 'Image.open', (['g_noisy_dir'], {}), '(g_noisy_dir)\n', (375, 388), False, 'from PIL import Image\n'), ((403, 422), 'numpy.asarray', 'np.asarray', (['g_noisy'], {}), '(g_noisy)\n', (413, 422), True, 'import numpy as np\n'), ((437, 460), 'PIL.Image.open', 'Image.open', (['p_noisy_dir'], {}), '(p_noisy_dir)\n', (447, 460), False, 'from PIL import Image\n'), ((475, 494), 'numpy.asarray', 'np.asarray', (['p_noisy'], {}), '(p_noisy)\n', (485, 494), True, 'import numpy as np\n'), ((1427, 1451), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['img', '(10)'], {}), '(img, 10)\n', (1442, 1451), False, 'from scipy.ndimage import gaussian_filter, sobel\n'), ((575, 604), 'PIL.Image.fromarray', 'Image.fromarray', (['g_noisy_grad'], {}), '(g_noisy_grad)\n', (590, 604), False, 'from PIL import Image\n'), ((641, 670), 'PIL.Image.fromarray', 'Image.fromarray', (['p_noisy_grad'], {}), '(p_noisy_grad)\n', (656, 670), False, 'from PIL import Image\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 21 13:52:07 2019
@author: Administrator
train.py: 训练模型
"""
from Unet import Unet
import LoadBatches1D
import tensorflow as tf
import keras
from keras import optimizers
import warnings
import matplotlib.pyplot as plt
# Allocate GPU memory on demand instead of reserving it all at session start.
gpu_options = tf.GPUOptions(allow_growth=True)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
# Silence library warning noise during training.
warnings.filterwarnings("ignore")
def lr_schedule(epoch):
    """Learning-rate decay schedule: 1e-4 for the first 50 epochs, 1e-5 after."""
    lr = 0.00001 if epoch >= 50 else 0.0001
    print('Learning rate: ', lr)
    return lr
# --- data and training configuration ---------------------------------------
train_sigs_path = 'train_sigs/'
train_segs_path = 'train_labels/'
train_batch_size = 1
n_classes = 3
input_length = 1800
optimizer_name = optimizers.Adam(lr_schedule(0))
val_sigs_path = 'val_sigs/'
val_segs_path = 'val_labels/'
val_batch_size = 2
# Re-evaluates lr_schedule at the start of every epoch.
lr_scheduler = keras.callbacks.LearningRateScheduler(lr_schedule)
# --- model ------------------------------------------------------------------
model = Unet(n_classes, input_length=input_length)
model.compile(loss='categorical_crossentropy',
              optimizer=optimizer_name,
              metrics=['accuracy'])
model.summary()
output_length = 1800
# Batch generators yielding (signal, one-hot label) pairs for train/validation.
G = LoadBatches1D.SigSegmentationGenerator(train_sigs_path, train_segs_path, train_batch_size, n_classes, output_length)
G2 = LoadBatches1D.SigSegmentationGenerator(val_sigs_path, val_segs_path, val_batch_size, n_classes, output_length)
# Keep only the weights with the best validation accuracy seen so far.
checkpointer = keras.callbacks.ModelCheckpoint(filepath='myNet.h5', monitor='val_acc', mode='max', save_best_only=True)
history = model.fit_generator(G, 500//train_batch_size,
                              validation_data=G2, validation_steps=200, epochs=70,
                              callbacks=[checkpointer, lr_scheduler])
# --- training curves: accuracy and loss for train vs. validation ------------
plt.figure()
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.grid(True)
plt.figure()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.grid(True)
| [
"keras.callbacks.LearningRateScheduler",
"matplotlib.pyplot.grid",
"tensorflow.ConfigProto",
"keras.callbacks.ModelCheckpoint",
"matplotlib.pyplot.ylabel",
"Unet.Unet",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"LoadBatches1D.SigSegmentationGenerator",
"matplotlib.pyplot.figure",
"ma... | [((282, 314), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'allow_growth': '(True)'}), '(allow_growth=True)\n', (295, 314), True, 'import tensorflow as tf\n'), ((381, 414), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (404, 414), False, 'import warnings\n'), ((829, 879), 'keras.callbacks.LearningRateScheduler', 'keras.callbacks.LearningRateScheduler', (['lr_schedule'], {}), '(lr_schedule)\n', (866, 879), False, 'import keras\n'), ((889, 931), 'Unet.Unet', 'Unet', (['n_classes'], {'input_length': 'input_length'}), '(n_classes, input_length=input_length)\n', (893, 931), False, 'from Unet import Unet\n'), ((1101, 1221), 'LoadBatches1D.SigSegmentationGenerator', 'LoadBatches1D.SigSegmentationGenerator', (['train_sigs_path', 'train_segs_path', 'train_batch_size', 'n_classes', 'output_length'], {}), '(train_sigs_path, train_segs_path,\n train_batch_size, n_classes, output_length)\n', (1139, 1221), False, 'import LoadBatches1D\n'), ((1224, 1338), 'LoadBatches1D.SigSegmentationGenerator', 'LoadBatches1D.SigSegmentationGenerator', (['val_sigs_path', 'val_segs_path', 'val_batch_size', 'n_classes', 'output_length'], {}), '(val_sigs_path, val_segs_path,\n val_batch_size, n_classes, output_length)\n', (1262, 1338), False, 'import LoadBatches1D\n'), ((1351, 1459), 'keras.callbacks.ModelCheckpoint', 'keras.callbacks.ModelCheckpoint', ([], {'filepath': '"""myNet.h5"""', 'monitor': '"""val_acc"""', 'mode': '"""max"""', 'save_best_only': '(True)'}), "(filepath='myNet.h5', monitor='val_acc',\n mode='max', save_best_only=True)\n", (1382, 1459), False, 'import keras\n'), ((1661, 1673), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1671, 1673), True, 'import matplotlib.pyplot as plt\n'), ((1674, 1706), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['acc']"], {}), "(history.history['acc'])\n", (1682, 1706), True, 'import matplotlib.pyplot as plt\n'), ((1707, 1743), 'matplotlib.pyplot.plot', 'plt.plot', 
(["history.history['val_acc']"], {}), "(history.history['val_acc'])\n", (1715, 1743), True, 'import matplotlib.pyplot as plt\n'), ((1744, 1771), 'matplotlib.pyplot.title', 'plt.title', (['"""Model accuracy"""'], {}), "('Model accuracy')\n", (1753, 1771), True, 'import matplotlib.pyplot as plt\n'), ((1772, 1794), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (1782, 1794), True, 'import matplotlib.pyplot as plt\n'), ((1795, 1814), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (1805, 1814), True, 'import matplotlib.pyplot as plt\n'), ((1815, 1862), 'matplotlib.pyplot.legend', 'plt.legend', (["['Train', 'Test']"], {'loc': '"""upper left"""'}), "(['Train', 'Test'], loc='upper left')\n", (1825, 1862), True, 'import matplotlib.pyplot as plt\n'), ((1863, 1877), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1871, 1877), True, 'import matplotlib.pyplot as plt\n'), ((1879, 1891), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1889, 1891), True, 'import matplotlib.pyplot as plt\n'), ((1892, 1925), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss']"], {}), "(history.history['loss'])\n", (1900, 1925), True, 'import matplotlib.pyplot as plt\n'), ((1926, 1963), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_loss']"], {}), "(history.history['val_loss'])\n", (1934, 1963), True, 'import matplotlib.pyplot as plt\n'), ((1964, 1987), 'matplotlib.pyplot.title', 'plt.title', (['"""Model loss"""'], {}), "('Model loss')\n", (1973, 1987), True, 'import matplotlib.pyplot as plt\n'), ((1988, 2006), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (1998, 2006), True, 'import matplotlib.pyplot as plt\n'), ((2007, 2026), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), "('Epoch')\n", (2017, 2026), True, 'import matplotlib.pyplot as plt\n'), ((2027, 2074), 'matplotlib.pyplot.legend', 'plt.legend', (["['Train', 
'Test']"], {'loc': '"""upper left"""'}), "(['Train', 'Test'], loc='upper left')\n", (2037, 2074), True, 'import matplotlib.pyplot as plt\n'), ((2075, 2089), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (2083, 2089), True, 'import matplotlib.pyplot as plt\n'), ((340, 379), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options'}), '(gpu_options=gpu_options)\n', (354, 379), True, 'import tensorflow as tf\n')] |
# coding=utf-8
#
# Copyright (c) 2013-2015 First Flamingo Enterprise B.V.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# TAFormsHandler.py
# firstflamingo/treinenaapje
#
# Created by <NAME> on 27-Jan-2013.
#
import webapp2, logging
from google.appengine.ext import db
from TASeries import TASeries
from TAMission import TAMission
class SeriesHandler(webapp2.RequestHandler):
    """Handles the console form that edits the missions of one train series."""
    def post(self):
        """Apply submitted offset / odIDs values to the missions of one direction."""
        seriesID = self.request.get('series')
        direction = self.request.get('direction')
        logging.info('Handle form for series %s (%s).' % (seriesID, direction))
        changedObjects = []
        series = TASeries.get(seriesID)
        # Any direction value other than 'down' (including missing) selects 'up'.
        if direction != 'down':
            missions = series.up_missions
        else:
            missions = series.down_missions
        for mission in missions:
            missionCode = mission.code
            # Form fields are keyed per mission: offset_<code> / odids_<code>.
            offsetString = self.request.get('offset_%s' % missionCode)
            if offsetString:
                mission.offset_string = offsetString
            odIDsString = self.request.get('odids_%s' % missionCode)
            if odIDsString:
                mission.odIDs_dictionary = dictionaryFromODIDs(odIDsString)
            if mission.needs_datastore_put:
                logging.info('changed mission: %s' % missionCode)
                mission.needs_datastore_put = False
                mission.cache_set()
                changedObjects.append(mission)
        # One batched datastore write for all modified missions.
        db.put(changedObjects)
        self.redirect('/console/missions?kind=pattern&series=%s&direction=%s&changes=%d' % (seriesID, direction, len(changedObjects)))
def dictionaryFromODIDs(odIDsString):
    """Parse comma-separated 'day:from-to' pairs into {day_key: [fromID, toID]}.

    The literal string 'None' on either side of the dash becomes None.
    """
    result = {}
    for entry in odIDsString.split(','):
        day_part, ids_part = entry.split(':')
        origin, destination = [part.strip() for part in ids_part.split('-')]
        if origin == 'None':
            origin = None
        if destination == 'None':
            destination = None
        result[keyString(day_part)] = [origin, destination]
    return result
def keyString(dayString):
    """Map a Dutch day label to the odIDs dictionary key.

    'ma'..'zo' (Monday..Sunday) map to '0'..'6'; 'normaal' maps to 'd'
    (the default pattern).  Surrounding whitespace is ignored.  Unknown
    labels yield None, preserving the original fall-through behaviour.
    """
    day_keys = {
        'ma': '0',
        'di': '1',
        'wo': '2',
        'do': '3',
        'vr': '4',
        'za': '5',
        'zo': '6',
        'normaal': 'd',
    }
    return day_keys.get(dayString.strip())
# WSGI Application: single route, POST /forms/series -> SeriesHandler.post
app = webapp2.WSGIApplication([('/forms/series', SeriesHandler)
                                          ], debug=True)
| [
"TASeries.TASeries.get",
"webapp2.WSGIApplication",
"logging.info",
"google.appengine.ext.db.put"
] | [((2949, 3020), 'webapp2.WSGIApplication', 'webapp2.WSGIApplication', (["[('/forms/series', SeriesHandler)]"], {'debug': '(True)'}), "([('/forms/series', SeriesHandler)], debug=True)\n", (2972, 3020), False, 'import webapp2, logging\n'), ((1034, 1105), 'logging.info', 'logging.info', (["('Handle form for series %s (%s).' % (seriesID, direction))"], {}), "('Handle form for series %s (%s).' % (seriesID, direction))\n", (1046, 1105), False, 'import webapp2, logging\n'), ((1151, 1173), 'TASeries.TASeries.get', 'TASeries.get', (['seriesID'], {}), '(seriesID)\n', (1163, 1173), False, 'from TASeries import TASeries\n'), ((1957, 1979), 'google.appengine.ext.db.put', 'db.put', (['changedObjects'], {}), '(changedObjects)\n', (1963, 1979), False, 'from google.appengine.ext import db\n'), ((1764, 1813), 'logging.info', 'logging.info', (["('changed mission: %s' % missionCode)"], {}), "('changed mission: %s' % missionCode)\n", (1776, 1813), False, 'import webapp2, logging\n')] |
import requests
from flask import Blueprint, render_template, abort, session, current_app
# Blueprint grouping the item-detail routes; registered on the app elsewhere.
bp = Blueprint("item", __name__)
@bp.route("/item/<int:item_id>", methods=("GET", "POST"))
def index(item_id):
    """Render the item-detail page with recommendations fetched from the API."""
    query = {
        "page_type": "item",
        "user_id": session.get("username"),
        "session_id": session.get("uid")
    }
    response = requests.get(
        url="http://api:8000/recommend/item/{0}".format(item_id),
        params=query
    )
    if response.status_code != 200:
        abort(response.status_code)
    payload = response.json()
    active_item = payload["active_item"]
    # Record the view in the visitor's history before rendering.
    history_key = "history:{0}".format(session.get("uid"))
    current_app.tracker.store_item_viewed(history_key, active_item["id"])
    return render_template(
        "item/index.html",
        active_item=active_item,
        recommendations=payload["recommendations"]
    )
| [
"flask.render_template",
"flask.abort",
"flask.Blueprint",
"flask.session.get"
] | [((96, 123), 'flask.Blueprint', 'Blueprint', (['"""item"""', '__name__'], {}), "('item', __name__)\n", (105, 123), False, 'from flask import Blueprint, render_template, abort, session, current_app\n'), ((774, 871), 'flask.render_template', 'render_template', (['"""item/index.html"""'], {'active_item': 'active_item', 'recommendations': 'recommendations'}), "('item/index.html', active_item=active_item, recommendations\n =recommendations)\n", (789, 871), False, 'from flask import Blueprint, render_template, abort, session, current_app\n'), ((493, 515), 'flask.abort', 'abort', (['res.status_code'], {}), '(res.status_code)\n', (498, 515), False, 'from flask import Blueprint, render_template, abort, session, current_app\n'), ((709, 727), 'flask.session.get', 'session.get', (['"""uid"""'], {}), "('uid')\n", (720, 727), False, 'from flask import Blueprint, render_template, abort, session, current_app\n'), ((367, 390), 'flask.session.get', 'session.get', (['"""username"""'], {}), "('username')\n", (378, 390), False, 'from flask import Blueprint, render_template, abort, session, current_app\n'), ((418, 436), 'flask.session.get', 'session.get', (['"""uid"""'], {}), "('uid')\n", (429, 436), False, 'from flask import Blueprint, render_template, abort, session, current_app\n')] |
# -*- coding: utf-8 -*-
"""Adapting the general reegis power plants to the de21 model.
SPDX-FileCopyrightText: 2016-2019 <NAME> <<EMAIL>>
SPDX-License-Identifier: MIT
"""
__copyright__ = "<NAME> <<EMAIL>>"
__license__ = "MIT"
import pandas as pd
import os
import logging
import datetime
from xml.etree import ElementTree
import oemof.tools.logger as logger
from reegis import config as cfg
import berlin_hp.download
def fill_data_gaps(df):
    """Parse the string index and fill missing values of a quarter-hourly table.

    The index strings are truncated to 'YYYY-MM-DD HH:MM:SS' and parsed into a
    DatetimeIndex.  Zeros are treated as missing values.  Gaps are then filled
    in three passes per column: first with the value from exactly one week
    earlier (7 days * 4 quarter-hours * 24 hours), then by linear
    interpolation, and finally by back-filling any leading gap.

    Parameters
    ----------
    df : pandas.DataFrame
        Table with a string-timestamp index and numeric-like columns.

    Returns
    -------
    pandas.DataFrame
        Cleaned table with a DatetimeIndex.
    """
    logging.info("Fill the gaps and resample to hourly values.")
    df.index = pd.DatetimeIndex(df.reset_index()["index"].str.slice(0, 19))
    df = df.apply(pd.to_numeric)
    df = df.replace(0, float("nan"))
    one_week = 7 * 4 * 24  # quarter-hour steps in one week
    for col in df.columns:
        df[col] = df[col].fillna(df[col].shift(one_week))
        df[col] = df[col].interpolate()
        # Series.bfill() replaces fillna(method="bfill"), which is deprecated
        # since pandas 2.1 and removed in pandas 3.0.
        df[col] = df[col].bfill()
    return df
def convert_net_xml2df(year, filename, hourly=True):
    """Parse the Berlin grid xml file into a cleaned (hourly) DataFrame.

    Parameters
    ----------
    year : int
        Year of the data set; the series is cut to this calendar year.
    filename : str
        Path to the downloaded xml file.
    hourly : bool
        If True the quarter-hourly values are resampled to hourly means.

    Returns
    -------
    pandas.DataFrame
        Time series indexed in Europe/Berlin local time.
    """
    tree = ElementTree.parse(filename)
    elem = tree.getroot()
    logging.info("Convert xml-file to csv-file for {0}".format(year))
    n = 0
    attributes = ["usage", "generation", "feed", "key-acount-usage"]
    df = pd.DataFrame(columns=attributes)
    df_temp = pd.DataFrame(columns=attributes)
    for distr_ele in elem.find("district"):
        # Element.getchildren() was removed in Python 3.9; iterating the
        # element directly yields the same child elements.
        for f in distr_ele:
            value_list = [float(f.find(atr).text) for atr in attributes]
            df_temp.loc[f.attrib["value"], attributes] = value_list
            # Concatenate in chunks of 100 rows to avoid the quadratic cost
            # of growing the main frame one row at a time.
            if n % 100 == 0:
                df = pd.concat([df, df_temp])
                df_temp = pd.DataFrame(columns=attributes)
            n += 1
    df = pd.concat([df, df_temp])  # append the remaining chunk
    # fill the data gaps
    df = fill_data_gaps(df)
    # cut the time series to the given year
    start_date = datetime.datetime(year, 1, 1)
    end_date = datetime.datetime(year + 1, 1, 1)
    df = df.loc[(df.index >= start_date) & (df.index < end_date)]
    # Shift by one hour (interval end -> interval start), label the naive
    # timestamps as UTC and convert them to Europe/Berlin local time.
    df.set_index(
        (df.index - pd.DateOffset(hours=1))
        .tz_localize(tz="UTC")
        .tz_convert("Europe/Berlin"),
        inplace=True,
    )
    # resample to hourly values if hourly is set to True
    if hourly is True:
        df = df.resample("H").mean()
        df = df.interpolate()
    return df
def get_electricity_demand(year, hourly=True, district=None):
    """Get the electricity demand in MW.

    The xml source is downloaded on demand and the converted table is cached
    as csv next to it; subsequent calls read the csv directly.

    Parameters
    ----------
    year : int
        Year of the data set.
    hourly : bool
        Get hourly data.
    district : str or None
        District of Berlin. If None 'berlin' is used. Possible values are:
        Pankow, Lichtenberg, Marzahn-Hellersdorf, Treptow-Koepenick, Neukoelln,
        Friedrichshain-Kreuzberg, Mitte, Tempelhof-Schöneberg,
        Steglitz-Zehlendorf, Charlottenburg-Wilmersdorf, Reinickendorf, Spandau

    Returns
    -------
    pandas.DataFrame
        Demand in MW, indexed in Europe/Berlin local time.
    """
    if district is None:
        district_name = "berlin"
    else:
        district_name = district.replace("-", "_")
    # File name patterns come from the reegis configuration.
    xml_filename = os.path.join(
        cfg.get("paths", "electricity"),
        cfg.get("electricity", "file_xml").format(
            year=year, district=district_name
        ),
    )
    csv_filename = os.path.join(
        cfg.get("paths", "electricity"), cfg.get("electricity", "file_csv")
    ).format(year=year, district=district_name)
    # Download the xml source if it is not cached locally yet.
    if not os.path.isfile(xml_filename):
        logging.info(
            "Download {0} grid data for {1} as xml.".format(district, year)
        )
        xml_filename = berlin_hp.download.get_berlin_net_data(
            year, district=district
        )
    # Convert xml -> csv once and reuse the csv afterwards.
    if not os.path.isfile(csv_filename):
        df = convert_net_xml2df(year, xml_filename, hourly=hourly)
        df.to_csv(csv_filename)
    msg = (
        "The unit for the electricity demand of the source is kW. Values "
        "will be divided by 1000 to get MW."
    )
    logging.warning(msg)
    df = pd.read_csv(csv_filename, index_col=[0]).div(1000)
    return df.set_index(
        pd.to_datetime(df.index, utc=True).tz_convert("Europe/Berlin")
    )
if __name__ == "__main__":
    logger.define_logging(file_level=logging.INFO)
    # Sanity check: fetch every district for several years and report any
    # columns that still contain missing values after the gap filling.
    d_list = [
        "berlin",
        "Pankow",
        "Lichtenberg",
        "Marzahn-Hellersdorf",
        "Treptow-Koepenick",
        "Neukoelln",
        "Friedrichshain-Kreuzberg",
        "Mitte",
        "Tempelhof-Schoeneberg",
        "Steglitz-Zehlendorf",
        "Charlottenburg-Wilmersdorf",
        "Reinickendorf",
        "Spandau",
    ]
    for district in d_list:
        c = []  # columns with remaining NaN values, collected over all years
        for y in [2012, 2013, 2014, 2015, 2016]:
            d = get_electricity_demand(y, district=district)
            if d.isnull().values.any():
                for column in d.columns:
                    if d[column].isnull().any():
                        c.append(column)
                print(d.loc[d.usage.isnull()])
        if len(c) < 1:
            print("Everything is fine for {0}.".format(district))
        else:
            print(c)
| [
"datetime.datetime",
"xml.etree.ElementTree.parse",
"pandas.read_csv",
"logging.warning",
"oemof.tools.logger.define_logging",
"os.path.isfile",
"pandas.concat",
"pandas.DateOffset",
"pandas.DataFrame",
"logging.info",
"reegis.config.get",
"pandas.to_datetime"
] | [((455, 515), 'logging.info', 'logging.info', (['"""Fill the gaps and resample to hourly values."""'], {}), "('Fill the gaps and resample to hourly values.')\n", (467, 515), False, 'import logging\n'), ((918, 945), 'xml.etree.ElementTree.parse', 'ElementTree.parse', (['filename'], {}), '(filename)\n', (935, 945), False, 'from xml.etree import ElementTree\n'), ((1130, 1162), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'attributes'}), '(columns=attributes)\n', (1142, 1162), True, 'import pandas as pd\n'), ((1177, 1209), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'attributes'}), '(columns=attributes)\n', (1189, 1209), True, 'import pandas as pd\n'), ((1648, 1672), 'pandas.concat', 'pd.concat', (['[df, df_temp]'], {}), '([df, df_temp])\n', (1657, 1672), True, 'import pandas as pd\n'), ((1789, 1818), 'datetime.datetime', 'datetime.datetime', (['year', '(1)', '(1)'], {}), '(year, 1, 1)\n', (1806, 1818), False, 'import datetime\n'), ((1834, 1867), 'datetime.datetime', 'datetime.datetime', (['(year + 1)', '(1)', '(1)'], {}), '(year + 1, 1, 1)\n', (1851, 1867), False, 'import datetime\n'), ((3867, 3887), 'logging.warning', 'logging.warning', (['msg'], {}), '(msg)\n', (3882, 3887), False, 'import logging\n'), ((4084, 4130), 'oemof.tools.logger.define_logging', 'logger.define_logging', ([], {'file_level': 'logging.INFO'}), '(file_level=logging.INFO)\n', (4105, 4130), True, 'import oemof.tools.logger as logger\n'), ((3020, 3051), 'reegis.config.get', 'cfg.get', (['"""paths"""', '"""electricity"""'], {}), "('paths', 'electricity')\n", (3027, 3051), True, 'from reegis import config as cfg\n'), ((3336, 3364), 'os.path.isfile', 'os.path.isfile', (['xml_filename'], {}), '(xml_filename)\n', (3350, 3364), False, 'import os\n'), ((3595, 3623), 'os.path.isfile', 'os.path.isfile', (['csv_filename'], {}), '(csv_filename)\n', (3609, 3623), False, 'import os\n'), ((3898, 3938), 'pandas.read_csv', 'pd.read_csv', (['csv_filename'], {'index_col': '[0]'}), 
'(csv_filename, index_col=[0])\n', (3909, 3938), True, 'import pandas as pd\n'), ((1536, 1560), 'pandas.concat', 'pd.concat', (['[df, df_temp]'], {}), '([df, df_temp])\n', (1545, 1560), True, 'import pandas as pd\n'), ((1587, 1619), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'attributes'}), '(columns=attributes)\n', (1599, 1619), True, 'import pandas as pd\n'), ((3061, 3095), 'reegis.config.get', 'cfg.get', (['"""electricity"""', '"""file_xml"""'], {}), "('electricity', 'file_xml')\n", (3068, 3095), True, 'from reegis import config as cfg\n'), ((3208, 3239), 'reegis.config.get', 'cfg.get', (['"""paths"""', '"""electricity"""'], {}), "('paths', 'electricity')\n", (3215, 3239), True, 'from reegis import config as cfg\n'), ((3241, 3275), 'reegis.config.get', 'cfg.get', (['"""electricity"""', '"""file_csv"""'], {}), "('electricity', 'file_csv')\n", (3248, 3275), True, 'from reegis import config as cfg\n'), ((3982, 4016), 'pandas.to_datetime', 'pd.to_datetime', (['df.index'], {'utc': '(True)'}), '(df.index, utc=True)\n', (3996, 4016), True, 'import pandas as pd\n'), ((1972, 1994), 'pandas.DateOffset', 'pd.DateOffset', ([], {'hours': '(1)'}), '(hours=1)\n', (1985, 1994), True, 'import pandas as pd\n')] |
'''
Created on Dec 5, 2016
@author: wjadams
'''
import numpy as np
class AhpNode(object):
    """A node in an AHP (Analytic Hierarchy Process) tree.

    Leaf nodes carry a vector of alternative scores; interior nodes combine
    their children's scores, optionally weighted by a pairwise comparison
    object (``pw``).
    """

    def __init__(self, parent_tree, name, nalts, pw=None):
        """Create a node with *nalts* zeroed alternative scores.

        If a pairwise comparison *pw* is given, one child node is created
        for each of its alternatives.
        """
        self.children = []
        self.name = name
        self.alt_scores = np.zeros([nalts])
        self.nalts = nalts
        self.parent_tree = parent_tree
        self.pw = pw
        # 'is not None' (not '!= None'): identity test, per PEP 8.
        if pw is not None:
            self.add_children_pw(pw)

    def add_children_pw(self, pw):
        """Add one child per alternative named in the pairwise object *pw*."""
        for alt_name in pw.alt_names:
            self.add_child(alt_name)

    def add_child(self, alt_name):
        """Append a new leaf child called *alt_name*."""
        self.children.append(AhpNode(self.parent_tree, alt_name, self.nalts))

    def add_alt(self):
        """Grow the score vector by one alternative, recursively in children."""
        self.alt_scores = np.append(self.alt_scores, 0)
        self.nalts += 1
        for child in self.children:
            child.add_alt()

    def set_alt_scores_old(self, new_scores):
        """Deprecated: set the raw alternative scores without rescaling."""
        if (len(new_scores)!=self.nalts):
            raise NameError("Wrong length for new alt scores")
        self.alt_scores = np.array(new_scores)

    def set_pw(self, pw):
        """Attach a pairwise comparison; it must cover all current children."""
        if pw.nalts() != self.nchildren():
            raise NameError("Wrong number of children in Pairwise")
        self.pw = pw

    def nchildren(self):
        """Return the number of direct children."""
        return len(self.children)

    def has_children(self):
        """Return True if this node has at least one child."""
        return len(self.children) != 0

    def set_alt_scores(self, vals):
        """Set the alternative scores, rescaled so the maximum becomes 1.

        Bug fix: convert to float before dividing; the previous in-place
        true-division raised a casting TypeError for integer inputs.
        """
        nvals = np.array(vals, dtype=float)
        s = np.max(nvals)
        if s != 0:
            nvals /= s
        self.alt_scores = nvals

    def synthesize(self, user = None):
        """Return the synthesized score vector for *user*.

        Leaves return their own scores.  Interior nodes return the average of
        their children's weighted synthesized vectors (pairwise weights are
        used only when both a pw object and a user are given; otherwise the
        weights are zero), divided by the number of children whose vectors
        contain a positive entry.
        """
        if not self.has_children():
            return(self.alt_scores)
        #This node has children
        rval = np.zeros([self.nalts])
        if (self.pw is not None) and (user is not None):
            coeffs = self.pw.single_stats(user)
        else:
            coeffs = np.array([0 for i in self.children])
        count = 0
        i = 0
        for kid in self.children:
            kid_vals = kid.synthesize(user)
            if np.max(kid_vals) > 0:
                count+=1
            rval += coeffs[i] * kid_vals
            i += 1
        if count > 0:
            rval /= (count+0.0)
        return(rval)

    def get_child(self, node_path_list):
        """Follow *node_path_list* (a list of child names) down the tree.

        An empty path returns this node; a missing name raises NameError.
        """
        if len(node_path_list) <= 0:
            return(self)
        for child in self.children:
            if child.name == node_path_list[0]:
                return(child.get_child(node_path_list[1:]))
        #If we make it here, we could not find a child
        raise NameError("Could not find child `"+node_path_list[0]+"'")
class AhpTree(object):
    """An AHP tree: a list of alternative names plus a root AhpNode."""

    def __init__(self, alt_names=None, pw=None):
        """Create a tree for *alt_names* (None means no alternatives yet)."""
        self.usernames = []
        # 'is None' (not '== None'): identity test, per PEP 8.
        if alt_names is None:
            alt_names = []
        self.nalts = len(alt_names)
        self.alt_names = alt_names
        self.root = AhpNode(self, "root", self.nalts, pw)

    def add_alt(self, alt_name):
        """Register a new alternative and grow every node's score vector."""
        self.alt_names.append(alt_name)
        self.root.add_alt()

    def synthesize(self, user=None):
        """Return the root's synthesized score vector for *user*."""
        return self.root.synthesize(user)

    def get_node(self, node_path_list):
        """Return the node reached by following *node_path_list* from the root."""
        return self.root.get_child(node_path_list)
"numpy.append",
"numpy.array",
"numpy.zeros",
"numpy.max"
] | [((234, 251), 'numpy.zeros', 'np.zeros', (['[nalts]'], {}), '([nalts])\n', (242, 251), True, 'import numpy as np\n'), ((702, 731), 'numpy.append', 'np.append', (['self.alt_scores', '(0)'], {}), '(self.alt_scores, 0)\n', (711, 731), True, 'import numpy as np\n'), ((1010, 1030), 'numpy.array', 'np.array', (['new_scores'], {}), '(new_scores)\n', (1018, 1030), True, 'import numpy as np\n'), ((1441, 1455), 'numpy.array', 'np.array', (['vals'], {}), '(vals)\n', (1449, 1455), True, 'import numpy as np\n'), ((1468, 1481), 'numpy.max', 'np.max', (['nvals'], {}), '(nvals)\n', (1474, 1481), True, 'import numpy as np\n'), ((1719, 1741), 'numpy.zeros', 'np.zeros', (['[self.nalts]'], {}), '([self.nalts])\n', (1727, 1741), True, 'import numpy as np\n'), ((1882, 1920), 'numpy.array', 'np.array', (['[(0) for i in self.children]'], {}), '([(0) for i in self.children])\n', (1890, 1920), True, 'import numpy as np\n'), ((2065, 2081), 'numpy.max', 'np.max', (['kid_vals'], {}), '(kid_vals)\n', (2071, 2081), True, 'import numpy as np\n')] |
import unittest
import yaml
from jnpr.jsnapy.check import Comparator
from mock import patch
import os
from nose.plugins.attrib import attr
@attr('unit')
class TestStringOperators(unittest.TestCase):
    """Unit tests for JSnapy string test operators (contains, is-in, not-in)."""

    def setUp(self):
        # Fixture state shared by every operator test.
        self.diff = False
        self.hostname = "10.216.193.114"
        self.db = dict()
        self.db['store_in_sqlite'] = False
        self.db['check_from_sqlite'] = False
        self.db['db_name'] = ""
        self.snap_del = False

    def _run_operator_test(self, mock_path, conf_name, snap_name,
                           expected_passed, expected_failed):
        """Load *conf_name*, replay snapshot *snap_name*, assert pass/fail counts.

        Every operator test follows the same recipe; only the config file,
        snapshot name and expected counters differ, so the common body lives
        here instead of being copied six times.
        """
        self.chk = False
        comp = Comparator()
        conf_file = os.path.join(os.path.dirname(__file__), 'configs', conf_name)
        mock_path.return_value = os.path.join(os.path.dirname(__file__), 'configs')
        # `with` closes the handle (the original leaked it), and
        # yaml.load() without an explicit Loader is deprecated (an error in
        # PyYAML >= 6); FullLoader reproduces the old full-construction
        # behaviour.
        with open(conf_file, 'r') as config_file:
            main_file = yaml.load(config_file, Loader=yaml.FullLoader)
        oper = comp.generate_test_files(
            main_file,
            self.hostname,
            self.chk,
            self.diff,
            self.db,
            self.snap_del,
            snap_name)
        self.assertEqual(oper.no_passed, expected_passed)
        self.assertEqual(oper.no_failed, expected_failed)

    @patch('jnpr.jsnapy.check.get_path')
    def test_contains(self, mock_path):
        self._run_operator_test(mock_path, 'main_contains.yml',
                                'snap_contains_pre', 1, 0)

    @patch('jnpr.jsnapy.check.get_path')
    def test_contains_fail(self, mock_path):
        self._run_operator_test(mock_path, 'main_contains.yml',
                                'snap_contains_fail_pre', 0, 1)

    @patch('jnpr.jsnapy.check.get_path')
    def test_is_in(self, mock_path):
        self._run_operator_test(mock_path, 'main_is-in.yml',
                                'snap_is-in_pre', 1, 0)

    @patch('jnpr.jsnapy.check.get_path')
    def test_is_in_fail(self, mock_path):
        self._run_operator_test(mock_path, 'main_is-in.yml',
                                'snap_is-in_fail_pre', 0, 1)

    @patch('jnpr.jsnapy.check.get_path')
    def test_not_in(self, mock_path):
        self._run_operator_test(mock_path, 'main_not-in.yml',
                                'snap_not-in_pre', 0, 1)

    @patch('jnpr.jsnapy.check.get_path')
    def test_not_in_pass(self, mock_path):
        self._run_operator_test(mock_path, 'main_not-in.yml',
                                'snap_not-in_fail_pre', 1, 0)
# Patch the Logger class while the suite runs so the module under test does
# not emit real log output (presumably jnpr.jsnapy logs via logging.Logger
# -- TODO confirm the patch target is actually exercised here).
with patch('logging.Logger') as mock_logger:
    if __name__ == "__main__":
        # Build and run the suite explicitly (instead of unittest.main())
        # so that it executes inside the patch context.
        suite = unittest.TestLoader().loadTestsFromTestCase(
            TestStringOperators)
        unittest.TextTestRunner(verbosity=2).run(suite)
| [
"mock.patch",
"jnpr.jsnapy.check.Comparator",
"nose.plugins.attrib.attr",
"yaml.load",
"os.path.dirname",
"unittest.TextTestRunner",
"unittest.TestLoader"
] | [((141, 153), 'nose.plugins.attrib.attr', 'attr', (['"""unit"""'], {}), "('unit')\n", (145, 153), False, 'from nose.plugins.attrib import attr\n'), ((470, 505), 'mock.patch', 'patch', (['"""jnpr.jsnapy.check.get_path"""'], {}), "('jnpr.jsnapy.check.get_path')\n", (475, 505), False, 'from mock import patch\n'), ((1205, 1240), 'mock.patch', 'patch', (['"""jnpr.jsnapy.check.get_path"""'], {}), "('jnpr.jsnapy.check.get_path')\n", (1210, 1240), False, 'from mock import patch\n'), ((1950, 1985), 'mock.patch', 'patch', (['"""jnpr.jsnapy.check.get_path"""'], {}), "('jnpr.jsnapy.check.get_path')\n", (1955, 1985), False, 'from mock import patch\n'), ((2676, 2711), 'mock.patch', 'patch', (['"""jnpr.jsnapy.check.get_path"""'], {}), "('jnpr.jsnapy.check.get_path')\n", (2681, 2711), False, 'from mock import patch\n'), ((3412, 3447), 'mock.patch', 'patch', (['"""jnpr.jsnapy.check.get_path"""'], {}), "('jnpr.jsnapy.check.get_path')\n", (3417, 3447), False, 'from mock import patch\n'), ((4141, 4176), 'mock.patch', 'patch', (['"""jnpr.jsnapy.check.get_path"""'], {}), "('jnpr.jsnapy.check.get_path')\n", (4146, 4176), False, 'from mock import patch\n'), ((4880, 4903), 'mock.patch', 'patch', (['"""logging.Logger"""'], {}), "('logging.Logger')\n", (4885, 4903), False, 'from mock import patch\n'), ((586, 598), 'jnpr.jsnapy.check.Comparator', 'Comparator', ([], {}), '()\n', (596, 598), False, 'from jnpr.jsnapy.check import Comparator\n'), ((871, 893), 'yaml.load', 'yaml.load', (['config_file'], {}), '(config_file)\n', (880, 893), False, 'import yaml\n'), ((1326, 1338), 'jnpr.jsnapy.check.Comparator', 'Comparator', ([], {}), '()\n', (1336, 1338), False, 'from jnpr.jsnapy.check import Comparator\n'), ((1611, 1633), 'yaml.load', 'yaml.load', (['config_file'], {}), '(config_file)\n', (1620, 1633), False, 'import yaml\n'), ((2063, 2075), 'jnpr.jsnapy.check.Comparator', 'Comparator', ([], {}), '()\n', (2073, 2075), False, 'from jnpr.jsnapy.check import Comparator\n'), ((2345, 2367), 
'yaml.load', 'yaml.load', (['config_file'], {}), '(config_file)\n', (2354, 2367), False, 'import yaml\n'), ((2794, 2806), 'jnpr.jsnapy.check.Comparator', 'Comparator', ([], {}), '()\n', (2804, 2806), False, 'from jnpr.jsnapy.check import Comparator\n'), ((3076, 3098), 'yaml.load', 'yaml.load', (['config_file'], {}), '(config_file)\n', (3085, 3098), False, 'import yaml\n'), ((3526, 3538), 'jnpr.jsnapy.check.Comparator', 'Comparator', ([], {}), '()\n', (3536, 3538), False, 'from jnpr.jsnapy.check import Comparator\n'), ((3809, 3831), 'yaml.load', 'yaml.load', (['config_file'], {}), '(config_file)\n', (3818, 3831), False, 'import yaml\n'), ((4260, 4272), 'jnpr.jsnapy.check.Comparator', 'Comparator', ([], {}), '()\n', (4270, 4272), False, 'from jnpr.jsnapy.check import Comparator\n'), ((4543, 4565), 'yaml.load', 'yaml.load', (['config_file'], {}), '(config_file)\n', (4552, 4565), False, 'import yaml\n'), ((632, 657), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (647, 657), False, 'import os\n'), ((813, 838), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (828, 838), False, 'import os\n'), ((1372, 1397), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1387, 1397), False, 'import os\n'), ((1553, 1578), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1568, 1578), False, 'import os\n'), ((2109, 2134), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2124, 2134), False, 'import os\n'), ((2287, 2312), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2302, 2312), False, 'import os\n'), ((2840, 2865), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2855, 2865), False, 'import os\n'), ((3018, 3043), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3033, 3043), False, 'import os\n'), ((3572, 3597), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), 
'(__file__)\n', (3587, 3597), False, 'import os\n'), ((3708, 3733), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3723, 3733), False, 'import os\n'), ((4306, 4331), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4321, 4331), False, 'import os\n'), ((4442, 4467), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4457, 4467), False, 'import os\n'), ((4967, 4988), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (4986, 4988), False, 'import unittest\n'), ((5053, 5089), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (5076, 5089), False, 'import unittest\n')] |
# Import the library
from czml import czml

# Start with an empty CZML document.
document = czml.CZML()

# Every CZML stream begins with a document packet declaring the version.
header = czml.CZMLPacket(id='document', name="czml", version='1.0')
document.packets.append(header)

# Add a second packet carrying a billboard.
rect_packet = czml.CZMLPacket(id='rect', name="visibility")
billboard = czml.Billboard(scale=0.7, show=True)
billboard.image = 'http://localhost/img.png'
billboard.color = {'rgba': [0, 255, 127, 55]}
rect_packet.billboard = billboard
document.packets.append(rect_packet)

# Serialise the CZML document to disk.
filename = "example.czml"
document.write(filename)
"czml.czml.Billboard",
"czml.czml.CZMLPacket",
"czml.czml.CZML"
] | [((74, 85), 'czml.czml.CZML', 'czml.CZML', ([], {}), '()\n', (83, 85), False, 'from czml import czml\n'), ((137, 195), 'czml.czml.CZMLPacket', 'czml.CZMLPacket', ([], {'id': '"""document"""', 'name': '"""czml"""', 'version': '"""1.0"""'}), "(id='document', name='czml', version='1.0')\n", (152, 195), False, 'from czml import czml\n'), ((274, 319), 'czml.czml.CZMLPacket', 'czml.CZMLPacket', ([], {'id': '"""rect"""', 'name': '"""visibility"""'}), "(id='rect', name='visibility')\n", (289, 319), False, 'from czml import czml\n'), ((325, 361), 'czml.czml.Billboard', 'czml.Billboard', ([], {'scale': '(0.7)', 'show': '(True)'}), '(scale=0.7, show=True)\n', (339, 361), False, 'from czml import czml\n')] |
import time
import math
import random
def gcd_naive(a, b):
    """Return the greatest common divisor of a and b by trial division.

    Fixes the original off-by-one: candidates must run up to and
    *including* min(a, b).  The original iterated range(2, max(a, b)),
    which excluded the upper bound and therefore missed cases where the
    gcd equals the smaller operand (e.g. gcd(6, 6) returned 1).
    """
    gcd = 1
    # No divisor larger than the smaller operand can divide both numbers.
    for i in range(2, min(a, b) + 1):
        if a % i == 0 and b % i == 0:
            gcd = i
    return gcd
def gcd_naive_2(a, b):
    """Trial division that only scans candidates up to min(a, b) // 2.

    Fixes two bugs in the original: the divisibility test checked
    ``a % b == 0`` instead of ``b % i == 0``, and the half-range shortcut
    is only valid after first testing whether min(a, b) itself divides
    both operands (the gcd equals min(a, b) exactly in that case).
    """
    lo, hi = min(a, b), max(a, b)
    # If the smaller operand divides the larger, it is the gcd itself.
    if lo != 0 and hi % lo == 0:
        return lo
    gcd = 1
    # Otherwise the gcd is at most lo // 2.
    for i in range(2, lo // 2 + 1):
        if a % i == 0 and b % i == 0:
            gcd = i
    return gcd
def gcd_naive_3(a, b):
    """Trial division scanning only up to sqrt(min(a, b)).

    Fixes the original, which tested ``a % b == 0`` instead of
    ``b % i == 0`` and ignored that divisors come in pairs: for every i
    dividing lo = min(a, b), the cofactor lo // i is also a divisor and
    may lie *above* sqrt(lo), so both must be considered.
    """
    lo = min(a, b)
    gcd = 1
    for i in range(1, int(math.sqrt(lo)) + 1):
        if lo % i == 0:
            # Both i and its cofactor lo // i are divisor candidates.
            for d in (lo // i, i):
                if d > gcd and a % d == 0 and b % d == 0:
                    gcd = d
    return gcd
def gcd_euclid(a, b):
    """Euclid's algorithm, written iteratively.

    Repeatedly replace the pair (a, b) with (b, a mod b) until the
    remainder is zero; the surviving value is the gcd.

    Example: gcd(15, 10) -> gcd(10, 5) -> gcd(5, 0) = 5.
    """
    while b != 0:
        a, b = b, a % b
    return a
def main():
    """Benchmark the three naive gcd implementations on random pairs."""
    for _ in range(10):
        a = random.randint(1, 10000)
        b = random.randint(1, 10000)
        # Time each implementation on the same (a, b) pair.
        for idx, gcd_fn in enumerate((gcd_naive, gcd_naive_2, gcd_naive_3), 1):
            started = time.time()
            gcd_fn(a, b)
            print("--- %s seconds %d---" % (time.time() - started, idx))
        print("-------------------------------------------------")


if __name__ == "__main__":
    main()
| [
"time.time",
"random.randint"
] | [((788, 812), 'random.randint', 'random.randint', (['(1)', '(10000)'], {}), '(1, 10000)\n', (802, 812), False, 'import random\n'), ((825, 849), 'random.randint', 'random.randint', (['(1)', '(10000)'], {}), '(1, 10000)\n', (839, 849), False, 'import random\n'), ((871, 882), 'time.time', 'time.time', ([], {}), '()\n', (880, 882), False, 'import time\n'), ((995, 1006), 'time.time', 'time.time', ([], {}), '()\n', (1004, 1006), False, 'import time\n'), ((1121, 1132), 'time.time', 'time.time', ([], {}), '()\n', (1130, 1132), False, 'import time\n'), ((946, 957), 'time.time', 'time.time', ([], {}), '()\n', (955, 957), False, 'import time\n'), ((1072, 1083), 'time.time', 'time.time', ([], {}), '()\n', (1081, 1083), False, 'import time\n'), ((1198, 1209), 'time.time', 'time.time', ([], {}), '()\n', (1207, 1209), False, 'import time\n')] |
#!/usr/bin/env python
"""
Bevington & Robinson's model of dual exponential decay
References::
[5] Bevington & Robinson (1992).
Data Reduction and Error Analysis for the Physical Sciences,
Second Edition, McGraw-Hill, Inc., New York.
"""
from numpy import exp, sqrt, vstack, array, asarray
def dual_exponential(t, A, B, C, tauA, tauB):
    """Compute dual exponential decay.

        y = A exp(-t/tauA) + B exp(-t/tauB) + C
    """
    t = asarray(t)
    term_a = A * exp(-t / tauA)
    term_b = B * exp(-t / tauB)
    # Same summation order as the original (C first) to keep float
    # rounding identical.
    return C + term_a + term_b
# data from Chapter 8 of [5]: each row is (time, decay counts).
data = array([[15, 775], [30, 479], [45, 380], [60, 302],
[75, 185], [90, 157], [105,137], [120, 119], [135, 110],
[150, 89], [165, 74], [180, 61], [195, 66], [210, 68],
[225, 48], [240, 54], [255, 51], [270, 46], [285, 55],
[300, 29], [315, 28], [330, 37], [345, 49], [360, 26],
[375, 35], [390, 29], [405, 31], [420, 24], [435, 25],
[450, 35], [465, 24], [480, 30], [495, 26], [510, 28],
[525, 21], [540, 18], [555, 20], [570, 27], [585, 17],
[600, 17], [615, 14], [630, 17], [645, 24], [660, 11],
[675, 22], [690, 17], [705, 12], [720, 10], [735, 13],
[750, 16], [765, 9], [780, 9], [795, 14], [810, 21],
[825, 17], [840, 13], [855, 12], [870, 18], [885, 10]])
# Split into coordinate columns.  The original used data[0] / data[1],
# which are the first two *rows* ([15, 775] and [30, 479]), not the time
# and count columns; slice the columns instead.
# Set uncertainty to sqrt(counts)
data = { 'x': data[:, 0], 'y': data[:, 1], 'dy': sqrt(data[:, 1]) }
#coeff = {'A': 1, 'B': 1, 'C': 1, 'tauA': 1, 'tauB': 1}
| [
"numpy.exp",
"numpy.array",
"numpy.sqrt",
"numpy.asarray"
] | [((557, 1255), 'numpy.array', 'array', (['[[15, 775], [30, 479], [45, 380], [60, 302], [75, 185], [90, 157], [105, \n 137], [120, 119], [135, 110], [150, 89], [165, 74], [180, 61], [195, 66\n ], [210, 68], [225, 48], [240, 54], [255, 51], [270, 46], [285, 55], [\n 300, 29], [315, 28], [330, 37], [345, 49], [360, 26], [375, 35], [390, \n 29], [405, 31], [420, 24], [435, 25], [450, 35], [465, 24], [480, 30],\n [495, 26], [510, 28], [525, 21], [540, 18], [555, 20], [570, 27], [585,\n 17], [600, 17], [615, 14], [630, 17], [645, 24], [660, 11], [675, 22],\n [690, 17], [705, 12], [720, 10], [735, 13], [750, 16], [765, 9], [780, \n 9], [795, 14], [810, 21], [825, 17], [840, 13], [855, 12], [870, 18], [\n 885, 10]]'], {}), '([[15, 775], [30, 479], [45, 380], [60, 302], [75, 185], [90, 157], [\n 105, 137], [120, 119], [135, 110], [150, 89], [165, 74], [180, 61], [\n 195, 66], [210, 68], [225, 48], [240, 54], [255, 51], [270, 46], [285, \n 55], [300, 29], [315, 28], [330, 37], [345, 49], [360, 26], [375, 35],\n [390, 29], [405, 31], [420, 24], [435, 25], [450, 35], [465, 24], [480,\n 30], [495, 26], [510, 28], [525, 21], [540, 18], [555, 20], [570, 27],\n [585, 17], [600, 17], [615, 14], [630, 17], [645, 24], [660, 11], [675,\n 22], [690, 17], [705, 12], [720, 10], [735, 13], [750, 16], [765, 9], [\n 780, 9], [795, 14], [810, 21], [825, 17], [840, 13], [855, 12], [870, \n 18], [885, 10]])\n', (562, 1255), False, 'from numpy import exp, sqrt, vstack, array, asarray\n'), ((461, 471), 'numpy.asarray', 'asarray', (['t'], {}), '(t)\n', (468, 471), False, 'from numpy import exp, sqrt, vstack, array, asarray\n'), ((1292, 1305), 'numpy.sqrt', 'sqrt', (['data[1]'], {}), '(data[1])\n', (1296, 1305), False, 'from numpy import exp, sqrt, vstack, array, asarray\n'), ((506, 520), 'numpy.exp', 'exp', (['(-t / tauB)'], {}), '(-t / tauB)\n', (509, 520), False, 'from numpy import exp, sqrt, vstack, array, asarray\n'), ((489, 503), 'numpy.exp', 'exp', (['(-t / tauA)'], {}), '(-t / 
tauA)\n', (492, 503), False, 'from numpy import exp, sqrt, vstack, array, asarray\n')] |
from flask import abort, jsonify, render_template, request
from app import app
from models.names import Names
import random
import os
@app.route('/names/<total>', methods=['GET'])
def homepage(total=10):
    """Return a JSON payload of *total* randomly sampled fantasy names.

    The total is split at random across the three races; the remainder
    after the first two draws goes to the last race.
    """
    count = int(total)
    names = dict()
    tiefling = True
    human = True
    elf = True
    # BUG FIX: the original keyed this dict with the boolean flags
    # themselves (names[tiefling] etc.); since all three flags are True
    # they collapsed onto the single key True and every race ended up
    # sampling from the elf list.  Key by race name instead, and close
    # the files with `with`.
    if tiefling:
        with open("./content/names/male/tiefling.txt") as f:
            names['tiefling'] = [line.strip() for line in f.readlines()]
    if human:
        with open("./content/names/male/human.txt") as f:
            names['human'] = [line.strip() for line in f.readlines()]
    if elf:
        with open("./content/names/male/elf.txt") as f:
            names['elf'] = [line.strip() for line in f.readlines()]
    race_count = []
    # Guard randrange(0), which raises ValueError for small totals.
    half = count // 2
    race_count.append(random.randrange(half) if half > 0 else 0)
    count = count - race_count[0]
    half = count // 2
    race_count.append(random.randrange(half) if half > 0 else 0)
    count = count - race_count[1]
    race_count.append(count)
    return jsonify(
        {
            'data': [
                {'race': 'Tiefling', 'names': random.sample(names['tiefling'], race_count[0])},
                {'race': 'Human', 'names': random.sample(names['human'], race_count[1])},
                {'race': 'Elf', 'names': random.sample(names['elf'], race_count[2])}
            ]
        }
    )
| [
"random.sample",
"app.app.route"
] | [((138, 182), 'app.app.route', 'app.route', (['"""/names/<total>"""'], {'methods': "['GET']"}), "('/names/<total>', methods=['GET'])\n", (147, 182), False, 'from app import app\n'), ((1214, 1259), 'random.sample', 'random.sample', (['names[tiefling]', 'race_count[0]'], {}), '(names[tiefling], race_count[0])\n', (1227, 1259), False, 'import random\n'), ((1305, 1347), 'random.sample', 'random.sample', (['names[human]', 'race_count[1]'], {}), '(names[human], race_count[1])\n', (1318, 1347), False, 'import random\n'), ((1391, 1431), 'random.sample', 'random.sample', (['names[elf]', 'race_count[2]'], {}), '(names[elf], race_count[2])\n', (1404, 1431), False, 'import random\n')] |
import cv2
import numpy as np
import face_recognition
import os
from datetime import datetime
# Load every reference image from the attendance folder; the file name
# (without extension) is used as the person's class label.
path = 'ImageAttendance'
images = []
classNames = []
myList = os.listdir(path)
print(myList)
for cl in myList:
    # cv2.imread returns None for unreadable files -- NOTE(review): not
    # checked here, a bad file would surface later in findEncodings.
    curImg = cv2.imread(f'{path}/{cl}')
    images.append(curImg)
    classNames.append(os.path.splitext(cl)[0])
print(classNames)
def findEncodings(images):
    """Return the first face encoding found in each BGR image."""
    encodings = []
    for image in images:
        # face_recognition expects RGB; OpenCV loads BGR.
        rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        encodings.append(face_recognition.face_encodings(rgb)[0])
    return encodings
def markAttendance(name):
    """Append *name* with a timestamp to Attendance.csv, unless already listed."""
    with open('Attendance.csv', 'r+') as f:
        # First CSV field of every existing line is a recorded name.
        already_seen = [entry.split(',')[0] for entry in f.readlines()]
        if name not in already_seen:
            now = datetime.now()
            dtString = now.strftime('%H:%M:%S')
            f.writelines(f'\n{name},{dtString}')
markAttendance('Elon')
# Pre-compute encodings for all reference faces once, before the loop.
encodeListKnown = findEncodings(images)
print("Encoding Complete")
cap = cv2.VideoCapture(0)
while True:
    success,img = cap.read()
    # Downscale to 1/4 size for faster detection; coordinates are scaled
    # back up by 4 below when drawing.
    imgS = cv2.resize(img,(0,0),None,fx=0.25, fy=0.25)
    imgS = cv2.cvtColor(imgS,cv2.COLOR_BGR2RGB)
    facesCurFrame = face_recognition.face_locations(imgS)
    encodesCurFrame = face_recognition.face_encodings(imgS,facesCurFrame)
    for encodeFace, faceLoc in zip(encodesCurFrame,facesCurFrame):
        matches = face_recognition.compare_faces(encodeListKnown,encodeFace)
        faceDis = face_recognition.face_distance(encodeListKnown,encodeFace)
        print(faceDis)
        # Smallest distance = best match; accept only below the 0.50
        # distance threshold, otherwise label the face Unknown.
        matchIndex = np.argmin(faceDis)
        if faceDis[matchIndex]< 0.50:
            name = classNames[matchIndex].upper()
            markAttendance(name)
        else: name = 'Unknown'
        #print(name)
        # face_locations returns (top, right, bottom, left).
        y1,x2,y2,x1 = faceLoc
        y1, x2, y2, x1 = y1*4,x2*4,y2*4,x1*4
        cv2.rectangle(img,(x1,y1),(x2,y2),(0,255,0),2)
        cv2.rectangle(img,(x1,y2-35),(x2,y2),(0,255,0),cv2.FILLED)
        cv2.putText(img,name,(x1+6,y2-6),cv2.FONT_HERSHEY_COMPLEX_SMALL,1,(255,255,255),1)
    cv2.imshow('Webcam',img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
| [
"cv2.rectangle",
"face_recognition.face_locations",
"os.listdir",
"os.path.splitext",
"cv2.imshow",
"cv2.putText",
"datetime.datetime.now",
"face_recognition.face_distance",
"cv2.waitKey",
"cv2.VideoCapture",
"cv2.cvtColor",
"face_recognition.face_encodings",
"face_recognition.compare_faces"... | [((157, 173), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (167, 173), False, 'import os\n'), ((1066, 1085), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1082, 1085), False, 'import cv2\n'), ((220, 246), 'cv2.imread', 'cv2.imread', (['f"""{path}/{cl}"""'], {}), "(f'{path}/{cl}')\n", (230, 246), False, 'import cv2\n'), ((1139, 1186), 'cv2.resize', 'cv2.resize', (['img', '(0, 0)', 'None'], {'fx': '(0.25)', 'fy': '(0.25)'}), '(img, (0, 0), None, fx=0.25, fy=0.25)\n', (1149, 1186), False, 'import cv2\n'), ((1194, 1231), 'cv2.cvtColor', 'cv2.cvtColor', (['imgS', 'cv2.COLOR_BGR2RGB'], {}), '(imgS, cv2.COLOR_BGR2RGB)\n', (1206, 1231), False, 'import cv2\n'), ((1252, 1289), 'face_recognition.face_locations', 'face_recognition.face_locations', (['imgS'], {}), '(imgS)\n', (1283, 1289), False, 'import face_recognition\n'), ((1312, 1364), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['imgS', 'facesCurFrame'], {}), '(imgS, facesCurFrame)\n', (1343, 1364), False, 'import face_recognition\n'), ((2124, 2149), 'cv2.imshow', 'cv2.imshow', (['"""Webcam"""', 'img'], {}), "('Webcam', img)\n", (2134, 2149), False, 'import cv2\n'), ((423, 459), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (435, 459), False, 'import cv2\n'), ((1450, 1509), 'face_recognition.compare_faces', 'face_recognition.compare_faces', (['encodeListKnown', 'encodeFace'], {}), '(encodeListKnown, encodeFace)\n', (1480, 1509), False, 'import face_recognition\n'), ((1527, 1586), 'face_recognition.face_distance', 'face_recognition.face_distance', (['encodeListKnown', 'encodeFace'], {}), '(encodeListKnown, encodeFace)\n', (1557, 1586), False, 'import face_recognition\n'), ((1630, 1648), 'numpy.argmin', 'np.argmin', (['faceDis'], {}), '(faceDis)\n', (1639, 1648), True, 'import numpy as np\n'), ((1914, 1968), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x1, y1)', '(x2, y2)', '(0, 
255, 0)', '(2)'], {}), '(img, (x1, y1), (x2, y2), (0, 255, 0), 2)\n', (1927, 1968), False, 'import cv2\n'), ((1969, 2037), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x1, y2 - 35)', '(x2, y2)', '(0, 255, 0)', 'cv2.FILLED'], {}), '(img, (x1, y2 - 35), (x2, y2), (0, 255, 0), cv2.FILLED)\n', (1982, 2037), False, 'import cv2\n'), ((2036, 2135), 'cv2.putText', 'cv2.putText', (['img', 'name', '(x1 + 6, y2 - 6)', 'cv2.FONT_HERSHEY_COMPLEX_SMALL', '(1)', '(255, 255, 255)', '(1)'], {}), '(img, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1,\n (255, 255, 255), 1)\n', (2047, 2135), False, 'import cv2\n'), ((295, 315), 'os.path.splitext', 'os.path.splitext', (['cl'], {}), '(cl)\n', (311, 315), False, 'import os\n'), ((476, 512), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['img'], {}), '(img)\n', (507, 512), False, 'import face_recognition\n'), ((856, 870), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (868, 870), False, 'from datetime import datetime\n'), ((2156, 2170), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2167, 2170), False, 'import cv2\n')] |
import os
import random
import shutil
# We need to change the dataset so that it is split into train/validation/test
# portions, and labelled with a single attribute (e.g. 'color').
attributes = ('color', 'number', 'shape', 'shading', 'all')
# Map each attribute to a function extracting its label from a directory
# name.  The names appear to be formatted as number-color-shading-shapes
# (e.g. "1-red-solid-diamonds") -- TODO confirm against the dataset.
attribute_label_extraction_fns = {
    'number': lambda dir: dir.split('-')[0],
    'color': lambda dir: dir.split('-')[1],
    'shading': lambda dir: dir.split('-')[2],
    'shape': lambda dir: dir.split('-')[3].rstrip('s'), # remove trailing 's'
    'all': lambda dir: dir
}
def copyfile(src_dir, dest_dir, file):
    """Copy *file* from *src_dir* into *dest_dir*, creating *dest_dir* if needed."""
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists() / os.makedirs() pair.
    os.makedirs(dest_dir, exist_ok=True)
    shutil.copyfile(os.path.join(src_dir, file), os.path.join(dest_dir, file))
def create_split_datasets(dataset_dir, target_dir, label_extract_fn,
        train_split_percent, validation_split_percent, test_split_percentage):
    """Split every labelled subdirectory of *dataset_dir* into shuffled
    train/validation/test folders under *target_dir*, grouped by the label
    that *label_extract_fn* derives from each directory name.
    """
    # Only the immediate subdirectories of dataset_dir are of interest;
    # the default keeps behaviour unchanged if dataset_dir is missing.
    subdirs = next(os.walk(dataset_dir), (None, [], None))[1]
    split_targets = (
        os.path.join(target_dir, 'train'),
        os.path.join(target_dir, 'validation'),
        os.path.join(target_dir, 'test'),
    )
    for entry in subdirs:
        source = os.path.join(dataset_dir, entry)
        files = os.listdir(source)
        random.shuffle(files)
        cut1 = int(len(files) * train_split_percent / 100)
        cut2 = int(len(files) * (train_split_percent + validation_split_percent) / 100)
        chunks = (files[:cut1], files[cut1:cut2], files[cut2:])
        label = label_extract_fn(entry)
        for target, chunk in zip(split_targets, chunks):
            for file in chunk:
                copyfile(source, os.path.join(target, label), file)
def create_single_attribute_test_dataset(dataset_dir, target_dir, label_extract_fn):
    """Copy every file of *dataset_dir* into *target_dir*/<label> folders,
    where the label is derived from each subdirectory name.
    """
    subdirs = next(os.walk(dataset_dir), (None, [], None))[1]
    for entry in subdirs:
        source = os.path.join(dataset_dir, entry)
        label = label_extract_fn(entry)
        for file in os.listdir(source):
            copyfile(source, os.path.join(target_dir, label), file)
file)
# Build one single-attribute dataset (70/20/10 split) plus a labelled test
# set for every attribute.
for attribute in attributes:
    extract = attribute_label_extraction_fns[attribute]
    create_split_datasets('data/train-v2/labelled', f'data/{attribute}',
                          extract, 70, 20, 10)
    create_single_attribute_test_dataset('data/test-v2',
                                         f'data/{attribute}-test', extract)

# Create an artificially small training dataset to observe overfitting
create_split_datasets('data/train-v2/labelled', 'data/shape-small',
                      attribute_label_extraction_fns['shape'], 1, 20, 79)
"os.path.exists",
"os.listdir",
"random.shuffle",
"os.makedirs",
"os.path.join",
"os.walk"
] | [((932, 952), 'os.walk', 'os.walk', (['dataset_dir'], {}), '(dataset_dir)\n', (939, 952), False, 'import os\n'), ((1022, 1055), 'os.path.join', 'os.path.join', (['target_dir', '"""train"""'], {}), "(target_dir, 'train')\n", (1034, 1055), False, 'import os\n'), ((1084, 1122), 'os.path.join', 'os.path.join', (['target_dir', '"""validation"""'], {}), "(target_dir, 'validation')\n", (1096, 1122), False, 'import os\n'), ((1145, 1177), 'os.path.join', 'os.path.join', (['target_dir', '"""test"""'], {}), "(target_dir, 'test')\n", (1157, 1177), False, 'import os\n'), ((2019, 2039), 'os.walk', 'os.walk', (['dataset_dir'], {}), '(dataset_dir)\n', (2026, 2039), False, 'import os\n'), ((573, 597), 'os.path.exists', 'os.path.exists', (['dest_dir'], {}), '(dest_dir)\n', (587, 597), False, 'import os\n'), ((607, 628), 'os.makedirs', 'os.makedirs', (['dest_dir'], {}), '(dest_dir)\n', (618, 628), False, 'import os\n'), ((649, 676), 'os.path.join', 'os.path.join', (['src_dir', 'file'], {}), '(src_dir, file)\n', (661, 676), False, 'import os\n'), ((678, 706), 'os.path.join', 'os.path.join', (['dest_dir', 'file'], {}), '(dest_dir, file)\n', (690, 706), False, 'import os\n'), ((1217, 1247), 'os.path.join', 'os.path.join', (['dataset_dir', 'dir'], {}), '(dataset_dir, dir)\n', (1229, 1247), False, 'import os\n'), ((1264, 1282), 'os.listdir', 'os.listdir', (['subdir'], {}), '(subdir)\n', (1274, 1282), False, 'import os\n'), ((1291, 1312), 'random.shuffle', 'random.shuffle', (['files'], {}), '(files)\n', (1305, 1312), False, 'import random\n'), ((2134, 2164), 'os.path.join', 'os.path.join', (['dataset_dir', 'dir'], {}), '(dataset_dir, dir)\n', (2146, 2164), False, 'import os\n'), ((1622, 1659), 'os.path.join', 'os.path.join', (['target_train_dir', 'label'], {}), '(target_train_dir, label)\n', (1634, 1659), False, 'import os\n'), ((1728, 1770), 'os.path.join', 'os.path.join', (['target_validation_dir', 'label'], {}), '(target_validation_dir, label)\n', (1740, 1770), False, 'import 
os\n'), ((1833, 1869), 'os.path.join', 'os.path.join', (['target_test_dir', 'label'], {}), '(target_test_dir, label)\n', (1845, 1869), False, 'import os\n'), ((2252, 2282), 'os.path.join', 'os.path.join', (['dataset_dir', 'dir'], {}), '(dataset_dir, dir)\n', (2264, 2282), False, 'import os\n'), ((2284, 2315), 'os.path.join', 'os.path.join', (['target_dir', 'label'], {}), '(target_dir, label)\n', (2296, 2315), False, 'import os\n')] |
#!/usr/bin/env python
# encoding: utf-8
# <NAME>, 2006-2008 (ita)
"""
Native compilation using gcj
highly experimental, and gcj sucks anyway
"""
import os, re
from waflib.Configure import conf
from waflib import TaskGen, Task, Utils, Node
from waflib.TaskGen import feature, before, after
from waflib.Tools import ccroot
def configure(conf):
    """Detect the gcj compiler and seed the shared-library build flags."""
    conf.find_program('gcj', var='GCJ')
    env = conf.env
    env.GCJLINK = env.GCJ
    env.GCJLINKFLAGS_gcj_shlib = ['-shared']
    env.GCJFLAGS_gcj_shlib = ['-fPIC']
class gcj(Task.Task):
    """Compile one .java source straight to a native object file with gcj."""
    run_str = '${GCJ} ${GCJFLAGS} -classpath ${CLASSPATH} -c -o ${TGT} ${SRC}'

class gcj_program(ccroot.link_task):
    """Link gcj object files into a native executable."""
    run_str = '${GCJLINK} ${GCJLINKFLAGS} ${SRC} -o ${TGT}'
    color = 'YELLOW'

class gcj_shlib(gcj_program):
    """Link gcj object files into a native shared library."""
    pass

# Environment variables each feature consumes via propagate_uselib_vars.
ccroot.USELIB_VARS['gcj'] = {'CLASSPATH', 'JAVACFLAGS', 'GCJFLAGS'}
ccroot.USELIB_VARS['gcj_program'] = {'CLASSPATH', 'JAVACFLAGS', 'GCJLINKFLAGS'}
ccroot.USELIB_VARS['gcj_shlib'] = {'CLASSPATH', 'JAVACFLAGS', 'GCJLINKFLAGS'}

# Reuse ccroot's link and uselib machinery for the gcj link features.
feature('gcj_program', 'gcj_shlib')(ccroot.apply_link)
feature('gcj_program', 'gcj_shlib')(ccroot.propagate_uselib_vars)
@feature('gcj')
@after('propagate_uselib_vars', 'apply_gcj')
def set_gcj_classpath(self):
    """Flatten CLASSPATH entries (strings or Nodes) into one path string."""
    entries = []
    for item in self.env.CLASSPATH:
        entries.append(item if isinstance(item, str) else item.abspath())
    # Trailing separator matches the original behaviour.
    self.env.CLASSPATH = os.pathsep.join(entries) + os.pathsep
@feature('gcj')
@before('apply_java')
def apply_gcj(self):
    """Collect the .java sources under self.srcdir and create one gcj task each."""
    if 'javac' in self.features:
        self.bld.fatal('feature gcj_native is not compatible with javac %r' % self)
    srcdir = getattr(self, 'srcdir', '')
    if isinstance(srcdir, Node.Node):
        srcdir = [srcdir]
    # Resolve every entry to a Node, accepting both Nodes and path strings.
    roots = []
    for entry in Utils.to_list(srcdir):
        if isinstance(entry, Node.Node):
            node = entry
        else:
            node = self.path.find_dir(entry)
        if not node:
            self.bld.fatal('Could not find the folder %s from %s' % (entry, self.path))
        roots.append(node)
    java_sources = []
    for root in roots:
        java_sources.extend(root.ant_glob('**/*.java'))
    if not getattr(self, 'gcjonce', None):
        for src in java_sources:
            self.create_compiled_task('gcj', src)
#############################################################
# gcj is still beta software
# and this workaround cannot work for shared object (-fPIC)
class fix_dummy(Task.Task):
    """Strip gcj's duplicated _ZGr8_$$_dummy symbol from an object file."""
    run_str = 'objcopy -L _ZGr8_$$_dummy ${SRC}'
    before = ['gcj_program', 'gcj_shlib']
@feature('gcj')
@after('apply_gcj')
def gcj_developers_like_duplicate_dummy_symbols(self):
    """Schedule a fix_dummy task for every gcj object when FIX_DUMMY is set."""
    if not self.env.FIX_DUMMY:
        return
    for tsk in self.compiled_tasks:
        if isinstance(tsk, gcj):
            self.create_task('fix_dummy', tsk.outputs[0])
| [
"waflib.TaskGen.after",
"waflib.TaskGen.feature",
"os.pathsep.join",
"waflib.TaskGen.before",
"waflib.Utils.to_list",
"waflib.Configure.conf.find_program"
] | [((1120, 1134), 'waflib.TaskGen.feature', 'feature', (['"""gcj"""'], {}), "('gcj')\n", (1127, 1134), False, 'from waflib.TaskGen import feature, before, after\n'), ((1136, 1179), 'waflib.TaskGen.after', 'after', (['"""propagate_uselib_vars"""', '"""apply_gcj"""'], {}), "('propagate_uselib_vars', 'apply_gcj')\n", (1141, 1179), False, 'from waflib.TaskGen import feature, before, after\n'), ((1344, 1358), 'waflib.TaskGen.feature', 'feature', (['"""gcj"""'], {}), "('gcj')\n", (1351, 1358), False, 'from waflib.TaskGen import feature, before, after\n'), ((1360, 1380), 'waflib.TaskGen.before', 'before', (['"""apply_java"""'], {}), "('apply_java')\n", (1366, 1380), False, 'from waflib.TaskGen import feature, before, after\n'), ((2265, 2279), 'waflib.TaskGen.feature', 'feature', (['"""gcj"""'], {}), "('gcj')\n", (2272, 2279), False, 'from waflib.TaskGen import feature, before, after\n'), ((2281, 2299), 'waflib.TaskGen.after', 'after', (['"""apply_gcj"""'], {}), "('apply_gcj')\n", (2286, 2299), False, 'from waflib.TaskGen import feature, before, after\n'), ((347, 382), 'waflib.Configure.conf.find_program', 'conf.find_program', (['"""gcj"""'], {'var': '"""GCJ"""'}), "('gcj', var='GCJ')\n", (364, 382), False, 'from waflib.Configure import conf\n'), ((997, 1032), 'waflib.TaskGen.feature', 'feature', (['"""gcj_program"""', '"""gcj_shlib"""'], {}), "('gcj_program', 'gcj_shlib')\n", (1004, 1032), False, 'from waflib.TaskGen import feature, before, after\n'), ((1052, 1087), 'waflib.TaskGen.feature', 'feature', (['"""gcj_program"""', '"""gcj_shlib"""'], {}), "('gcj_program', 'gcj_shlib')\n", (1059, 1087), False, 'from waflib.TaskGen import feature, before, after\n'), ((1625, 1646), 'waflib.Utils.to_list', 'Utils.to_list', (['srcdir'], {}), '(srcdir)\n', (1638, 1646), False, 'from waflib import TaskGen, Task, Utils, Node\n'), ((1308, 1328), 'os.pathsep.join', 'os.pathsep.join', (['lst'], {}), '(lst)\n', (1323, 1328), False, 'import os, re\n')] |
import tensorflow as tf
import os
# Alternate (smaller) test-dataset locations, kept for quick switching:
#LABEL_PATH = "/home/neo/projects/deepLearning/data/labelTest/"
#IMAGE_PATH = "/home/neo/projects/deepLearning/data/imageTest/"
LABEL_PATH = "/home/neo/projects/deepLearning/data/label/"
IMAGE_PATH = "/home/neo/projects/deepLearning/data/image/"
# Label files are named "<image-prefix>_emotion.txt"; images are "*.png".
LABEL_SUFIX = "_emotion"
LABEL_FORMAT = "*.txt"
IMAGE_FORMAT = "*.png"
CHAR_COLON = ":"
NUM_EPOCHS = 1
# Global list of [image-name-prefix, emotion-label] pairs, filled by generate_label_dict().
LABEL = []
#NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 5000
#NUM_EPOCHS_PER_DECAY = 350.0
NUM_EPOCHS_PER_DECAY = 35.0
NUM_CLASSES = 7
INITIAL_LEARNING_RATE = 0.1
LEARNING_RATE_DECAY_FACTOR = 0.1
MOVING_AVERAGE_DECAY = 0.9999
# Basic model parameters (TF 1.x flag API).
FLAGS = tf.app.flags.FLAGS
# tf.app.flags.DEFINE_integer('batch_size', 128, """Number of images to process in a batch.""")
tf.app.flags.DEFINE_integer('batch_size', 48, """Number of images to process in a batch.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False, """Whether to log device placement.""")
tf.app.flags.DEFINE_string('train_dir', '/home/neo/projects/deepLearning/log', """Directory where to write event logs and checkpoint.""")
tf.app.flags.DEFINE_string('eval_dir', '/home/neo/projects/deepLearning/log/test', """Directory where to write event logs.""")
tf.app.flags.DEFINE_string('checkpoint_dir', '/home/neo/projects/deepLearning/log', """Directory where to read model checkpoints.""")
# tf.app.flags.DEFINE_integer('max_steps', 1 000 000, """Number of batches to run.""")
tf.app.flags.DEFINE_integer('max_steps', 100, """Number of batches to run.""")
tf.app.flags.DEFINE_integer('num_examples', 1000, """Number of examples to run.""")
def search_label(filename):
    """Look up the emotion label whose key shares its first 8 characters with *filename*.

    Scans the module-level LABEL list of [name, label] pairs; returns the
    label of the first match, or None when nothing matches.
    """
    prefix = filename[:8]
    for key, value in LABEL:
        if key[:8] == prefix:
            return value
    return None
def rename_image_filename(image):
    """Resolve an image-filename tensor to its bare name (no directory, no extension).

    Runs the filename tensor inside a temporary session with queue runners,
    then strips the IMAGE_PATH prefix and the image extension.
    NOTE(review): relies on deprecated TF 1.x APIs (tf.initialize_all_variables,
    queue runners) — kept as-is.
    """
    with tf.Session() as ppro:
        # Initialize the variables define ("Read more about it !!")
        tf.initialize_all_variables().run()
        # Start to populate the label queue
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        # Execute the image section of the graph
        imageTensor = ppro.run([image])
        key = imageTensor[0]
        # Shutdown the queue coordinator.
        coord.request_stop()
        coord.join(threads)
    # Drop the leading IMAGE_PATH and the ".png" suffix ("*.png" minus the '*').
    return key[len(IMAGE_PATH):-len(IMAGE_FORMAT)+1]
def rename_label_filename(label):
    """Strip the '_emotion' suffix and the file extension from a label file name."""
    trailing = len(LABEL_SUFIX) + len(LABEL_FORMAT) - 1
    return label[:-trailing]
def generate_label_dict(path):
    """Walk *path* and register (basename, label) pairs in the global LABEL list.

    Each label file contains a single float value which is truncated to int.

    Args:
        path: root directory containing the label text files.

    Returns:
        The shared, module-level LABEL list.
    """
    for root, dirs, files in os.walk(path, True):
        for name in files:
            # Context manager guarantees the handle is closed; the
            # original leaked one open file per label file.
            with open(os.path.join(root, name), 'r') as f:
                LABEL.append([rename_label_filename(name), int(float(f.read()))])
    return LABEL
def generate_train_batch(label, image, batch_size=FLAGS.batch_size):
    """Batch (image, label) pairs through a shuffling queue.

    Args:
        label: scalar int32 label tensor for one example.
        image: preprocessed image tensor for one example.
        batch_size: examples per batch (defaults to the --batch_size flag).

    Returns:
        (images, labels) where labels is reshaped to [batch_size].
    """
    num_preprocess_threads = 1
    min_fraction_of_examples_in_queue = 0.5
    # The queue must hold a fraction of the epoch to provide good shuffling.
    min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN * min_fraction_of_examples_in_queue)
    images, label_batch = tf.train.shuffle_batch(
        [image, label],
        batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 3 * batch_size,
        # capacity=4,
        min_after_dequeue=min_queue_examples
        # min_after_dequeue=1
    )
    # Visualize the batched images (deprecated TF 1.x summary API).
    tf.image_summary('images', images)
    return images, tf.reshape(label_batch, [batch_size])
def read_input(image_queue):
    """Read one PNG from *image_queue*, look up its label, apply random
    distortions, and return a shuffled training batch.

    Returns:
        (images, labels) as produced by generate_train_batch().
    """
    # Read the images and generate the decode from PNG image
    imageReader = tf.WholeFileReader()
    image_key, image_value = imageReader.read(image_queue)
    image_decode = tf.image.decode_png(image_value, channels=1)
    image_decode = tf.cast(image_decode, tf.float32)
    # Preprocess data
    image_key = rename_image_filename(image_key) # rename image filename
    label = search_label(image_key)
    # Simple record container for the (key, label, image) triple.
    class Record(object):
        pass
    record = Record()
    # Instantiate object
    record.key = image_key
    record.label = tf.cast(label, tf.int32)
    record.image = image_decode
    # PROCESSING IMAGES
    # reshaped_image = tf.cast(record.image, tf.float32)
    # height = 245
    # width = 320
    height = 96
    width = 96
    # Image processing for training the network. Note the many random distortions applied to the image.
    # Randomly crop a [height, width] section of the image.
    distorted_image = tf.random_crop(record.image, [height, width, 1])
    # Randomly flip the image horizontally.
    distorted_image = tf.image.random_flip_left_right(distorted_image)
    # These operations are not commutative; consider randomizing their order.
    distorted_image = tf.image.random_brightness(distorted_image, max_delta=63)
    distorted_image = tf.image.random_contrast(distorted_image, lower=0.2, upper=1.8)
    # Subtract off the mean and divide by the variance of the pixels.
    float_image = tf.image.per_image_whitening(distorted_image)
    return generate_train_batch(record.label, float_image)
def get_input(label_path, label_format, image_path, image_format):
    """Build the label lookup, then create the input pipeline over matching images.

    Args:
        label_path: directory containing label text files.
        label_format: unused here; labels are discovered by walking label_path.
        image_path: directory containing the image files.
        image_format: glob pattern for the images (e.g. "*.png").
    """
    generate_label_dict(label_path)
    # NUM_EPOCHS bounds how many times the filename queue cycles.
    image_queue = tf.train.string_input_producer(tf.train.match_filenames_once(os.path.join(image_path, image_format)), num_epochs=NUM_EPOCHS)
    return read_input(image_queue)
"tensorflow.cast",
"tensorflow.train.shuffle_batch",
"os.walk",
"tensorflow.train.Coordinator",
"tensorflow.Session",
"tensorflow.random_crop",
"tensorflow.app.flags.DEFINE_boolean",
"tensorflow.initialize_all_variables",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.train.start_queue_runners... | [((793, 885), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""batch_size"""', '(48)', '"""Number of images to process in a batch."""'], {}), "('batch_size', 48,\n 'Number of images to process in a batch.')\n", (820, 885), True, 'import tensorflow as tf\n'), ((886, 984), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""log_device_placement"""', '(False)', '"""Whether to log device placement."""'], {}), "('log_device_placement', False,\n 'Whether to log device placement.')\n", (913, 984), True, 'import tensorflow as tf\n'), ((985, 1126), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""train_dir"""', '"""/home/neo/projects/deepLearning/log"""', '"""Directory where to write event logs and checkpoint."""'], {}), "('train_dir',\n '/home/neo/projects/deepLearning/log',\n 'Directory where to write event logs and checkpoint.')\n", (1011, 1126), True, 'import tensorflow as tf\n'), ((1123, 1253), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""eval_dir"""', '"""/home/neo/projects/deepLearning/log/test"""', '"""Directory where to write event logs."""'], {}), "('eval_dir',\n '/home/neo/projects/deepLearning/log/test',\n 'Directory where to write event logs.')\n", (1149, 1253), True, 'import tensorflow as tf\n'), ((1250, 1387), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""checkpoint_dir"""', '"""/home/neo/projects/deepLearning/log"""', '"""Directory where to read model checkpoints."""'], {}), "('checkpoint_dir',\n '/home/neo/projects/deepLearning/log',\n 'Directory where to read model checkpoints.')\n", (1276, 1387), True, 'import tensorflow as tf\n'), ((1471, 1545), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""max_steps"""', '(100)', '"""Number of batches to run."""'], {}), "('max_steps', 100, 'Number of batches to run.')\n", (1498, 1545), True, 'import tensorflow as 
tf\n'), ((1550, 1629), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_examples"""', '(1000)', '"""Number of examples to run."""'], {}), "('num_examples', 1000, 'Number of examples to run.')\n", (1577, 1629), True, 'import tensorflow as tf\n'), ((2501, 2520), 'os.walk', 'os.walk', (['path', '(True)'], {}), '(path, True)\n', (2508, 2520), False, 'import os\n'), ((2966, 3156), 'tensorflow.train.shuffle_batch', 'tf.train.shuffle_batch', (['[image, label]'], {'batch_size': 'batch_size', 'num_threads': 'num_preprocess_threads', 'capacity': '(min_queue_examples + 3 * batch_size)', 'min_after_dequeue': 'min_queue_examples'}), '([image, label], batch_size=batch_size, num_threads=\n num_preprocess_threads, capacity=min_queue_examples + 3 * batch_size,\n min_after_dequeue=min_queue_examples)\n', (2988, 3156), True, 'import tensorflow as tf\n'), ((3254, 3288), 'tensorflow.image_summary', 'tf.image_summary', (['"""images"""', 'images'], {}), "('images', images)\n", (3270, 3288), True, 'import tensorflow as tf\n'), ((3455, 3475), 'tensorflow.WholeFileReader', 'tf.WholeFileReader', ([], {}), '()\n', (3473, 3475), True, 'import tensorflow as tf\n'), ((3554, 3598), 'tensorflow.image.decode_png', 'tf.image.decode_png', (['image_value'], {'channels': '(1)'}), '(image_value, channels=1)\n', (3573, 3598), True, 'import tensorflow as tf\n'), ((3618, 3651), 'tensorflow.cast', 'tf.cast', (['image_decode', 'tf.float32'], {}), '(image_decode, tf.float32)\n', (3625, 3651), True, 'import tensorflow as tf\n'), ((3939, 3963), 'tensorflow.cast', 'tf.cast', (['label', 'tf.int32'], {}), '(label, tf.int32)\n', (3946, 3963), True, 'import tensorflow as tf\n'), ((4331, 4379), 'tensorflow.random_crop', 'tf.random_crop', (['record.image', '[height, width, 1]'], {}), '(record.image, [height, width, 1])\n', (4345, 4379), True, 'import tensorflow as tf\n'), ((4446, 4494), 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['distorted_image'], 
{}), '(distorted_image)\n', (4477, 4494), True, 'import tensorflow as tf\n'), ((4627, 4684), 'tensorflow.image.random_brightness', 'tf.image.random_brightness', (['distorted_image'], {'max_delta': '(63)'}), '(distorted_image, max_delta=63)\n', (4653, 4684), True, 'import tensorflow as tf\n'), ((4707, 4770), 'tensorflow.image.random_contrast', 'tf.image.random_contrast', (['distorted_image'], {'lower': '(0.2)', 'upper': '(1.8)'}), '(distorted_image, lower=0.2, upper=1.8)\n', (4731, 4770), True, 'import tensorflow as tf\n'), ((4859, 4904), 'tensorflow.image.per_image_whitening', 'tf.image.per_image_whitening', (['distorted_image'], {}), '(distorted_image)\n', (4887, 4904), True, 'import tensorflow as tf\n'), ((1795, 1807), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1805, 1807), True, 'import tensorflow as tf\n'), ((1989, 2011), 'tensorflow.train.Coordinator', 'tf.train.Coordinator', ([], {}), '()\n', (2009, 2011), True, 'import tensorflow as tf\n'), ((2030, 2071), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'coord': 'coord'}), '(coord=coord)\n', (2058, 2071), True, 'import tensorflow as tf\n'), ((3308, 3345), 'tensorflow.reshape', 'tf.reshape', (['label_batch', '[batch_size]'], {}), '(label_batch, [batch_size])\n', (3318, 3345), True, 'import tensorflow as tf\n'), ((5147, 5185), 'os.path.join', 'os.path.join', (['image_path', 'image_format'], {}), '(image_path, image_format)\n', (5159, 5185), False, 'import os\n'), ((1893, 1922), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (1920, 1922), True, 'import tensorflow as tf\n'), ((2570, 2594), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (2582, 2594), False, 'import os\n')] |
"""
This module contains the transformer class, that turns a given parse tree into a dice pool.
"""
from operator import add, sub, mul, truediv
from lark.visitors import Transformer, v_args
from .pools import BinaryOperatorPool, DiceTypePool, DiceDropPool, ConstPool
def keep_sum(rolls, keep, lowest=False):
    """Sum the *keep* highest rolls (or the *keep* lowest when ``lowest`` is True).

    Args:
        rolls: list of individual die results.
        keep: how many dice to keep.
        lowest: keep the lowest dice instead of the highest.

    Returns:
        The sum of the kept dice.
    """
    # sorted() instead of list.sort(): the original mutated the caller's list.
    ordered = sorted(rolls, reverse=lowest)
    return sum(ordered[len(ordered) - keep:])
@v_args(inline=True)
class DicePoolTransformer(Transformer):
    "Transforms a read statement into a dice pool that can be rolled multiple times."

    # pylint: disable=no-self-use, invalid-name

    def number(self, n):
        "Parse a NUMBER token into a plain int."
        return int(n)

    def constpool(self, n):
        "A constpool rule always wraps a plain number."
        return ConstPool(n)

    def dice(self, *values):
        "Read the dice count and side count; the count defaults to 1."
        if len(values) == 2:
            amount, sides = values
        else:
            amount, sides = 1, values[0]
        return (amount, sides)

    def pool(self, dice):
        "Build a plain dice pool from an (amount, sides) pair."
        return DiceTypePool(dice[1], dice[0])

    def drop_lowest(self, dice):
        "Build a pool that discards its single lowest die."
        return DiceDropPool(dice[1], dice[0], 1, False)

    def drop_highest(self, dice):
        "Build a pool that discards its single highest die."
        return DiceDropPool(dice[1], dice[0], 1, True)

    def keep_highest(self, dice, keep):
        "Build a pool that keeps only the `keep` highest dice."
        return DiceDropPool(dice[1], dice[0], keep, False)

    def keep_lowest(self, dice, keep):
        "Build a pool that keeps only the `keep` lowest dice."
        return DiceDropPool(dice[1], dice[0], keep, True)

    def add(self, p1, p2):
        "Combine two pools with addition."
        return BinaryOperatorPool(add, '+', p1, p2).simplify()

    def sub(self, p1, p2):
        "Combine two pools with subtraction."
        return BinaryOperatorPool(sub, '-', p1, p2).simplify()

    def mul(self, p1, p2):
        "Combine two pools with multiplication."
        return BinaryOperatorPool(mul, '*', p1, p2).simplify()

    def div(self, p1, p2):
        "Combine two pools with division."
        return BinaryOperatorPool(truediv, '/', p1, p2).simplify()

    def start(self, *n):
        "The grammar yields a single dice pool, or None for an empty statement."
        return n[0] if n else None
| [
"lark.visitors.v_args"
] | [((447, 466), 'lark.visitors.v_args', 'v_args', ([], {'inline': '(True)'}), '(inline=True)\n', (453, 466), False, 'from lark.visitors import Transformer, v_args\n')] |
import matplotlib.pyplot as plt
import numpy as np
import os
from federatedscope.core.message import Message
import logging
# Module-level logger for the gradient-ascent attack helpers.
logger = logging.getLogger(__name__)
def plot_target_loss(loss_list, outdir):
    """Plot the loss series on the target data and save it as a PNG.

    Args:
        loss_list: the list of loss values regarding the target data
        outdir: directory in which 'target_loss.png' is written
    """
    stacked = np.vstack(loss_list)
    logger.info(stacked.shape)
    plt.plot(stacked)
    plt.savefig(os.path.join(outdir, 'target_loss.png'))
    plt.close()
def sav_target_loss(loss_list, outdir):
    """Write the target-data loss series to '<outdir>/target_loss.txt' as CSV.

    Args:
        loss_list: the list of loss values regarding the target data
        outdir: directory in which 'target_loss.txt' is written
    """
    stacked = np.vstack(loss_list)
    out_path = os.path.join(outdir, 'target_loss.txt')
    np.savetxt(out_path, stacked.transpose(), delimiter=',')
def callback_funcs_for_finish(self, message: Message):
    """Handle the server's Finish message: sync the model, then run the
    gradient-ascent attacker's post-processing (loss plot + dump) if enabled.

    Args:
        message: the Finish message; ``content`` optionally carries the
            final model state to load into the trainer.
    """
    logger.info(
        "================= receiving Finish Message ============================"
    )
    # BUG FIX: use an identity check. `!= None` invokes __ne__ on the model
    # content, which is unsafe/ambiguous for array-like containers.
    if message.content is not None:
        self.trainer.update(message.content)
    if self.is_attacker and self._cfg.attack.attack_method.lower(
    ) == "gradascent":
        logger.info(
            "================= start attack post-processing ======================="
        )
        plot_target_loss(self.trainer.ctx.target_data_loss,
                         self.trainer.ctx.outdir)
        sav_target_loss(self.trainer.ctx.target_data_loss,
                        self.trainer.ctx.outdir)
def add_atk_method_to_Client_GradAscent(client_class):
    """Monkey-patch *client_class* with the gradient-ascent finish callback.

    Returns the same class object so the call can be used like a decorator.
    """
    client_class.callback_funcs_for_finish = callback_funcs_for_finish
    return client_class
| [
"logging.getLogger",
"matplotlib.pyplot.plot",
"os.path.join",
"matplotlib.pyplot.close",
"numpy.vstack"
] | [((134, 161), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (151, 161), False, 'import logging\n'), ((367, 387), 'numpy.vstack', 'np.vstack', (['loss_list'], {}), '(loss_list)\n', (376, 387), True, 'import numpy as np\n'), ((432, 458), 'matplotlib.pyplot.plot', 'plt.plot', (['target_data_loss'], {}), '(target_data_loss)\n', (440, 458), True, 'import matplotlib.pyplot as plt\n'), ((520, 531), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (529, 531), True, 'import matplotlib.pyplot as plt\n'), ((597, 617), 'numpy.vstack', 'np.vstack', (['loss_list'], {}), '(loss_list)\n', (606, 617), True, 'import numpy as np\n'), ((475, 514), 'os.path.join', 'os.path.join', (['outdir', '"""target_loss.png"""'], {}), "(outdir, 'target_loss.png')\n", (487, 514), False, 'import os\n'), ((633, 672), 'os.path.join', 'os.path.join', (['outdir', '"""target_loss.txt"""'], {}), "(outdir, 'target_loss.txt')\n", (645, 672), False, 'import os\n')] |
# #!/usr/bin python
# Copyright (c) 2021. <NAME>
# #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from dg_config import settingsfile
from dg_db.db_write import write_skews
from rich.console import Console
# Rich console for terminal output and the shared settings object.
console = Console()
settings = settingsfile.get_settings()
# ToDo: Add the correct data in here.
def get_skews(quarter):
    """Fetch the skew records for *quarter* and persist them via write_skews().

    Currently a stub. The intended record layout (see the sample kept below)
    is: (country code, two counts, then fifteen per-bucket fractions), and
    the intended call is ``write_skews(records_to_insert)``.

    Args:
        quarter: the reporting quarter to fetch skews for (unused for now).
    """
    # ToDo, actually get the skews.
    #
    # Sample of the expected data, e.g.:
    # records_to_insert = [
    #     ("UK", 492693, 2458665, 0.20, 0.02, 0.1, 0.08, 0.08, 0.08, 0.07, 0.07, 0.07, 0.07, 0.07, 0.1, 0.08, 0.08, 0.03),
    #     ... one tuple per country (IE, DE, AT, CH, FR, ES, IT, PT, NO, SE, FI, DK, NL, BE) ...
    # ]
    #
    # write_skews(records_to_insert)
    pass
"rich.console.Console",
"dg_config.settingsfile.get_settings"
] | [((743, 752), 'rich.console.Console', 'Console', ([], {}), '()\n', (750, 752), False, 'from rich.console import Console\n'), ((764, 791), 'dg_config.settingsfile.get_settings', 'settingsfile.get_settings', ([], {}), '()\n', (789, 791), False, 'from dg_config import settingsfile\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from slackpy import SlackLogger, LogLv, LOG_LEVELS
from argparse import ArgumentParser
__author__ = '<NAME>'
def main():
    """slackpy CLI entry point: post a message to Slack via an incoming web hook.

    The hook URL is read from the SLACK_INCOMING_WEB_HOOK environment
    variable; the HTTP outcome is reported by printing True/False.
    """
    parser = ArgumentParser(description='slackpy command line tool')
    parser.add_argument('-m',
                        '--message',
                        type=str,
                        required=True,
                        help='Message body.')
    parser.add_argument('-c',
                        '--channel',
                        required=False,
                        help='Slack Channel. channel must be started with # or @',
                        default=None)
    parser.add_argument('-t',
                        '--title',
                        type=str,
                        required=False,
                        help='Message title.',
                        default='Slack Notification')
    parser.add_argument('-n',
                        '--name',
                        type=str,
                        required=False,
                        help='Your bot\'s user name',
                        default='Logger')
    # The purpose of backward compatibility, old args (1, 2, 3)
    # are being retained.
    # DEBUG == 10, INFO == 20, WARNING == 30, ERROR == 40
    # BUG FIX: list.append() returns None (and would nest [1, 2, 3] inside
    # LOG_LEVELS as one element), so the original passed choices=None and
    # corrupted LOG_LEVELS. Concatenate to build the allowed values instead.
    parser.add_argument('-l',
                        '--level',
                        type=int,
                        default=20,
                        choices=LOG_LEVELS + [1, 2, 3])
    args = parser.parse_args()
    try:
        web_hook_url = os.environ["SLACK_INCOMING_WEB_HOOK"]
    except KeyError:
        print('ERROR: Please set a SLACK_INCOMING_WEB_HOOK variable in ' +
              'your environment.')
    else:
        client = SlackLogger(web_hook_url, args.channel, args.name)
        # Command Line mode can use only DEBUG level.
        client.set_log_level(LogLv.DEBUG)
        if args.level == LogLv.DEBUG:
            response = client.debug(args.message, args.title)
        elif args.level == LogLv.INFO:
            response = client.info(args.message, args.title)
        elif args.level == LogLv.WARN:
            response = client.warn(args.message, args.title)
        elif args.level == LogLv.ERROR:
            response = client.error(args.message, args.title)
        else:
            raise Exception("'Level' must be selected from among 10 to 40")
        if response.status_code == 200:
            print(True)
        else:
            print(False)
| [
"slackpy.SlackLogger",
"slackpy.LOG_LEVELS.append",
"argparse.ArgumentParser"
] | [((194, 249), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""slackpy command line tool"""'}), "(description='slackpy command line tool')\n", (208, 249), False, 'from argparse import ArgumentParser\n'), ((1749, 1799), 'slackpy.SlackLogger', 'SlackLogger', (['web_hook_url', 'args.channel', 'args.name'], {}), '(web_hook_url, args.channel, args.name)\n', (1760, 1799), False, 'from slackpy import SlackLogger, LogLv, LOG_LEVELS\n'), ((1456, 1484), 'slackpy.LOG_LEVELS.append', 'LOG_LEVELS.append', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1473, 1484), False, 'from slackpy import SlackLogger, LogLv, LOG_LEVELS\n')] |
#!/usr/bin/python3
"""Exit with a random status code in [0, 3], printing it first."""
import sys
import random

status = random.randint(0, 3)
print("Returning: " + str(status))
sys.exit(status)
| [
"random.randint",
"sys.exit"
] | [((52, 72), 'random.randint', 'random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (66, 72), False, 'import random\n'), ((107, 122), 'sys.exit', 'sys.exit', (['value'], {}), '(value)\n', (115, 122), False, 'import sys\n')] |
import requests, time
import math
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from music_db import *
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
# Request headers and the shared requests session object.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'
}
session = requests.session()
# Download one song.
def download(guid, songmid, cookie_dict):
    """Resolve the CDN url for *songmid* and download it to song/<songmid>.m4a.

    Args:
        guid: the ``pgv_pvid`` value taken from the QQ cookies.
        songmid: identifier of the song to download.
        cookie_dict: cookies captured from the QQ music site.

    Returns:
        True when the song was downloaded, False when no purl was issued.
    """
    url = 'https://u.y.qq.com/cgi-bin/musicu.fcg?loginUin=0&hostUin=0&format=jsonp&inCharset=utf8&outCharset=utf-8¬ice=0&platform=yqq&needNewCode=0&data=%7B%22req%22%3A%7B%22module%22%3A%22CDN.SrfCdnDispatchServer%22%2C%22method%22%3A%22GetCdnDispatch%22%2C%22param%22%3A%7B%22guid%22%3A%22'+guid+'%22%2C%22calltype%22%3A0%2C%22userip%22%3A%22%22%7D%7D%2C%22req_0%22%3A%7B%22module%22%3A%22vkey.GetVkeyServer%22%2C%22method%22%3A%22CgiGetVkey%22%2C%22param%22%3A%7B%22guid%22%3A%22'+guid+'%22%2C%22songmid%22%3A%5B%22'+songmid+'%22%5D%2C%22songtype%22%3A%5B0%5D%2C%22uin%22%3A%220%22%2C%22loginflag%22%3A1%2C%22platform%22%3A%2220%22%7D%7D%2C%22comm%22%3A%7B%22uin%22%3A0%2C%22format%22%3A%22json%22%2C%22ct%22%3A20%2C%22cv%22%3A0%7D%7D'
    r = session.get(url, headers=headers, cookies=cookie_dict)
    purl = r.json()['req_0']['data']['midurlinfo'][0]['purl']
    if purl:
        url = 'http://isure.stream.qqmusic.qq.com/%s' %(purl)
        print(url)
        r = requests.get(url, headers=headers)
        # Context manager guarantees the file handle is closed even when
        # the write fails (the original leaked it on error).
        with open('song/' + songmid + '.m4a', 'wb') as f:
            f.write(r.content)
        return True
    else:
        return False
# Use Selenium to obtain the cookies QQ music requires.
def getCookies():
    """Drive a Chrome browser through y.qq.com and return its cookies as a dict."""
    chrome_options = Options()
    # Browser flags; '--headless' would hide the browser during the run.
    # chrome_options.add_argument('--headless')
    path = '../venv/Scripts/chromedriver.exe'
    driver = webdriver.Chrome(executable_path=path, chrome_options=chrome_options)
    # driver = webdriver.Chrome(chrome_options=chrome_options)
    # Two URLs must be visited before the QQ site issues usable cookies.
    driver.get('https://y.qq.com/')
    time.sleep(5)
    # A singer page, used to obtain cookies — not every request URL sets them.
    url = 'https://y.qq.com/n/yqq/singer/0025NhlN2yWrP4.html'
    driver.get(url)
    time.sleep(5)
    cookie = driver.get_cookies()
    driver.quit()
    # Reshape the cookie list into a {name: value} dict.
    print(cookie)
    cookie_dict = {}
    for i in cookie:
        cookie_dict[i['name']] = i['value']
    return cookie_dict
# Fetch every song of one singer.
def get_singer_songs(singermid, cookie_dict):
    """Page through a singer's track list, store each song's metadata in the
    database and download the track."""
    # Fetch the singer's name and total song count.
    url = 'https://c.y.qq.com/v8/fcg-bin/fcg_v8_singer_track_cp.fcg?loginUin=0&hostUin=0&singermid=%s' \
          '&order=listen&begin=0&num=30&songstatus=1' % (singermid)
    r = session.get(url)
    # Singer name.
    song_singer = r.json()['data']['singer_name']
    # Total number of songs.
    songcount = r.json()['data']['total']
    # Derive the page count from the total (30 songs per page).
    pagecount = math.ceil(int(songcount) / 30)
    # Walk every page and collect its song information.
    for p in range(pagecount):
        url = 'https://c.y.qq.com/v8/fcg-bin/fcg_v8_singer_track_cp.fcg?loginUin=0&hostUin=0&singermid=%s' \
              '&order=listen&begin=%s&num=30&songstatus=1' % (singermid, p * 30)
        r = session.get(url)
        # Song records of this page.
        music_data = r.json()['data']['list']
        # songname - title, albumname - album, interval - duration,
        # songmid - song id used to download the audio file.
        # Collect the song info into song_dict for the DB insert.
        song_dict = {}
        for i in music_data:
            song_dict['song_name'] = i['musicData']['songname']
            song_dict['song_ablum'] = i['musicData']['albumname']
            song_dict['song_interval'] = i['musicData']['interval']
            song_dict['song_songmid'] = i['musicData']['songmid']
            song_dict['song_singer'] = song_singer
            insert_data(song_dict)
            # Download the song itself.
            guid = cookie_dict['pgv_pvid']
            info = download(guid, song_dict['song_songmid'], cookie_dict)
            # DB insert, parameter song_dict
            # if info:
            #     insert_data(song_dict)
            # Reset song_dict for the next track.
            song_dict = {}
# Crawl every singer under the current letter category.
def get_genre_singer(index, page_list, cookie_dict):
    """Iterate the given listing pages of letter category *index* and crawl
    every singer found on them."""
    for page in page_list:
        url = 'https://u.y.qq.com/cgi-bin/musicu.fcg?loginUin=0&hostUin=0&format=jsonp&inCharset=utf8&outCharset=utf-8¬ice=0&platform=yqq&needNewCode=0&data=%7B%22comm%22%3A%7B%22ct%22%3A24%2C%22cv%22%3A10000%7D%2C%22singerList%22%3A%7B%22module%22%3A%22Music.SingerListServer%22%2C%22method%22%3A%22get_singer_list%22%2C%22param%22%3A%7B%22area%22%3A-100%2C%22sex%22%3A-100%2C%22genre%22%3A-100%2C%22index%22%3A'+str(index)+'%2C%22sin%22%3A'+str((page-1)*80)+'%2C%22cur_page%22%3A'+str(page)+'%7D%7D%7D'
        r = session.get(url)
        # Crawl each singer on this page.
        for k in r.json()['singerList']['data']['singerlist']:
            singermid = k['singer_mid']
            get_singer_songs(singermid, cookie_dict)
# Single process, single thread.
def get_all_singer():
    """Crawl singers for every index category (letters A-Z plus '#')."""
    cookie_dict = getCookies()
    for index in range(1, 28):
        # Total number of singer-list pages in this letter category.
        url = 'https://u.y.qq.com/cgi-bin/musicu.fcg?loginUin=0&hostUin=0&format=jsonp&inCharset=utf8&outCharset=utf-8¬ice=0&platform=yqq&needNewCode=0&data=%7B%22comm%22%3A%7B%22ct%22%3A24%2C%22cv%22%3A10000%7D%2C%22singerList%22%3A%7B%22module%22%3A%22Music.SingerListServer%22%2C%22method%22%3A%22get_singer_list%22%2C%22param%22%3A%7B%22area%22%3A-100%2C%22sex%22%3A-100%2C%22genre%22%3A-100%2C%22index%22%3A' + str(index) + '%2C%22sin%22%3A0%2C%22cur_page%22%3A1%7D%7D%7D'
        r = session.get(url, headers=headers)
        total = r.json()['singerList']['data']['total']
        pagecount = math.ceil(int(total) / 80)
        page_list = [x for x in range(1, pagecount+1)]
        # Crawl every singer under the current letter.
        get_genre_singer(index, page_list, cookie_dict)
# Multi-thread variant.
def myThread(index, cookie_dict):
    """Crawl one letter category with a thread pool, each thread taking an
    equal slice of the listing pages."""
    # Number of singer-list pages for this letter category.
    url = 'https://u.y.qq.com/cgi-bin/musicu.fcg?loginUin=0&hostUin=0&format=jsonp&inCharset=utf8&outCharset=utf-8¬ice=0&platform=yqq&needNewCode=0&data=%7B%22comm%22%3A%7B%22ct%22%3A24%2C%22cv%22%3A10000%7D%2C%22singerList%22%3A%7B%22module%22%3A%22Music.SingerListServer%22%2C%22method%22%3A%22get_singer_list%22%2C%22param%22%3A%7B%22area%22%3A-100%2C%22sex%22%3A-100%2C%22genre%22%3A-100%2C%22index%22%3A' + str(
        index) + '%2C%22sin%22%3A0%2C%22cur_page%22%3A1%7D%7D%7D'
    r = session.get(url, headers=headers)
    total = r.json()['singerList']['data']['total']
    pagecount = math.ceil(int(total) / 80)
    page_list = [x for x in range(1, pagecount+1)]
    thread_number = 10
    # Split the pages of this category evenly across the threads.
    list_interval = math.ceil(len(page_list) / thread_number)
    # Thread pool executor.
    Thread = ThreadPoolExecutor(max_workers=thread_number)
    for i in range(thread_number):
        # Page range this thread is responsible for.
        start_num = list_interval * i
        if list_interval * (i + 1) <= len(page_list):
            end_num = list_interval * (i + 1)
        else:
            end_num = len(page_list)
        # Each thread crawls its own slice of singer-list pages.
        Thread.submit(get_genre_singer, index, page_list[start_num: end_num],cookie_dict)
# Multi-process variant.
def myProcess():
    """Spawn 27 processes, one per letter category A-Z plus '#', each of which
    fans out into threads via myThread()."""
    with ProcessPoolExecutor(max_workers=27) as executor:
        cookie_dict = getCookies()
        for index in range(1, 28):
            # 27 processes, one for each of A-Z and the '#' category.
            executor.submit(myThread, index, cookie_dict)
if __name__=='__main__':
    # Multi-process / multi-thread variant:
    # myProcess()
    # Single-process, single-thread variant:
    get_all_singer()
| [
"selenium.webdriver.chrome.options.Options",
"requests.session",
"concurrent.futures.ThreadPoolExecutor",
"selenium.webdriver.Chrome",
"time.sleep",
"requests.get",
"concurrent.futures.ProcessPoolExecutor"
] | [((412, 430), 'requests.session', 'requests.session', ([], {}), '()\n', (428, 430), False, 'import requests, time\n'), ((1739, 1748), 'selenium.webdriver.chrome.options.Options', 'Options', ([], {}), '()\n', (1746, 1748), False, 'from selenium.webdriver.chrome.options import Options\n'), ((1902, 1971), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'executable_path': 'path', 'chrome_options': 'chrome_options'}), '(executable_path=path, chrome_options=chrome_options)\n', (1918, 1971), False, 'from selenium import webdriver\n'), ((2105, 2118), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (2115, 2118), False, 'import requests, time\n'), ((2253, 2266), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (2263, 2266), False, 'import requests, time\n'), ((6620, 6665), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': 'thread_number'}), '(max_workers=thread_number)\n', (6638, 6665), False, 'from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor\n'), ((1495, 1529), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (1507, 1529), False, 'import requests, time\n'), ((7064, 7099), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', ([], {'max_workers': '(27)'}), '(max_workers=27)\n', (7083, 7099), False, 'from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor\n')] |
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""REST client for Drydock API."""
import logging
from drydock_provisioner import error as errors
class DrydockClient(object):
""""
A client for the Drydock API
:param DrydockSession session: A instance of DrydockSession to be used by this client
"""
    def __init__(self, session):
        """Store the HTTP session and create a module-scoped logger.

        :param DrydockSession session: A instance of DrydockSession to be used by this client
        """
        self.session = session
        self.logger = logging.getLogger(__name__)
def get_task_build_data(self, task_id):
"""Get the build data associated with ``task_id``.
:param str task_id: A UUID-formatted task ID
:return: A list of dictionaries resembling objects.builddata.BuildData
"""
endpoint = 'v1.0/tasks/{}/builddata'.format(task_id)
resp = self.session.get(endpoint)
self._check_response(resp)
return resp.json()
def get_node_build_data(self, nodename, latest=True):
"""Get the build data associated with ``nodename``.
:param str nodename: Name of the node
:param bool latest: Whether to request only the latest version of each data item
:return: A list of dictionaries resembling objects.builddata.BuildData
"""
endpoint = 'v1.0/nodes/{}/builddata?latest={}'.format(nodename, latest)
resp = self.session.get(endpoint)
self._check_response(resp)
return resp.json()
def get_nodes(self):
"""Get list of nodes in MaaS and their status."""
endpoint = 'v1.0/nodes'
resp = self.session.get(endpoint)
self._check_response(resp)
return resp.json()
def get_nodes_for_filter(self, design_ref, node_filter=None):
"""Get list of nodes that satisfy ``node_filter``.
:param str design_ref: str of a URL
:param NodeFilter node_filter (optional): A NodeFilter object.
:return: A list of node names based on the node_filter and design_ref.
"""
endpoint = 'v1.0/nodefilter'
body = {'node_filter': node_filter, 'design_ref': design_ref}
resp = self.session.post(endpoint, data=body)
self._check_response(resp)
return resp.json()
def get_tasks(self):
"""
Get a list of all the tasks, completed or running.
:return: List of string uuid task IDs
"""
endpoint = "v1.0/tasks"
resp = self.session.get(endpoint)
self._check_response(resp)
return resp.json()
def get_task(self,
task_id,
builddata=None,
subtaskerrors=None,
layers=None):
"""
Get the current description of a Drydock task
:param string task_id: The string uuid task id to query.
:param boolean builddata: If true will include the build_data in the response.
:param boolean subtaskerrors: If true it will add all the errors from the subtasks as a dictionary in
subtask_errors.
:param int layers: If -1 will include all subtasks, if a positive integer it will include that many layers
of subtasks.
:return: A dict representing the current state of the task.
"""
endpoint = "v1.0/tasks/%s" % (task_id)
query_params = []
if builddata:
query_params.append('builddata=true')
if subtaskerrors:
query_params.append('subtaskerrors=true')
if layers:
query_params.append('layers=%s' % layers)
if query_params:
endpoint = '%s?%s' % (endpoint, '&'.join(query_params))
resp = self.session.get(endpoint)
self._check_response(resp)
return resp.json()
def create_task(self, design_ref, task_action, node_filter=None):
"""
Create a new task in Drydock
:param string design_ref: A URI reference to the design documents for this task
:param string task_action: The action that should be executed
:param dict node_filter: A filter for narrowing the scope of the task. Valid fields are 'node_names',
'rack_names', 'node_tags'.
:return: The dictionary representation of the created task
"""
endpoint = 'v1.0/tasks'
task_dict = {
'action': task_action,
'design_ref': design_ref,
'node_filter': node_filter,
}
self.logger.debug("drydock_client is calling %s API: body is %s" %
(endpoint, str(task_dict)))
resp = self.session.post(endpoint, data=task_dict)
self._check_response(resp)
return resp.json()
def validate_design(self, href):
"""Get list of nodes in MaaS and their status.
:param href: A href that points to the design_ref.
:return: A dict containing the validation.
"""
endpoint = 'v1.0/validatedesign'
body = {'href': href}
resp = self.session.post(endpoint, data=body)
self._check_response(resp)
return resp.json()
def _check_response(self, resp):
if resp.status_code == 401:
raise errors.ClientUnauthorizedError(
"Unauthorized access to %s, include valid token." % resp.url)
elif resp.status_code == 403:
raise errors.ClientForbiddenError(
"Forbidden access to %s" % resp.url)
elif not resp.ok:
raise errors.ClientError(
"Error - received %d: %s" % (resp.status_code, resp.text),
code=resp.status_code)
| [
"logging.getLogger",
"drydock_provisioner.error.ClientError",
"drydock_provisioner.error.ClientUnauthorizedError",
"drydock_provisioner.error.ClientForbiddenError"
] | [((978, 1005), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (995, 1005), False, 'import logging\n'), ((5756, 5853), 'drydock_provisioner.error.ClientUnauthorizedError', 'errors.ClientUnauthorizedError', (["('Unauthorized access to %s, include valid token.' % resp.url)"], {}), "(\n 'Unauthorized access to %s, include valid token.' % resp.url)\n", (5786, 5853), True, 'from drydock_provisioner import error as errors\n'), ((5922, 5986), 'drydock_provisioner.error.ClientForbiddenError', 'errors.ClientForbiddenError', (["('Forbidden access to %s' % resp.url)"], {}), "('Forbidden access to %s' % resp.url)\n", (5949, 5986), True, 'from drydock_provisioner import error as errors\n'), ((6048, 6153), 'drydock_provisioner.error.ClientError', 'errors.ClientError', (["('Error - received %d: %s' % (resp.status_code, resp.text))"], {'code': 'resp.status_code'}), "('Error - received %d: %s' % (resp.status_code, resp.text\n ), code=resp.status_code)\n", (6066, 6153), True, 'from drydock_provisioner import error as errors\n')] |
import remote.libraries.LCD_1in44 as LCD_1in44
import remote.libraries.LCD_Config as LCD_Config
from PIL import Image, ImageDraw, ImageFont, ImageColor
class Display:
    """Drives the 1.44" LCD hat: shows this remote's ID plus a status message."""

    def __init__(self, remote_id):
        self.remote_id = remote_id
        self.LCD = LCD_1in44.LCD()
        self.LCD.LCD_Init(LCD_1in44.SCAN_DIR_DFT)  # SCAN_DIR_DFT = D2U_L2R
        self.LCD.LCD_Clear()
        # All drawing happens on one in-memory image buffer, which
        # refresh_display() pushes to the panel.
        font_path = "fonts/FreeMonoBold.ttf"
        self.small_font = ImageFont.truetype(font_path, 20)
        self.large_font = ImageFont.truetype(font_path, 35)
        self.image = Image.new('RGB', (self.LCD.width, self.LCD.height), 'WHITE')
        self.draw = ImageDraw.Draw(self.image)
        self.add_remote_id()
        self.refresh_display()

    def add_remote_id(self):
        """Draw the "ID <remote_id>" header in the top-left corner."""
        self.draw.text((0, 0), 'ID', fill='BLUE', font=self.small_font)
        self.draw.text((30, 0), f'{self.remote_id}', fill='BLUE', font=self.large_font)
        self.refresh_display()

    def add_message(self, message):
        """Replace the message area (below the header) with *message*."""
        self.clear_message()
        self.draw.text((0, 40), message, fill='BLUE', font=self.large_font)
        self.refresh_display()

    def refresh_display(self):
        """Push the in-memory image buffer to the panel."""
        self.LCD.LCD_ShowImage(self.image, 0, 0)

    def clear_message(self):
        """Blank everything below y=40 (the message area)."""
        self.draw.rectangle([(0, 40), (self.LCD.width, self.LCD.height)], fill='WHITE')
| [
"PIL.Image.new",
"PIL.ImageDraw.Draw",
"remote.libraries.LCD_1in44.LCD",
"PIL.ImageFont.truetype"
] | [((224, 239), 'remote.libraries.LCD_1in44.LCD', 'LCD_1in44.LCD', ([], {}), '()\n', (237, 239), True, 'import remote.libraries.LCD_1in44 as LCD_1in44\n'), ((451, 484), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['font_path', '(20)'], {}), '(font_path, 20)\n', (469, 484), False, 'from PIL import Image, ImageDraw, ImageFont, ImageColor\n'), ((511, 544), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['font_path', '(35)'], {}), '(font_path, 35)\n', (529, 544), False, 'from PIL import Image, ImageDraw, ImageFont, ImageColor\n'), ((603, 663), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(self.LCD.width, self.LCD.height)', '"""WHITE"""'], {}), "('RGB', (self.LCD.width, self.LCD.height), 'WHITE')\n", (612, 663), False, 'from PIL import Image, ImageDraw, ImageFont, ImageColor\n'), ((684, 710), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['self.image'], {}), '(self.image)\n', (698, 710), False, 'from PIL import Image, ImageDraw, ImageFont, ImageColor\n')] |
import numpy as np
import os, sys, re
import mpi4py
import time
from mpi4py import MPI
# Paths
# Placeholder machine/tuner names; hypredriver() re-derives EXPDIR from the
# MACHINE_NAME / TUNER_NAME environment variables at call time.
MACHINE_NAME = 'tmp'
TUNER_NAME = 'tmp'
# ROOTDIR is two directory levels above this file; EXCUDIR is the compiled
# hypre `ij` test binary that gets spawned via MPI.
ROOTDIR = os.path.abspath(os.path.join(os.path.realpath(__file__), os.pardir, os.pardir))
EXPDIR = os.path.abspath(os.path.join(ROOTDIR, "hypre-driver/exp", MACHINE_NAME + '/' + TUNER_NAME))
EXCUDIR = os.path.abspath(os.path.join(ROOTDIR, "hypre/src/test/ij"))
# print(EXPDIR)
# print(EXCUDIR)
# Penalty times (seconds) charged when a run fails or its log is unparsable.
max_setup_time = 1000
max_solve_time = 1000
comm = MPI.COMM_WORLD
# nprocs = comm.Get_size()
# print("ENTER HYPRE DRIVER, nprocs=", nprocs)
def execute(params, RUNDIR, niter = 1, max_iter = '1000', tol = '1e-8'):
    """Run the hypre ``ij`` driver for one parameter configuration and time it.

    Spawns ``Px*Py*Pz`` MPI ranks ``niter`` times, parses the setup/solve
    wall-clock times out of the driver's log file, and returns the mean total
    (setup + solve) runtime.  Runs whose log cannot be parsed are charged
    ``max_setup_time + max_solve_time``.

    :param params: record with the problem / AMG / topology fields
        (see the ``dtype`` declared in ``hypredriver``).
    :param str RUNDIR: directory the driver log file is written into.
    :param int niter: number of repeated runs to average over.
    :param str max_iter: solver iteration cap forwarded to the driver.
    :param str tol: convergence tolerance forwarded to the driver.
    :return: mean runtime over ``niter`` runs.
    """
    print("params: ", params)
    # extract arguments
    Problem = params['problem_name']; solver = params['solver']
    coeffs_c = params['coeffs_c']; coeffs_a = params['coeffs_a']
    nx = params['nx']; ny = params['ny']; nz = params['nz']
    Px = params['Px']; Py = params['Py']; Pz = params['Pz']
    strong_threshold = params['strong_threshold']
    trunc_factor = params['trunc_factor']
    P_max_elmts = params['P_max_elmts']
    coarsen_type = params['coarsen_type']
    relax_type = params['relax_type']
    smooth_type = params['smooth_type']
    smooth_num_levels = params['smooth_num_levels']
    interp_type = params['interp_type']
    agg_num_levels = params['agg_num_levels']
    nthreads = params['nthreads']
    npernode = params['npernode']

    # reshape for args
    NProc = Px*Py*Pz
    Size = "-n %d %d %d " % (nx, ny, nz)
    ProcTopo = "-P %d %d %d " % (Px, Py, Pz)
    StrThr = f"-th {strong_threshold} "
    TrunFac = f"-tr {trunc_factor} "
    PMax = "-Pmx %d " % P_max_elmts
    RelType = "-rlx %d " % relax_type
    SmooType = "-smtype %d " % smooth_type
    SmooLev = "-smlv %d " % smooth_num_levels
    InterType = "-interptype %d " % interp_type
    AggLev = "-agg_nl %d " % agg_num_levels
    CoarsTypes = {0:"-cljp", 1:"-ruge", 2:"-ruge2b", 3:"-ruge2b", 4:"-ruge3c", 6:"-falgout", 8:"-pmis", 10:"-hmis"}
    CoarsType = CoarsTypes[coarsen_type]
    # One log file per unique configuration, so repeated iterations of the
    # same configuration overwrite it.
    outputfilename = os.path.abspath(os.path.join(RUNDIR,f"ijoutput_{nx}_{ny}_{nz}_{Px}_{Py}_{Pz}_{strong_threshold}_{trunc_factor}_{P_max_elmts}_{coarsen_type}_{relax_type}_{smooth_type}_{smooth_num_levels}_{interp_type}_{agg_num_levels}"))
    myargs = Problem + Size + coeffs_c + coeffs_a + f"-solver {solver} " + ProcTopo + StrThr + TrunFac + PMax + RelType + SmooType + SmooLev + InterType + AggLev + CoarsType
    # argv list handed to MPI_Comm_spawn (myargs above is the same command
    # line as one string, kept for debugging/printing only)
    myargslist = [Problem, '-n', f'{nx}', f'{ny}', f'{nz}', coeffs_c, coeffs_a, '-solver', f'{solver}', '-P', f'{Px}', f'{Py}', f'{Pz}', '-th', f'{strong_threshold}', '-tr', f'{trunc_factor}',
                  '-Pmx', f'{P_max_elmts}', '-rlx', f'{relax_type}', '-smtype', f'{smooth_type}', '-smlv', f'{smooth_num_levels}', '-interptype', f'{interp_type}', '-agg_nl', f'{agg_num_levels}', CoarsType, '-logfile', outputfilename, '-max_iter', max_iter, '-tol', tol]
    # default params
    # myargslist = [Problem, '-n', f'{nx}', f'{ny}', f'{nz}', coeffs_c, coeffs_a, '-solver', f'{solver}',
    #               '-logfile', outputfilename, '-max_iter', max_iter, '-tol', tol]
    # print(f"myargslist: ", myargslist)

    def read_output(outputfilename):
        """Parse setup/solve wall-clock times out of the driver log; missing
        or failed entries fall back to the max_* penalty values."""
        setup_time = max_setup_time
        solve_time = max_solve_time
        with open(outputfilename,'r') as outputfile:
            while True:
                line = outputfile.readline()
                if not line:
                    break
                if 'ERROR' in line:
                    break
                if 'Setup phase times' in line:
                    outputfile.readline()
                    outputfile.readline()
                    setup_wallclocktime_str = outputfile.readline()
                    # raw strings: "\d" in a plain literal is an invalid
                    # escape sequence (SyntaxWarning / future error)
                    time_str = re.findall(r"\d+\.\d+", setup_wallclocktime_str)
                    if time_str:
                        setup_time = float(time_str[0])
                if 'Solve phase times' in line:
                    outputfile.readline()
                    outputfile.readline()
                    solve_wallclocktime_str = outputfile.readline()
                    time_str = re.findall(r"\d+\.\d+", solve_wallclocktime_str)
                    if time_str:
                        solve_time = float(time_str[0])
        runtime = setup_time + solve_time
        print("[----- runtime = %f -----]\n" % runtime)
        return runtime

    def v_parallel():
        """Spawn the driver niter times and average the parsed runtimes."""
        info = MPI.Info.Create()
        info.Set('env', 'OMP_NUM_THREADS=%d\n' %(nthreads))
        info.Set('npernode','%d'%(npernode)) # YL: npernode is deprecated in openmpi 4.0, but no other parameter (e.g. 'map-by') works
        print('exec ', EXCUDIR, 'args: ', myargslist, 'nproc', NProc)
        runtimes = []
        for i in range(niter):
            # os.system("rm -rf %s"%(outputfilename))
            comm = MPI.COMM_SELF.Spawn(EXCUDIR, args=myargslist, maxprocs=NProc,info=info)
            comm.Disconnect()
            time.sleep(2.0) # this gives new MPI_spawn more time to find the resource
            runtime = read_output(outputfilename)
            runtimes.append(runtime)
        return np.mean(runtimes)

    runtime = v_parallel()
    return runtime
def hypredriver(params, niter=1, JOBID: int=-1, max_iter = '1000', tol = '1e-8', budget=None):
    """Evaluate a list of hypre configurations and return their mean runtimes.

    Each configuration is run via :func:`execute` inside a job-specific
    scratch directory, which is removed afterwards.

    :param params: list of parameter tuples matching the ``dtype`` below.
    :param int niter: repetitions per configuration (averaged by execute).
    :param int JOBID: scratch-directory suffix; -1 means "use this PID".
    :param str max_iter: solver iteration cap (reset to '1000' when
        ``budget`` is given).
    :param str tol: solver tolerance (overridden when ``budget`` is given).
    :param budget: optional fidelity in [1, 10]; mapped to a tolerance so a
        larger budget demands a tighter solve.
    :return: list of mean runtimes, one per configuration.
    """
    global EXPDIR
    # Re-derive the experiment directory from the environment so the
    # module-level 'tmp' placeholders are replaced at call time.
    MACHINE_NAME = os.environ['MACHINE_NAME']
    TUNER_NAME = os.environ['TUNER_NAME']
    EXPDIR = os.path.abspath(os.path.join(ROOTDIR, "hypre-driver/exp", MACHINE_NAME + '/' + TUNER_NAME))

    # map budget to tol or max_iter, if budget is given
    if budget is not None:
        assert budget <= 10, "Max_budget = 10"
        assert budget >= 1, "Min_budget = 1"
        tol = str(10**(-14*budget/15 + 4/3))
        max_iter = '1000'
        # print(f"Hypredriver received budget, budget={budget}, tol={tol}")

    if JOBID == -1:  # -1 is the default value if jobid is not set from command line
        JOBID = os.getpid()
    RUNDIR = os.path.abspath(os.path.join(EXPDIR, str(JOBID)))
    os.makedirs(RUNDIR, exist_ok=True)

    # Field layout of one configuration tuple; must match what execute() reads.
    dtype = [("nx", int), ("ny", int), ("nz", int), ("coeffs_a", 'U10'), ("coeffs_c", 'U10'), ("problem_name", 'U10'), ("solver", int),
             ("Px", int), ("Py", int), ("Pz", int), ("strong_threshold", float),
             ("trunc_factor", float), ("P_max_elmts", int), ("coarsen_type", int), ("relax_type", int),
             ("smooth_type", int), ("smooth_num_levels", int), ("interp_type", int), ("agg_num_levels", int), ("nthreads", int), ("npernode", int)]
    params = np.array(params, dtype=dtype)

    times = []
    for param in params:
        print(f"Current param {param}, tol={tol}")
        time_cur = execute(param, RUNDIR, niter=niter, max_iter=max_iter, tol=tol)
        times.append(time_cur)
    # Best-effort cleanup of the scratch directory.
    os.system('rm -fr %s' % RUNDIR)
    return times
if __name__ == "__main__":
os.environ['MACHINE_NAME'] = 'cori'
os.environ['TUNER_NAME'] = 'GPTune'
params = [(60, 50, 80, '-a 0 0 0 ', '-c 1 1 1 ', '-laplacian ', 3, 2, 2, 2, 0.25, 0, 4, 10, 8, 6, 0, 6, 0, 1, 1),\
(60, 50, 80, '-a 0 0 0 ', '-c 1 1 1 ', '-laplacian ', 3, 2, 2, 2, 0.3, 0.2, 5, 10, 8, 6, 1, 6, 1, 1, 1)
]
times = hypredriver(params, niter=1)
print(times)
| [
"numpy.mean",
"os.makedirs",
"os.path.join",
"time.sleep",
"mpi4py.MPI.Info.Create",
"os.path.realpath",
"numpy.array",
"mpi4py.MPI.COMM_SELF.Spawn",
"os.getpid",
"os.system",
"re.findall"
] | [((251, 325), 'os.path.join', 'os.path.join', (['ROOTDIR', '"""hypre-driver/exp"""', "(MACHINE_NAME + '/' + TUNER_NAME)"], {}), "(ROOTDIR, 'hypre-driver/exp', MACHINE_NAME + '/' + TUNER_NAME)\n", (263, 325), False, 'import os, sys, re\n'), ((353, 395), 'os.path.join', 'os.path.join', (['ROOTDIR', '"""hypre/src/test/ij"""'], {}), "(ROOTDIR, 'hypre/src/test/ij')\n", (365, 395), False, 'import os, sys, re\n'), ((6044, 6085), 'os.makedirs', 'os.makedirs', (["('%s' % RUNDIR)"], {'exist_ok': '(True)'}), "('%s' % RUNDIR, exist_ok=True)\n", (6055, 6085), False, 'import os, sys, re\n'), ((6567, 6596), 'numpy.array', 'np.array', (['params'], {'dtype': 'dtype'}), '(params, dtype=dtype)\n', (6575, 6596), True, 'import numpy as np\n'), ((6810, 6841), 'os.system', 'os.system', (["('rm -fr %s' % RUNDIR)"], {}), "('rm -fr %s' % RUNDIR)\n", (6819, 6841), False, 'import os, sys, re\n'), ((175, 201), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (191, 201), False, 'import os, sys, re\n'), ((2071, 2284), 'os.path.join', 'os.path.join', (['RUNDIR', 'f"""ijoutput_{nx}_{ny}_{nz}_{Px}_{Py}_{Pz}_{strong_threshold}_{trunc_factor}_{P_max_elmts}_{coarsen_type}_{relax_type}_{smooth_type}_{smooth_num_levels}_{interp_type}_{agg_num_levels}"""'], {}), "(RUNDIR,\n f'ijoutput_{nx}_{ny}_{nz}_{Px}_{Py}_{Pz}_{strong_threshold}_{trunc_factor}_{P_max_elmts}_{coarsen_type}_{relax_type}_{smooth_type}_{smooth_num_levels}_{interp_type}_{agg_num_levels}'\n )\n", (2083, 2284), False, 'import os, sys, re\n'), ((4420, 4437), 'mpi4py.MPI.Info.Create', 'MPI.Info.Create', ([], {}), '()\n', (4435, 4437), False, 'from mpi4py import MPI\n'), ((5130, 5147), 'numpy.mean', 'np.mean', (['runtimes'], {}), '(runtimes)\n', (5137, 5147), True, 'import numpy as np\n'), ((5464, 5538), 'os.path.join', 'os.path.join', (['ROOTDIR', '"""hypre-driver/exp"""', "(MACHINE_NAME + '/' + TUNER_NAME)"], {}), "(ROOTDIR, 'hypre-driver/exp', MACHINE_NAME + '/' + TUNER_NAME)\n", (5476, 5538), False, 'import 
os, sys, re\n'), ((5965, 5976), 'os.getpid', 'os.getpid', ([], {}), '()\n', (5974, 5976), False, 'import os, sys, re\n'), ((4840, 4912), 'mpi4py.MPI.COMM_SELF.Spawn', 'MPI.COMM_SELF.Spawn', (['EXCUDIR'], {'args': 'myargslist', 'maxprocs': 'NProc', 'info': 'info'}), '(EXCUDIR, args=myargslist, maxprocs=NProc, info=info)\n', (4859, 4912), False, 'from mpi4py import MPI\n'), ((4954, 4969), 'time.sleep', 'time.sleep', (['(2.0)'], {}), '(2.0)\n', (4964, 4969), False, 'import time\n'), ((3755, 3805), 're.findall', 're.findall', (['"""\\\\d+\\\\.\\\\d+"""', 'setup_wallclocktime_str'], {}), "('\\\\d+\\\\.\\\\d+', setup_wallclocktime_str)\n", (3765, 3805), False, 'import os, sys, re\n'), ((4123, 4173), 're.findall', 're.findall', (['"""\\\\d+\\\\.\\\\d+"""', 'solve_wallclocktime_str'], {}), "('\\\\d+\\\\.\\\\d+', solve_wallclocktime_str)\n", (4133, 4173), False, 'import os, sys, re\n')] |
import logging
import os
from django.conf import settings
from django.core.management.base import BaseCommand
from core.management.commands import configure_logging
from core.models import Batch, OcrDump
# Route this command's log output through its own logging configuration/file.
configure_logging("dump_ocr_logging.config", "dump_ocr.log")
_logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """Create OCR dump files for any batch that does not yet have one."""

    help = "looks for batches that need to have ocr dump files created"

    def handle(self, *args, **options):
        dump_dir = settings.OCR_DUMP_STORAGE
        if not os.path.isdir(dump_dir):
            os.makedirs(dump_dir)
        for batch in Batch.objects.filter(ocr_dump__isnull=True):
            _logger.info("starting to dump ocr for %s", batch)
            # Defensive re-check: the queryset filter should already exclude
            # batches with a dump, but verify before doing the work.
            try:
                existing_dump = batch.ocr_dump
            except OcrDump.DoesNotExist:
                existing_dump = None
            if existing_dump:
                _logger.info("Ocr is already generated for %s", batch)
                continue
            dump = OcrDump.new_from_batch(batch)
            _logger.info("created ocr dump %s for %s", dump, batch)
| [
"logging.getLogger",
"os.makedirs",
"core.models.Batch.objects.filter",
"core.models.OcrDump.new_from_batch",
"os.path.isdir",
"core.management.commands.configure_logging"
] | [((208, 268), 'core.management.commands.configure_logging', 'configure_logging', (['"""dump_ocr_logging.config"""', '"""dump_ocr.log"""'], {}), "('dump_ocr_logging.config', 'dump_ocr.log')\n", (225, 268), False, 'from core.management.commands import configure_logging\n'), ((279, 306), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (296, 306), False, 'import logging\n'), ((580, 623), 'core.models.Batch.objects.filter', 'Batch.objects.filter', ([], {'ocr_dump__isnull': '(True)'}), '(ocr_dump__isnull=True)\n', (600, 623), False, 'from core.models import Batch, OcrDump\n'), ((465, 505), 'os.path.isdir', 'os.path.isdir', (['settings.OCR_DUMP_STORAGE'], {}), '(settings.OCR_DUMP_STORAGE)\n', (478, 505), False, 'import os\n'), ((519, 557), 'os.makedirs', 'os.makedirs', (['settings.OCR_DUMP_STORAGE'], {}), '(settings.OCR_DUMP_STORAGE)\n', (530, 557), False, 'import os\n'), ((926, 955), 'core.models.OcrDump.new_from_batch', 'OcrDump.new_from_batch', (['batch'], {}), '(batch)\n', (948, 955), False, 'from core.models import Batch, OcrDump\n')] |
import os
import codecs
from setuptools import setup, find_packages
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))


def read(*path):
    """Return the UTF-8 decoded contents of a file relative to PROJECT_ROOT."""
    target = os.path.join(PROJECT_ROOT, *path)
    with codecs.open(target, 'r', encoding='utf-8') as handle:
        return handle.read()
# Package metadata; long_description is sourced from README.rst so PyPI
# renders the same text as the repository front page.
setup(
    name='django-envsettings',
    version='1.1.0',
    author='<NAME>',
    author_email='<EMAIL>',
    url='http://github.com/evansd/django-envsettings',
    packages=find_packages(exclude=['tests*']),
    license='MIT',
    description="One-stop shop for configuring 12-factor Django apps",
    long_description=read('README.rst'),
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
)
| [
"os.path.dirname",
"codecs.open",
"setuptools.find_packages",
"os.path.join"
] | [((101, 126), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (116, 126), False, 'import os\n'), ((162, 195), 'os.path.join', 'os.path.join', (['PROJECT_ROOT', '*path'], {}), '(PROJECT_ROOT, *path)\n', (174, 195), False, 'import os\n'), ((205, 250), 'codecs.open', 'codecs.open', (['full_path', '"""r"""'], {'encoding': '"""utf-8"""'}), "(full_path, 'r', encoding='utf-8')\n", (216, 250), False, 'import codecs\n'), ((458, 491), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['tests*']"}), "(exclude=['tests*'])\n", (471, 491), False, 'from setuptools import setup, find_packages\n')] |
#!/usr/bin/env python
"""A simple command line web client. I know curl is the best, but doing it in
python has much more fun!
Synopsis:
web_client.py [--method METHOD] url
Copyright 2017 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import argparse
import logging
import pprint
import sys
import subprocess
import requests
# Module-level loggers: _log for diagnostics, _plain_logger for bare
# user-facing output.
_log = logging.getLogger()
_plain_logger = None  # will be created in main()
class App(object):
    """Fetch a URL with the configured HTTP method and print the response."""

    def __init__(self, url, method="get"):
        self.url = url
        self.method = method

    def run(self):
        """Perform the request, log the headers and print the body."""
        response = getattr(requests, self.method)(self.url)
        _log.info("Headers:")
        pprint.pprint(response.headers)
        _log.info("Body:")
        print(response.text)
#
# logger-related functions
#
def setup_logger(
    logger,
    debug=False,
    stdout=False,
    log_to_file=None,
    add_thread_id=False,
    logging_level=None,
    use_path=False,
):
    """Configure *logger* with a stream handler and, optionally, a rotating
    file handler.

    :param logger: the logger to (re)configure; existing handlers are removed.
    :param debug: use DEBUG level when logging_level is not given.
    :param stdout: send stream output to stdout instead of stderr.
    :param log_to_file: optional file path; adds a midnight-rotating handler.
    :param add_thread_id: include the thread id in the format string.
    :param logging_level: explicit level; overrides *debug*.
    :param use_path: log the file path instead of the logger name.
    """
    if logging_level is None:
        logging_level = logging.DEBUG if debug else logging.INFO
    formatter = create_logging_formatter(
        add_thread_id=add_thread_id,
        use_path=use_path
    )
    hdlr = create_stream_handler(
        formatter=formatter,
        stdout=stdout,
    )
    logger.handlers = []  # clear the existing handlers
    logger.addHandler(hdlr)
    logger.setLevel(logging_level)
    if log_to_file is not None:
        # Bug fix: neither `os` nor TimedRotatingFileHandler was imported at
        # module level, so this branch always raised NameError.  Import
        # locally to keep the fix self-contained.
        import os
        from logging.handlers import TimedRotatingFileHandler

        log_dir = os.path.dirname(log_to_file)
        if log_dir:
            # Replaces shelling out to `mkdir -p` via subprocess.
            os.makedirs(log_dir, exist_ok=True)
        hdlr = TimedRotatingFileHandler(
            log_to_file,
            when="midnight",
            interval=1,
        )
        hdlr.setFormatter(formatter)
        logger.addHandler(hdlr)
def create_plain_logger(logger_name, stdout=True):
    """Build a non-propagating INFO-level logger that emits bare messages."""
    plain_logger = logging.getLogger(logger_name)
    plain_logger.propagate = False
    plain_logger.setLevel(logging.INFO)
    stream = sys.stdout if stdout else sys.stderr
    handler = logging.StreamHandler(stream=stream)
    # The date format is irrelevant because only the message is rendered.
    handler.setFormatter(logging.Formatter(fmt="%(message)s", datefmt="%Y"))
    plain_logger.addHandler(handler)
    return plain_logger
def create_logging_formatter(add_thread_id=False, use_path=False):
    """Assemble the detailed formatter used by the stream/file handlers."""
    parts = ["%(asctime)s"]
    if add_thread_id:
        parts.append(" thread:%(thread)s")
    parts.append(" %(levelname)s ")
    parts.append("%(pathname)s" if use_path else "%(name)s")
    parts.append(":%(lineno)s: %(message)s")
    return logging.Formatter(
        fmt="".join(parts),
        datefmt="%Y-%m-%d %H:%M:%S"
    )
def create_stream_handler(formatter=None, stdout=False):
    """Create a StreamHandler on stderr (default) or stdout with *formatter*."""
    if formatter is None:
        formatter = create_logging_formatter()
    target = sys.stdout if stdout else sys.stderr
    handler = logging.StreamHandler(stream=target)
    handler.setFormatter(formatter)
    return handler
#
# end of logger-related functions
#
def parse_args(cmd_args):
    """Parse the command line arguments (everything after the program name)."""
    description = """
    A simple command line web client.
    """
    parser = argparse.ArgumentParser(
        description=description,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument("--debug", action="store_true")
    parser.add_argument("url")
    parser.add_argument("--method", default="get")
    return parser.parse_args(cmd_args)
def main():
    """Entry point: parse argv, configure logging, run the client."""
    global _plain_logger
    args = parse_args(sys.argv[1:])
    setup_logger(_log, debug=args.debug)
    _plain_logger = create_plain_logger("PLAIN")
    App(args.url, method=args.method).run()
# Script entry point: only run the client when executed directly.
if __name__ == '__main__':
    main()
| [
"logging.getLogger",
"logging.StreamHandler",
"argparse.ArgumentParser",
"subprocess.check_call",
"logging.Formatter",
"pprint.pprint"
] | [((1008, 1027), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1025, 1027), False, 'import logging\n'), ((2499, 2529), 'logging.getLogger', 'logging.getLogger', (['logger_name'], {}), '(logger_name)\n', (2516, 2529), False, 'import logging\n'), ((2696, 2729), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'out'}), '(stream=out)\n', (2717, 2729), False, 'import logging\n'), ((3308, 3370), 'logging.Formatter', 'logging.Formatter', ([], {'fmt': 'format_str', 'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "(fmt=format_str, datefmt='%Y-%m-%d %H:%M:%S')\n", (3325, 3370), False, 'import logging\n'), ((3647, 3683), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'stream'}), '(stream=stream)\n', (3668, 3683), False, 'import logging\n'), ((3876, 3983), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'description', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), '(description=description, formatter_class=argparse.\n RawDescriptionHelpFormatter)\n', (3899, 3983), False, 'import argparse\n'), ((1336, 1360), 'pprint.pprint', 'pprint.pprint', (['r.headers'], {}), '(r.headers)\n', (1349, 1360), False, 'import pprint\n'), ((2180, 2227), 'subprocess.check_call', 'subprocess.check_call', (['init_command'], {'shell': '(True)'}), '(init_command, shell=True)\n', (2201, 2227), False, 'import subprocess\n'), ((2764, 2814), 'logging.Formatter', 'logging.Formatter', ([], {'fmt': '"""%(message)s"""', 'datefmt': '"""%Y"""'}), "(fmt='%(message)s', datefmt='%Y')\n", (2781, 2814), False, 'import logging\n')] |
# -*- coding: utf-8 -*-
from abc import ABCMeta, abstractmethod
from typing import Dict, List, Tuple

from langml.tensor_typing import Models
from langml.tokenizer import Tokenizer
from langml.plm import load_albert, load_bert
from langml.log import info
class Template:
    """A prompt template together with its verbalizer.

    Encodes the template tokens to vocabulary ids once, and builds the
    bidirectional mapping between labels and their verbalizer token ids.
    """

    def __init__(self, template: List[str], label_tokens_map: Dict[str, List[str]], tokenizer: Tokenizer) -> None:
        """
        Args:
            template: list of template tokens (already tokenized).
            label_tokens_map: verbalizer; maps each label to the vocabulary
                tokens that stand for it.
            tokenizer: tokenizer used to convert tokens to ids.
        """
        self.tokenizer = tokenizer
        self.unk_id = self.tokenizer.token_to_id(self.tokenizer.special_tokens.UNK)
        self.template_ids = self.encode_template(template)
        self.label2tokens, self.id2label = self.encode_label_tokens_map(label_tokens_map)
        info(f'template ids: {self.template_ids}')

    def __len__(self) -> int:
        """Return the number of tokens in the template."""
        return len(self.template_ids)

    def encode_template(self, template: List[str]) -> List[int]:
        """Convert template tokens to vocabulary ids.

        Note: the argument is a list of tokens, not a raw string (the
        annotation previously said ``str``).
        """
        return [self.tokenizer.token_to_id(token) for token in template]

    def encode_label_tokens_map(self, label_tokens_map: Dict[str, List[str]]) -> Tuple[Dict[str, List[int]], Dict[int, str]]:
        """Encode the verbalizer into both directions.

        Returns:
            A pair ``(label2ids, id2label)``: label -> verbalizer token ids,
            and token id -> label.

        Raises:
            AssertionError: if any verbalizer token is out of vocabulary.
        """
        label2ids, id2label = {}, {}
        for label, tokens in label_tokens_map.items():
            token_ids = []
            for token in tokens:
                token_id = self.tokenizer.token_to_id(token)
                assert token_id != self.unk_id, f'unknown token {token}! please specify a token from vocabulary'
                token_ids.append(token_id)
                id2label[token_id] = label
            label2ids[label] = token_ids
        return label2ids, id2label

    def decode_label(self, idx: int, default='<UNK>') -> str:
        """Map a predicted token id back to its label (``default`` if unknown)."""
        return self.id2label.get(idx, default)
class BasePromptModel(metaclass=ABCMeta):
    """Base class for prompt models backed by a pretrained LM with an MLM head."""

    def __init__(self,
                 plm_backbone: str,
                 plm_config_path: str,
                 plm_ckpt_path: str,
                 template: Template,
                 learning_rate: float = 1e-5,
                 freeze_plm: bool = True) -> None:
        """ Initialize Prompt Model
        Args:
            - plm_backbone: str, backbone of pretrained language model
            - plm_config_path: str, configure path of pretrained language model
            - plm_ckpt_path: str, checkpoint path of pretrained language model
            - template: Template object (template plus verbalizer)
            - learning_rate: float, learning rate
            - freeze_plm: bool, whether to freeze pretrained language model weights
        """
        self.model = None
        self.freeze_plm = freeze_plm
        # Both loaders share the same signature; pick one by backbone name.
        load_fn = load_albert if plm_backbone == 'albert' else load_bert
        _, self.plm, self.lazy_restore_callback = load_fn(
            config_path=plm_config_path,
            checkpoint_path=plm_ckpt_path,
            pretraining=True,
            with_mlm=True,
            with_nsp=False,
            lazy_restore=True)
        self.template = template
        self.learning_rate = learning_rate

    @abstractmethod
    def build_model(self) -> Models:
        raise NotImplementedError
class BasePromptTask(metaclass=ABCMeta):
    """Base class for prompt tasks: wires a prompt model to a tokenizer."""

    def __init__(self, prompt_model: BasePromptModel, tokenizer: Tokenizer) -> None:
        self.prompt_model = prompt_model
        self.template = prompt_model.template
        self.tokenizer = tokenizer
        # Vocabulary id of the [MASK] token, filled in by the template slot.
        self.mask_id = self.tokenizer.token_to_id(self.tokenizer.special_tokens.MASK)
        self.model = self.prompt_model.build_model()

    @abstractmethod
    def fit(self):
        raise NotImplementedError

    @abstractmethod
    def predict(self):
        raise NotImplementedError
class BaseDataGenerator(metaclass=ABCMeta):
    """Endless data feeder: subclasses provide one epoch via make_iter."""

    @abstractmethod
    def make_iter(self, random: bool = False):
        raise NotImplementedError

    @abstractmethod
    def __len__(self):
        raise NotImplementedError

    def __call__(self, random: bool = False):
        """Yield (inputs, labels) pairs forever, restarting after each epoch."""
        while True:
            for batch_inputs, batch_labels in self.make_iter(random=random):
                yield batch_inputs, batch_labels
| [
"langml.log.info",
"langml.plm.load_bert",
"langml.plm.load_albert"
] | [((659, 701), 'langml.log.info', 'info', (['f"""template ids: {self.template_ids}"""'], {}), "(f'template ids: {self.template_ids}')\n", (663, 701), False, 'from langml.log import info\n'), ((2652, 2795), 'langml.plm.load_albert', 'load_albert', ([], {'config_path': 'plm_config_path', 'checkpoint_path': 'plm_ckpt_path', 'pretraining': '(True)', 'with_mlm': '(True)', 'with_nsp': '(False)', 'lazy_restore': '(True)'}), '(config_path=plm_config_path, checkpoint_path=plm_ckpt_path,\n pretraining=True, with_mlm=True, with_nsp=False, lazy_restore=True)\n', (2663, 2795), False, 'from langml.plm import load_albert, load_bert\n'), ((2957, 3098), 'langml.plm.load_bert', 'load_bert', ([], {'config_path': 'plm_config_path', 'checkpoint_path': 'plm_ckpt_path', 'pretraining': '(True)', 'with_mlm': '(True)', 'with_nsp': '(False)', 'lazy_restore': '(True)'}), '(config_path=plm_config_path, checkpoint_path=plm_ckpt_path,\n pretraining=True, with_mlm=True, with_nsp=False, lazy_restore=True)\n', (2966, 3098), False, 'from langml.plm import load_albert, load_bert\n')] |