code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
from flask import Blueprint, jsonify, redirect, request
import pydantic
from conda_store_server import api, schema, utils
from conda_store_server.server.utils import get_conda_store, get_auth
from conda_store_server.server.auth import Permissions
# Blueprint collecting all /api/v1 REST endpoints.
app_api = Blueprint("api", __name__)
@app_api.route("/api/v1/")
def api_status():
    """Health-check endpoint: report that the API is up."""
    payload = {"status": "ok"}
    return jsonify(payload)
@app_api.route("/api/v1/namespace/")
def api_list_namespaces():
    """List the namespaces visible to the requesting identity."""
    conda_store = get_conda_store()
    auth = get_auth()
    visible = auth.filter_namespaces(api.list_namespaces(conda_store.db))
    payload = [schema.Namespace.from_orm(ns).dict() for ns in visible.all()]
    return jsonify(payload)
@app_api.route("/api/v1/environment/")
def api_list_environments():
    """List visible environments (the heavy "build" field is omitted)."""
    conda_store = get_conda_store()
    auth = get_auth()
    visible = auth.filter_environments(api.list_environments(conda_store.db))
    payload = [
        schema.Environment.from_orm(env).dict(exclude={"build"})
        for env in visible.all()
    ]
    return jsonify(payload)
@app_api.route("/api/v1/environment/<namespace>/<name>/", methods=["GET"])
def api_get_environment(namespace, name):
    """Fetch one environment by namespace/name; 404 when it is absent."""
    conda_store = get_conda_store()
    auth = get_auth()
    auth.authorize_request(
        f"{namespace}/{name}", {Permissions.ENVIRONMENT_READ}, require=True
    )
    env = api.get_environment(conda_store.db, namespace=namespace, name=name)
    if env is None:
        error = {"status": "error", "error": "environment does not exist"}
        return jsonify(error), 404
    return jsonify(schema.Environment.from_orm(env).dict())
@app_api.route("/api/v1/environment/<namespace>/<name>/", methods=["PUT"])
def api_update_environment_build(namespace, name):
    """Point an environment at an existing build.

    Expects a JSON body containing a "buildId" key; returns 400 when the
    key is missing and forwards CondaStoreError responses from the store.
    """
    conda_store = get_conda_store()
    auth = get_auth()
    auth.authorize_request(
        f"{namespace}/{name}", {Permissions.ENVIRONMENT_UPDATE}, require=True
    )
    data = request.json
    if "buildId" not in data:
        # Fix: error message typo ("specificated" -> "specified").
        return jsonify({"status": "error", "message": "build id not specified"}), 400
    try:
        build_id = data["buildId"]
        conda_store.update_environment_build(namespace, name, build_id)
    except utils.CondaStoreError as e:
        return e.response
    return jsonify({"status": "ok"})
@app_api.route("/api/v1/specification/", methods=["POST"])
def api_post_specification():
    """Validate a posted conda specification and submit it to the store."""
    conda_store = get_conda_store()
    try:
        spec = schema.CondaSpecification.parse_obj(request.json)
        api.post_specification(conda_store, spec)
        return jsonify({"status": "ok"})
    except pydantic.ValidationError as e:
        # Validation failures are reported back with pydantic's error list.
        return jsonify({"status": "error", "error": e.errors()}), 400
@app_api.route("/api/v1/build/", methods=["GET"])
def api_list_builds():
    """List visible builds (specification and package details omitted)."""
    conda_store = get_conda_store()
    auth = get_auth()
    visible = auth.filter_builds(api.list_builds(conda_store.db))
    payload = [
        schema.Build.from_orm(b).dict(exclude={"specification", "packages"})
        for b in visible.all()
    ]
    return jsonify(payload)
@app_api.route("/api/v1/build/<build_id>/", methods=["GET"])
def api_get_build(build_id):
    """Fetch a single build record; 404 when the id is unknown."""
    conda_store = get_conda_store()
    auth = get_auth()
    build = api.get_build(conda_store.db, build_id)
    if build is None:
        error = {"status": "error", "error": "build id does not exist"}
        return jsonify(error), 404
    auth.authorize_request(
        f"{build.namespace.name}/{build.specification.name}",
        {Permissions.ENVIRONMENT_READ},
        require=True,
    )
    return jsonify(schema.Build.from_orm(build).dict())
@app_api.route("/api/v1/build/<build_id>/", methods=["PUT"])
def api_put_build(build_id):
    """Re-trigger a build; 404 when the build id is unknown.

    Triggering a rebuild mutates state, so it is authorized with
    ENVIRONMENT_UPDATE, consistent with the PUT environment endpoint.
    """
    conda_store = get_conda_store()
    auth = get_auth()
    build = api.get_build(conda_store.db, build_id)
    if build is None:
        return jsonify({"status": "error", "error": "build id does not exist"}), 404
    # Fix: was ENVIRONMENT_READ, which allowed read-only users to
    # trigger rebuilds from this mutating endpoint.
    auth.authorize_request(
        f"{build.namespace.name}/{build.specification.name}",
        {Permissions.ENVIRONMENT_UPDATE},
        require=True,
    )
    conda_store.create_build(build.namespace_id, build.specification.sha256)
    return jsonify({"status": "ok", "message": "rebuild triggered"})
@app_api.route("/api/v1/build/<build_id>/", methods=["DELETE"])
def api_delete_build(build_id):
    """Delete a build; 404 when the id is unknown."""
    conda_store = get_conda_store()
    auth = get_auth()
    build = api.get_build(conda_store.db, build_id)
    if build is None:
        error = {"status": "error", "error": "build id does not exist"}
        return jsonify(error), 404
    auth.authorize_request(
        f"{build.namespace.name}/{build.specification.name}",
        {Permissions.ENVIRONMENT_DELETE},
        require=True,
    )
    conda_store.delete_build(build_id)
    return jsonify({"status": "ok"})
@app_api.route("/api/v1/build/<build_id>/logs/", methods=["GET"])
def api_get_build_logs(build_id):
    """Redirect to the storage URL of a build's log; 404 on unknown id.

    Viewing logs is a read operation, authorized with ENVIRONMENT_READ
    like the other GET endpoints in this module.
    """
    conda_store = get_conda_store()
    auth = get_auth()
    build = api.get_build(conda_store.db, build_id)
    if build is None:
        return jsonify({"status": "error", "error": "build id does not exist"}), 404
    # Fix: was ENVIRONMENT_DELETE, which blocked read-only users from
    # viewing logs while oddly requiring delete rights for a GET.
    auth.authorize_request(
        f"{build.namespace.name}/{build.specification.name}",
        {Permissions.ENVIRONMENT_READ},
        require=True,
    )
    return redirect(conda_store.storage.get_url(build.log_key))
@app_api.route("/api/v1/channel/", methods=["GET"])
def api_list_channels():
    """List every conda channel known to the store."""
    conda_store = get_conda_store()
    payload = [
        schema.CondaChannel.from_orm(channel).dict()
        for channel in api.list_conda_channels(conda_store.db)
    ]
    return jsonify(payload)
@app_api.route("/api/v1/package/", methods=["GET"])
def api_list_packages():
    """List every conda package known to the store."""
    conda_store = get_conda_store()
    payload = [
        schema.CondaPackage.from_orm(package).dict()
        for package in api.list_conda_packages(conda_store.db)
    ]
    return jsonify(payload)
| [
"conda_store_server.api.list_conda_channels",
"conda_store_server.api.get_build",
"conda_store_server.api.get_environment",
"conda_store_server.api.list_conda_packages",
"conda_store_server.schema.CondaPackage.from_orm",
"conda_store_server.schema.Environment.from_orm",
"conda_store_server.schema.Build.... | [((260, 286), 'flask.Blueprint', 'Blueprint', (['"""api"""', '__name__'], {}), "('api', __name__)\n", (269, 286), False, 'from flask import Blueprint, jsonify, redirect, request\n'), ((345, 370), 'flask.jsonify', 'jsonify', (["{'status': 'ok'}"], {}), "({'status': 'ok'})\n", (352, 370), False, 'from flask import Blueprint, jsonify, redirect, request\n'), ((455, 472), 'conda_store_server.server.utils.get_conda_store', 'get_conda_store', ([], {}), '()\n', (470, 472), False, 'from conda_store_server.server.utils import get_conda_store, get_auth\n'), ((484, 494), 'conda_store_server.server.utils.get_auth', 'get_auth', ([], {}), '()\n', (492, 494), False, 'from conda_store_server.server.utils import get_conda_store, get_auth\n'), ((678, 697), 'flask.jsonify', 'jsonify', (['namespaces'], {}), '(namespaces)\n', (685, 697), False, 'from flask import Blueprint, jsonify, redirect, request\n'), ((786, 803), 'conda_store_server.server.utils.get_conda_store', 'get_conda_store', ([], {}), '()\n', (801, 803), False, 'from conda_store_server.server.utils import get_conda_store, get_auth\n'), ((815, 825), 'conda_store_server.server.utils.get_auth', 'get_auth', ([], {}), '()\n', (823, 825), False, 'from conda_store_server.server.utils import get_conda_store, get_auth\n'), ((1055, 1076), 'flask.jsonify', 'jsonify', (['environments'], {}), '(environments)\n', (1062, 1076), False, 'from flask import Blueprint, jsonify, redirect, request\n'), ((1214, 1231), 'conda_store_server.server.utils.get_conda_store', 'get_conda_store', ([], {}), '()\n', (1229, 1231), False, 'from conda_store_server.server.utils import get_conda_store, get_auth\n'), ((1243, 1253), 'conda_store_server.server.utils.get_auth', 'get_auth', ([], {}), '()\n', (1251, 1253), False, 'from conda_store_server.server.utils import get_conda_store, get_auth\n'), ((1384, 1451), 'conda_store_server.api.get_environment', 'api.get_environment', (['conda_store.db'], {'namespace': 'namespace', 
'name': 'name'}), '(conda_store.db, namespace=namespace, name=name)\n', (1403, 1451), False, 'from conda_store_server import api, schema, utils\n'), ((1783, 1800), 'conda_store_server.server.utils.get_conda_store', 'get_conda_store', ([], {}), '()\n', (1798, 1800), False, 'from conda_store_server.server.utils import get_conda_store, get_auth\n'), ((1812, 1822), 'conda_store_server.server.utils.get_auth', 'get_auth', ([], {}), '()\n', (1820, 1822), False, 'from conda_store_server.server.utils import get_conda_store, get_auth\n'), ((2274, 2299), 'flask.jsonify', 'jsonify', (["{'status': 'ok'}"], {}), "({'status': 'ok'})\n", (2281, 2299), False, 'from flask import Blueprint, jsonify, redirect, request\n'), ((2409, 2426), 'conda_store_server.server.utils.get_conda_store', 'get_conda_store', ([], {}), '()\n', (2424, 2426), False, 'from conda_store_server.server.utils import get_conda_store, get_auth\n'), ((2815, 2832), 'conda_store_server.server.utils.get_conda_store', 'get_conda_store', ([], {}), '()\n', (2830, 2832), False, 'from conda_store_server.server.utils import get_conda_store, get_auth\n'), ((2844, 2854), 'conda_store_server.server.utils.get_auth', 'get_auth', ([], {}), '()\n', (2852, 2854), False, 'from conda_store_server.server.utils import get_conda_store, get_auth\n'), ((3076, 3091), 'flask.jsonify', 'jsonify', (['builds'], {}), '(builds)\n', (3083, 3091), False, 'from flask import Blueprint, jsonify, redirect, request\n'), ((3202, 3219), 'conda_store_server.server.utils.get_conda_store', 'get_conda_store', ([], {}), '()\n', (3217, 3219), False, 'from conda_store_server.server.utils import get_conda_store, get_auth\n'), ((3231, 3241), 'conda_store_server.server.utils.get_auth', 'get_auth', ([], {}), '()\n', (3239, 3241), False, 'from conda_store_server.server.utils import get_conda_store, get_auth\n'), ((3255, 3294), 'conda_store_server.api.get_build', 'api.get_build', (['conda_store.db', 'build_id'], {}), '(conda_store.db, build_id)\n', (3268, 3294), 
False, 'from conda_store_server import api, schema, utils\n'), ((3728, 3745), 'conda_store_server.server.utils.get_conda_store', 'get_conda_store', ([], {}), '()\n', (3743, 3745), False, 'from conda_store_server.server.utils import get_conda_store, get_auth\n'), ((3757, 3767), 'conda_store_server.server.utils.get_auth', 'get_auth', ([], {}), '()\n', (3765, 3767), False, 'from conda_store_server.server.utils import get_conda_store, get_auth\n'), ((3781, 3820), 'conda_store_server.api.get_build', 'api.get_build', (['conda_store.db', 'build_id'], {}), '(conda_store.db, build_id)\n', (3794, 3820), False, 'from conda_store_server import api, schema, utils\n'), ((4176, 4233), 'flask.jsonify', 'jsonify', (["{'status': 'ok', 'message': 'rebuild triggered'}"], {}), "({'status': 'ok', 'message': 'rebuild triggered'})\n", (4183, 4233), False, 'from flask import Blueprint, jsonify, redirect, request\n'), ((4350, 4367), 'conda_store_server.server.utils.get_conda_store', 'get_conda_store', ([], {}), '()\n', (4365, 4367), False, 'from conda_store_server.server.utils import get_conda_store, get_auth\n'), ((4379, 4389), 'conda_store_server.server.utils.get_auth', 'get_auth', ([], {}), '()\n', (4387, 4389), False, 'from conda_store_server.server.utils import get_conda_store, get_auth\n'), ((4403, 4442), 'conda_store_server.api.get_build', 'api.get_build', (['conda_store.db', 'build_id'], {}), '(conda_store.db, build_id)\n', (4416, 4442), False, 'from conda_store_server import api, schema, utils\n'), ((4762, 4787), 'flask.jsonify', 'jsonify', (["{'status': 'ok'}"], {}), "({'status': 'ok'})\n", (4769, 4787), False, 'from flask import Blueprint, jsonify, redirect, request\n'), ((4908, 4925), 'conda_store_server.server.utils.get_conda_store', 'get_conda_store', ([], {}), '()\n', (4923, 4925), False, 'from conda_store_server.server.utils import get_conda_store, get_auth\n'), ((4937, 4947), 'conda_store_server.server.utils.get_auth', 'get_auth', ([], {}), '()\n', (4945, 4947), False, 
'from conda_store_server.server.utils import get_conda_store, get_auth\n'), ((4961, 5000), 'conda_store_server.api.get_build', 'api.get_build', (['conda_store.db', 'build_id'], {}), '(conda_store.db, build_id)\n', (4974, 5000), False, 'from conda_store_server import api, schema, utils\n'), ((5431, 5448), 'conda_store_server.server.utils.get_conda_store', 'get_conda_store', ([], {}), '()\n', (5446, 5448), False, 'from conda_store_server.server.utils import get_conda_store, get_auth\n'), ((5468, 5507), 'conda_store_server.api.list_conda_channels', 'api.list_conda_channels', (['conda_store.db'], {}), '(conda_store.db)\n', (5491, 5507), False, 'from conda_store_server import api, schema, utils\n'), ((5623, 5640), 'flask.jsonify', 'jsonify', (['channels'], {}), '(channels)\n', (5630, 5640), False, 'from flask import Blueprint, jsonify, redirect, request\n'), ((5738, 5755), 'conda_store_server.server.utils.get_conda_store', 'get_conda_store', ([], {}), '()\n', (5753, 5755), False, 'from conda_store_server.server.utils import get_conda_store, get_auth\n'), ((5775, 5814), 'conda_store_server.api.list_conda_packages', 'api.list_conda_packages', (['conda_store.db'], {}), '(conda_store.db)\n', (5798, 5814), False, 'from conda_store_server import api, schema, utils\n'), ((5930, 5947), 'flask.jsonify', 'jsonify', (['packages'], {}), '(packages)\n', (5937, 5947), False, 'from flask import Blueprint, jsonify, redirect, request\n'), ((542, 577), 'conda_store_server.api.list_namespaces', 'api.list_namespaces', (['conda_store.db'], {}), '(conda_store.db)\n', (561, 577), False, 'from conda_store_server import api, schema, utils\n'), ((875, 912), 'conda_store_server.api.list_environments', 'api.list_environments', (['conda_store.db'], {}), '(conda_store.db)\n', (896, 912), False, 'from conda_store_server import api, schema, utils\n'), ((2460, 2509), 'conda_store_server.schema.CondaSpecification.parse_obj', 'schema.CondaSpecification.parse_obj', (['request.json'], {}), 
'(request.json)\n', (2495, 2509), False, 'from conda_store_server import api, schema, utils\n'), ((2518, 2568), 'conda_store_server.api.post_specification', 'api.post_specification', (['conda_store', 'specification'], {}), '(conda_store, specification)\n', (2540, 2568), False, 'from conda_store_server import api, schema, utils\n'), ((2584, 2609), 'flask.jsonify', 'jsonify', (["{'status': 'ok'}"], {}), "({'status': 'ok'})\n", (2591, 2609), False, 'from flask import Blueprint, jsonify, redirect, request\n'), ((2892, 2923), 'conda_store_server.api.list_builds', 'api.list_builds', (['conda_store.db'], {}), '(conda_store.db)\n', (2907, 2923), False, 'from conda_store_server import api, schema, utils\n'), ((1495, 1562), 'flask.jsonify', 'jsonify', (["{'status': 'error', 'error': 'environment does not exist'}"], {}), "({'status': 'error', 'error': 'environment does not exist'})\n", (1502, 1562), False, 'from flask import Blueprint, jsonify, redirect, request\n'), ((2006, 2074), 'flask.jsonify', 'jsonify', (["{'status': 'error', 'message': 'build id not specificated'}"], {}), "({'status': 'error', 'message': 'build id not specificated'})\n", (2013, 2074), False, 'from flask import Blueprint, jsonify, redirect, request\n'), ((3332, 3396), 'flask.jsonify', 'jsonify', (["{'status': 'error', 'error': 'build id does not exist'}"], {}), "({'status': 'error', 'error': 'build id does not exist'})\n", (3339, 3396), False, 'from flask import Blueprint, jsonify, redirect, request\n'), ((3858, 3922), 'flask.jsonify', 'jsonify', (["{'status': 'error', 'error': 'build id does not exist'}"], {}), "({'status': 'error', 'error': 'build id does not exist'})\n", (3865, 3922), False, 'from flask import Blueprint, jsonify, redirect, request\n'), ((4480, 4544), 'flask.jsonify', 'jsonify', (["{'status': 'error', 'error': 'build id does not exist'}"], {}), "({'status': 'error', 'error': 'build id does not exist'})\n", (4487, 4544), False, 'from flask import Blueprint, jsonify, redirect, 
request\n'), ((5038, 5102), 'flask.jsonify', 'jsonify', (["{'status': 'error', 'error': 'build id does not exist'}"], {}), "({'status': 'error', 'error': 'build id does not exist'})\n", (5045, 5102), False, 'from flask import Blueprint, jsonify, redirect, request\n'), ((598, 626), 'conda_store_server.schema.Namespace.from_orm', 'schema.Namespace.from_orm', (['_'], {}), '(_)\n', (623, 626), False, 'from conda_store_server import api, schema, utils\n'), ((943, 973), 'conda_store_server.schema.Environment.from_orm', 'schema.Environment.from_orm', (['_'], {}), '(_)\n', (970, 973), False, 'from conda_store_server import api, schema, utils\n'), ((1588, 1628), 'conda_store_server.schema.Environment.from_orm', 'schema.Environment.from_orm', (['environment'], {}), '(environment)\n', (1615, 1628), False, 'from conda_store_server import api, schema, utils\n'), ((2948, 2976), 'conda_store_server.schema.Build.from_orm', 'schema.Build.from_orm', (['build'], {}), '(build)\n', (2969, 2976), False, 'from conda_store_server import api, schema, utils\n'), ((3581, 3609), 'conda_store_server.schema.Build.from_orm', 'schema.Build.from_orm', (['build'], {}), '(build)\n', (3602, 3609), False, 'from conda_store_server import api, schema, utils\n'), ((5533, 5570), 'conda_store_server.schema.CondaChannel.from_orm', 'schema.CondaChannel.from_orm', (['channel'], {}), '(channel)\n', (5561, 5570), False, 'from conda_store_server import api, schema, utils\n'), ((5840, 5877), 'conda_store_server.schema.CondaPackage.from_orm', 'schema.CondaPackage.from_orm', (['package'], {}), '(package)\n', (5868, 5877), False, 'from conda_store_server import api, schema, utils\n')] |
import pandas as pd
import numpy as numpy
from env import host, user, password
import os
from sklearn.model_selection import train_test_split
import sklearn.preprocessing
############################# Acquire Zillow #############################
# defines function to create a sql url using personal credentials
def get_connection(db, user=user, host=host, password=password):
    '''
    Build a SQLAlchemy connection url for the Codeup MySQL server,
    defaulting to the credentials loaded from the local env module.
    '''
    return 'mysql+pymysql://{}:{}@{}/{}'.format(user, password, host, db)
# defines function to get zillow data from MySQL and return as a pandas DataFrame
def get_zillow_data():
    '''
    Read zillow data from the Codeup db: all columns of properties_2017
    joined with predictions_2017, limited to single-unit property types
    with transactions during May-August 2017. Returns a pandas DataFrame.
    '''
    # Query text is unchanged: same property-type ids and date window.
    query = '''
    SELECT *
    FROM properties_2017
    JOIN predictions_2017 USING(parcelid)
    WHERE propertylandusetypeid IN (260, 261, 263, 264, 265, 266, 273, 275, 276, 279)
    AND transactiondate >= "2017-05-01" AND transactiondate <= "2017-08-31";
    '''
    return pd.read_sql(query, get_connection('zillow'))
# adds caching to get_zillow_data and checks for local filename (zillow_df.csv)
# if file exists, uses the .csv file
# if file doesn't exist, then produces SQL & pandas necessary to create a df, then write the df to a .csv file
def cached_zillow(cached=False):
    '''
    Return the zillow DataFrame, using a local csv cache.

    When cached is truthy and zillow_df.csv exists, read from the csv;
    otherwise pull fresh data from the Codeup db and (re)write the csv.
    '''
    if cached != False and os.path.isfile('zillow_df.csv'):
        # Cache hit: reuse the previously written csv.
        df = pd.read_csv('zillow_df.csv', index_col=0)
    else:
        # Cache miss or forced refresh: query the db and write the csv.
        df = get_zillow_data()
        df.to_csv('zillow_df.csv')
    return df
############################# Prepare Zillow #############################
# defines function to clean zillow data and return as a cleaned pandas DataFrame
def clean_zillow(df):
    '''
    Clean the raw zillow DataFrame for home-value estimation.

    Keeps the value/location features, indexes by parcelid, renames
    columns for readability, converts yearbuilt to home age, drops rows
    missing key fields, casts to integers, drops IQR outliers from
    square_feet and then tax_value, and adds a tax_rate percentage
    (taxes / tax_value * 100, rounded to 2 places).

    return: the cleaned pandas DataFrame
    '''
    # Features needed for the project, then parcelid becomes the index.
    keep = ['parcelid',
            'bedroomcnt',
            'bathroomcnt',
            'calculatedfinishedsquarefeet',
            'fips',
            'yearbuilt',
            'taxvaluedollarcnt',
            'taxamount']
    df = df[keep].set_index("parcelid")
    df = df.rename(columns={"parcelid": "parcel_id",
                            "bedroomcnt": "bedrooms",
                            "bathroomcnt": "bathrooms",
                            "calculatedfinishedsquarefeet": "square_feet",
                            "fips": "county_fips",
                            "taxamount": "taxes",
                            "taxvaluedollarcnt": "tax_value",
                            "yearbuilt": "age"})
    # Year built -> property age (data is from 2017).
    df.age = 2017 - df.age
    # Drop rows missing the key fields, zero-fill anything else.
    df = df.dropna(subset=['square_feet', 'age', 'tax_value', 'taxes']).fillna(0)
    for col in ['bedrooms', 'square_feet', 'county_fips', 'age', 'tax_value']:
        df[col] = df[col].astype('int64')

    def within_iqr(series):
        # Mask of rows strictly inside q1 - 1.5*IQR .. q3 + 1.5*IQR.
        q1, q3 = series.quantile([.25, .75])
        spread = 1.5 * (q3 - q1)
        return (series > q1 - spread) & (series < q3 + spread)

    # Outlier removal: square_feet first, then tax_value on the
    # already-filtered frame (order matters for the second quantile).
    df = df[within_iqr(df.square_feet)]
    df = df[within_iqr(df.tax_value)]
    # Tax rate as a percentage of the assessed value.
    df['tax_rate'] = round(df.taxes / df.tax_value * 100, 2)
    return df
# splits a dataframe into train, validate, test
def split(df):
    '''
    Split a DataFrame into train / validate / test partitions
    (80/20 first, then 70/30 of the remainder; seeded for repeatability).
    return train, validate, test DataFrames.
    '''
    remainder, test = train_test_split(df, random_state=123, test_size=.2)
    train, validate = train_test_split(remainder, random_state=123, test_size=.3)
    return train, validate, test
# defines MinMaxScaler() and returns scaled data
def Min_Max_Scaler(X_train, X_validate, X_test):
    """
    Min-max scale three numeric DataFrames using statistics fit on
    X_train only, preserving each frame's index and column labels.
    Returns X_train_scaled, X_validate_scaled, X_test_scaled.
    """
    # Fit on train only so validate/test never leak into the scaler.
    mms = sklearn.preprocessing.MinMaxScaler().fit(X_train)

    def rescale(frame):
        # transform() returns a numpy array; rebuild the DataFrame shell.
        return pd.DataFrame(mms.transform(frame),
                            index=frame.index, columns=frame.columns)

    return rescale(X_train), rescale(X_validate), rescale(X_test)
"sklearn.model_selection.train_test_split",
"pandas.read_csv",
"os.path.isfile"
] | [((5683, 5736), 'sklearn.model_selection.train_test_split', 'train_test_split', (['df'], {'test_size': '(0.2)', 'random_state': '(123)'}), '(df, test_size=0.2, random_state=123)\n', (5699, 5736), False, 'from sklearn.model_selection import train_test_split\n'), ((5758, 5823), 'sklearn.model_selection.train_test_split', 'train_test_split', (['train_validate'], {'test_size': '(0.3)', 'random_state': '(123)'}), '(train_validate, test_size=0.3, random_state=123)\n', (5774, 5823), False, 'from sklearn.model_selection import train_test_split\n'), ((2295, 2336), 'pandas.read_csv', 'pd.read_csv', (['"""zillow_df.csv"""'], {'index_col': '(0)'}), "('zillow_df.csv', index_col=0)\n", (2306, 2336), True, 'import pandas as pd\n'), ((1964, 1995), 'os.path.isfile', 'os.path.isfile', (['"""zillow_df.csv"""'], {}), "('zillow_df.csv')\n", (1978, 1995), False, 'import os\n')] |
from blueqat import Circuit, ParametrizedCircuit
def compare_circuit(c1: Circuit, c2: Circuit) -> bool:
    """Treat two circuits as equal when their reprs are identical."""
    left, right = repr(c1), repr(c2)
    return left == right
def test_parametrized1():
    """subs() with a positional list binds parameters in declaration order."""
    actual = ParametrizedCircuit().ry('a')[0].rz('b')[0].subs([1.2, 3.4])
    expected = Circuit().ry(1.2)[0].rz(3.4)[0]
    assert compare_circuit(actual, expected)
def test_parametrized2():
    """subs() with a mapping binds parameters by name."""
    actual = ParametrizedCircuit().ry('a')[0].rz('b')[0].subs({'a': 1.2, 'b': 3.4})
    expected = Circuit().ry(1.2)[0].rz(3.4)[0]
    assert compare_circuit(actual, expected)
def test_parametrized3():
    """subs() with no bindings on an empty circuit yields an empty circuit."""
    actual = ParametrizedCircuit().subs([])
    assert compare_circuit(actual, Circuit())
| [
"blueqat.ParametrizedCircuit",
"blueqat.Circuit"
] | [((584, 593), 'blueqat.Circuit', 'Circuit', ([], {}), '()\n', (591, 593), False, 'from blueqat import Circuit, ParametrizedCircuit\n'), ((544, 565), 'blueqat.ParametrizedCircuit', 'ParametrizedCircuit', ([], {}), '()\n', (563, 565), False, 'from blueqat import Circuit, ParametrizedCircuit\n'), ((270, 279), 'blueqat.Circuit', 'Circuit', ([], {}), '()\n', (277, 279), False, 'from blueqat import Circuit, ParametrizedCircuit\n'), ((447, 456), 'blueqat.Circuit', 'Circuit', ([], {}), '()\n', (454, 456), False, 'from blueqat import Circuit, ParametrizedCircuit\n'), ((200, 221), 'blueqat.ParametrizedCircuit', 'ParametrizedCircuit', ([], {}), '()\n', (219, 221), False, 'from blueqat import Circuit, ParametrizedCircuit\n'), ((367, 388), 'blueqat.ParametrizedCircuit', 'ParametrizedCircuit', ([], {}), '()\n', (386, 388), False, 'from blueqat import Circuit, ParametrizedCircuit\n')] |
import json
class ConfigObject(dict):
    """A dict whose keys are also readable/writable as attributes.

    Nested dicts — including dicts found inside (arbitrarily nested)
    lists — are wrapped as ConfigObject so chained attribute access
    works, e.g. ``cfg.db.host``. Missing keys resolve to ``None``
    instead of raising AttributeError. ``str()`` renders pretty JSON.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.load_attributes(kwargs)

    def load_attributes(self, d):
        """Replace nested containers in *d* with attribute-friendly wrappers."""
        for key, value in d.items():
            if isinstance(value, (dict, list)):
                self[key] = self._wrap(value)

    def _wrap(self, value):
        # Helper: wrap dicts as ConfigObject; recurse into lists so dicts
        # inside nested lists are wrapped too (generalizes the original,
        # which only handled dicts at the first list level).
        if isinstance(value, dict):
            return self.__class__(**value)
        if isinstance(value, list):
            return [self._wrap(item) for item in value]
        return value

    def __getattr__(self, name):
        """Return self[name]; missing keys yield None rather than raising."""
        try:
            return self[name]
        except KeyError:
            # Narrowed from a bare `except Exception`: self[name] can only
            # raise KeyError here, and the broad catch masked real bugs.
            return None

    def __setattr__(self, key, value):
        # Attribute assignment writes straight into the dict.
        self[key] = value

    def __str__(self):
        return json.dumps(self, indent=4, ensure_ascii=False)
| [
"json.dumps"
] | [((840, 886), 'json.dumps', 'json.dumps', (['self'], {'indent': '(4)', 'ensure_ascii': '(False)'}), '(self, indent=4, ensure_ascii=False)\n', (850, 886), False, 'import json\n')] |
import math
import pylab
from app import StandardRobot, LeastDistanceRobot, RandomWalkRobot, runSimulation
def timeNumberPlot(title, x_label, y_label, dim_length):
    """
    Plots average cleaning time against fleet size (1-10 robots) for a
    square room of the given side length, one line per robot type.
    """
    num_robot_range = range(1, 11)
    robot_types = [StandardRobot, LeastDistanceRobot, RandomWalkRobot]
    for robot_type in robot_types:
        # Mean time over 100 trials to clean the whole room.
        mean_times = [
            runSimulation(n, 1.0, dim_length, dim_length, 1, 100, robot_type)[0]
            for n in num_robot_range
        ]
        pylab.plot(num_robot_range, mean_times)
    pylab.title(title + f"\n for room size of {dim_length}x{dim_length}")
    pylab.legend(('StandardRobot', 'LeastDistanceRobot', 'RandomWalkRobot'))
    pylab.xlabel(x_label)
    pylab.ylabel(y_label)
    pylab.show()
def timeAspectRatioPlot(title, x_label, y_label, area, num_robots):
    """
    Plots average cleaning time against the aspect ratio of a room of
    (roughly) fixed area, one line per robot type.
    """
    robot_types = [StandardRobot, LeastDistanceRobot, RandomWalkRobot]
    side = math.sqrt(area)
    widths = [side * mult for mult in range(1, 11)]
    aspect_ratios = []
    times_per_robot = [[] for _ in robot_types]
    for width in widths:
        height = area / width
        aspect_ratios.append("1 : {0}".format(int(width / height)))
        for idx, robot_type in enumerate(robot_types):
            mean_time = runSimulation(
                num_robots, 1.0, int(width), int(height), 1, 100, robot_type)[0]
            times_per_robot[idx].append(mean_time)
    for series in times_per_robot:
        pylab.plot(aspect_ratios, series)
    pylab.title(
        title + f"\n for {num_robots} Robots & Area of {area}")
    pylab.legend(('StandardRobot', 'LeastDistanceRobot', 'RandomWalkRobot'))
    pylab.xlabel(x_label)
    pylab.ylabel(y_label)
    pylab.show()
def wasteAspectRatioPlot(title, x_label, y_label, num_robots, area):
    """
    Plots the per-time-step waste ratio (wasted moves / cleaning time)
    against room aspect ratio, one line per robot type.
    """
    robot_types = [StandardRobot, LeastDistanceRobot, RandomWalkRobot]
    side = math.sqrt(area)
    widths = [side * mult for mult in range(1, 11)]
    aspect_ratios = []
    waste_per_robot = [[] for _ in robot_types]
    for width in widths:
        height = area / width
        aspect_ratios.append("1 : {0}".format(int(width / height)))
        for idx, robot_type in enumerate(robot_types):
            result = runSimulation(
                num_robots, 1.0, int(width), int(height), 1, 100, robot_type)
            # result[1] / result[0]: waste normalized by elapsed time.
            waste_per_robot[idx].append(result[1] / result[0])
    for series in waste_per_robot:
        pylab.plot(aspect_ratios, series)
    pylab.title(
        title + "\n for {0} Robots & Area of {1}".format(num_robots, area))
    pylab.legend(('StandardRobot', 'LeastDistanceRobot', 'RandomWalkRobot'))
    pylab.xlabel(x_label)
    pylab.ylabel(y_label)
    pylab.show()
def timeAreaPlot(title, x_label, y_label, num_robots):
    """
    Plots average cleaning time against square-room side length
    (5 to 30 in steps of 5), one line per robot type.
    """
    dim_length_range = range(5, 31, 5)
    robot_types = [StandardRobot, LeastDistanceRobot, RandomWalkRobot]
    for robot_type in robot_types:
        mean_times = [
            runSimulation(num_robots, 1.0, dim, dim, 1, 100, robot_type)[0]
            for dim in dim_length_range
        ]
        pylab.plot(dim_length_range, mean_times)
    pylab.title(title + "\n for {0} Robots".format(num_robots))
    pylab.legend(('StandardRobot', 'LeastDistanceRobot', 'RandomWalkRobot'))
    pylab.xlabel(x_label)
    pylab.ylabel(y_label)
    pylab.show()
def consistencyPlot(title, x_label, y_label, dim_length, num_robots):
    """
    Repeats the same single-trial experiment 16 times per robot type and
    plots each run's time, visualizing how consistent each robot is.
    """
    try_num_range = range(16)
    robot_types = [StandardRobot, LeastDistanceRobot, RandomWalkRobot]
    for robot_type in robot_types:
        # One trial per run so run-to-run variance is visible.
        run_times = [
            runSimulation(num_robots, 1.0, dim_length, dim_length,
                          1, 1, robot_type)[0]
            for _ in try_num_range
        ]
        pylab.plot(try_num_range, run_times)
    pylab.title(
        title + "\n for size of {0}x{0} & {1} Robots".format(dim_length, num_robots))
    pylab.legend(('StandardRobot', 'LeastDistanceRobot', 'RandomWalkRobot'))
    pylab.xlabel(x_label)
    pylab.ylabel(y_label)
    pylab.show()
def timeAreaPortionPlot(title, x_label, y_label, num_robots, robot_type):
    """
    Plots average cleaning time against square-room size for one robot
    type, one line per target coverage fraction (0.70 through 1.00).
    """
    dim_length_range = range(5, 31, 5)
    coverage_fractions = [pct / 100 for pct in range(70, 105, 5)]
    series = [[] for _ in coverage_fractions]
    for dim_length in dim_length_range:
        for idx, fraction in enumerate(coverage_fractions):
            mean_time = runSimulation(
                num_robots, 1.0, dim_length, dim_length,
                fraction, 100, robot_type)[0]
            series[idx].append(mean_time)
    for line in series:
        pylab.plot(dim_length_range, line)
    pylab.title(
        title + f"\n for {num_robots} bots of {robot_type.__name__} type")
    pylab.legend(("0.7", "0.75", "0.8", "0.85", "0.9", "0.95", "1.0"))
    pylab.xlabel(x_label)
    pylab.ylabel(y_label)
    pylab.show()
# NOTE(review): the ratio below (waste/time) is a proxy for efficiency —
# confirm this is the intended metric before relying on these plots.
def CoverageWasteRatio(title, x_label, y_label, num_robots, robot_type):
"""
efficiency
"""
dim_length_range = range(5, 31, 5)
coverage_percent_range = range(70, 105, 5)
coverage_percent_range = [i/100 for i in coverage_percent_range]
alist, blist, clist, dlist, elist, flist, glist = ([] for i in range(7))
coverage_percent_list = [alist, blist, clist, dlist, elist, flist, glist]
for dim_length in dim_length_range:
for i in range(len(coverage_percent_range)):
result = runSimulation(
num_robots, 1.0, dim_length, dim_length,
coverage_percent_range[i], 100, robot_type)
coverage_percent_list[i].append(result[1]/result[0])
for percentlist in coverage_percent_list:
pylab.plot(dim_length_range, percentlist)
pylab.title(
title+f"\n for {num_robots} bots of {robot_type.__name__} type")
pylab.legend(("0.7", "0.75", "0.8", "0.85", "0.9", "0.95", "1.0"))
pylab.xlabel(x_label)
pylab.ylabel(y_label)
pylab.show()
def timeNumberPortionPlot(title, x_label, y_label, dim_length, robot_type):
    """
    Plots the relation between the number of robots (1..10) and the average
    time taken to clean a certain portion of the room, one curve per target
    coverage fraction (0.70 through 1.00 in 0.05 steps).

    Args:
        title, x_label, y_label: plot annotations.
        dim_length: side length of the (fixed) square room.
        robot_type: robot class handed through to runSimulation.
    """
    num_robot_range = range(1, 11)
    coverage_fractions = [pct / 100 for pct in range(70, 105, 5)]
    times_per_coverage = [[] for _ in coverage_fractions]
    for num_robots in num_robot_range:
        for series, coverage in zip(times_per_coverage, coverage_fractions):
            # result[0] is the averaged completion time over 100 trials.
            result = runSimulation(
                num_robots, 1.0, dim_length, dim_length, coverage, 100, robot_type)
            series.append(result[0])
    for series in times_per_coverage:
        pylab.plot(num_robot_range, series)
    # Bug fix: the title previously interpolated the leaked loop variable
    # `num_robots` (always 10 after the loop ended); the fixed room size is
    # what actually characterises this plot.
    pylab.title(
        title+f"\n for room size {dim_length}x{dim_length} & {robot_type.__name__} type")
    pylab.legend(("0.7", "0.75", "0.8", "0.85", "0.9", "0.95", "1.0"))
    pylab.xlabel(x_label)
    pylab.ylabel(y_label)
    pylab.show()
def wasteAreaNumberPlot(title, x_label, y_label):
    """
    Plots the waste ratio (results[1]/results[0], presumably waste per unit
    time -- TODO confirm) against square-room side length, one curve per
    robot count in {5, 10, 15, 20, 25}. All runs use LeastDistanceRobot at
    full coverage, averaged over 100 trials.
    """
    dim_length_range = range(10, 51, 5)
    num_robots_range = range(5, 26, 5)
    # Dead-code fix: the original also accumulated raw completion times in a
    # parallel `time_robots_list` that was never plotted or returned.
    waste_ratios = [[] for _ in num_robots_range]
    for dim_length in dim_length_range:
        for series, num_robots in zip(waste_ratios, num_robots_range):
            results = runSimulation(
                num_robots, 1.0, dim_length,
                dim_length, 1, 100, LeastDistanceRobot)
            series.append(results[1]/results[0])
    for series in waste_ratios:
        pylab.plot(dim_length_range, series)
    pylab.title(title)
    pylab.legend(('5', '10', "15", "20", "25"))
    pylab.xlabel(x_label)
    pylab.ylabel(y_label)
    pylab.show()
# NOTE(review): experimental metric -- interpretation unconfirmed.
def NumOfBotSizeWasteRatio(title, x_label, y_label, dim_length):
    """
    Scatter-plots the waste ratio (results[1]/results[0]) against completion
    time for robot counts 5..25 in a fixed dim_length x dim_length room, to
    help locate a sweet spot of robot count for that room size.
    """
    robot_counts = range(5, 26, 5)
    waste_points = []
    time_points = []
    for count in robot_counts:
        results = runSimulation(
            count, 1.0, dim_length, dim_length, 1, 50, LeastDistanceRobot)
        # Each count contributes exactly one (time, waste-ratio) point.
        waste_points.append([results[1] / results[0]])
        time_points.append([results[0]])
    for times, wastes in zip(time_points, waste_points):
        pylab.scatter(times, wastes, s=100)
    pylab.title(title)
    pylab.legend(('5', '10', "15", "20", "25"))
    pylab.xlabel(x_label)
    pylab.ylabel(y_label)
    pylab.show()
# Plots
# timeNumberPlot('Number of robots & Time relation',
# 'Number of robots', 'Time (Tick)', 20)
# timeAspectRatioPlot('Aspect Ratio & Time relation',
# 'Room Aspect Ratio', 'Time', 100, 1)
# wasteAspectRatioPlot("AspectRatio & WasteRatio relation",
# "Room Aspect Ratio", "Waste Percentage", 1, 100)
# timeAreaPlot("Time & Room Area relation",
# "Length (squared)", "Time", 1)
# consistencyPlot("Consistency", "Try number", "Time", 20, 1)
# timeAreaPortionPlot('Room Portion & Time relation',
# 'Length (squared)', 'Time', 1, RandomWalkRobot)
# # CoverageWasteRatio('Coverage Percent & Size Relation','Length (squared)',
# # 'Waste Percentage', 5, LeastDistanceRobot)
# timeNumberPortionPlot("CostQualityTime", "Number of robots",
# "Time", 10, LeastDistanceRobot)
# wasteAreaNumberPlot('Waste & Size Relation', 'Length (squared)',
# 'Waste Percentage')
# # NumOfBotSizeWasteRatio('Waste & Size & Num of bots Relation\n LeastDistanceRobot',
# # 'Time', ' Waste Percentage', 20)
| [
"pylab.title",
"pylab.scatter",
"app.runSimulation",
"pylab.plot",
"pylab.xlabel",
"pylab.legend",
"math.sqrt",
"pylab.ylabel",
"pylab.show"
] | [((833, 905), 'pylab.title', 'pylab.title', (['(title + f"""\n for room size of {dim_length}x{dim_length}""")'], {}), '(title + f"""\n for room size of {dim_length}x{dim_length}""")\n', (844, 905), False, 'import pylab\n'), ((905, 977), 'pylab.legend', 'pylab.legend', (["('StandardRobot', 'LeastDistanceRobot', 'RandomWalkRobot')"], {}), "(('StandardRobot', 'LeastDistanceRobot', 'RandomWalkRobot'))\n", (917, 977), False, 'import pylab\n'), ((982, 1003), 'pylab.xlabel', 'pylab.xlabel', (['x_label'], {}), '(x_label)\n', (994, 1003), False, 'import pylab\n'), ((1008, 1029), 'pylab.ylabel', 'pylab.ylabel', (['y_label'], {}), '(y_label)\n', (1020, 1029), False, 'import pylab\n'), ((1034, 1046), 'pylab.show', 'pylab.show', ([], {}), '()\n', (1044, 1046), False, 'import pylab\n'), ((1491, 1506), 'math.sqrt', 'math.sqrt', (['area'], {}), '(area)\n', (1500, 1506), False, 'import math\n'), ((2010, 2080), 'pylab.title', 'pylab.title', (['(title + f"""\n for {num_robots} Robots & Area of {area}""")'], {}), '(title + f"""\n for {num_robots} Robots & Area of {area}""")\n', (2021, 2080), False, 'import pylab\n'), ((2089, 2161), 'pylab.legend', 'pylab.legend', (["('StandardRobot', 'LeastDistanceRobot', 'RandomWalkRobot')"], {}), "(('StandardRobot', 'LeastDistanceRobot', 'RandomWalkRobot'))\n", (2101, 2161), False, 'import pylab\n'), ((2166, 2187), 'pylab.xlabel', 'pylab.xlabel', (['x_label'], {}), '(x_label)\n', (2178, 2187), False, 'import pylab\n'), ((2192, 2213), 'pylab.ylabel', 'pylab.ylabel', (['y_label'], {}), '(y_label)\n', (2204, 2213), False, 'import pylab\n'), ((2218, 2230), 'pylab.show', 'pylab.show', ([], {}), '()\n', (2228, 2230), False, 'import pylab\n'), ((2615, 2630), 'math.sqrt', 'math.sqrt', (['area'], {}), '(area)\n', (2624, 2630), False, 'import math\n'), ((3237, 3309), 'pylab.legend', 'pylab.legend', (["('StandardRobot', 'LeastDistanceRobot', 'RandomWalkRobot')"], {}), "(('StandardRobot', 'LeastDistanceRobot', 'RandomWalkRobot'))\n", (3249, 3309), False, 
'import pylab\n'), ((3314, 3335), 'pylab.xlabel', 'pylab.xlabel', (['x_label'], {}), '(x_label)\n', (3326, 3335), False, 'import pylab\n'), ((3340, 3361), 'pylab.ylabel', 'pylab.ylabel', (['y_label'], {}), '(y_label)\n', (3352, 3361), False, 'import pylab\n'), ((3366, 3378), 'pylab.show', 'pylab.show', ([], {}), '()\n', (3376, 3378), False, 'import pylab\n'), ((4178, 4250), 'pylab.legend', 'pylab.legend', (["('StandardRobot', 'LeastDistanceRobot', 'RandomWalkRobot')"], {}), "(('StandardRobot', 'LeastDistanceRobot', 'RandomWalkRobot'))\n", (4190, 4250), False, 'import pylab\n'), ((4255, 4276), 'pylab.xlabel', 'pylab.xlabel', (['x_label'], {}), '(x_label)\n', (4267, 4276), False, 'import pylab\n'), ((4281, 4302), 'pylab.ylabel', 'pylab.ylabel', (['y_label'], {}), '(y_label)\n', (4293, 4302), False, 'import pylab\n'), ((4307, 4319), 'pylab.show', 'pylab.show', ([], {}), '()\n', (4317, 4319), False, 'import pylab\n'), ((5274, 5346), 'pylab.legend', 'pylab.legend', (["('StandardRobot', 'LeastDistanceRobot', 'RandomWalkRobot')"], {}), "(('StandardRobot', 'LeastDistanceRobot', 'RandomWalkRobot'))\n", (5286, 5346), False, 'import pylab\n'), ((5351, 5372), 'pylab.xlabel', 'pylab.xlabel', (['x_label'], {}), '(x_label)\n', (5363, 5372), False, 'import pylab\n'), ((5377, 5398), 'pylab.ylabel', 'pylab.ylabel', (['y_label'], {}), '(y_label)\n', (5389, 5398), False, 'import pylab\n'), ((5403, 5415), 'pylab.show', 'pylab.show', ([], {}), '()\n', (5413, 5415), False, 'import pylab\n'), ((6396, 6481), 'pylab.title', 'pylab.title', (['(title + f"""\n for {num_robots} bots of {robot_type.__name__} type""")'], {}), '(title +\n f"""\n for {num_robots} bots of {robot_type.__name__} type""")\n', (6407, 6481), False, 'import pylab\n'), ((6486, 6552), 'pylab.legend', 'pylab.legend', (["('0.7', '0.75', '0.8', '0.85', '0.9', '0.95', '1.0')"], {}), "(('0.7', '0.75', '0.8', '0.85', '0.9', '0.95', '1.0'))\n", (6498, 6552), False, 'import pylab\n'), ((6557, 6578), 'pylab.xlabel', 'pylab.xlabel', 
(['x_label'], {}), '(x_label)\n', (6569, 6578), False, 'import pylab\n'), ((6583, 6604), 'pylab.ylabel', 'pylab.ylabel', (['y_label'], {}), '(y_label)\n', (6595, 6604), False, 'import pylab\n'), ((6609, 6621), 'pylab.show', 'pylab.show', ([], {}), '()\n', (6619, 6621), False, 'import pylab\n'), ((7471, 7556), 'pylab.title', 'pylab.title', (['(title + f"""\n for {num_robots} bots of {robot_type.__name__} type""")'], {}), '(title +\n f"""\n for {num_robots} bots of {robot_type.__name__} type""")\n', (7482, 7556), False, 'import pylab\n'), ((7561, 7627), 'pylab.legend', 'pylab.legend', (["('0.7', '0.75', '0.8', '0.85', '0.9', '0.95', '1.0')"], {}), "(('0.7', '0.75', '0.8', '0.85', '0.9', '0.95', '1.0'))\n", (7573, 7627), False, 'import pylab\n'), ((7632, 7653), 'pylab.xlabel', 'pylab.xlabel', (['x_label'], {}), '(x_label)\n', (7644, 7653), False, 'import pylab\n'), ((7658, 7679), 'pylab.ylabel', 'pylab.ylabel', (['y_label'], {}), '(y_label)\n', (7670, 7679), False, 'import pylab\n'), ((7684, 7696), 'pylab.show', 'pylab.show', ([], {}), '()\n', (7694, 7696), False, 'import pylab\n'), ((8652, 8737), 'pylab.title', 'pylab.title', (['(title + f"""\n for {num_robots} bots of {robot_type.__name__} type""")'], {}), '(title +\n f"""\n for {num_robots} bots of {robot_type.__name__} type""")\n', (8663, 8737), False, 'import pylab\n'), ((8742, 8808), 'pylab.legend', 'pylab.legend', (["('0.7', '0.75', '0.8', '0.85', '0.9', '0.95', '1.0')"], {}), "(('0.7', '0.75', '0.8', '0.85', '0.9', '0.95', '1.0'))\n", (8754, 8808), False, 'import pylab\n'), ((8813, 8834), 'pylab.xlabel', 'pylab.xlabel', (['x_label'], {}), '(x_label)\n', (8825, 8834), False, 'import pylab\n'), ((8839, 8860), 'pylab.ylabel', 'pylab.ylabel', (['y_label'], {}), '(y_label)\n', (8851, 8860), False, 'import pylab\n'), ((8865, 8877), 'pylab.show', 'pylab.show', ([], {}), '()\n', (8875, 8877), False, 'import pylab\n'), ((9875, 9893), 'pylab.title', 'pylab.title', (['title'], {}), '(title)\n', (9886, 9893), False, 
'import pylab\n'), ((9898, 9941), 'pylab.legend', 'pylab.legend', (["('5', '10', '15', '20', '25')"], {}), "(('5', '10', '15', '20', '25'))\n", (9910, 9941), False, 'import pylab\n'), ((9946, 9967), 'pylab.xlabel', 'pylab.xlabel', (['x_label'], {}), '(x_label)\n', (9958, 9967), False, 'import pylab\n'), ((9972, 9993), 'pylab.ylabel', 'pylab.ylabel', (['y_label'], {}), '(y_label)\n', (9984, 9993), False, 'import pylab\n'), ((9998, 10010), 'pylab.show', 'pylab.show', ([], {}), '()\n', (10008, 10010), False, 'import pylab\n'), ((10978, 10996), 'pylab.title', 'pylab.title', (['title'], {}), '(title)\n', (10989, 10996), False, 'import pylab\n'), ((11001, 11044), 'pylab.legend', 'pylab.legend', (["('5', '10', '15', '20', '25')"], {}), "(('5', '10', '15', '20', '25'))\n", (11013, 11044), False, 'import pylab\n'), ((11049, 11070), 'pylab.xlabel', 'pylab.xlabel', (['x_label'], {}), '(x_label)\n', (11061, 11070), False, 'import pylab\n'), ((11075, 11096), 'pylab.ylabel', 'pylab.ylabel', (['y_label'], {}), '(y_label)\n', (11087, 11096), False, 'import pylab\n'), ((11101, 11113), 'pylab.show', 'pylab.show', ([], {}), '()\n', (11111, 11113), False, 'import pylab\n'), ((795, 828), 'pylab.plot', 'pylab.plot', (['num_robot_range', 'time'], {}), '(num_robot_range, time)\n', (805, 828), False, 'import pylab\n'), ((1973, 2004), 'pylab.plot', 'pylab.plot', (['aspect_ratios', 'time'], {}), '(aspect_ratios, time)\n', (1983, 2004), False, 'import pylab\n'), ((3109, 3140), 'pylab.plot', 'pylab.plot', (['aspect_ratios', 'time'], {}), '(aspect_ratios, time)\n', (3119, 3140), False, 'import pylab\n'), ((4077, 4111), 'pylab.plot', 'pylab.plot', (['dim_length_range', 'time'], {}), '(dim_length_range, time)\n', (4087, 4111), False, 'import pylab\n'), ((5137, 5168), 'pylab.plot', 'pylab.plot', (['try_num_range', 'time'], {}), '(try_num_range, time)\n', (5147, 5168), False, 'import pylab\n'), ((6349, 6390), 'pylab.plot', 'pylab.plot', (['dim_length_range', 'percentlist'], {}), '(dim_length_range, 
percentlist)\n', (6359, 6390), False, 'import pylab\n'), ((7424, 7465), 'pylab.plot', 'pylab.plot', (['dim_length_range', 'percentlist'], {}), '(dim_length_range, percentlist)\n', (7434, 7465), False, 'import pylab\n'), ((8606, 8646), 'pylab.plot', 'pylab.plot', (['num_robot_range', 'percentlist'], {}), '(num_robot_range, percentlist)\n', (8616, 8646), False, 'import pylab\n'), ((9822, 9870), 'pylab.plot', 'pylab.plot', (['dim_length_range', 'num_robots_list[i]'], {}), '(dim_length_range, num_robots_list[i])\n', (9832, 9870), False, 'import pylab\n'), ((10653, 10747), 'app.runSimulation', 'runSimulation', (['num_robots_range[i]', '(1.0)', 'dim_length', 'dim_length', '(1)', '(50)', 'LeastDistanceRobot'], {}), '(num_robots_range[i], 1.0, dim_length, dim_length, 1, 50,\n LeastDistanceRobot)\n', (10666, 10747), False, 'from app import StandardRobot, LeastDistanceRobot, RandomWalkRobot, runSimulation\n'), ((10912, 10973), 'pylab.scatter', 'pylab.scatter', (['time_robots_list[i]', 'num_robots_list[i]'], {'s': '(100)'}), '(time_robots_list[i], num_robots_list[i], s=100)\n', (10925, 10973), False, 'import pylab\n'), ((614, 687), 'app.runSimulation', 'runSimulation', (['num_robots', '(1.0)', 'dim_length', 'dim_length', '(1)', '(100)', 'Robots[i]'], {}), '(num_robots, 1.0, dim_length, dim_length, 1, 100, Robots[i])\n', (627, 687), False, 'from app import StandardRobot, LeastDistanceRobot, RandomWalkRobot, runSimulation\n'), ((3896, 3969), 'app.runSimulation', 'runSimulation', (['num_robots', '(1.0)', 'dim_length', 'dim_length', '(1)', '(100)', 'Robots[i]'], {}), '(num_robots, 1.0, dim_length, dim_length, 1, 100, Robots[i])\n', (3909, 3969), False, 'from app import StandardRobot, LeastDistanceRobot, RandomWalkRobot, runSimulation\n'), ((4958, 5029), 'app.runSimulation', 'runSimulation', (['num_robots', '(1.0)', 'dim_length', 'dim_length', '(1)', '(1)', 'Robots[i]'], {}), '(num_robots, 1.0, dim_length, dim_length, 1, 1, Robots[i])\n', (4971, 5029), False, 'from app import 
StandardRobot, LeastDistanceRobot, RandomWalkRobot, runSimulation\n'), ((6107, 6209), 'app.runSimulation', 'runSimulation', (['num_robots', '(1.0)', 'dim_length', 'dim_length', 'coverage_percent_range[i]', '(100)', 'robot_type'], {}), '(num_robots, 1.0, dim_length, dim_length,\n coverage_percent_range[i], 100, robot_type)\n', (6120, 6209), False, 'from app import StandardRobot, LeastDistanceRobot, RandomWalkRobot, runSimulation\n'), ((7172, 7274), 'app.runSimulation', 'runSimulation', (['num_robots', '(1.0)', 'dim_length', 'dim_length', 'coverage_percent_range[i]', '(100)', 'robot_type'], {}), '(num_robots, 1.0, dim_length, dim_length,\n coverage_percent_range[i], 100, robot_type)\n', (7185, 7274), False, 'from app import StandardRobot, LeastDistanceRobot, RandomWalkRobot, runSimulation\n'), ((8364, 8466), 'app.runSimulation', 'runSimulation', (['num_robots', '(1.0)', 'dim_length', 'dim_length', 'coverage_percent_range[i]', '(100)', 'robot_type'], {}), '(num_robots, 1.0, dim_length, dim_length,\n coverage_percent_range[i], 100, robot_type)\n', (8377, 8466), False, 'from app import StandardRobot, LeastDistanceRobot, RandomWalkRobot, runSimulation\n'), ((9533, 9628), 'app.runSimulation', 'runSimulation', (['num_robots_range[i]', '(1.0)', 'dim_length', 'dim_length', '(1)', '(100)', 'LeastDistanceRobot'], {}), '(num_robots_range[i], 1.0, dim_length, dim_length, 1, 100,\n LeastDistanceRobot)\n', (9546, 9628), False, 'from app import StandardRobot, LeastDistanceRobot, RandomWalkRobot, runSimulation\n')] |
import numpy as np
import cv2
import matplotlib.pyplot as plt
from openvino.inference_engine import IECore
import openvino_model_experiment_package as omep
# Load an IR (Intermediate Representation) model via the helper package.
model = 'intel/human-pose-estimation-0001/FP16/human-pose-estimation-0001'
ie, net, exenet, inblobs, outblobs, inshapes, outshapes = omep.load_IR_model(model)
# Load an image and run inference
img_orig = cv2.imread('people.jpg')
res = omep.infer_ocv_image(exenet, img_orig, inblobs[0])
# OpenCV loads images as BGR; convert to RGB so matplotlib displays colors correctly.
img = cv2.cvtColor(img_orig, cv2.COLOR_BGR2RGB)
plt.imshow(img)
plt.show()
# Visualize the two network outputs as heatmaps; only the first overlays the photo.
# NOTE(review): presumably Mconv7_stage2_L2 holds keypoint confidence maps and
# Mconv7_stage2_L1 the part-affinity fields -- confirm with the model docs.
omep.display_heatmap(res['Mconv7_stage2_L2'], overlay_img=img_orig, statistics=False)
omep.display_heatmap(res['Mconv7_stage2_L1'], statistics=False)
| [
"matplotlib.pyplot.imshow",
"openvino_model_experiment_package.load_IR_model",
"openvino_model_experiment_package.display_heatmap",
"openvino_model_experiment_package.infer_ocv_image",
"cv2.cvtColor",
"cv2.imread",
"matplotlib.pyplot.show"
] | [((312, 337), 'openvino_model_experiment_package.load_IR_model', 'omep.load_IR_model', (['model'], {}), '(model)\n', (330, 337), True, 'import openvino_model_experiment_package as omep\n'), ((384, 408), 'cv2.imread', 'cv2.imread', (['"""people.jpg"""'], {}), "('people.jpg')\n", (394, 408), False, 'import cv2\n'), ((415, 465), 'openvino_model_experiment_package.infer_ocv_image', 'omep.infer_ocv_image', (['exenet', 'img_orig', 'inblobs[0]'], {}), '(exenet, img_orig, inblobs[0])\n', (435, 465), True, 'import openvino_model_experiment_package as omep\n'), ((473, 514), 'cv2.cvtColor', 'cv2.cvtColor', (['img_orig', 'cv2.COLOR_BGR2RGB'], {}), '(img_orig, cv2.COLOR_BGR2RGB)\n', (485, 514), False, 'import cv2\n'), ((515, 530), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (525, 530), True, 'import matplotlib.pyplot as plt\n'), ((531, 541), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (539, 541), True, 'import matplotlib.pyplot as plt\n'), ((543, 632), 'openvino_model_experiment_package.display_heatmap', 'omep.display_heatmap', (["res['Mconv7_stage2_L2']"], {'overlay_img': 'img_orig', 'statistics': '(False)'}), "(res['Mconv7_stage2_L2'], overlay_img=img_orig,\n statistics=False)\n", (563, 632), True, 'import openvino_model_experiment_package as omep\n'), ((630, 693), 'openvino_model_experiment_package.display_heatmap', 'omep.display_heatmap', (["res['Mconv7_stage2_L1']"], {'statistics': '(False)'}), "(res['Mconv7_stage2_L1'], statistics=False)\n", (650, 693), True, 'import openvino_model_experiment_package as omep\n')] |
import large_image
import urllib
import pytest
@pytest.mark.parametrize("item, output", [
    ('590346ff8d777f16d01e054c', '/tmp/Huron.Image2_JPEG2K.tif')
])
def test_tiff_tile_source(item, output):
    """Check whether large_image can return a tile with tiff sources.

    Downloads the test image from data.kitware.com, opens it with
    large_image, and verifies the source type and that a tile is returned.
    """
    # Bug fix: `urllib.urlretrieve` only existed on Python 2; Python 3 moved
    # it into the urllib.request submodule.
    from urllib.request import urlretrieve

    test_url = 'https://data.kitware.com/api/v1/item/{}/download'.format(item)
    urlretrieve(test_url, output)
    image = large_image.getTileSource(output)
    # Make sure it is the tiff tile source
    assert isinstance(image, large_image.tilesource.TiffFileTileSource)
    # Make sure we can get a tile without an exception. Tile data comes back
    # as raw bytes on Python 3 (it was str on Python 2), so accept either.
    assert isinstance(image.getTile(0, 0, 0), (bytes, str))
| [
"urllib.urlretrieve",
"pytest.mark.parametrize",
"large_image.getTileSource"
] | [((50, 157), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""item, output"""', "[('590346ff8d777f16d01e054c', '/tmp/Huron.Image2_JPEG2K.tif')]"], {}), "('item, output', [('590346ff8d777f16d01e054c',\n '/tmp/Huron.Image2_JPEG2K.tif')])\n", (73, 157), False, 'import pytest\n'), ((357, 393), 'urllib.urlretrieve', 'urllib.urlretrieve', (['test_url', 'output'], {}), '(test_url, output)\n', (375, 393), False, 'import urllib\n'), ((406, 439), 'large_image.getTileSource', 'large_image.getTileSource', (['output'], {}), '(output)\n', (431, 439), False, 'import large_image\n')] |
import os
import sys
import pickle
import traceback
from XSTAF.core.logger import LOGGER
class ToolManager(object):
    """Discovers, configures and dynamically imports XSTAF tool packages.

    Tool packages live in a configurable directory; the names of the
    registered tools are persisted in a pickle file inside that directory.
    """

    def __init__(self):
        # Default settings; overridable through apply_settings().
        self.settings = {"ToolsLocation" : r"tools",
                        "ToolsConfigureFile" : "config.pickle"}
        # Names of registered tool packages (loaded from the pickle file).
        self.tool_name_list = []
        # Filled in by config().
        self.abs_tools_location = ""
        self.pickle_config_file = ""

    def apply_settings(self, **kwargs):
        """Update known settings; unknown keys are silently ignored."""
        for arg in kwargs.items():
            if arg[0] in self.settings:
                self.settings[arg[0]] = arg[1]

    def config(self):
        """Resolve the tools directory and load the persisted tool list."""
        tools_location = self.settings["ToolsLocation"]
        if not os.path.isabs(tools_location):
            # Relative locations are resolved against the XSTAF package root.
            XSTAF_path = os.path.dirname(os.path.abspath(__file__))
            self.abs_tools_location = os.path.join(XSTAF_path, "..", tools_location)
        else:
            self.abs_tools_location = tools_location
        # Try to get the tool name list from the pickle file.
        self.pickle_config_file = os.path.join(self.abs_tools_location, self.settings["ToolsConfigureFile"])
        self.load_config()

    def get_tool(self, tool_module_name):
        """Dynamically import the named package and return its Tool class.

        Returns None when the tools directory is missing or the module (or
        its Tool attribute) cannot be imported.
        """
        if not os.path.isdir(self.abs_tools_location):
            LOGGER.warning("Can not find tools location: %s", self.abs_tools_location)
            return None
        # Make the tools directory importable. Bug fix: guard against
        # appending a duplicate entry on every call (the original grew
        # sys.path without bound).
        if self.abs_tools_location not in sys.path:
            sys.path.append(self.abs_tools_location)
        # Bug fix: reload() is a builtin on Python 2 only; on Python 3 it
        # lives in importlib.
        try:
            from importlib import reload
        except ImportError:
            pass
        try:
            tool_module = __import__(tool_module_name)
            # Reload so an updated tool implementation is picked up.
            tool_module = reload(tool_module)
            tool = tool_module.Tool
        except (ImportError, AttributeError):
            LOGGER.info("Can not import tool: %s" % tool_module_name)
            LOGGER.debug(traceback.format_exc())
            return None
        else:
            return tool

    def load_config(self):
        """Load the registered tool names from the pickle config file."""
        if not os.path.isfile(self.pickle_config_file):
            LOGGER.warning("Can not find config file: %s", self.pickle_config_file)
            return
        # Bug fix: pickle data is binary -- 'rb' is required on Python 3 and
        # avoids newline translation corrupting the stream on Windows.
        with open(self.pickle_config_file, 'rb') as f:
            self.tool_name_list = pickle.load(f)

    def save_config(self):
        """Persist the current tool names to the pickle config file."""
        if not os.path.isdir(self.abs_tools_location):
            LOGGER.warning("Can not find tools location: %s", self.abs_tools_location)
            return None
        # Bug fix: write pickles in binary mode (see load_config).
        with open(self.pickle_config_file, 'wb') as f:
            pickle.dump(self.tool_name_list, f)

    @property
    def available_tool_name_list(self):
        """Yield names of importable tool packages not yet registered."""
        if not os.path.isdir(self.abs_tools_location):
            LOGGER.warning("Can not find tools location: %s", self.abs_tools_location)
            # Bare return keeps this a (now empty) generator.
            return
        # Check all packages under abs_tools_location.
        for name in os.listdir(self.abs_tools_location):
            # Only directories can be tool packages.
            abs_name = os.path.join(self.abs_tools_location, name)
            if os.path.isdir(abs_name):
                if not(name in self.tool_name_list) and not(self.get_tool(name) is None):
                    yield name
"traceback.format_exc",
"os.listdir",
"pickle.dump",
"os.path.isabs",
"XSTAF.core.logger.LOGGER.info",
"os.path.join",
"pickle.load",
"os.path.isfile",
"os.path.isdir",
"os.path.abspath",
"sys.path.append",
"XSTAF.core.logger.LOGGER.warning"
] | [((1045, 1119), 'os.path.join', 'os.path.join', (['self.abs_tools_location', "self.settings['ToolsConfigureFile']"], {}), "(self.abs_tools_location, self.settings['ToolsConfigureFile'])\n", (1057, 1119), False, 'import os\n'), ((1453, 1493), 'sys.path.append', 'sys.path.append', (['self.abs_tools_location'], {}), '(self.abs_tools_location)\n', (1468, 1493), False, 'import sys\n'), ((2187, 2226), 'os.path.isfile', 'os.path.isfile', (['self.pickle_config_file'], {}), '(self.pickle_config_file)\n', (2201, 2226), False, 'import os\n'), ((650, 679), 'os.path.isabs', 'os.path.isabs', (['tools_location'], {}), '(tools_location)\n', (663, 679), False, 'import os\n'), ((845, 891), 'os.path.join', 'os.path.join', (['XSTAF_path', '""".."""', 'tools_location'], {}), "(XSTAF_path, '..', tools_location)\n", (857, 891), False, 'import os\n'), ((1205, 1243), 'os.path.isdir', 'os.path.isdir', (['self.abs_tools_location'], {}), '(self.abs_tools_location)\n', (1218, 1243), False, 'import os\n'), ((1257, 1331), 'XSTAF.core.logger.LOGGER.warning', 'LOGGER.warning', (['"""Can not find tools location: %s"""', 'self.abs_tools_location'], {}), "('Can not find tools location: %s', self.abs_tools_location)\n", (1271, 1331), False, 'from XSTAF.core.logger import LOGGER\n'), ((1987, 2026), 'os.path.isfile', 'os.path.isfile', (['self.pickle_config_file'], {}), '(self.pickle_config_file)\n', (2001, 2026), False, 'import os\n'), ((2040, 2111), 'XSTAF.core.logger.LOGGER.warning', 'LOGGER.warning', (['"""Can not find config file: %s"""', 'self.pickle_config_file'], {}), "('Can not find config file: %s', self.pickle_config_file)\n", (2054, 2111), False, 'from XSTAF.core.logger import LOGGER\n'), ((2390, 2428), 'os.path.isdir', 'os.path.isdir', (['self.abs_tools_location'], {}), '(self.abs_tools_location)\n', (2403, 2428), False, 'import os\n'), ((2442, 2516), 'XSTAF.core.logger.LOGGER.warning', 'LOGGER.warning', (['"""Can not find tools location: %s"""', 'self.abs_tools_location'], {}), "('Can 
not find tools location: %s', self.abs_tools_location)\n", (2456, 2516), False, 'from XSTAF.core.logger import LOGGER\n'), ((2658, 2693), 'pickle.dump', 'pickle.dump', (['self.tool_name_list', 'f'], {}), '(self.tool_name_list, f)\n', (2669, 2693), False, 'import pickle\n'), ((2772, 2810), 'os.path.isdir', 'os.path.isdir', (['self.abs_tools_location'], {}), '(self.abs_tools_location)\n', (2785, 2810), False, 'import os\n'), ((2824, 2898), 'XSTAF.core.logger.LOGGER.warning', 'LOGGER.warning', (['"""Can not find tools location: %s"""', 'self.abs_tools_location'], {}), "('Can not find tools location: %s', self.abs_tools_location)\n", (2838, 2898), False, 'from XSTAF.core.logger import LOGGER\n'), ((3046, 3081), 'os.listdir', 'os.listdir', (['self.abs_tools_location'], {}), '(self.abs_tools_location)\n', (3056, 3081), False, 'import os\n'), ((779, 804), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (794, 804), False, 'import os\n'), ((1763, 1820), 'XSTAF.core.logger.LOGGER.info', 'LOGGER.info', (["('Can not import tool: %s' % tool_module_name)"], {}), "('Can not import tool: %s' % tool_module_name)\n", (1774, 1820), False, 'from XSTAF.core.logger import LOGGER\n'), ((2324, 2338), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2335, 2338), False, 'import pickle\n'), ((3143, 3186), 'os.path.join', 'os.path.join', (['self.abs_tools_location', 'name'], {}), '(self.abs_tools_location, name)\n', (3155, 3186), False, 'import os\n'), ((3206, 3229), 'os.path.isdir', 'os.path.isdir', (['abs_name'], {}), '(abs_name)\n', (3219, 3229), False, 'import os\n'), ((1846, 1868), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (1866, 1868), False, 'import traceback\n')] |
"""
One-time migration script from sqlalchemy models and sqlite database to custom ORM & PostgreSQL.
Not designed to work as part of the regular alembic system, merely placed here for archive purposes.
Should never need to run this again.
2021-05-03
"""
from datetime import datetime, timedelta
import sqlite3
from data.post_data import PostData, PostModel
from data.snapshot_data import SnapshotData, SnapshotModel, SnapshotFrontpageModel
from data.user_data import UserData
from services import post_service
from utils.logger import logger
from utils.reddit import base36decode
# Module-level data-access singletons shared by the migration helpers below.
_post_data = PostData()
_snapshot_data = SnapshotData()
_user_data = UserData()

# Path to the legacy sqlite database being migrated away from.
DB_FILE = "src/database.db"
def migrate_posts(offset=0):
    """Migrate one batch of up to 1000 posts from sqlite to the new store.

    Returns the number of rows read; a value below 1000 means the end of the
    legacy posts table was reached.
    """
    connection = sqlite3.connect(DB_FILE)
    connection.row_factory = sqlite3.Row
    batch = connection.execute(
        "SELECT * FROM posts LIMIT 1000 OFFSET ?;", (offset,)).fetchall()
    connection.close()

    last_row = None
    for last_row in batch:
        post_id36 = last_row["id"]
        # Skip posts already migrated on a previous run.
        if post_service.get_post_by_id(post_id36):
            continue
        # The legacy database stores no user data, so author information is
        # left for a later backfill (along with flair ids).
        new_post = PostModel()
        new_post.set_id(post_id36)
        new_post.title = last_row["title"]
        new_post.created_time = last_row["created_time"]
        new_post.flair_text = last_row["flair"]
        _post_data.insert(new_post, error_on_conflict=False)

    if not last_row:
        logger.warning("No rows processed!")
    else:
        logger.info(f"Most recent migrated row: psk={last_row['psk']}, id={last_row['id']}")
    return len(batch)
def migrate_snapshots(date, hour):
    """Migrate one snapshot row plus its frontpage entries for (date, hour).

    Does nothing when the legacy database holds no snapshot for that slot
    (i.e. we are past the last recorded snapshot).
    """
    conn = sqlite3.connect(DB_FILE)
    conn.row_factory = sqlite3.Row
    # Bug fix: the original leaked the connection when no snapshot row
    # existed (the early return skipped conn.close()); try/finally now
    # guarantees the connection is closed on every path.
    try:
        row = conn.execute(
            "SELECT * FROM snapshots WHERE date=? and hour=?;", (date, hour)
        ).fetchone()
        # No data, past the last recorded snapshot?
        if not row:
            return
        old_snapshot_psk = row["psk"]
        snapshot = SnapshotModel()
        snapshot.created_time = row["datetime"]
        snapshot.date = date
        snapshot.hour = hour
        snapshot.subscribers = row["subscribers"]
        new_snapshot = _snapshot_data.insert(snapshot)
        rows = conn.execute(
            "SELECT sf.*, p.id FROM snapshot_frontpage sf JOIN posts p on sf.post_psk = p.psk WHERE snapshot_psk=?;",
            (old_snapshot_psk,),
        ).fetchall()
    finally:
        conn.close()
    for row in rows:
        sfp_model = SnapshotFrontpageModel()
        # Legacy ids are base36 strings; the new schema keys on integers.
        sfp_model.post_id = base36decode(row["id"])
        sfp_model.snapshot_id = new_snapshot.id
        sfp_model.rank = row["rank"]
        sfp_model.score = row["score"]
        _snapshot_data.insert(sfp_model)
def main():
    """Drive the full migration: every post first, then hourly snapshots."""
    offset = 0
    while True:
        batch_size = migrate_posts(offset)
        offset += batch_size
        # A short batch means the legacy posts table is exhausted.
        if batch_size < 1000:
            break
        if offset % 1000 == 0:
            logger.info(f"Migrated {offset} posts total")

    # Walk hour-by-hour from the first recorded snapshot up to now.
    cursor = datetime.fromisoformat("2020-05-12 04:00:00.000")
    now = datetime.utcnow()
    while cursor <= now:
        try:
            migrate_snapshots(cursor.date(), cursor.hour)
        except Exception:
            logger.exception(f"Failed to migrate {cursor.date()} - {cursor.hour}")
        cursor += timedelta(hours=1)
        if cursor.hour == 0:
            logger.info(f"Finished migrating {cursor.date()}")
# Script entry point: run the one-time sqlite -> PostgreSQL migration.
if __name__ == "__main__":
    main()
| [
"data.snapshot_data.SnapshotFrontpageModel",
"sqlite3.connect",
"data.user_data.UserData",
"datetime.datetime.utcnow",
"data.post_data.PostModel",
"utils.reddit.base36decode",
"data.post_data.PostData",
"utils.logger.logger.info",
"data.snapshot_data.SnapshotModel",
"datetime.datetime.fromisoforma... | [((599, 609), 'data.post_data.PostData', 'PostData', ([], {}), '()\n', (607, 609), False, 'from data.post_data import PostData, PostModel\n'), ((627, 641), 'data.snapshot_data.SnapshotData', 'SnapshotData', ([], {}), '()\n', (639, 641), False, 'from data.snapshot_data import SnapshotData, SnapshotModel, SnapshotFrontpageModel\n'), ((655, 665), 'data.user_data.UserData', 'UserData', ([], {}), '()\n', (663, 665), False, 'from data.user_data import UserData\n'), ((905, 929), 'sqlite3.connect', 'sqlite3.connect', (['DB_FILE'], {}), '(DB_FILE)\n', (920, 929), False, 'import sqlite3\n'), ((2159, 2183), 'sqlite3.connect', 'sqlite3.connect', (['DB_FILE'], {}), '(DB_FILE)\n', (2174, 2183), False, 'import sqlite3\n'), ((2450, 2465), 'data.snapshot_data.SnapshotModel', 'SnapshotModel', ([], {}), '()\n', (2463, 2465), False, 'from data.snapshot_data import SnapshotData, SnapshotModel, SnapshotFrontpageModel\n'), ((3480, 3529), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['"""2020-05-12 04:00:00.000"""'], {}), "('2020-05-12 04:00:00.000')\n", (3502, 3529), False, 'from datetime import datetime, timedelta\n'), ((3540, 3557), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (3555, 3557), False, 'from datetime import datetime, timedelta\n'), ((1239, 1277), 'services.post_service.get_post_by_id', 'post_service.get_post_by_id', (['post_id36'], {}), '(post_id36)\n', (1266, 1277), False, 'from services import post_service\n'), ((1613, 1624), 'data.post_data.PostModel', 'PostModel', ([], {}), '()\n', (1622, 1624), False, 'from data.post_data import PostData, PostModel\n'), ((1959, 1995), 'utils.logger.logger.warning', 'logger.warning', (['"""No rows processed!"""'], {}), "('No rows processed!')\n", (1973, 1995), False, 'from utils.logger import logger\n'), ((2014, 2088), 'utils.logger.logger.info', 'logger.info', (['f"""Most recent migrated row: psk={row[\'psk\']}, id={row[\'id\']}"""'], {}), '(f"Most recent 
migrated row: psk={row[\'psk\']}, id={row[\'id\']}")\n', (2025, 2088), False, 'from utils.logger import logger\n'), ((2904, 2928), 'data.snapshot_data.SnapshotFrontpageModel', 'SnapshotFrontpageModel', ([], {}), '()\n', (2926, 2928), False, 'from data.snapshot_data import SnapshotData, SnapshotModel, SnapshotFrontpageModel\n'), ((2957, 2980), 'utils.reddit.base36decode', 'base36decode', (["row['id']"], {}), "(row['id'])\n", (2969, 2980), False, 'from utils.reddit import base36decode\n'), ((3841, 3859), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (3850, 3859), False, 'from datetime import datetime, timedelta\n'), ((3402, 3455), 'utils.logger.logger.info', 'logger.info', (['f"""Migrated {current_offset} posts total"""'], {}), "(f'Migrated {current_offset} posts total')\n", (3413, 3455), False, 'from utils.logger import logger\n')] |
from mfl_playoff_leagues import MFL
def run(request):
    """Respond to any HTTP request.

    Args:
        request (flask.Request): HTTP request object. The query string must
            carry ``year`` and ``league``; it may also carry ``live_scoring``
            to select the live-scoring view.
    Returns:
        The response text or any set of values that can be turned into a
        Response object using
        `make_response <http://flask.pocoo.org/docs/1.0/api/#flask.Flask.make_response>`.
    """
    # Query-string parameters (dict-like). The previous request.get_json()
    # call was unused and has been dropped.
    args = request.args
    m = MFL(year=args['year'], league=args['league'])
    # Membership on an empty mapping is already False, so there is no need
    # to test the truthiness of request.args first.
    if 'live_scoring' in args:
        return m.live_scoring_html()
    if 'league' in args:
        return m.league_html()
    return {}
| [
"mfl_playoff_leagues.MFL"
] | [((503, 548), 'mfl_playoff_leagues.MFL', 'MFL', ([], {'year': "args['year']", 'league': "args['league']"}), "(year=args['year'], league=args['league'])\n", (506, 548), False, 'from mfl_playoff_leagues import MFL\n')] |
from pyObjective import Variable, Model
import numpy as np
"""This example script is written to demonstrate the use of classes, and how more complicated models can be built,
and still passed to the solver. As a rudimentary example, it has two cubes and a sphere, and we are trying to find
the dimensions such that the cube1 - cube2 + sphere volume is minimized, subject to the bounds. """
# define a new class
class Cube:
    """A box whose three edge lengths are optimization variables.

    Creating a Cube registers its variables with the given model.
    """
    def __init__(self, model):
        bounds = (0.5, 2)
        self.x = Variable('x', 1, bounds, "cube length x")
        self.y = Variable('y', 1, bounds)
        self.z = Variable('z', 1, bounds)
        for var in (self.x, self.y, self.z):
            model.add_var(var)
    def volume(self):
        """Return the product of the three edge lengths."""
        return self.x() * self.y() * self.z()
# define a sphere, but keep the variable definition on the outside. For fun
class Sphere:
    """A ball whose radius is supplied as an externally-managed callable."""
    def __init__(self, radius):
        self.r = radius
    def volume(self):
        """Return the ball volume, (4/3) * pi * r**3."""
        radius = self.r()
        return (4 / 3) * np.pi * radius ** 3
# Build the optimization model that owns all variables.
m = Model()
# Each Cube registers its own three variables with the model.
c1 = Cube(m)
c2 = Cube(m)
# The sphere radius is declared separately and must be registered by hand;
# try commenting the add_var call and it disappears from the optimization.
r = Variable("r", 1, (0.5, 2), "sphere radius")
m.add_var(r)
s = Sphere(r)
def cost():
    """Objective to minimize: cube1 volume - cube2 volume + sphere volume."""
    return c1.volume() - c2.volume() + s.volume()
m.objective = cost
# Run the solver and report the optimum.
m.solve()
m.display_results()
| [
"pyObjective.Model",
"pyObjective.Variable"
] | [((1077, 1084), 'pyObjective.Model', 'Model', ([], {}), '()\n', (1082, 1084), False, 'from pyObjective import Variable, Model\n'), ((1158, 1201), 'pyObjective.Variable', 'Variable', (['"""r"""', '(1)', '(0.5, 2)', '"""sphere radius"""'], {}), "('r', 1, (0.5, 2), 'sphere radius')\n", (1166, 1201), False, 'from pyObjective import Variable, Model\n'), ((476, 519), 'pyObjective.Variable', 'Variable', (['"""x"""', '(1)', '(0.5, 2)', '"""cube length x"""'], {}), "('x', 1, (0.5, 2), 'cube length x')\n", (484, 519), False, 'from pyObjective import Variable, Model\n'), ((537, 563), 'pyObjective.Variable', 'Variable', (['"""y"""', '(1)', '(0.5, 2)'], {}), "('y', 1, (0.5, 2))\n", (545, 563), False, 'from pyObjective import Variable, Model\n'), ((581, 607), 'pyObjective.Variable', 'Variable', (['"""z"""', '(1)', '(0.5, 2)'], {}), "('z', 1, (0.5, 2))\n", (589, 607), False, 'from pyObjective import Variable, Model\n')] |
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2018
# <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import pytest
from telegram import (InlineKeyboardButton, InlineQueryResultGame,
InlineQueryResultVoice, InlineKeyboardMarkup)
@pytest.fixture(scope='class')
def inline_query_result_game():
    """Build the canonical InlineQueryResultGame used by the test class."""
    return InlineQueryResultGame(
        TestInlineQueryResultGame.id,
        TestInlineQueryResultGame.game_short_name,
        reply_markup=TestInlineQueryResultGame.reply_markup,
    )
class TestInlineQueryResultGame(object):
    """Unit tests for telegram.InlineQueryResultGame."""
    id = 'id'
    type = 'game'
    game_short_name = 'game short name'
    reply_markup = InlineKeyboardMarkup([[InlineKeyboardButton('reply_markup')]])

    def test_expected_values(self, inline_query_result_game):
        result = inline_query_result_game
        assert result.type == self.type
        assert result.id == self.id
        assert result.game_short_name == self.game_short_name
        assert result.reply_markup.to_dict() == self.reply_markup.to_dict()

    def test_to_dict(self, inline_query_result_game):
        as_dict = inline_query_result_game.to_dict()
        assert isinstance(as_dict, dict)
        assert as_dict['type'] == inline_query_result_game.type
        assert as_dict['id'] == inline_query_result_game.id
        assert as_dict['game_short_name'] == inline_query_result_game.game_short_name
        assert as_dict['reply_markup'] == inline_query_result_game.reply_markup.to_dict()

    def test_equality(self):
        a = InlineQueryResultGame(self.id, self.game_short_name)
        b = InlineQueryResultGame(self.id, self.game_short_name)
        c = InlineQueryResultGame(self.id, '')
        d = InlineQueryResultGame('', self.game_short_name)
        e = InlineQueryResultVoice(self.id, '', '')

        # Same id and name: equal but distinct objects.
        assert a == b
        assert hash(a) == hash(b)
        assert a is not b

        # Equality is driven by id only.
        assert a == c
        assert hash(a) == hash(c)

        assert a != d
        assert hash(a) != hash(d)

        assert a != e
        assert hash(a) != hash(e)
| [
"pytest.fixture",
"telegram.InlineQueryResultVoice",
"telegram.InlineQueryResultGame",
"telegram.InlineKeyboardButton"
] | [((925, 954), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""class"""'}), "(scope='class')\n", (939, 954), False, 'import pytest\n'), ((998, 1154), 'telegram.InlineQueryResultGame', 'InlineQueryResultGame', (['TestInlineQueryResultGame.id', 'TestInlineQueryResultGame.game_short_name'], {'reply_markup': 'TestInlineQueryResultGame.reply_markup'}), '(TestInlineQueryResultGame.id,\n TestInlineQueryResultGame.game_short_name, reply_markup=\n TestInlineQueryResultGame.reply_markup)\n', (1019, 1154), False, 'from telegram import InlineKeyboardButton, InlineQueryResultGame, InlineQueryResultVoice, InlineKeyboardMarkup\n'), ((2436, 2488), 'telegram.InlineQueryResultGame', 'InlineQueryResultGame', (['self.id', 'self.game_short_name'], {}), '(self.id, self.game_short_name)\n', (2457, 2488), False, 'from telegram import InlineKeyboardButton, InlineQueryResultGame, InlineQueryResultVoice, InlineKeyboardMarkup\n'), ((2501, 2553), 'telegram.InlineQueryResultGame', 'InlineQueryResultGame', (['self.id', 'self.game_short_name'], {}), '(self.id, self.game_short_name)\n', (2522, 2553), False, 'from telegram import InlineKeyboardButton, InlineQueryResultGame, InlineQueryResultVoice, InlineKeyboardMarkup\n'), ((2566, 2600), 'telegram.InlineQueryResultGame', 'InlineQueryResultGame', (['self.id', '""""""'], {}), "(self.id, '')\n", (2587, 2600), False, 'from telegram import InlineKeyboardButton, InlineQueryResultGame, InlineQueryResultVoice, InlineKeyboardMarkup\n'), ((2613, 2660), 'telegram.InlineQueryResultGame', 'InlineQueryResultGame', (['""""""', 'self.game_short_name'], {}), "('', self.game_short_name)\n", (2634, 2660), False, 'from telegram import InlineKeyboardButton, InlineQueryResultGame, InlineQueryResultVoice, InlineKeyboardMarkup\n'), ((2673, 2712), 'telegram.InlineQueryResultVoice', 'InlineQueryResultVoice', (['self.id', '""""""', '""""""'], {}), "(self.id, '', '')\n", (2695, 2712), False, 'from telegram import InlineKeyboardButton, InlineQueryResultGame, 
InlineQueryResultVoice, InlineKeyboardMarkup\n'), ((1369, 1405), 'telegram.InlineKeyboardButton', 'InlineKeyboardButton', (['"""reply_markup"""'], {}), "('reply_markup')\n", (1389, 1405), False, 'from telegram import InlineKeyboardButton, InlineQueryResultGame, InlineQueryResultVoice, InlineKeyboardMarkup\n')] |
import pygame as pg
class Sheet:
    """Helper for cutting individual sprites out of a sprite-sheet image."""

    def __init__(self, sheet_path):
        """Load the sheet at *sheet_path* as an alpha-converted surface."""
        self.spritesheet = pg.image.load(sheet_path).convert_alpha()

    def get_image(self, x, y, width, height, alpha=False):
        """Return the sprite at (x, y) (left/top corner) of the given size.

        Black (0, 0, 0) is used as the transparency color key. When *alpha*
        is true the sprite is returned alpha-converted, otherwise it is
        converted to the plain display format.
        """
        sprite = pg.Surface((width, height))
        sprite.blit(self.spritesheet, (0, 0), (x, y, width, height))
        sprite.set_colorkey((0, 0, 0))
        sprite.set_alpha(255)
        return sprite.convert_alpha() if alpha else sprite.convert()
| [
"pygame.image.load",
"pygame.Surface"
] | [((582, 609), 'pygame.Surface', 'pg.Surface', (['(width, height)'], {}), '((width, height))\n', (592, 609), True, 'import pygame as pg\n'), ((272, 297), 'pygame.image.load', 'pg.image.load', (['sheet_path'], {}), '(sheet_path)\n', (285, 297), True, 'import pygame as pg\n')] |
from config import secret
from utils import getLogger
DEBUG = getLogger()
class MysqlConnection:
    """Connection settings for one MySQL database on a named server.

    Host, port and credentials are read from the ``secret`` configuration
    mapping under ``secret[server_name]['mysql']``.
    """
    def __init__(self, database, server_name):
        mysql_conf = secret[server_name]['mysql']
        self.host = mysql_conf['host']
        self.port = mysql_conf['port']
        self.username = mysql_conf['username']
        self.password = mysql_conf['password']
        self.database = database
| [
"utils.getLogger"
] | [((62, 73), 'utils.getLogger', 'getLogger', ([], {}), '()\n', (71, 73), False, 'from utils import getLogger\n')] |
#from selenium.webdriver.remote import webdriver
from selenium import webdriver
#from selenium.webdriver.chrome import options
from page_objects import EntryPage, LoginPage, RedactionPage, ContactTracePage, AddNewRecordPage, AddDataToRecordPage, StageForPublishingPage, PublishDataPage, SettingsPage, Tools
import unittest
import os
class TestRedaction(unittest.TestCase):
    """Selenium end-to-end tests for the SafePlaces redaction web UI.

    Only ``setUp``/``tearDown`` follow unittest naming; the scenario methods
    deliberately omit the ``test_`` prefix (see their inline comments) so the
    runner skips them until the system under test works.
    """
    def setUp(self):
        # Build a Chrome session, either local or against a remote Selenium
        # hub. Paths and URLs come from environment variables with hard-coded
        # fallbacks for local development.
        #Change this to TRUE if you don't want to use a dockerised stack
        self.local_mode = False
        #setup environment based on environment variables
        if 'HOME_DIR' in os.environ.copy():
            self.home_dir = os.environ['HOME_DIR']
        else:
            self.home_dir = os.getcwd()
        if 'DATA_DIR' in os.environ.copy():
            self.data_dir = os.environ['DATA_DIR']
        else:
            self.data_dir = 'tests/data/'
        if 'TMP_DIR' in os.environ.copy():
            self.tmp_dir = os.environ['TMP_DIR']
        else:
            self.tmp_dir = '/tmp/'
        if 'BASE_TEST_URL' in os.environ.copy():
            self.base_url = os.environ['BASE_TEST_URL']
        else:
            self.base_url = 'https://react.safeplaces.extremesolution.com/'
        if 'SELENIUM_URL' in os.environ.copy():
            self.sel_url = os.environ['SELENIUM_URL']
        else:
            self.sel_url = 'http://172.17.0.2:4444/wd/hub'
        # Browser downloads land in /tmp so saved files can be inspected.
        chrome_options = webdriver.ChromeOptions()
        prefs = {'download.default_directory': '/tmp'}
        chrome_options.add_experimental_option('prefs', prefs)
        if self.local_mode:
            self.driver = webdriver.Chrome(chrome_options=chrome_options)
        else:
            self.driver = webdriver.Remote(command_executor=self.sel_url, options=chrome_options)
    def app_loads(self):
        # Smoke check: the entry page opens at all.
        tools = Tools()  # NOTE(review): unused in this method
        entry_page = EntryPage(self.driver,base_url=self.base_url)
        entry_page.open_page()
    def contact_trace(self):
        # Scenario: log in, create a record manually, start adding one data
        # point, cancel, then open settings to log out.
        tools = Tools()  # NOTE(review): unused in this method
        entry_page = EntryPage(self.driver,base_url=self.base_url)
        entry_page.open_page()
        login_page = LoginPage(self.driver)
        login_page.login_if_required()
        entry_page.open_trace()
        contact_trace_page = ContactTracePage(self.driver)
        contact_trace_page.add_new_record()
        add_record_page = AddNewRecordPage(self.driver)
        add_record_page.create_manually()
        contact_trace_page.more()
        contact_trace_page.add_data_point()
        # start to add a point and cancel editing the point
        # if the test works this far, we can expand it later
        point_editor_page = AddDataToRecordPage(self.driver)
        point_editor_page.enter_location('-122.19732036472264, 37.718665250290684')
        point_editor_page.enter_date('06/08/2020 07:00')
        point_editor_page.close()
        entry_page.open_settings()
        settings_page = SettingsPage(self.driver)
        # NOTE(review): bare attribute access below — if logout is a method
        # this line does nothing; confirm against the SettingsPage API.
        settings_page.logout
    # leaving test_ out of the method name until the SUT works
    def settings(self):
        # Scenario: fill in every health-authority settings field and save.
        login_page = LoginPage(self.driver)
        login_page.login_if_required()
        entry_page = EntryPage(self.driver,base_url=self.base_url)
        entry_page.open_page()
        entry_page.open_settings()
        settings_page = SettingsPage(self.driver)
        settings_page.set_health_authority_name('Test Health Authority')
        settings_page.set_information_website_URL('https://cdc.gov')
        settings_page.set_reference_website_URL('https://cdc.gov')
        settings_page.set_api_endpoint('https://s3.aws.com/bucket_name/safepaths.json')
        settings_page.set_privacy_policy_URL('https://www.cdc.gov/other/privacy.html')
        # set retention policy slider to 50% of the way across, which would be 15 days
        # commented out until we find how to get ActionChains working
        # settings_page.set_retention_policy('50')
        # NOTE(review): the next two lines are bare attribute accesses —
        # likely missing (); confirm against the SettingsPage API.
        settings_page.reset_gps_coordinates
        settings_page.save_and_continue
    #def test_redaction(self): <--- removed test_ from the method name until the SUT works!
    def redaction(self):
        # Scenario: load a location file, verify its date range, and save it.
        tools = Tools()
        entry_page = EntryPage(self.driver,base_url=self.base_url)
        entry_page.open_page()
        entry_page.setup_case()
        entry_page.open_redactor()
        login_page = LoginPage(self.driver)
        login_page.login_if_required()
        redaction_page = RedactionPage(self.driver)
        redaction_page.load_file(self.data_dir +'/privkit31A-synthetic-REDACTED.json')
        redaction_page.check_start_date_is('1-Mar-2020 1:00pm GMT')
        redaction_page.check_end_date_is('19-Mar-2020 10:00pm GMT')
        redaction_page.check_duration_is('18 days 9 hrs')
        redaction_page.save_file()
        #TODO: this next step fails because it was designed for backend=OFF. To test this, we need to load the publisher screen and see what data is there when we hit load
        #tools.compare_files(self.tmp_dir + '/privkit31A-synthetic-REDACTED-REDACTED.json', self.home_dir + '/' + self.data_dir + '/expected_results/privkit31A-synthetic-REDACTED-REDACTED.json')
    def tearDown(self):
        # NOTE(review): close() only closes the current window; quit() would
        # also end the WebDriver session — confirm which is intended.
        self.driver.close()
| [
"selenium.webdriver.Remote",
"selenium.webdriver.ChromeOptions",
"page_objects.SettingsPage",
"page_objects.EntryPage",
"selenium.webdriver.Chrome",
"page_objects.RedactionPage",
"os.environ.copy",
"page_objects.Tools",
"os.getcwd",
"page_objects.AddNewRecordPage",
"page_objects.LoginPage",
"p... | [((1398, 1423), 'selenium.webdriver.ChromeOptions', 'webdriver.ChromeOptions', ([], {}), '()\n', (1421, 1423), False, 'from selenium import webdriver\n'), ((1798, 1805), 'page_objects.Tools', 'Tools', ([], {}), '()\n', (1803, 1805), False, 'from page_objects import EntryPage, LoginPage, RedactionPage, ContactTracePage, AddNewRecordPage, AddDataToRecordPage, StageForPublishingPage, PublishDataPage, SettingsPage, Tools\n'), ((1827, 1873), 'page_objects.EntryPage', 'EntryPage', (['self.driver'], {'base_url': 'self.base_url'}), '(self.driver, base_url=self.base_url)\n', (1836, 1873), False, 'from page_objects import EntryPage, LoginPage, RedactionPage, ContactTracePage, AddNewRecordPage, AddDataToRecordPage, StageForPublishingPage, PublishDataPage, SettingsPage, Tools\n'), ((1950, 1957), 'page_objects.Tools', 'Tools', ([], {}), '()\n', (1955, 1957), False, 'from page_objects import EntryPage, LoginPage, RedactionPage, ContactTracePage, AddNewRecordPage, AddDataToRecordPage, StageForPublishingPage, PublishDataPage, SettingsPage, Tools\n'), ((1979, 2025), 'page_objects.EntryPage', 'EntryPage', (['self.driver'], {'base_url': 'self.base_url'}), '(self.driver, base_url=self.base_url)\n', (1988, 2025), False, 'from page_objects import EntryPage, LoginPage, RedactionPage, ContactTracePage, AddNewRecordPage, AddDataToRecordPage, StageForPublishingPage, PublishDataPage, SettingsPage, Tools\n'), ((2077, 2099), 'page_objects.LoginPage', 'LoginPage', (['self.driver'], {}), '(self.driver)\n', (2086, 2099), False, 'from page_objects import EntryPage, LoginPage, RedactionPage, ContactTracePage, AddNewRecordPage, AddDataToRecordPage, StageForPublishingPage, PublishDataPage, SettingsPage, Tools\n'), ((2200, 2229), 'page_objects.ContactTracePage', 'ContactTracePage', (['self.driver'], {}), '(self.driver)\n', (2216, 2229), False, 'from page_objects import EntryPage, LoginPage, RedactionPage, ContactTracePage, AddNewRecordPage, AddDataToRecordPage, StageForPublishingPage, 
PublishDataPage, SettingsPage, Tools\n'), ((2300, 2329), 'page_objects.AddNewRecordPage', 'AddNewRecordPage', (['self.driver'], {}), '(self.driver)\n', (2316, 2329), False, 'from page_objects import EntryPage, LoginPage, RedactionPage, ContactTracePage, AddNewRecordPage, AddDataToRecordPage, StageForPublishingPage, PublishDataPage, SettingsPage, Tools\n'), ((2599, 2631), 'page_objects.AddDataToRecordPage', 'AddDataToRecordPage', (['self.driver'], {}), '(self.driver)\n', (2618, 2631), False, 'from page_objects import EntryPage, LoginPage, RedactionPage, ContactTracePage, AddNewRecordPage, AddDataToRecordPage, StageForPublishingPage, PublishDataPage, SettingsPage, Tools\n'), ((2866, 2891), 'page_objects.SettingsPage', 'SettingsPage', (['self.driver'], {}), '(self.driver)\n', (2878, 2891), False, 'from page_objects import EntryPage, LoginPage, RedactionPage, ContactTracePage, AddNewRecordPage, AddDataToRecordPage, StageForPublishingPage, PublishDataPage, SettingsPage, Tools\n'), ((3037, 3059), 'page_objects.LoginPage', 'LoginPage', (['self.driver'], {}), '(self.driver)\n', (3046, 3059), False, 'from page_objects import EntryPage, LoginPage, RedactionPage, ContactTracePage, AddNewRecordPage, AddDataToRecordPage, StageForPublishingPage, PublishDataPage, SettingsPage, Tools\n'), ((3120, 3166), 'page_objects.EntryPage', 'EntryPage', (['self.driver'], {'base_url': 'self.base_url'}), '(self.driver, base_url=self.base_url)\n', (3129, 3166), False, 'from page_objects import EntryPage, LoginPage, RedactionPage, ContactTracePage, AddNewRecordPage, AddDataToRecordPage, StageForPublishingPage, PublishDataPage, SettingsPage, Tools\n'), ((3256, 3281), 'page_objects.SettingsPage', 'SettingsPage', (['self.driver'], {}), '(self.driver)\n', (3268, 3281), False, 'from page_objects import EntryPage, LoginPage, RedactionPage, ContactTracePage, AddNewRecordPage, AddDataToRecordPage, StageForPublishingPage, PublishDataPage, SettingsPage, Tools\n'), ((4101, 4108), 'page_objects.Tools', 
'Tools', ([], {}), '()\n', (4106, 4108), False, 'from page_objects import EntryPage, LoginPage, RedactionPage, ContactTracePage, AddNewRecordPage, AddDataToRecordPage, StageForPublishingPage, PublishDataPage, SettingsPage, Tools\n'), ((4130, 4176), 'page_objects.EntryPage', 'EntryPage', (['self.driver'], {'base_url': 'self.base_url'}), '(self.driver, base_url=self.base_url)\n', (4139, 4176), False, 'from page_objects import EntryPage, LoginPage, RedactionPage, ContactTracePage, AddNewRecordPage, AddDataToRecordPage, StageForPublishingPage, PublishDataPage, SettingsPage, Tools\n'), ((4295, 4317), 'page_objects.LoginPage', 'LoginPage', (['self.driver'], {}), '(self.driver)\n', (4304, 4317), False, 'from page_objects import EntryPage, LoginPage, RedactionPage, ContactTracePage, AddNewRecordPage, AddDataToRecordPage, StageForPublishingPage, PublishDataPage, SettingsPage, Tools\n'), ((4382, 4408), 'page_objects.RedactionPage', 'RedactionPage', (['self.driver'], {}), '(self.driver)\n', (4395, 4408), False, 'from page_objects import EntryPage, LoginPage, RedactionPage, ContactTracePage, AddNewRecordPage, AddDataToRecordPage, StageForPublishingPage, PublishDataPage, SettingsPage, Tools\n'), ((585, 602), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (600, 602), False, 'import os\n'), ((697, 708), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (706, 708), False, 'import os\n'), ((734, 751), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (749, 751), False, 'import os\n'), ((884, 901), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (899, 901), False, 'import os\n'), ((1031, 1048), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (1046, 1048), False, 'import os\n'), ((1225, 1242), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (1240, 1242), False, 'import os\n'), ((1596, 1643), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'chrome_options': 'chrome_options'}), '(chrome_options=chrome_options)\n', (1612, 1643), False, 'from 
selenium import webdriver\n'), ((1684, 1755), 'selenium.webdriver.Remote', 'webdriver.Remote', ([], {'command_executor': 'self.sel_url', 'options': 'chrome_options'}), '(command_executor=self.sel_url, options=chrome_options)\n', (1700, 1755), False, 'from selenium import webdriver\n')] |
import unittest
import os
from subprocess import call
import z5py
import vigra
from test_class import McLuigiTestCase
class TestDataTasks(McLuigiTestCase):
    """Integration tests for the feature-extraction executable."""
    @classmethod
    def setUpClass(cls):
        super(TestDataTasks, cls).setUpClass()
    @classmethod
    def tearDownClass(cls):
        super(TestDataTasks, cls).tearDownClass()
    def check_features(self, feature_path):
        """Validate the feature file at *feature_path* against the cached RAG.

        The RAG's edge count fixes the expected number of feature rows, and
        no feature column may be identically zero.
        """
        rag_path = './cache/StackedRegionAdjacencyGraph_sampleA_watershed.h5'
        self.assertTrue(os.path.exists(rag_path))
        n_edges = vigra.readHDF5(rag_path, 'numberOfEdges')
        self.assertTrue(os.path.exists(feature_path))
        features = z5py.File(feature_path, use_zarr_format=False)['data'][:]
        self.assertEqual(n_edges, len(features))
        # Each feature column must contain at least one non-zero entry.
        for feat_id in range(features.shape[1]):
            self.assertFalse((features[:, feat_id] == 0).all())
    def test_region_features(self):
        # Run the executable, then validate what it produced.
        call(['python', './executables/features.py', 'region'])
        # NOTE(review): empty path is a placeholder — check_features will fail
        # on os.path.exists(''); fill in the real cache output path.
        feat_path = ''
        self.check_features(feat_path)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| [
"os.path.exists",
"vigra.readHDF5",
"z5py.File",
"subprocess.call",
"unittest.main"
] | [((1069, 1084), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1082, 1084), False, 'import unittest\n'), ((537, 578), 'vigra.readHDF5', 'vigra.readHDF5', (['rag_path', '"""numberOfEdges"""'], {}), "(rag_path, 'numberOfEdges')\n", (551, 578), False, 'import vigra\n'), ((918, 973), 'subprocess.call', 'call', (["['python', './executables/features.py', 'region']"], {}), "(['python', './executables/features.py', 'region'])\n", (922, 973), False, 'from subprocess import call\n'), ((493, 517), 'os.path.exists', 'os.path.exists', (['rag_path'], {}), '(rag_path)\n', (507, 517), False, 'import os\n'), ((604, 632), 'os.path.exists', 'os.path.exists', (['feature_path'], {}), '(feature_path)\n', (618, 632), False, 'import os\n'), ((653, 699), 'z5py.File', 'z5py.File', (['feature_path'], {'use_zarr_format': '(False)'}), '(feature_path, use_zarr_format=False)\n', (662, 699), False, 'import z5py\n')] |
# Generated by Django 3.0.8 on 2020-07-13 19:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds the ``feed`` flag to Image and refreshes
    field options (verbose names) on the image models. Avoid hand-editing."""

    dependencies = [
        ("images", "0003_image_creation_date"),
    ]

    operations = [
        # New boolean controlling whether the image appears in the feed.
        migrations.AddField(
            model_name="image",
            name="feed",
            field=models.BooleanField(default=False, verbose_name="In feed"),
        ),
        # The remaining operations only refresh field options (verbose names
        # and related metadata) on existing fields.
        migrations.AlterField(
            model_name="image",
            name="creation_date",
            field=models.DateTimeField(auto_now_add=True, verbose_name="Creation date"),
        ),
        migrations.AlterField(
            model_name="image",
            name="files",
            field=models.ManyToManyField(to="images.ImageFile", verbose_name="Files"),
        ),
        migrations.AlterField(
            model_name="image",
            name="thumbnail",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                related_name="container",
                to="images.ImageFile",
                verbose_name="Thumbnail",
            ),
        ),
        migrations.AlterField(
            model_name="image",
            name="title",
            field=models.CharField(max_length=200, verbose_name="Title"),
        ),
        migrations.AlterField(
            model_name="imagefile",
            name="height",
            field=models.IntegerField(verbose_name="Height"),
        ),
        migrations.AlterField(
            model_name="imagefile",
            name="image_file",
            field=models.FileField(upload_to="", verbose_name="Image file"),
        ),
        migrations.AlterField(
            model_name="imagefile",
            name="width",
            field=models.IntegerField(verbose_name="Width"),
        ),
    ]
| [
"django.db.models.ForeignKey",
"django.db.models.IntegerField",
"django.db.models.ManyToManyField",
"django.db.models.FileField",
"django.db.models.BooleanField",
"django.db.models.DateTimeField",
"django.db.models.CharField"
] | [((364, 422), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'verbose_name': '"""In feed"""'}), "(default=False, verbose_name='In feed')\n", (383, 422), False, 'from django.db import migrations, models\n'), ((550, 619), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'verbose_name': '"""Creation date"""'}), "(auto_now_add=True, verbose_name='Creation date')\n", (570, 619), False, 'from django.db import migrations, models\n'), ((739, 806), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""images.ImageFile"""', 'verbose_name': '"""Files"""'}), "(to='images.ImageFile', verbose_name='Files')\n", (761, 806), False, 'from django.db import migrations, models\n'), ((930, 1099), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""container"""', 'to': '"""images.ImageFile"""', 'verbose_name': '"""Thumbnail"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='container', to='images.ImageFile',\n verbose_name='Thumbnail')\n", (947, 1099), False, 'from django.db import migrations, models\n'), ((1321, 1375), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'verbose_name': '"""Title"""'}), "(max_length=200, verbose_name='Title')\n", (1337, 1375), False, 'from django.db import migrations, models\n'), ((1500, 1542), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'verbose_name': '"""Height"""'}), "(verbose_name='Height')\n", (1519, 1542), False, 'from django.db import migrations, models\n'), ((1671, 1728), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': '""""""', 'verbose_name': '"""Image file"""'}), "(upload_to='', verbose_name='Image file')\n", (1687, 1728), False, 'from django.db import migrations, models\n'), ((1852, 1893), 
'django.db.models.IntegerField', 'models.IntegerField', ([], {'verbose_name': '"""Width"""'}), "(verbose_name='Width')\n", (1871, 1893), False, 'from django.db import migrations, models\n')] |
import numba
import numpy as np
from scipy.sparse import csr_matrix
from .base import BasePointer, GraphBlasContainer
from .context import handle_panic, return_error
from .exceptions import GrB_Info
class MatrixPtr(BasePointer):
    """Pointer handle whose target is a Matrix container."""

    def set_matrix(self, matrix):
        """Point this handle at *matrix*."""
        self.instance = matrix
class Matrix(GraphBlasContainer):
    """GraphBLAS matrix container backed by a scipy CSR matrix."""

    def __init__(self, matrix):
        assert isinstance(matrix, csr_matrix)
        self.matrix = matrix

    @classmethod
    def new_from_dtype(cls, dtype, nrows, ncols):
        """Return an empty nrows x ncols container of the given dtype."""
        return cls(csr_matrix((nrows, ncols), dtype=dtype))

    @classmethod
    def new_from_existing(cls, other):
        """Return a container wrapping a CSR copy of *other*."""
        return cls(csr_matrix(other))

    @classmethod
    def get_pointer(cls):
        """Return a fresh, unset pointer for this container type."""
        return MatrixPtr()
@handle_panic
def Matrix_new(A: MatrixPtr, dtype: type, nrows: int, ncols: int):
    """Allocate an empty nrows x ncols matrix of *dtype* into pointer *A*."""
    if nrows <= 0:
        return_error(GrB_Info.GrB_INVALID_VALUE, "nrows must be > 0")
    if ncols <= 0:
        return_error(GrB_Info.GrB_INVALID_VALUE, "ncols must be > 0")
    A.set_matrix(Matrix.new_from_dtype(dtype, nrows, ncols))
    return GrB_Info.GrB_SUCCESS
def Matrix_dup(C: MatrixPtr, A: Matrix):
matrix = Matrix.new_from_existing(A)
C.set_matrix(matrix)
return GrB_Info.GrB_SUCCESS
@handle_panic
def Matrix_resize(C: Matrix, nrows: int, ncols: int):
    """Grow or shrink *C* in place to nrows x ncols."""
    for value, label in ((nrows, "nrows"), (ncols, "ncols")):
        if value <= 0:
            return_error(GrB_Info.GrB_INVALID_VALUE, label + " must be > 0")
    C.matrix.resize((nrows, ncols))
    return GrB_Info.GrB_SUCCESS
# TODO: this is just the essential code; it needs to handle descriptors, masks, accumulators, etc
@handle_panic
def mxm(C, A, B, semiring):
    """Matrix-matrix multiply under *semiring*: C = A (plus.times) B.

    C, A and B are CSR matrices; B is converted to CSC so A's rows line up
    with B's columns for the inner products in _sparse_matmul. C's arrays
    are overwritten with the result.
    """
    cr, cc = C.shape
    ar, ac = A.shape
    br, bc = B.shape
    if cr != ar:
        return_error(GrB_Info.GrB_DIMENSION_MISMATCH, "C.nrows != A.nrows")
    if cc != bc:
        return_error(GrB_Info.GrB_DIMENSION_MISMATCH, "C.ncols != B.ncols")
    if ac != br:
        # Fixed message: this condition compares A's column count with B's
        # row count; the original text had the dimensions swapped.
        return_error(GrB_Info.GrB_DIMENSION_MISMATCH, "A.ncols != B.nrows")
    b = B.tocsc()
    d, i, ip = _sparse_matmul(
        A.data,
        A.indices,
        A.indptr,
        b.data,
        b.indices,
        b.indptr,
        semiring.plus.op,
        semiring.times,
        semiring.plus.identity,
        C.dtype,
    )
    C.data = d
    C.indices = i
    C.indptr = ip
    return GrB_Info.GrB_SUCCESS
@numba.njit
def _sparse_matmul(
    a_data,
    a_indices,
    a_indptr,
    b_data,
    b_indices,
    b_indptr,
    plus,
    times,
    identity,
    dtype,
):
    # Semiring product of A (CSR arrays a_*) with B (CSC arrays b_*, as
    # passed by mxm). For each row of A and each column of B the two sorted
    # index segments are merged; matching positions contribute
    # times(a, b), folded with plus() starting from identity. Returns the
    # (data, indices, indptr) triple of the CSR result.
    # Assumes indices are sorted within each row/column segment (scipy's
    # canonical form).
    # Final array size is unknown, so we give ourselves room and then adjust on the fly
    tmp_output_size = a_data.size * 2
    data = np.empty((tmp_output_size,), dtype=dtype)
    indices = np.empty((tmp_output_size,), dtype=a_indices.dtype)
    indptr = np.empty((a_indptr.size,), dtype=a_indptr.dtype)
    output_counter = 0  # number of nonzeros written so far
    for iptr in range(a_indptr.size - 1):
        indptr[iptr] = output_counter  # output row iptr starts here
        for jptr in range(b_indptr.size - 1):
            # Walk row iptr of A and column jptr of B in lockstep.
            a_counter = a_indptr[iptr]
            a_stop = a_indptr[iptr + 1]
            b_counter = b_indptr[jptr]
            b_stop = b_indptr[jptr + 1]
            val = identity
            nonempty = False  # becomes True once any index matched
            while a_counter < a_stop and b_counter < b_stop:
                a_k = a_indices[a_counter]
                b_k = b_indices[b_counter]
                if a_k == b_k:
                    val = plus(val, times(a_data[a_counter], b_data[b_counter]))
                    nonempty = True
                    a_counter += 1
                    b_counter += 1
                elif a_k < b_k:
                    a_counter += 1
                else:
                    b_counter += 1
            if nonempty:
                if output_counter >= tmp_output_size:
                    # We filled up the allocated space; copy existing data to a larger array
                    tmp_output_size *= 2
                    new_data = np.empty((tmp_output_size,), dtype=data.dtype)
                    new_indices = np.empty((tmp_output_size,), dtype=indices.dtype)
                    new_data[:output_counter] = data[:output_counter]
                    new_indices[:output_counter] = indices[:output_counter]
                    data = new_data
                    indices = new_indices
                data[output_counter] = val
                indices[output_counter] = jptr
                output_counter += 1
        # Add final entry to indptr (should indicate nnz in the output)
        nnz = output_counter
        indptr[iptr + 1] = nnz
    # Trim output arrays
    # NOTE(review): if A has zero rows (a_indptr.size == 1) the outer loop
    # never runs and `nnz` is never assigned — confirm callers cannot hit this.
    data = data[:nnz]
    indices = indices[:nnz]
    return (data, indices, indptr)
| [
"scipy.sparse.csr_matrix",
"numpy.empty"
] | [((2755, 2796), 'numpy.empty', 'np.empty', (['(tmp_output_size,)'], {'dtype': 'dtype'}), '((tmp_output_size,), dtype=dtype)\n', (2763, 2796), True, 'import numpy as np\n'), ((2811, 2862), 'numpy.empty', 'np.empty', (['(tmp_output_size,)'], {'dtype': 'a_indices.dtype'}), '((tmp_output_size,), dtype=a_indices.dtype)\n', (2819, 2862), True, 'import numpy as np\n'), ((2876, 2924), 'numpy.empty', 'np.empty', (['(a_indptr.size,)'], {'dtype': 'a_indptr.dtype'}), '((a_indptr.size,), dtype=a_indptr.dtype)\n', (2884, 2924), True, 'import numpy as np\n'), ((524, 563), 'scipy.sparse.csr_matrix', 'csr_matrix', (['(nrows, ncols)'], {'dtype': 'dtype'}), '((nrows, ncols), dtype=dtype)\n', (534, 563), False, 'from scipy.sparse import csr_matrix\n'), ((665, 682), 'scipy.sparse.csr_matrix', 'csr_matrix', (['other'], {}), '(other)\n', (675, 682), False, 'from scipy.sparse import csr_matrix\n'), ((4021, 4067), 'numpy.empty', 'np.empty', (['(tmp_output_size,)'], {'dtype': 'data.dtype'}), '((tmp_output_size,), dtype=data.dtype)\n', (4029, 4067), True, 'import numpy as np\n'), ((4102, 4151), 'numpy.empty', 'np.empty', (['(tmp_output_size,)'], {'dtype': 'indices.dtype'}), '((tmp_output_size,), dtype=indices.dtype)\n', (4110, 4151), True, 'import numpy as np\n')] |
import json
import os
import shutil
import tempfile
from datetime import timedelta
from unittest import mock
from unittest import TestCase
from pytuber.storage import Registry
class RegistryTests(TestCase):
    """Unit tests for pytuber's Registry singleton storage."""

    def tearDown(self):
        # Reset both the persisted state and the in-memory singleton dict so
        # tests stay independent of each other.
        Registry.clear()
        Registry._obj = {}

    def test_singleton(self):
        a = Registry()
        b = Registry()
        self.assertIs(a, b)

        a[1] = 2
        self.assertEqual({1: 2}, b)

    def test_set(self):
        Registry.set(1, 2, 3, 4, 5)
        self.assertEqual({1: {2: {3: {4: 5}}}}, Registry())

        Registry.set(1, 3, 5)
        self.assertEqual({1: {2: {3: {4: 5}}, 3: 5}}, Registry())

    def test_get(self):
        Registry.set(1, 2, 3, 4, 5)
        self.assertEqual({4: 5}, Registry.get(1, 2, 3))

        with self.assertRaises(KeyError):
            Registry.get(2)

    def test_clear(self):
        Registry.set(1, 2, 3, 4, 5)
        self.assertEqual({4: 5}, Registry.get(1, 2, 3))

        Registry.clear()
        self.assertEqual({}, Registry())

    def test_from_file(self):
        # Create the temp dir before entering the try block: if mkdtemp
        # itself fails, the finally clause must not hit an unbound `tmp`
        # (the original nested it inside try, masking the real error with
        # a NameError).
        tmp = tempfile.mkdtemp()
        try:
            file_path = os.path.join(tmp, "foo.json")
            with open(file_path, "w") as fp:
                json.dump({"a": True}, fp)

            Registry.from_file(file_path)
            self.assertEqual({"a": True}, Registry())

            Registry.set("a", False)
            self.assertFalse(Registry.get("a"))

            # Loading again must not clobber state already in memory.
            Registry.from_file(file_path)
            self.assertFalse(Registry.get("a"))
        finally:
            shutil.rmtree(tmp)

    def test_persist(self):
        tmp = tempfile.mkdtemp()  # created first so cleanup is always safe
        try:
            Registry.set(1, 2, 3, 4)
            file_path = os.path.join(tmp, "foo.json")
            Registry.persist(file_path)

            Registry.set(1, 2, 3, 5)
            Registry._obj = {}

            Registry.from_file(file_path)
            # The JSON round-trip stringifies the integer keys.
            self.assertEqual({"1": {"2": {"3": 4}}}, Registry())
        finally:
            shutil.rmtree(tmp)

    @mock.patch("pytuber.storage.time.time")
    def test_cache(self, time):
        time.side_effect = [10, 20.1, 20.1, 20.5, 20.8]

        def callme(ttl, value, refresh=False):
            return Registry.cache(
                key="foo",
                ttl=timedelta(seconds=ttl),
                func=lambda: value,
                refresh=refresh,
            )

        # First call computes and stores the value with its expiry time.
        self.assertEqual("first", callme(10, "first"))
        self.assertEqual(("first", 20.0), Registry.get("foo"))

        # Expired entry is recomputed.
        self.assertEqual("second", callme(1, "second"))
        self.assertEqual(("second", 21.1), Registry.get("foo"))

        # Fresh entry is served from cache, not recomputed.
        self.assertEqual("second", callme(1, "third"))
        self.assertEqual(("second", 21.1), Registry.get("foo"))

        # refresh=True forces recomputation even when fresh.
        self.assertEqual("third", callme(100, "third", refresh=True))
        self.assertEqual(("third", 120.8), Registry.get("foo"))

        self.assertEqual(5, time.call_count)
| [
"pytuber.storage.Registry.from_file",
"pytuber.storage.Registry.clear",
"os.path.join",
"pytuber.storage.Registry.get",
"pytuber.storage.Registry.set",
"pytuber.storage.Registry.persist",
"tempfile.mkdtemp",
"pytuber.storage.Registry",
"shutil.rmtree",
"datetime.timedelta",
"unittest.mock.patch"... | [((2027, 2066), 'unittest.mock.patch', 'mock.patch', (['"""pytuber.storage.time.time"""'], {}), "('pytuber.storage.time.time')\n", (2037, 2066), False, 'from unittest import mock\n'), ((242, 258), 'pytuber.storage.Registry.clear', 'Registry.clear', ([], {}), '()\n', (256, 258), False, 'from pytuber.storage import Registry\n'), ((329, 339), 'pytuber.storage.Registry', 'Registry', ([], {}), '()\n', (337, 339), False, 'from pytuber.storage import Registry\n'), ((352, 362), 'pytuber.storage.Registry', 'Registry', ([], {}), '()\n', (360, 362), False, 'from pytuber.storage import Registry\n'), ((478, 505), 'pytuber.storage.Registry.set', 'Registry.set', (['(1)', '(2)', '(3)', '(4)', '(5)'], {}), '(1, 2, 3, 4, 5)\n', (490, 505), False, 'from pytuber.storage import Registry\n'), ((575, 596), 'pytuber.storage.Registry.set', 'Registry.set', (['(1)', '(3)', '(5)'], {}), '(1, 3, 5)\n', (587, 596), False, 'from pytuber.storage import Registry\n'), ((696, 723), 'pytuber.storage.Registry.set', 'Registry.set', (['(1)', '(2)', '(3)', '(4)', '(5)'], {}), '(1, 2, 3, 4, 5)\n', (708, 723), False, 'from pytuber.storage import Registry\n'), ((886, 913), 'pytuber.storage.Registry.set', 'Registry.set', (['(1)', '(2)', '(3)', '(4)', '(5)'], {}), '(1, 2, 3, 4, 5)\n', (898, 913), False, 'from pytuber.storage import Registry\n'), ((979, 995), 'pytuber.storage.Registry.clear', 'Registry.clear', ([], {}), '()\n', (993, 995), False, 'from pytuber.storage import Registry\n'), ((554, 564), 'pytuber.storage.Registry', 'Registry', ([], {}), '()\n', (562, 564), False, 'from pytuber.storage import Registry\n'), ((651, 661), 'pytuber.storage.Registry', 'Registry', ([], {}), '()\n', (659, 661), False, 'from pytuber.storage import Registry\n'), ((757, 778), 'pytuber.storage.Registry.get', 'Registry.get', (['(1)', '(2)', '(3)'], {}), '(1, 2, 3)\n', (769, 778), False, 'from pytuber.storage import Registry\n'), ((835, 850), 'pytuber.storage.Registry.get', 'Registry.get', (['(2)'], 
{}), '(2)\n', (847, 850), False, 'from pytuber.storage import Registry\n'), ((947, 968), 'pytuber.storage.Registry.get', 'Registry.get', (['(1)', '(2)', '(3)'], {}), '(1, 2, 3)\n', (959, 968), False, 'from pytuber.storage import Registry\n'), ((1025, 1035), 'pytuber.storage.Registry', 'Registry', ([], {}), '()\n', (1033, 1035), False, 'from pytuber.storage import Registry\n'), ((1099, 1117), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (1115, 1117), False, 'import tempfile\n'), ((1142, 1171), 'os.path.join', 'os.path.join', (['tmp', '"""foo.json"""'], {}), "(tmp, 'foo.json')\n", (1154, 1171), False, 'import os\n'), ((1273, 1302), 'pytuber.storage.Registry.from_file', 'Registry.from_file', (['file_path'], {}), '(file_path)\n', (1291, 1302), False, 'from pytuber.storage import Registry\n'), ((1371, 1395), 'pytuber.storage.Registry.set', 'Registry.set', (['"""a"""', '(False)'], {}), "('a', False)\n", (1383, 1395), False, 'from pytuber.storage import Registry\n'), ((1458, 1487), 'pytuber.storage.Registry.from_file', 'Registry.from_file', (['file_path'], {}), '(file_path)\n', (1476, 1487), False, 'from pytuber.storage import Registry\n'), ((1566, 1584), 'shutil.rmtree', 'shutil.rmtree', (['tmp'], {}), '(tmp)\n', (1579, 1584), False, 'import shutil\n'), ((1639, 1663), 'pytuber.storage.Registry.set', 'Registry.set', (['(1)', '(2)', '(3)', '(4)'], {}), '(1, 2, 3, 4)\n', (1651, 1663), False, 'from pytuber.storage import Registry\n'), ((1682, 1700), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (1698, 1700), False, 'import tempfile\n'), ((1725, 1754), 'os.path.join', 'os.path.join', (['tmp', '"""foo.json"""'], {}), "(tmp, 'foo.json')\n", (1737, 1754), False, 'import os\n'), ((1767, 1794), 'pytuber.storage.Registry.persist', 'Registry.persist', (['file_path'], {}), '(file_path)\n', (1783, 1794), False, 'from pytuber.storage import Registry\n'), ((1808, 1832), 'pytuber.storage.Registry.set', 'Registry.set', (['(1)', '(2)', '(3)', '(5)'], {}), '(1, 2, 
3, 5)\n', (1820, 1832), False, 'from pytuber.storage import Registry\n'), ((1877, 1906), 'pytuber.storage.Registry.from_file', 'Registry.from_file', (['file_path'], {}), '(file_path)\n', (1895, 1906), False, 'from pytuber.storage import Registry\n'), ((2002, 2020), 'shutil.rmtree', 'shutil.rmtree', (['tmp'], {}), '(tmp)\n', (2015, 2020), False, 'import shutil\n'), ((2490, 2509), 'pytuber.storage.Registry.get', 'Registry.get', (['"""foo"""'], {}), "('foo')\n", (2502, 2509), False, 'from pytuber.storage import Registry\n'), ((2611, 2630), 'pytuber.storage.Registry.get', 'Registry.get', (['"""foo"""'], {}), "('foo')\n", (2623, 2630), False, 'from pytuber.storage import Registry\n'), ((2731, 2750), 'pytuber.storage.Registry.get', 'Registry.get', (['"""foo"""'], {}), "('foo')\n", (2743, 2750), False, 'from pytuber.storage import Registry\n'), ((2866, 2885), 'pytuber.storage.Registry.get', 'Registry.get', (['"""foo"""'], {}), "('foo')\n", (2878, 2885), False, 'from pytuber.storage import Registry\n'), ((1233, 1259), 'json.dump', 'json.dump', (["{'a': True}", 'fp'], {}), "({'a': True}, fp)\n", (1242, 1259), False, 'import json\n'), ((1346, 1356), 'pytuber.storage.Registry', 'Registry', ([], {}), '()\n', (1354, 1356), False, 'from pytuber.storage import Registry\n'), ((1426, 1443), 'pytuber.storage.Registry.get', 'Registry.get', (['"""a"""'], {}), "('a')\n", (1438, 1443), False, 'from pytuber.storage import Registry\n'), ((1517, 1534), 'pytuber.storage.Registry.get', 'Registry.get', (['"""a"""'], {}), "('a')\n", (1529, 1534), False, 'from pytuber.storage import Registry\n'), ((1961, 1971), 'pytuber.storage.Registry', 'Registry', ([], {}), '()\n', (1969, 1971), False, 'from pytuber.storage import Registry\n'), ((2285, 2307), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'ttl'}), '(seconds=ttl)\n', (2294, 2307), False, 'from datetime import timedelta\n')] |
# -*- coding: utf-8 -*-
"""
file: module_topology_distmat_test.py
Unit tests for distance matrix computations
"""
import os
from pandas import DataFrame
from interact.md_system import System
from tests.module.unittest_baseclass import UnittestPythonCompatibility
class DistanceMatrixTests(UnittestPythonCompatibility):
    """Unit tests for pairwise distance matrix computation on a topology."""
    # Test fixture: the 1acj structure files shipped with the test suite.
    currpath = os.path.dirname(__file__)
    pdb_file = os.path.abspath(os.path.join(currpath, '../files/1acj.pdb'))
    mol_file = os.path.abspath(os.path.join(currpath, '../files/1acj.mol2'))
    def setUp(self):
        """
        Prepare TopologyDataFrame once for every test
        """
        self.top = System(self.pdb_file, mol2file=self.mol_file).topology
    def test_distmat_overflow_exception(self):
        """
        Test OverflowError exception for (too) large distance matrix
        """
        # Unable to compute distance matrix > max_distmat_size
        self.assertRaises(OverflowError, self.top.distances, max_distmat_size=10000)
    def test_distmat_attribute_exception(self):
        """
        Test AttributeError on missing or incomplete coordinates
        """
        # No coordinates
        self.top._coordinates = None
        self.assertRaises(AttributeError, self.top.distances)
    def test_distmat_square(self):
        """
        Test computation of default square matrix
        """
        distmat = self.top.distances()
        self.assertIsInstance(distmat, DataFrame)
        # A full all-vs-all matrix is square with identical row/column labels.
        self.assertEqual(distmat.shape[0], distmat.shape[1])
        self.assertEqual(list(distmat.columns), list(distmat.index))
    def test_distmat_target(self):
        """
        Test computation of matrix with custom source and target selection
        """
        source = self.top[self.top['resSeq'] == 999]
        target = self.top[self.top['resName'] == 'HIS']
        distmat = source.distances(target=target)
        self.assertIsInstance(distmat, DataFrame)
        # Rows follow the source selection, columns the target selection.
        self.assertEqual(distmat.shape, (17, 138))
        self.assertEqual(len(distmat.columns), len(target))
        self.assertEqual(len(distmat.index), len(source))
    def test_distmat_empty_selection(self):
        """
        Test computation of matrix when (one of) the input selections is empty
        """
        # resSeq 9999 yields an empty source selection for this structure.
        source = self.top[self.top['resSeq'] == 9999]
        target = self.top[self.top['resName'] == 'HIS']
        self.assertTrue(source.distances().empty)
        self.assertTrue(source.distances(target=target).empty) | [
"interact.md_system.System",
"os.path.dirname",
"os.path.join"
] | [((341, 366), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (356, 366), False, 'import os\n'), ((398, 441), 'os.path.join', 'os.path.join', (['currpath', '"""../files/1acj.pdb"""'], {}), "(currpath, '../files/1acj.pdb')\n", (410, 441), False, 'import os\n'), ((474, 518), 'os.path.join', 'os.path.join', (['currpath', '"""../files/1acj.mol2"""'], {}), "(currpath, '../files/1acj.mol2')\n", (486, 518), False, 'import os\n'), ((640, 685), 'interact.md_system.System', 'System', (['self.pdb_file'], {'mol2file': 'self.mol_file'}), '(self.pdb_file, mol2file=self.mol_file)\n', (646, 685), False, 'from interact.md_system import System\n')] |
import typing
import bs4
import requests
class ScrapeAllComicIds():
def __call__(
self,
) -> typing.List[int]:
self.__find()
return self.__ids
def __find(
self,
) -> typing.NoReturn:
self.__ids = []
for q in self.__query:
self.__find_per_page(q)
def __find_per_page(
self,
query: str,
) -> typing.NoReturn:
response = requests.get(
f'{self.__base_url}'
f'{query}',
)
soup = bs4.BeautifulSoup(
response.content,
'html.parser',
)
elms = soup.find(
id='all_title',
).find('ul').find_all('li')
for elm in elms:
url = elm.find(
'a',
).get('href')
self.__ids.append(
int(url.split('=')[-1])
)
def __init__(
self,
) -> typing.NoReturn:
self.__base_url = (
'http://ruijianime.com/'
'comic/title/all_title'
'.php?q='
)
self.__query = (
'a', 'ka', 'sa', 'ta',
'na', 'ha', 'ma', 'ya',
'ra', 'wa',
) | [
"bs4.BeautifulSoup",
"requests.get"
] | [((385, 426), 'requests.get', 'requests.get', (['f"""{self.__base_url}{query}"""'], {}), "(f'{self.__base_url}{query}')\n", (397, 426), False, 'import requests\n'), ((461, 511), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['response.content', '"""html.parser"""'], {}), "(response.content, 'html.parser')\n", (478, 511), False, 'import bs4\n')] |
"""
Path Converter.
pymdownx.pathconverter
An extension for Python Markdown.
An extension to covert tag paths to relative or absolute:
Given an absolute base and a target relative path, this extension searches for file
references that are relative and converts them to a path relative
to the base path.
-or-
Given an absolute base path, this extension searches for file
references that are relative and converts them to absolute paths.
MIT license.
Copyright (c) 2014 - 2017 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import unicode_literals
from markdown import Extension
from markdown.postprocessors import Postprocessor
from . import util
import os
import re
from urllib.parse import urlunparse
RE_TAG_HTML = r'''(?xus)
(?:
(?P<comments>(\r?\n?\s*)<!--[\s\S]*?-->(\s*)(?=\r?\n)|<!--[\s\S]*?-->)|
(?P<open><(?P<tag>(?:%s)))
(?P<attr>(?:\s+[\w\-:]+(?:\s*=\s*(?:"[^"]*"|'[^']*'))?)*)
(?P<close>\s*(?:\/?)>)
)
'''
RE_TAG_LINK_ATTR = re.compile(
r'''(?xus)
(?P<attr>
(?:
(?P<name>\s+(?:href|src)\s*=\s*)
(?P<path>"[^"]*"|'[^']*')
)
)
'''
)
def repl_relative(m, base_path, relative_path):
    """Rewrite a matched href/src attribute relative to *relative_path*."""
    original = m.group(0)
    raw = m.group('path')[1:-1]  # strip the surrounding quotes
    try:
        scheme, netloc, path, params, query, fragment, is_url, is_absolute = util.parse_url(raw)
        if is_url:
            # External URL: leave the attribute untouched.
            return original
        # Resolve the reference against the base, then re-relativize it.
        path = util.url2path(path)
        if not is_absolute:
            resolved = os.path.normpath(os.path.join(base_path, path))
            path = os.path.relpath(resolved, os.path.normpath(relative_path))
        # Convert back to a URL-encoded path and rebuild the attribute.
        path = util.path2url(path)
        return '%s"%s"' % (
            m.group('name'),
            urlunparse((scheme, netloc, path, params, query, fragment))
        )
    except Exception:  # pragma: no cover
        # Parsing crashed and burned; keep the original attribute text.
        return original
def repl_absolute(m, base_path):
    """Rewrite a matched href/src attribute as an absolute path under *base_path*."""
    original = m.group(0)
    raw = m.group('path')[1:-1]  # strip the surrounding quotes
    try:
        scheme, netloc, path, params, query, fragment, is_url, is_absolute = util.parse_url(raw)
        if is_url or is_absolute:
            # Already absolute (or an external URL): nothing to do.
            return original
        resolved = os.path.normpath(os.path.join(base_path, util.url2path(path)))
        path = util.path2url(resolved)
        # Ensure exactly one leading slash on the final path.
        prefix = '' if path.startswith('/') else '/'
        return '%s"%s%s"' % (
            m.group('name'),
            prefix,
            urlunparse((scheme, netloc, path, params, query, fragment))
        )
    except Exception:  # pragma: no cover
        # Parsing crashed and burned; keep the original attribute text.
        return original
def repl(m, base_path, rel_path=None):
    """Rebuild a matched tag, converting its link attributes.

    HTML comments are passed through unchanged.  When *rel_path* is given
    attributes become relative to it, otherwise they become absolute.
    """
    comment = m.group('comments')
    if comment:
        return comment
    if rel_path is None:
        convert = lambda m2: repl_absolute(m2, base_path)
    else:
        convert = lambda m2: repl_relative(m2, base_path, rel_path)
    attrs = RE_TAG_LINK_ATTR.sub(convert, m.group('attr'))
    return m.group('open') + attrs + m.group('close')
class PathConverterPostprocessor(Postprocessor):
    """Post-process rendered HTML, converting paths in link attributes."""
    def run(self, text):
        """Scan *text* for the configured tags and rewrite their paths."""
        base = self.config['base_path']
        rel = self.config['relative_path']
        make_absolute = bool(self.config['absolute'])
        pattern = re.compile(RE_TAG_HTML % '|'.join(self.config['tags'].split()))
        if make_absolute:
            if base:
                text = pattern.sub(lambda m: repl(m, base), text)
        elif base and rel:
            text = pattern.sub(lambda m: repl(m, base, rel), text)
        return text
class PathConverterExtension(Extension):
    """PathConverter extension."""
    def __init__(self, *args, **kwargs):
        """Initialize the extension and declare its config options."""
        # Each entry is [default, help-text] per the markdown.Extension
        # config convention.
        self.config = {
            'base_path': ["", "Base path used to find files - Default: \"\""],
            'relative_path': ["", "Path that files will be relative to (not needed if using absolute) - Default: \"\""],
            'absolute': [False, "Paths are absolute by default; disable for relative - Default: False"],
            'tags': ["img script a link", "tags to convert src and/or href in - Default: 'img scripts a link'"]
        }
        super(PathConverterExtension, self).__init__(*args, **kwargs)
    def extendMarkdown(self, md):
        """Add post processor to Markdown instance."""
        rel_path = PathConverterPostprocessor(md)
        rel_path.config = self.getConfigs()
        # NOTE: priority 2 is low in markdown's registry ordering, so this
        # runs near the end of postprocessing -- verify against the
        # Python-Markdown extension API if other postprocessors are added.
        md.postprocessors.register(rel_path, "path-converter", 2)
        md.registerExtension(self)
def makeExtension(*args, **kwargs):
    """Return extension."""
    extension = PathConverterExtension(*args, **kwargs)
    return extension
| [
"os.path.normpath",
"urllib.parse.urlunparse",
"os.path.join",
"re.compile"
] | [((2003, 2170), 're.compile', 're.compile', (['"""(?xus)\n (?P<attr>\n (?:\n (?P<name>\\\\s+(?:href|src)\\\\s*=\\\\s*)\n (?P<path>"[^"]*"|\'[^\']*\')\n )\n )\n """'], {}), '(\n """(?xus)\n (?P<attr>\n (?:\n (?P<name>\\\\s+(?:href|src)\\\\s*=\\\\s*)\n (?P<path>"[^"]*"|\'[^\']*\')\n )\n )\n """\n )\n', (2013, 2170), False, 'import re\n'), ((3608, 3637), 'os.path.join', 'os.path.join', (['base_path', 'path'], {}), '(base_path, path)\n', (3620, 3637), False, 'import os\n'), ((2792, 2823), 'os.path.normpath', 'os.path.normpath', (['relative_path'], {}), '(relative_path)\n', (2808, 2823), False, 'import os\n'), ((3844, 3903), 'urllib.parse.urlunparse', 'urlunparse', (['(scheme, netloc, path, params, query, fragment)'], {}), '((scheme, netloc, path, params, query, fragment))\n', (3854, 3903), False, 'from urllib.parse import urlunparse\n'), ((2740, 2769), 'os.path.join', 'os.path.join', (['base_path', 'path'], {}), '(base_path, path)\n', (2752, 2769), False, 'import os\n'), ((3053, 3112), 'urllib.parse.urlunparse', 'urlunparse', (['(scheme, netloc, path, params, query, fragment)'], {}), '((scheme, netloc, path, params, query, fragment))\n', (3063, 3112), False, 'from urllib.parse import urlunparse\n')] |
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import logging
from collections import defaultdict
from itertools import chain
from torch.nn.utils import clip_grad
from mmcv.utils import TORCH_VERSION, _BatchNorm, digit_version
# from ..dist_utils import allreduce_grads
# from ..fp16_utils import LossScaler, wrap_fp16_model
from mmcv.runner.hooks import HOOKS, Hook
try:
# If PyTorch version >= 1.6.0, torch.cuda.amp.GradScaler would be imported
# and used; otherwise, auto fp16 will adopt mmcv's implementation.
from torch.cuda.amp import GradScaler
except ImportError:
pass
@HOOKS.register_module()
class MyHook(Hook):
    """A hook containing custom optimizer operations (backward + step).

    Args:
        grad_clip (dict, optional): A config dict to control the clip_grad.
            Default: None.
        detect_anomalous_params (bool): This option is only used for
            debugging which will slow down the training speed.
            Detect anomalous parameters that are not included in
            the computational graph with `loss` as the root.
            There are two cases
                - Parameters were not used during forward pass.
                - Parameters were not used to produce loss.
            Default: False.
    """
    def __init__(self, grad_clip=None, detect_anomalous_params=False):
        self.grad_clip = grad_clip
        self.detect_anomalous_params = detect_anomalous_params
    def clip_grads(self, params):
        # Clip only trainable parameters that actually received a gradient.
        # Returns the total norm, or None when no parameter has a gradient
        # (clip_grad_norm_ is then never invoked).
        params = list(
            filter(lambda p: p.requires_grad and p.grad is not None, params))
        if len(params) > 0:
            return clip_grad.clip_grad_norm_(params, **self.grad_clip)
    def after_train_iter(self, runner):
        # Standard optimization step: zero grads, backward, optional
        # gradient clipping, then optimizer.step().
        runner.optimizer.zero_grad()
        if self.detect_anomalous_params:
            self.detect_anomalous_parameters(runner.outputs['loss'], runner)
        # Loss may be None; the optimizer step below still runs on any
        # gradients already present.
        if runner.outputs['loss'] is not None:
            runner.outputs['loss'].backward()
        if self.grad_clip is not None:
            grad_norm = self.clip_grads(runner.model.parameters())
            if grad_norm is not None:
                # Add grad norm to the logger
                runner.log_buffer.update({'grad_norm': float(grad_norm)},
                                         runner.outputs['num_samples'])
        runner.optimizer.step()
    def detect_anomalous_parameters(self, loss, runner):
        # Walk the autograd graph backwards from loss.grad_fn, collect
        # every leaf variable, and report (at ERROR level) any trainable
        # parameter that is not reachable from the loss.
        logger = runner.logger
        parameters_in_graph = set()
        visited = set()
        def traverse(grad_fn):
            # Depth-first walk, de-duplicated via `visited` so shared
            # sub-graphs are only expanded once.
            if grad_fn is None:
                return
            if grad_fn not in visited:
                visited.add(grad_fn)
                if hasattr(grad_fn, 'variable'):
                    parameters_in_graph.add(grad_fn.variable)
                parents = grad_fn.next_functions
                if parents is not None:
                    for parent in parents:
                        grad_fn = parent[0]
                        traverse(grad_fn)
        traverse(loss.grad_fn)
        for n, p in runner.model.named_parameters():
            if p not in parameters_in_graph and p.requires_grad:
                logger.log(
                    level=logging.ERROR,
                    msg=f'{n} with shape {p.size()} is not '
                    f'in the computational graph \n')
| [
"torch.nn.utils.clip_grad.clip_grad_norm_",
"mmcv.runner.hooks.HOOKS.register_module"
] | [((612, 635), 'mmcv.runner.hooks.HOOKS.register_module', 'HOOKS.register_module', ([], {}), '()\n', (633, 635), False, 'from mmcv.runner.hooks import HOOKS, Hook\n'), ((1670, 1721), 'torch.nn.utils.clip_grad.clip_grad_norm_', 'clip_grad.clip_grad_norm_', (['params'], {}), '(params, **self.grad_clip)\n', (1695, 1721), False, 'from torch.nn.utils import clip_grad\n')] |
import numpy as np
class CellularAutomationModel:
    """Conway's Game of Life on a fixed-size grid of dead (0) / alive (1) cells.

    NOTE(review): cells are indexed ``grid[x, y]`` although the array is
    allocated as ``(grid_height, grid_width)``; this is only safe while the
    grid is square (40x40) -- confirm before making the grid rectangular.
    """
    grid_width = 40
    grid_height = 40
    def __init__(self):
        self.grid = self._randomised_grid()
    def evolve(self):
        """
        Evolve the current grid state using Conway's Game of Life algorithm.
        :returns
            dict: A dictionary representation of the state of cells in the grid
        """
        # Evaluate every cell against a frozen snapshot so updates made
        # during this generation do not influence their neighbours.
        base_grid = self.grid.copy()
        for y in range(self.grid_height):
            for x in range(self.grid_width):
                cell_state = base_grid[x, y]
                n_neighbours = self._calculate_alive_neighbours(x, y, cell_state, grid=base_grid)
                self.grid[x, y] = self._next_cell_state(cell_state, n_neighbours)
        return self._json_formatted_grid()
    def toggle_cell_state(self, x, y):
        """
        Reverses the cell state for a particular cell coordinate.
        """
        self.grid[x][y] = 0 if self.grid[x][y] == 1 else 1
    def reset_grid(self):
        """
        Resets the grid array to a random state.
        :returns
            dict: A dictionary representation of the state of cells in the grid
        """
        self.grid = self._randomised_grid()
        return self._json_formatted_grid()
    def _calculate_alive_neighbours(self, x, y, cell_state, grid):
        """
        Returns the number of alive nearest neighbours (the cell itself is
        excluded by subtracting its own state).
        """
        surrounding_arr = self._surrounding_arr(x, y, grid)
        n_alive = sum(sum(surrounding_arr))
        return n_alive - cell_state
    def _json_formatted_grid(self):
        """
        Returns a python dictionary which represents the current state of the cells in the grid.
        key: An integer that represents a single cell based on the coordinate position.
        value: The cell state <0 or 1> to represent whether a cell is dead or alive.
        """
        json_grid = {}
        for x in range(self.grid_width):
            for y in range(self.grid_height):
                cell_id = int(x + y*self.grid_width)
                json_grid[cell_id] = int(self.grid[x, y])
        return json_grid
    def _randomised_grid(self):
        """
        Returns a 2d array with values of randomly assigned values of 0 or 1.
        """
        return np.random.randint(2, size=(self.grid_height, self.grid_width))
    @staticmethod
    def _surrounding_arr(x, y, grid):
        """
        Returns the (up to 3x3) sub-array of adjacent cells around (x, y).

        Bug fix: the previous elif chain matched ``x == 0`` before the
        combined corner case, so coordinate (0, 0) produced the slice
        ``grid[0:2, -1:2]`` -- empty in numpy (start 39 > stop 2) -- and the
        origin cell always saw zero live neighbours.  Clamping both lower
        bounds to 0 handles every edge and corner uniformly.
        """
        return grid[max(x - 1, 0):x + 2, max(y - 1, 0):y + 2]
    @staticmethod
    def _next_cell_state(cell_state, n_neighbours):
        """
        Returns the new cell state 0 (dead) or 1 (alive) per Conway's rules:
        a live cell survives with 2 or 3 neighbours; a dead cell becomes
        alive with exactly 3 neighbours.
        """
        if (cell_state == 1 and (n_neighbours not in range(2, 4))) or (cell_state == 0 and n_neighbours != 3):
            return 0
        return 1
| [
"numpy.random.randint"
] | [((2271, 2333), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(self.grid_height, self.grid_width)'}), '(2, size=(self.grid_height, self.grid_width))\n', (2288, 2333), True, 'import numpy as np\n')] |
from encoder import Encoder
from decoder import Decoder
from parser import Parser
from baseline import *
from language_model import LanguageModel
from util import Reader
import dynet as dy
from misc import compute_eval_score, compute_perplexity
import os
initializers = {'glorot': dy.GlorotInitializer(),
'constant': dy.ConstInitializer(0.01),
'uniform': dy.UniformInitializer(0.1),
'normal': dy.NormalInitializer(mean = 0, var = 1)
}
optimizers = {
"sgd": dy.SimpleSGDTrainer,
"adam": dy.AdamTrainer,
"adadelta": dy.AdadeltaTrainer,
"adagrad": dy.AdagradTrainer
}
class Session(object):
def __init__(self, options):
self.reader = Reader(options.data_dir, options.data_augment)
self.options = options
    def supervised_enc(self):
        """Supervised training of the encoder (parser) on gold actions.

        Periodically decodes the dev split, scores it via
        compute_eval_score, and saves the model whenever F1 improves;
        otherwise the learning rate is decayed (only used by SGD).
        """
        encoder = self.create_encoder()
        # Warm-start from a previously saved encoder when available.
        if os.path.exists(self.options.result_dir + 'model_enc'):
            self.load_encoder(encoder)
        enc_trainer = optimizers[self.options.optimizer](encoder.model)
        lr = self.options.lr #used only for sgd
        i = 0  # global example counter across epochs
        best_f1 = 0
        print ('supervised training for encoder...')
        for epoch in range(self.options.epochs):
            sents = 0
            total_loss = 0.0
            train = self.reader.next_example(0)
            train_size = len(self.reader.data[0])
            for data in train:
                s1, s2, s3, pos, act = data[0], data[1], data[2], data[3], data[4]
                loss = encoder.train(s1, s2, s3, pos, act, self.options.enc_dropout)
                sents += 1
                if loss is not None:
                    total_loss += loss.scalar_value()
                    loss.backward()
                    if self.options.optimizer == 'sgd':
                        enc_trainer.update(lr)
                    else:
                        enc_trainer.update()
                e = float(i) / train_size  # fractional epoch, for logging only
                if i % self.options.print_every == 0:
                    print('epoch {}: loss per sentence: {}'.format(e, total_loss / sents))
                    sents = 0
                    total_loss = 0.0
                if i!=0 and i % self.options.save_every == 0:
                    print('computing loss on validation set...')
                    valid = self.reader.next_example(2) #fix this
                    valid_size = len(self.reader.data[2])
                    # Decode the validation split and write the parses to
                    # disk so the external scorer can evaluate them.
                    rf = open(self.options.result_dir+'result', 'w')
                    for vdata in valid:
                        s1, s2, s3, pos, act = vdata[0], vdata[1], vdata[2], vdata[3], vdata[4]
                        _, output, _ = encoder.parse(s1, s2, s3, pos)
                        rf.write(output + '\n')
                    rf.close()
                    f1 = compute_eval_score(self.options.result_dir)
                    if f1 > best_f1:
                        best_f1 = f1
                        print ('highest f1: {}'.format(f1))
                        print ('saving model...')
                        encoder.Save(self.options.result_dir + 'model_enc')
                    else:
                        # No improvement: decay the learning rate (sgd only).
                        lr = lr * self.options.decay
                i += 1
    def supervised_dec(self):
        """Supervised training of the decoder on gold action sequences.

        Periodically measures loss on the validation split and saves the
        model when it improves; otherwise decays the learning rate
        (only used by SGD).
        """
        decoder = self.create_decoder()
        # Warm-start from a previously saved decoder when available.
        if os.path.exists(self.options.result_dir + 'model_dec'):
            self.load_decoder(decoder)
        dec_trainer = optimizers[self.options.optimizer](decoder.model)
        lr = self.options.lr #used only for sgd
        i = 0  # global example counter across epochs
        lowest_valid_loss = 9999
        print ('supervised training for decoder...')
        for epoch in range(self.options.epochs):
            sents = 0
            total_loss = 0.0
            train = self.reader.next_example(0)
            train_size = len(self.reader.data[0])
            for data in train:
                s1, s2, s3, pos, act = data[0], data[1], data[2], data[3], data[4]
                loss, loss_act, loss_word = decoder.compute_loss(s3, act, self.options.dec_dropout)
                sents += 1
                if loss is not None:
                    total_loss += loss.scalar_value()
                    loss.backward()
                    if self.options.optimizer == 'sgd':
                        dec_trainer.update(lr)
                    else:
                        dec_trainer.update()
                e = float(i) / train_size  # fractional epoch, for logging only
                if i % self.options.print_every == 0:
                    print('epoch {}: loss per sentence: {}'.format(e, total_loss / sents))
                    sents = 0
                    total_loss = 0.0
                if i!=0 and i % self.options.save_every == 0:
                    print('computing loss on validation set...')
                    total_valid_loss = 0
                    valid = self.reader.next_example(1)
                    valid_size = len(self.reader.data[1])
                    for vdata in valid:
                        s1, s2, s3, pos, act = vdata[0], vdata[1], vdata[2], vdata[3], vdata[4]
                        valid_loss, _, _ = decoder.compute_loss(s3, act)
                        if valid_loss is not None:
                            total_valid_loss += valid_loss.scalar_value()
                    total_valid_loss = total_valid_loss * 1.0 / valid_size
                    if total_valid_loss < lowest_valid_loss:
                        lowest_valid_loss = total_valid_loss
                        print ('saving model...')
                        decoder.Save(self.options.result_dir + 'model_dec')
                    else:
                        # No improvement: decay the learning rate (sgd only).
                        lr = lr * self.options.decay
                i += 1
    def unsupervised_with_baseline(self):
        """Unsupervised (REINFORCE-style) joint training with a learned baseline.

        Requires pretrained encoder and decoder models on disk.  Per example:
        sample actions from the encoder, compute the decoder reconstruction
        loss (reward = log p(x)), update the decoder, fit the baseline to the
        reward, and update the encoder with the baseline-subtracted reward.
        """
        decoder = self.create_decoder()
        # Both pretrained models are mandatory; the baseline is optional.
        assert(os.path.exists(self.options.result_dir + 'model_dec'))
        self.load_decoder(decoder)
        encoder = self.create_encoder()
        assert(os.path.exists(self.options.result_dir + 'model_enc'))
        self.load_encoder(encoder)
        baseline = self.create_baseline()
        if os.path.exists(self.options.result_dir + 'baseline'):
            self.load_baseline(baseline)
        enc_trainer = optimizers[self.options.optimizer](encoder.model)
        dec_trainer = optimizers[self.options.optimizer](decoder.model)
        baseline_trainer = optimizers[self.options.optimizer](baseline.model)
        lr = self.options.lr #used only for sgd
        i = 0  # global example counter across epochs
        lowest_valid_loss = 9999
        print ('unsupervised training...')
        for epoch in range(self.options.epochs):
            sents = 0
            total_loss = 0.0
            train = self.reader.next_example(0)
            train_size = len(self.reader.data[0])
            for data in train:
                s1, s2, s3, pos, act = data[0], data[1], data[2], data[3], data[4]
                sents += 1
                # random sample of an action sequence from the encoder
                enc_loss_act, _, act = encoder.parse(s1, s2, s3, pos, sample=True)
                _, dec_loss_act, dec_loss_word = decoder.compute_loss(s3, act)
                # save reward (log-likelihood of the reconstruction)
                logpx = -dec_loss_word.scalar_value()
                total_loss -= logpx
                # reconstruction and regularization loss backprop to theta_d
                dec_loss_total = dec_loss_word + dec_loss_act * dy.scalarInput(self.options.dec_reg)
                dec_loss_total = dec_loss_total * dy.scalarInput(1.0 / self.options.mcsamples)
                dec_loss_total.scalar_value()
                dec_loss_total.backward()
                # update decoder
                if self.options.optimizer == 'sgd':
                    dec_trainer.update(lr)
                else:
                    dec_trainer.update()
                if self.options.enc_update > 0:
                    # compute baseline and backprop to theta_b
                    b = baseline(s3)
                    logpxb = b.scalar_value()
                    b_loss = dy.squared_distance(b, dy.scalarInput(logpx))
                    b_loss.value()
                    b_loss.backward()
                    # update baseline
                    if self.options.optimizer == 'sgd':
                        baseline_trainer.update(lr)
                    else:
                        baseline_trainer.update()
                    # policy and regularization loss backprop to theta_e,
                    # using the baseline-subtracted, length-normalized reward
                    enc_loss_act = encoder.train(s1, s2, s3, pos, act)
                    enc_loss_policy = enc_loss_act * dy.scalarInput((logpx - logpxb) / len(s1))
                    enc_loss_total = enc_loss_policy * dy.scalarInput(self.options.enc_update) - enc_loss_act * dy.scalarInput(self.options.enc_reg)
                    enc_loss_total = enc_loss_total * dy.scalarInput(1.0 / self.options.mcsamples)
                    enc_loss_total.value()
                    enc_loss_total.backward()
                    # update encoder
                    if self.options.optimizer == 'sgd':
                        enc_trainer.update(lr)
                    else:
                        enc_trainer.update()
                e = float(i) / train_size  # fractional epoch, for logging only
                if i % self.options.print_every == 0:
                    print('epoch {}: loss per sentence: {}'.format(e, total_loss / sents))
                    sents = 0
                    total_loss = 0.0
                if i!=0 and i % self.options.save_every == 0:
                    print('computing loss on validation set...')
                    total_valid_loss = 0
                    valid = self.reader.next_example(1)
                    valid_size = len(self.reader.data[1])
                    for vdata in valid:
                        s1, s2, s3, pos, act = vdata[0], vdata[1], vdata[2], vdata[3], vdata[4]
                        _, _, valid_word_loss = decoder.compute_loss(s3, act)
                        if valid_word_loss is not None:
                            total_valid_loss += valid_word_loss.scalar_value()
                    total_valid_loss = total_valid_loss * 1.0 / valid_size
                    if total_valid_loss < lowest_valid_loss:
                        lowest_valid_loss = total_valid_loss
                        print ('saving model...')
                        encoder.Save(self.options.result_dir + 'model_enc')
                        decoder.Save(self.options.result_dir + 'model_dec')
                        baseline.Save(self.options.result_dir + 'baseline')
                    else:
                        # No improvement: decay the learning rate (sgd only).
                        lr = lr * self.options.decay
                i += 1
    def unsupervised_without_baseline(self):
        """Unsupervised REINFORCE-style training without a learned baseline.

        Loads previously saved encoder/decoder weights, then for every
        training sentence runs two parses: a greedy ("max") parse whose
        reconstruction log-likelihood (logpxb) serves as a self-critical
        baseline, and a random sample whose log-likelihood (logpx) is the
        reward.  The decoder is updated on reconstruction + regularization
        loss; the encoder on the policy-gradient term scaled by the reward
        advantage (logpx - logpxb).  Every ``save_every`` sentences the
        validation loss is computed; the best models are saved to
        ``result_dir`` and the SGD learning rate decays otherwise.
        """
        decoder = self.create_decoder()
        assert(os.path.exists(self.options.result_dir + 'model_dec'))
        self.load_decoder(decoder)
        encoder = self.create_encoder()
        assert(os.path.exists(self.options.result_dir + 'model_enc'))
        self.load_encoder(encoder)
        enc_trainer = optimizers[self.options.optimizer](encoder.model)
        dec_trainer = optimizers[self.options.optimizer](decoder.model)
        lr = self.options.lr  # used only for sgd
        i = 0  # global sentence counter across all epochs
        lowest_valid_loss = 9999
        print ('unsupervised training...')
        for epoch in range(self.options.epochs):
            sents = 0
            total_loss = 0.0
            train = self.reader.next_example(0)
            train_size = len(self.reader.data[0])
            for data in train:
                s1, s2, s3, pos, act = data[0], data[1], data[2], data[3], data[4]
                sents += 1
                # max sample: greedy parse supplies the baseline reward logpxb
                enc_loss_act, _, act = encoder.parse(s1, s2, s3, pos, sample=False)
                _, dec_loss_act, dec_loss_word = decoder.compute_loss(s3, act)
                logpxb = -dec_loss_word.scalar_value()
                total_loss -= logpxb
                # random sample: stochastic parse supplies the reward logpx
                enc_loss_act, _, act = encoder.parse(s1, s2, s3, pos, sample=True)
                _, dec_loss_act, dec_loss_word = decoder.compute_loss(s3, act)
                # save reward
                logpx = -dec_loss_word.scalar_value()
                # reconstruction and regularization loss backprop to theta_d
                dec_loss_total = dec_loss_word + dec_loss_act * dy.scalarInput(self.options.dec_reg)
                dec_loss_total = dec_loss_total * dy.scalarInput(1.0 / self.options.mcsamples)
                dec_loss_total.scalar_value()  # forces the forward pass before backward()
                dec_loss_total.backward()
                # update decoder
                if self.options.optimizer == 'sgd':
                    dec_trainer.update(lr)
                else:
                    dec_trainer.update()
                if self.options.enc_update > 0:
                    # policy and regularization loss backprop to theta_e
                    enc_loss_act = encoder.train(s1, s2, s3, pos, act)
                    # advantage (logpx - logpxb) normalized by sentence length
                    enc_loss_policy = enc_loss_act * dy.scalarInput((logpx - logpxb) / len(s1))
                    enc_loss_total = enc_loss_policy * dy.scalarInput(self.options.enc_update) - enc_loss_act * dy.scalarInput(self.options.enc_reg)
                    enc_loss_total = enc_loss_total * dy.scalarInput(1.0 / self.options.mcsamples)
                    enc_loss_total.value()  # forces the forward pass before backward()
                    enc_loss_total.backward()
                    if self.options.optimizer == 'sgd':
                        enc_trainer.update(lr)
                    else:
                        enc_trainer.update()
                e = float(i) / train_size  # fractional epoch for progress reporting
                if i % self.options.print_every == 0:
                    print('epoch {}: loss per sentence: {}'.format(e, total_loss / sents))
                    sents = 0
                    total_loss = 0.0
                if i!=0 and i % self.options.save_every == 0:
                    print('computing loss on validation set...')
                    total_valid_loss = 0
                    valid = self.reader.next_example(1)
                    valid_size = len(self.reader.data[1])
                    for vdata in valid:
                        s1, s2, s3, pos, act = vdata[0], vdata[1], vdata[2], vdata[3], vdata[4]
                        _, _, valid_word_loss = decoder.compute_loss(s3, act)
                        if valid_word_loss is not None:
                            total_valid_loss += valid_word_loss.scalar_value()
                    total_valid_loss = total_valid_loss * 1.0 / valid_size
                    if total_valid_loss < lowest_valid_loss:
                        lowest_valid_loss = total_valid_loss
                        print ('saving model...')
                        encoder.Save(self.options.result_dir + 'model_enc')
                        decoder.Save(self.options.result_dir + 'model_dec')
                    else:
                        # no improvement: decay the (sgd) learning rate
                        lr = lr * self.options.decay
                i += 1
    def pretrain_baseline(self):
        """Pre-train the reward baseline network on the training split.

        The per-sentence loss is the baseline's negative log-likelihood of
        the target sentence (``-baseline(s3)``).  Every ``save_every``
        sentences the validation loss is computed; the best baseline is
        saved to ``result_dir`` and the SGD learning rate decays otherwise.
        """
        baseline = self.create_baseline()
        # Resume from a previously saved baseline if one exists.
        if os.path.exists(self.options.result_dir + 'baseline'):
            self.load_baseline(baseline)
        baseline_trainer = optimizers[self.options.optimizer](baseline.model)
        lr = self.options.lr  # used only for sgd
        i = 0  # global sentence counter across all epochs
        lowest_valid_loss = 9999
        print ('train baseline, for simplicity use the same data here')
        for epoch in range(self.options.epochs):
            sents = 0
            total_loss = 0.0
            train = self.reader.next_example(0)
            train_size = len(self.reader.data[0])
            for data in train:
                s1, s2, s3, pos, act = data[0], data[1], data[2], data[3], data[4]
                sents += 1
                loss = -baseline(s3)
                if loss is not None:
                    total_loss += loss.scalar_value()
                # NOTE(review): backward() runs even when loss is None -- this
                # would raise; presumably baseline() never returns None here.
                loss.backward()
                if self.options.optimizer == 'sgd':
                    baseline_trainer.update(lr)
                else:
                    baseline_trainer.update()
                e = float(i) / train_size  # fractional epoch for progress reporting
                if i % self.options.print_every == 0:
                    print('epoch {}: loss per sentence: {}'.format(e, total_loss / sents))
                    sents = 0
                    total_loss = 0.0
                if i!=0 and i % self.options.save_every == 0:
                    print('computing loss on validation set...')
                    total_valid_loss = 0
                    valid = self.reader.next_example(1)
                    valid_size = len(self.reader.data[1])
                    for vdata in valid:
                        s1, s2, s3, pos, act = vdata[0], vdata[1], vdata[2], vdata[3], vdata[4]
                        valid_loss = -baseline(s3)
                        if valid_loss is not None:
                            total_valid_loss += valid_loss.scalar_value()
                    total_valid_loss = total_valid_loss * 1.0 / valid_size
                    if total_valid_loss < lowest_valid_loss:
                        lowest_valid_loss = total_valid_loss
                        print ('saving model...')
                        baseline.Save(self.options.result_dir + 'baseline')
                    else:
                        # no improvement: decay the (sgd) learning rate
                        lr = lr * self.options.decay
                i += 1
def parsing(self):
decoder = self.create_decoder()
assert(os.path.exists(self.options.result_dir + 'model_dec'))
self.load_decoder(decoder)
encoder = self.create_encoder()
assert(os.path.exists(self.options.result_dir + 'model_enc'))
self.load_encoder(encoder)
print('parsing...')
rf = open(os.path.join(self.options.result_dir, 'result'), 'w')
test = self.reader.next_example(2)
p = Parser(encoder, decoder)
for dataid, data in enumerate(test):
s1, s2, s3, pos, act = data[0], data[1], data[2], data[3], data[4]
output = p(s1, s2, s3, pos, self.options.nsamples)
rf.write(output + '\n')
rf.close()
f1 = compute_eval_score(self.options.result_dir)
print('bracket F1 score is {}'.format(f1))
    def language_modeling(self):
        """Compute and print test-set perplexity with the saved models.

        Accumulates the LanguageModel log-likelihood and token count over
        every test sentence longer than one token, then reports perplexity.
        """
        decoder = self.create_decoder()
        assert(os.path.exists(self.options.result_dir + 'model_dec'))
        self.load_decoder(decoder)
        encoder = self.create_encoder()
        assert(os.path.exists(self.options.result_dir + 'model_enc'))
        self.load_encoder(encoder)
        print('computing language model score...')
        test = self.reader.next_example(2)
        lm = LanguageModel(encoder, decoder)
        total_ll = 0
        total_tokens = 0
        for dataid, data in enumerate(test):
            s1, s2, s3, pos, act = data[0], data[1], data[2], data[3], data[4]
            if len(s1) <= 1:
                continue  # single-token sentences are skipped
            total_ll += lm(s1, s2, s3, pos, self.options.nsamples)
            total_tokens += len(s1)
        perp = compute_perplexity(total_ll, total_tokens)
        print('perplexity: {}'.format(perp))
def create_decoder(self):
return Decoder(self.reader,
self.options.nlayers,
self.options.word_dim,
self.options.pretrained_dim,
self.options.action_dim,
self.options.dec_lstm_dim,
self.options.embedding_file)
def create_encoder(self):
return Encoder(self.reader,
self.options.nlayers,
self.options.word_dim,
self.options.pretrained_dim,
self.options.pos_dim,
self.options.action_dim,
self.options.enc_lstm_dim,
self.options.embedding_file)
def create_baseline(self):
baseline = None
if self.options.baseline == 'rnnlm':
baseline = LanguageModelBaseline(self.reader,
self.options.word_dim,
self.options.pretrained_dim,
self.options.dec_lstm_dim,
self.options.embedding_file)
elif self.options.baseline == 'rnnauto':
baseline = RNNAutoencBaseline(self.reader,
self.options.word_dim,
self.options.pretrained_dim,
self.options.dec_lstm_dim,
self.options.embedding_file)
elif self.options.baseline == 'mlp':
baseline = MLPAutoencBaseline(self.reader,
self.options.word_dim,
self.options.pretrained_dim,
self.options.embedding_file)
else:
raise NotImplementedError("Baseline Not Implmented")
return baseline
def load_decoder(self, decoder):
decoder.Load(self.options.result_dir + 'model_dec')
def load_encoder(self, encoder):
encoder.Load(self.options.result_dir + 'model_enc')
def load_baseline(self, baseline):
baseline.Load(self.options.result_dir + 'baseline')
| [
"os.path.exists",
"dynet.ConstInitializer",
"encoder.Encoder",
"dynet.scalarInput",
"parser.Parser",
"util.Reader",
"os.path.join",
"dynet.UniformInitializer",
"decoder.Decoder",
"dynet.NormalInitializer",
"language_model.LanguageModel",
"misc.compute_eval_score",
"misc.compute_perplexity",
... | [((283, 305), 'dynet.GlorotInitializer', 'dy.GlorotInitializer', ([], {}), '()\n', (303, 305), True, 'import dynet as dy\n'), ((335, 360), 'dynet.ConstInitializer', 'dy.ConstInitializer', (['(0.01)'], {}), '(0.01)\n', (354, 360), True, 'import dynet as dy\n'), ((389, 415), 'dynet.UniformInitializer', 'dy.UniformInitializer', (['(0.1)'], {}), '(0.1)\n', (410, 415), True, 'import dynet as dy\n'), ((443, 478), 'dynet.NormalInitializer', 'dy.NormalInitializer', ([], {'mean': '(0)', 'var': '(1)'}), '(mean=0, var=1)\n', (463, 478), True, 'import dynet as dy\n'), ((785, 831), 'util.Reader', 'Reader', (['options.data_dir', 'options.data_augment'], {}), '(options.data_dir, options.data_augment)\n', (791, 831), False, 'from util import Reader\n'), ((945, 998), 'os.path.exists', 'os.path.exists', (["(self.options.result_dir + 'model_enc')"], {}), "(self.options.result_dir + 'model_enc')\n", (959, 998), False, 'import os\n'), ((3354, 3407), 'os.path.exists', 'os.path.exists', (["(self.options.result_dir + 'model_dec')"], {}), "(self.options.result_dir + 'model_dec')\n", (3368, 3407), False, 'import os\n'), ((5802, 5855), 'os.path.exists', 'os.path.exists', (["(self.options.result_dir + 'model_dec')"], {}), "(self.options.result_dir + 'model_dec')\n", (5816, 5855), False, 'import os\n'), ((5948, 6001), 'os.path.exists', 'os.path.exists', (["(self.options.result_dir + 'model_enc')"], {}), "(self.options.result_dir + 'model_enc')\n", (5962, 6001), False, 'import os\n'), ((6100, 6152), 'os.path.exists', 'os.path.exists', (["(self.options.result_dir + 'baseline')"], {}), "(self.options.result_dir + 'baseline')\n", (6114, 6152), False, 'import os\n'), ((10718, 10771), 'os.path.exists', 'os.path.exists', (["(self.options.result_dir + 'model_dec')"], {}), "(self.options.result_dir + 'model_dec')\n", (10732, 10771), False, 'import os\n'), ((10864, 10917), 'os.path.exists', 'os.path.exists', (["(self.options.result_dir + 'model_enc')"], {}), "(self.options.result_dir + 
'model_enc')\n", (10878, 10917), False, 'import os\n'), ((15004, 15056), 'os.path.exists', 'os.path.exists', (["(self.options.result_dir + 'baseline')"], {}), "(self.options.result_dir + 'baseline')\n", (15018, 15056), False, 'import os\n'), ((17394, 17447), 'os.path.exists', 'os.path.exists', (["(self.options.result_dir + 'model_dec')"], {}), "(self.options.result_dir + 'model_dec')\n", (17408, 17447), False, 'import os\n'), ((17540, 17593), 'os.path.exists', 'os.path.exists', (["(self.options.result_dir + 'model_enc')"], {}), "(self.options.result_dir + 'model_enc')\n", (17554, 17593), False, 'import os\n'), ((17787, 17811), 'parser.Parser', 'Parser', (['encoder', 'decoder'], {}), '(encoder, decoder)\n', (17793, 17811), False, 'from parser import Parser\n'), ((18068, 18111), 'misc.compute_eval_score', 'compute_eval_score', (['self.options.result_dir'], {}), '(self.options.result_dir)\n', (18086, 18111), False, 'from misc import compute_eval_score, compute_perplexity\n'), ((18253, 18306), 'os.path.exists', 'os.path.exists', (["(self.options.result_dir + 'model_dec')"], {}), "(self.options.result_dir + 'model_dec')\n", (18267, 18306), False, 'import os\n'), ((18399, 18452), 'os.path.exists', 'os.path.exists', (["(self.options.result_dir + 'model_enc')"], {}), "(self.options.result_dir + 'model_enc')\n", (18413, 18452), False, 'import os\n'), ((18598, 18629), 'language_model.LanguageModel', 'LanguageModel', (['encoder', 'decoder'], {}), '(encoder, decoder)\n', (18611, 18629), False, 'from language_model import LanguageModel\n'), ((18978, 19020), 'misc.compute_perplexity', 'compute_perplexity', (['total_ll', 'total_tokens'], {}), '(total_ll, total_tokens)\n', (18996, 19020), False, 'from misc import compute_eval_score, compute_perplexity\n'), ((19125, 19310), 'decoder.Decoder', 'Decoder', (['self.reader', 'self.options.nlayers', 'self.options.word_dim', 'self.options.pretrained_dim', 'self.options.action_dim', 'self.options.dec_lstm_dim', 
'self.options.embedding_file'], {}), '(self.reader, self.options.nlayers, self.options.word_dim, self.\n options.pretrained_dim, self.options.action_dim, self.options.\n dec_lstm_dim, self.options.embedding_file)\n', (19132, 19310), False, 'from decoder import Decoder\n'), ((19486, 19692), 'encoder.Encoder', 'Encoder', (['self.reader', 'self.options.nlayers', 'self.options.word_dim', 'self.options.pretrained_dim', 'self.options.pos_dim', 'self.options.action_dim', 'self.options.enc_lstm_dim', 'self.options.embedding_file'], {}), '(self.reader, self.options.nlayers, self.options.word_dim, self.\n options.pretrained_dim, self.options.pos_dim, self.options.action_dim,\n self.options.enc_lstm_dim, self.options.embedding_file)\n', (19493, 19692), False, 'from encoder import Encoder\n'), ((17678, 17725), 'os.path.join', 'os.path.join', (['self.options.result_dir', '"""result"""'], {}), "(self.options.result_dir, 'result')\n", (17690, 17725), False, 'import os\n'), ((2863, 2906), 'misc.compute_eval_score', 'compute_eval_score', (['self.options.result_dir'], {}), '(self.options.result_dir)\n', (2881, 2906), False, 'from misc import compute_eval_score, compute_perplexity\n'), ((7445, 7489), 'dynet.scalarInput', 'dy.scalarInput', (['(1.0 / self.options.mcsamples)'], {}), '(1.0 / self.options.mcsamples)\n', (7459, 7489), True, 'import dynet as dy\n'), ((12393, 12437), 'dynet.scalarInput', 'dy.scalarInput', (['(1.0 / self.options.mcsamples)'], {}), '(1.0 / self.options.mcsamples)\n', (12407, 12437), True, 'import dynet as dy\n'), ((7358, 7394), 'dynet.scalarInput', 'dy.scalarInput', (['self.options.dec_reg'], {}), '(self.options.dec_reg)\n', (7372, 7394), True, 'import dynet as dy\n'), ((8017, 8038), 'dynet.scalarInput', 'dy.scalarInput', (['logpx'], {}), '(logpx)\n', (8031, 8038), True, 'import dynet as dy\n'), ((8785, 8829), 'dynet.scalarInput', 'dy.scalarInput', (['(1.0 / self.options.mcsamples)'], {}), '(1.0 / self.options.mcsamples)\n', (8799, 8829), True, 'import dynet 
as dy\n'), ((12306, 12342), 'dynet.scalarInput', 'dy.scalarInput', (['self.options.dec_reg'], {}), '(self.options.dec_reg)\n', (12320, 12342), True, 'import dynet as dy\n'), ((13221, 13265), 'dynet.scalarInput', 'dy.scalarInput', (['(1.0 / self.options.mcsamples)'], {}), '(1.0 / self.options.mcsamples)\n', (13235, 13265), True, 'import dynet as dy\n'), ((8637, 8676), 'dynet.scalarInput', 'dy.scalarInput', (['self.options.enc_update'], {}), '(self.options.enc_update)\n', (8651, 8676), True, 'import dynet as dy\n'), ((8694, 8730), 'dynet.scalarInput', 'dy.scalarInput', (['self.options.enc_reg'], {}), '(self.options.enc_reg)\n', (8708, 8730), True, 'import dynet as dy\n'), ((13073, 13112), 'dynet.scalarInput', 'dy.scalarInput', (['self.options.enc_update'], {}), '(self.options.enc_update)\n', (13087, 13112), True, 'import dynet as dy\n'), ((13130, 13166), 'dynet.scalarInput', 'dy.scalarInput', (['self.options.enc_reg'], {}), '(self.options.enc_reg)\n', (13144, 13166), True, 'import dynet as dy\n')] |
#!/usr/bin/env python
# Copyright (c) 2019 Riverbed Technology, Inc.
#
# This software is licensed under the terms and conditions of the MIT License
# accompanying the software ("License"). This software is distributed "AS IS"
# as set forth in the License.
import csv
import sys
import string
import optparse
from collections import defaultdict
from steelscript.netprofiler.core.app import NetProfilerApp
from steelscript.netprofiler.core.hostgroup import HostGroupType, HostGroup
from steelscript.commands.steel import prompt_yn
from steelscript.common.exceptions import RvbdException
# This script will take a file with subnets and SiteNames
# and create a HostGroupType on the target NetProfiler.
# If the HostGroupType already exists, it will be deleted,
# before creating a new one with the same name.
#
# See the EXAMPLE text below for the format of the input
# file. Note that multiple SiteNames with different
# IP address spaces can be included.
EXAMPLE_WARN = """
Invalid file format
Ensure file has correct header.
example file:
subnet SiteName
10.143.58.64/26 CZ-Prague-HG
10.194.32.0/23 MX-SantaFe-HG
10.170.55.0/24 KR-Seoul-HG
10.234.9.0/24 ID-Surabaya-HG
10.143.58.63/23 CZ-Prague-HG
"""
class HostGroupImport(NetProfilerApp):
    """Replace a NetProfiler HostGroupType with the contents of a file.

    The input file must start with a ``subnet SiteName`` header row,
    followed by one ``<CIDR> <group-name>`` row per line (see
    EXAMPLE_WARN).  Any existing HostGroupType with the target name is
    emptied and rewritten; otherwise a new one is created.
    """

    def add_options(self, parser):
        """Register the --hostgroup and -i/--input-file CLI options."""
        super(HostGroupImport, self).add_options(parser)
        group = optparse.OptionGroup(parser, "HostGroup Options")
        group.add_option('--hostgroup', action='store',
                         help='Name of hostgroup to overwrite')
        group.add_option('-i', '--input-file', action='store',
                         help='File path to hostgroup file')
        parser.add_option_group(group)

    def validate_args(self):
        """Ensure all required arguments are present."""
        super(HostGroupImport, self).validate_args()
        if not self.options.input_file:
            self.parser.error('Host group file is required, specify with '
                              '"-i" or "--input-file"')
        if not self.options.hostgroup:
            self.parser.error('Hostgroup name is required, specify with '
                              '"--hostgroup"')

    def validate(self, name):
        """Return True if *name* contains only letters, digits, '.', '-', '_'.

        Fix: ``string.letters`` exists only on Python 2 and raises
        AttributeError on Python 3; ``string.ascii_letters`` is available
        on both.
        """
        valid = set(string.ascii_letters + string.digits + '.-_')
        return all(c in valid for c in name)

    def import_file(self):
        """Parse the input file into a dict mapping group name -> [CIDRs]."""
        groups = defaultdict(list)
        # NOTE(review): 'rb' is the Python 2 csv convention; on Python 3 the
        # csv module needs text mode (open(..., 'r', newline='')) -- confirm
        # the target runtime before changing the mode.
        with open(self.options.input_file, 'rb') as f:
            # Sniff the delimiter from the first 1 KB, then rewind.
            dialect = csv.Sniffer().sniff(f.read(1024))
            f.seek(0)
            reader = csv.reader(f, dialect)
            # Fix: reader.next() is Python 2-only; next(reader) works on both.
            header = next(reader)
            if header != ['subnet', 'SiteName']:
                print(EXAMPLE_WARN)
            for i, row in enumerate(reader):
                cidr, group = row
                if not self.validate(group):
                    # i+2 converts the 0-based data index to a 1-based file
                    # line number (accounting for the header row).
                    print('Invalid group name on line {0}: {1}'
                          ''.format(i+2, group))
                    sys.exit()
                groups[group].append(cidr)
        return groups

    def update_hostgroups(self, groups):
        """Replace the existing HostGroupType with the contents of *groups*."""
        # First find any existing HostGroupType; empty it if found.
        try:
            hgtype = HostGroupType.find_by_name(self.netprofiler,
                                                self.options.hostgroup)
            hgtype.config = []
            hgtype.groups = {}
            print('Existing HostGroupType "{0}" found.'
                  ''.format(self.options.hostgroup))
        except RvbdException:
            print('No existing HostGroupType found, creating a new one.')
            hgtype = HostGroupType.create(self.netprofiler,
                                          self.options.hostgroup)
        # Add the new group -> CIDR mappings.
        for group, cidrs in groups.items():
            hg = HostGroup(hgtype, group)
            hg.add(cidrs)
        # Push the configuration to the NetProfiler.
        hgtype.save()
        print ('HostGroupType "%s" configuration saved.'
               % self.options.hostgroup)

    def main(self):
        """Confirm the overwrite with the user, then run the import."""
        confirm = ('The contents of hostgroup {0} will be overwritten '
                   'by the file {1}, are you sure?'
                   ''.format(self.options.hostgroup, self.options.input_file))
        if not prompt_yn(confirm):
            print('Okay, aborting.')
            sys.exit()
        groups = self.import_file()
        self.update_hostgroups(groups)
        print('Successfully updated {0} on {1}'.format(self.options.hostgroup,
                                                       self.netprofiler.host))
# Script entry point: NetProfilerApp.run() parses CLI options and calls main().
if __name__ == '__main__':
    HostGroupImport().run()
| [
"optparse.OptionGroup",
"collections.defaultdict",
"steelscript.netprofiler.core.hostgroup.HostGroupType.create",
"csv.Sniffer",
"sys.exit",
"steelscript.netprofiler.core.hostgroup.HostGroupType.find_by_name",
"steelscript.netprofiler.core.hostgroup.HostGroup",
"steelscript.commands.steel.prompt_yn",
... | [((1364, 1413), 'optparse.OptionGroup', 'optparse.OptionGroup', (['parser', '"""HostGroup Options"""'], {}), "(parser, 'HostGroup Options')\n", (1384, 1413), False, 'import optparse\n'), ((2399, 2416), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2410, 2416), False, 'from collections import defaultdict\n'), ((2572, 2594), 'csv.reader', 'csv.reader', (['f', 'dialect'], {}), '(f, dialect)\n', (2582, 2594), False, 'import csv\n'), ((3249, 3317), 'steelscript.netprofiler.core.hostgroup.HostGroupType.find_by_name', 'HostGroupType.find_by_name', (['self.netprofiler', 'self.options.hostgroup'], {}), '(self.netprofiler, self.options.hostgroup)\n', (3275, 3317), False, 'from steelscript.netprofiler.core.hostgroup import HostGroupType, HostGroup\n'), ((3855, 3879), 'steelscript.netprofiler.core.hostgroup.HostGroup', 'HostGroup', (['hgtype', 'group'], {}), '(hgtype, group)\n', (3864, 3879), False, 'from steelscript.netprofiler.core.hostgroup import HostGroupType, HostGroup\n'), ((4353, 4371), 'steelscript.commands.steel.prompt_yn', 'prompt_yn', (['confirm'], {}), '(confirm)\n', (4362, 4371), False, 'from steelscript.commands.steel import prompt_yn\n'), ((4422, 4432), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4430, 4432), False, 'import sys\n'), ((3663, 3725), 'steelscript.netprofiler.core.hostgroup.HostGroupType.create', 'HostGroupType.create', (['self.netprofiler', 'self.options.hostgroup'], {}), '(self.netprofiler, self.options.hostgroup)\n', (3683, 3725), False, 'from steelscript.netprofiler.core.hostgroup import HostGroupType, HostGroup\n'), ((2495, 2508), 'csv.Sniffer', 'csv.Sniffer', ([], {}), '()\n', (2506, 2508), False, 'import csv\n'), ((2973, 2983), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2981, 2983), False, 'import sys\n')] |
from datasets.hscic.hscic_datasets import scrape as datasets_scrape
from datasets.hscic.hscic_indicators import scrape as indicators_scrape
def main(workspace):
    """Run the HSCIC datasets and indicators scrapers against *workspace*."""
    for scrape in (datasets_scrape, indicators_scrape):
        scrape(workspace)
"datasets.hscic.hscic_indicators.scrape",
"datasets.hscic.hscic_datasets.scrape"
] | [((170, 196), 'datasets.hscic.hscic_datasets.scrape', 'datasets_scrape', (['workspace'], {}), '(workspace)\n', (185, 196), True, 'from datasets.hscic.hscic_datasets import scrape as datasets_scrape\n'), ((201, 229), 'datasets.hscic.hscic_indicators.scrape', 'indicators_scrape', (['workspace'], {}), '(workspace)\n', (218, 229), True, 'from datasets.hscic.hscic_indicators import scrape as indicators_scrape\n')] |
"""***********************************************************
*** Copyright Tektronix, Inc. ***
*** See www.tek.com/sample-license for licensing terms. ***
***********************************************************"""
import socket
import struct
import math
import time
import sys
echo_cmd = 0
"""*********************************************************************************
Function: instrument_connect(my_socket, ip_address string, my_port int, timeout
do_reset, do_id_query)
Purpose: Open an instance of an instrument object for remote communication
over LAN/Ethernet.
Parameters:
my_socket - Instance of a socket object.
ip_address (string) - The TCP/IP address string associated with the
target instrument.
my_port (int) - The instrument connection port.
timeout (int) - The timeout limit for query/communication exchanges.
do_reset (int) - Determines whether the instrument is to be reset
upon connection to the instrument. Setting to 1
will perform the reset; setting to zero avoids it.
do_clear (int) - Determines whether the instrument is to be cleared
do_id_query (int) - Deterines when the instrument is to echho its
identification string after it is initialized.
Returns:
my_socket - Updated instance of a socket object that includes
attributes of a valid connection.
Revisions:
2019-07-30 JJB Initial revision.
*********************************************************************************"""
def instrument_connect(my_socket, my_address, my_port, timeout, do_reset, do_clear, do_id_query):
    """Connect *my_socket* to the instrument and optionally initialize it.

    Issues *RST when do_reset == 1, *CLS when do_clear == 1 and echoes the
    *IDN? response when do_id_query == 1.  Returns the connected socket.
    """
    endpoint = (my_address, my_port)  # connect() takes one (host, port) tuple
    my_socket.connect(endpoint)
    my_socket.settimeout(timeout)
    # TCP_NODELAY disables Nagle buffering so small SCPI commands go out immediately.
    my_socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    if do_reset == 1:
        instrument_write(my_socket, "*RST")  # restore default state
    if do_clear == 1:
        instrument_write(my_socket, "*CLS")  # clear status registers
    if do_id_query == 1:
        tmp_id = instrument_query(my_socket, "*IDN?", 100)
        print(tmp_id)
    return my_socket
"""*********************************************************************************
Function: instrument_disconnect(my_socket)
Purpose: Break the LAN/Ethernet connection between the controlling computer
and the target instrument.
Parameters:
my_socket - The TCP instrument connection object used for sending
and receiving data.
Returns:
None
Revisions:
2019-07-30 JJB Initial revision.
*********************************************************************************"""
def instrument_disconnect(my_socket):
    """Close the LAN/TCP connection to the instrument."""
    my_socket.close()
"""*********************************************************************************
Function: instrument_write(my_socket, my_command)
Purpose: This function issues control commands to the target instrument.
Parameters:
my_socket - The TCP instrument connection object used for sending
and receiving data.
my_command (string) - The command issued to the instrument to make it
perform some action or service.
Returns:
None
Revisions:
2019-07-30 JJB Initial revision.
*********************************************************************************"""
def instrument_write(my_socket, my_command):
    """Send one command string, newline-terminated, to the instrument."""
    if echo_cmd == 1:
        print(my_command)  # optional local echo for debugging
    my_socket.send("{0}\n".format(my_command).encode())
"""*********************************************************************************
Function: instrument_read(my_socket, receive_size)
Purpose: This function asks the connected instrument to reply with some
previously requested information, typically queued up from a call
to instrument_write().
Parameters:
my_socket - The TCP instrument connection object used for sending
and receiving data.
receive_size (int) - Size of the data/string to be returned to
the caller.
Returns:
reply_string (string) - The requested information returned from the
target instrument.
Revisions:
2019-07-30 JJB Initial revision.
*********************************************************************************"""
def instrument_read(my_socket, receive_size):
    """Receive up to *receive_size* bytes and return them decoded as str."""
    raw = my_socket.recv(receive_size)
    return raw.decode()
"""*********************************************************************************
Function: instrument_query(my_socket, my_command, receive_size)
Purpose: This function issues control commands to the target instrument with
the expectation that data will be returned. For this function
instance, the returned data is (typically) in string format.
Parameters:
my_socket - The TCP instrument connection object used for sending
and receiving data.
my_command (string) - The command issued to the instrument to make it
perform some action or service.
receive_size (int) - The approximate number of bytes of data the caller
expects to be returned in the response from the
instrument.
Returns:
reply_string (string) - The requested information returned from the
target instrument. Obtained by way of a caller
to instrument_read().
Revisions:
2019-07-30 JJB Initial revision.
*********************************************************************************"""
def instrument_query(my_socket, my_command, receive_size):
    """Send *my_command* and return the instrument's reply string."""
    instrument_write(my_socket, my_command)
    reply = instrument_read(my_socket, receive_size)
    return reply
"""*********************************************************************************
Function: write_data(output_data_path, data_str)
Purpose: This function issues control commands to the target instrument.
Parameters:
output_data_path (string) - The file name and path of the file to be written
to. Note that the file is opened in append mode
and no previously existing data will be over-
written.
data_str (string) - The data to be written to file. It is up to the
user to format this data external to this
function prior to using it.
Returns:
None
Revisions:
2020-01-03 JJB Initial revision.
*********************************************************************************"""
def write_data(output_data_path, data_str):
    """Append *data_str* plus a newline to the file at *output_data_path*.

    The file is opened in append mode, so previously written data is
    preserved.  Formatting of *data_str* is the caller's responsibility.
    """
    # Context manager guarantees the file is closed even if the write
    # fails (the original open()/close() pair leaked the handle on error).
    with open(output_data_path, "a") as ofile:
        ofile.write("{0}\n".format(data_str))
def upload_test_sequence(instrument_object, file_and_path):
    """Stream the rows of a 2260B test-sequence file to the instrument.

    Each data row is appended to the instrument-side Lua table via
    append_test_to_global_table().  The first line of the file is a header
    and is not uploaded.
    """
    with open(file_and_path) as file_in:
        for row_number, line in enumerate(file_in, start=1):
            if row_number == 1:
                continue  # skip the header row
            instrument_write(instrument_object,
                             "append_test_to_global_table(\"{0}\")".format(line.rstrip('\r\n')))
"""*********************************************************************************
This example shows how the user of a Keithley DMM6500 can load a TSP script file
and execute embedded functions. This allow the user to customize test operations
at the instrument level. In particular, this example shows how a user might
create a direct socket connection to the Series 2260B power supply and execute
a supply output test sequence that defines voltage/current levels, durations for
each defined step, and slew control.
This program is dependendent on two additional files:
A. The series_2260B_sequence_control.tsp script which....
1. Promotes the transfer of the test sequence file to a Lua table
on the DMM.
2. Initiates the sockets connection to the 2260B
3. Executes the uploaded test sequence.
B. A 2260B test sequence in *.csv format.
*********************************************************************************"""
# ---- Main script: connect to the DMM6500, load the TSP helper script, ----
# ---- then drive the Series 2260B test sequence through the DMM.       ----
my_ip_address = "192.168.1.104"  # Define your instrument's IP address here.
my_port = 5025  # Define your instrument's port number here.
do_instr_reset = 1
do_instr_clear = 1
do_instr_id_query = 1
t1 = time.time()  # start the wall-clock timer
# Open the socket connections...
my_instr = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # Establish a TCP/IP socket object
instrument_connect(my_instr, my_ip_address, my_port, 20000, do_instr_reset, do_instr_clear, do_instr_id_query)
# Ready the instrument to receive the target TSP file contents
file = "series_2260B_sequence_control.tsp"
func_file = open(file, "r")
contents = func_file.read()
func_file.close()
# Remove any previously loaded copy of the wrapper script on the DMM.
instrument_write(my_instr, "if loadfuncs ~= nil then script.delete('loadfuncs') end")
# Load the script file in one large chunk then close out the loadfuncs wrapper script.
instrument_write(my_instr, "loadscript loadfuncs\n{0}\nendscript\n".format(contents))
# Call loadfuncs to load the contents of the script file into active memory
print(instrument_query(my_instr, "loadfuncs()", 32))  # Note that we are echoing a queried function here.
                                                      # You will note that the final line in the functions.tsp
                                                      # script file is a print() command that will push its
                                                      # contents to the output data queue.
instrument_write(my_instr, "do_beep(0.250, 1000, 3)")  # audible cue that loading succeeded
file = "Test_Sequence_06.csv"
upload_test_sequence(my_instr, file)
ip_address_2260B = "192.168.1.117"
# The DMM opens its own socket to the 2260B power supply and runs the sequence.
instrument_write(my_instr, "connect_to_2260B(\"{0}\")".format(ip_address_2260B))
instrument_write(my_instr, "enable_2260B_output({0}, {1}, {2})".format(0.0, 1.0, "ON"))
instrument_write(my_instr, "ps2260_execute_test_sequence()")
instrument_write(my_instr, "enable_2260B_output({0}, {1}, {2})".format(0.0, 1.0, "OFF"))
instrument_write(my_instr, "disconnect_from_2260B()")
instrument_disconnect(my_instr)
t2 = time.time()  # Stop the timer...
# Notify the user of completion and the data streaming rate achieved.
print("done")
print("Total Time Elapsed: {0:.3f} s".format(t2 - t1))
input("Press Enter to continue...")
exit()
"time.time",
"socket.socket"
] | [((8559, 8570), 'time.time', 'time.time', ([], {}), '()\n', (8568, 8570), False, 'import time\n'), ((8616, 8665), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (8629, 8665), False, 'import socket\n'), ((10341, 10352), 'time.time', 'time.time', ([], {}), '()\n', (10350, 10352), False, 'import time\n')] |
import cv2
from dev_autopilot import autopilot
from emulation import get_bindings, clear_input
import threading
import kthread
from pynput import keyboard
from programInfo import showInfo
STATE = 0
def start_action():
    """Restart the autopilot: kill any running worker, then spawn a new one."""
    stop_action()
    worker = kthread.KThread(target=autopilot, name="EDAutopilot")
    worker.start()
def stop_action():
    """Stop the autopilot: close debug windows, kill the worker, release keys."""
    cv2.destroyAllWindows()
    for thread in threading.enumerate():
        # Fix: Thread.getName() is deprecated since Python 3.10; use .name.
        if thread.name == 'EDAutopilot':
            thread.kill()  # kill() is KThread-specific, not plain threading.Thread
    clear_input(get_bindings())
def on_press(key):
    """pynput key-press hook: Home starts the autopilot, End stops it."""
    try:
        if key == keyboard.Key.home:
            print('start action')
            start_action()
        elif key == keyboard.Key.end:
            print('stop action')
            stop_action()
    except AttributeError:
        print('special key {0} pressed'.format(key))
def on_release(key):
    """pynput key-release hook: Esc tears everything down and stops the listener."""
    if key != keyboard.Key.esc:
        return None
    cv2.destroyAllWindows()
    stop_action()
    return False  # returning False tells pynput to stop the listener
# Show the program banner, then block on the keyboard listener
# (Home = start, End = stop, Esc = quit).
showInfo();
# Collect events until released
with keyboard.Listener(on_press=on_press,on_release=on_release) as listener:
    listener.join()
| [
"pynput.keyboard.Listener",
"threading.enumerate",
"kthread.KThread",
"programInfo.showInfo",
"cv2.destroyAllWindows",
"emulation.get_bindings"
] | [((996, 1006), 'programInfo.showInfo', 'showInfo', ([], {}), '()\n', (1004, 1006), False, 'from programInfo import showInfo\n'), ((347, 370), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (368, 370), False, 'import cv2\n'), ((390, 411), 'threading.enumerate', 'threading.enumerate', ([], {}), '()\n', (409, 411), False, 'import threading\n'), ((1049, 1108), 'pynput.keyboard.Listener', 'keyboard.Listener', ([], {'on_press': 'on_press', 'on_release': 'on_release'}), '(on_press=on_press, on_release=on_release)\n', (1066, 1108), False, 'from pynput import keyboard\n'), ((504, 518), 'emulation.get_bindings', 'get_bindings', ([], {}), '()\n', (516, 518), False, 'from emulation import get_bindings, clear_input\n'), ((924, 947), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (945, 947), False, 'import cv2\n'), ((254, 307), 'kthread.KThread', 'kthread.KThread', ([], {'target': 'autopilot', 'name': '"""EDAutopilot"""'}), "(target=autopilot, name='EDAutopilot')\n", (269, 307), False, 'import kthread\n')] |
"""
Showcases correlated colour temperature computations.
"""
import colour
from colour.utilities import message_box
message_box("Correlated Colour Temperature Computations")
cmfs = colour.MSDS_CMFS["CIE 1931 2 Degree Standard Observer"]
illuminant = colour.SDS_ILLUMINANTS["D65"]
xy = colour.XYZ_to_xy(colour.sd_to_XYZ(illuminant, cmfs) / 100)
uv = colour.UCS_to_uv(colour.XYZ_to_UCS(colour.xy_to_XYZ(xy)))
message_box(
f'Converting to "CCT" and "D_uv" from given "CIE UCS" colourspace "uv" '
f'chromaticity coordinates using "Ohno (2013)" method:\n\n\t{uv}'
)
print(colour.uv_to_CCT(uv, cmfs=cmfs))
print(colour.temperature.uv_to_CCT_Ohno2013(uv, cmfs=cmfs))
print("\n")
message_box("Faster computation with 3 iterations but a lot less precise.")
print(colour.uv_to_CCT(uv, cmfs=cmfs, iterations=3))
print(colour.temperature.uv_to_CCT_Ohno2013(uv, cmfs=cmfs, iterations=3))
print("\n")
message_box(
f'Converting to "CCT" and "D_uv" from given "CIE UCS" colourspace "uv" '
f'chromaticity coordinates using "Robertson (1968)" method:\n\n\t{uv}'
)
print(colour.uv_to_CCT(uv, method="Robertson 1968"))
print(colour.temperature.uv_to_CCT_Robertson1968(uv))
print("\n")
CCT_D_uv = [6503.49254150, 0.00320598]
message_box(
f'Converting to "CIE UCS" colourspace "uv" chromaticity coordinates from '
f'given "CCT" and "D_uv" using "Ohno (2013)" method:\n\n\t{CCT_D_uv}'
)
print(colour.CCT_to_uv(CCT_D_uv, cmfs=cmfs))
print(colour.temperature.CCT_to_uv_Ohno2013(CCT_D_uv, cmfs=cmfs))
print("\n")
message_box(
f'Converting to "CIE UCS" colourspace "uv" chromaticity coordinates from '
f'given "CCT" and "D_uv" using "Robertson (1968)" method:\n\n\t{CCT_D_uv}'
)
print(colour.CCT_to_uv(CCT_D_uv, method="Robertson 1968"))
print(colour.temperature.CCT_to_uv_Robertson1968(CCT_D_uv))
print("\n")
CCT = 6503.49254150
message_box(
f'Converting to "CIE UCS" colourspace "uv" chromaticity coordinates from '
f'given "CCT" using "Krystek (1985)" method:\n\n\t({CCT})'
)
print(colour.CCT_to_uv(CCT, method="Krystek 1985"))
print(colour.temperature.CCT_to_uv_Krystek1985(CCT))
print("\n")
xy = colour.CCS_ILLUMINANTS["CIE 1931 2 Degree Standard Observer"]["D65"]
message_box(
f'Converting to "CCT" from given "CIE xy" chromaticity coordinates using '
f'"McCamy (1992)" method:\n\n\t{xy}'
)
print(colour.xy_to_CCT(xy, method="McCamy 1992"))
print(colour.temperature.xy_to_CCT_McCamy1992(xy))
print("\n")
message_box(
    f'Converting to "CCT" from given "CIE xy" chromaticity coordinates using '
    # The author list was mangled to "<NAME>" by anonymisation tooling;
    # restored from the method's citation.
    f'"Hernandez-Andres, Lee and Romero (1999)" method:\n\n\t{xy}'
)
print(colour.xy_to_CCT(xy, method="Hernandez 1999"))
print(colour.temperature.xy_to_CCT_Hernandez1999(xy))
print("\n")
CCT = 6503.49254150
message_box(
    f'Converting to "CIE xy" chromaticity coordinates from given "CCT" using '
    # The author list was mangled to "<NAME>, <NAME>" by anonymisation
    # tooling; restored from the method's citation.
    f'"Kang, Moon, Hong, Lee, Cho and Kim (2002)" method:\n\n\t{CCT}'
)
print(colour.CCT_to_xy(CCT, method="Kang 2002"))
print(colour.temperature.CCT_to_xy_Kang2002(CCT))
print("\n")
# CCT -> "CIE xy" along the CIE daylight ("D series") locus.
message_box(
    f'Converting to "CIE xy" chromaticity coordinates from given "CCT" using '
    f'"CIE Illuminant D Series" method:\n\n\t{CCT}'
)
print(colour.CCT_to_xy(CCT, method="CIE Illuminant D Series"))
print(colour.temperature.CCT_to_xy_CIE_D(CCT))
| [
"colour.temperature.uv_to_CCT_Robertson1968",
"colour.temperature.CCT_to_uv_Robertson1968",
"colour.CCT_to_xy",
"colour.temperature.xy_to_CCT_McCamy1992",
"colour.xy_to_XYZ",
"colour.xy_to_CCT",
"colour.temperature.CCT_to_xy_CIE_D",
"colour.CCT_to_uv",
"colour.temperature.CCT_to_uv_Krystek1985",
"... | [((119, 176), 'colour.utilities.message_box', 'message_box', (['"""Correlated Colour Temperature Computations"""'], {}), "('Correlated Colour Temperature Computations')\n", (130, 176), False, 'from colour.utilities import message_box\n'), ((411, 569), 'colour.utilities.message_box', 'message_box', (['f"""Converting to "CCT" and "D_uv" from given "CIE UCS" colourspace "uv" chromaticity coordinates using "Ohno (2013)" method:\n\n\t{uv}"""'], {}), '(\n f"""Converting to "CCT" and "D_uv" from given "CIE UCS" colourspace "uv" chromaticity coordinates using "Ohno (2013)" method:\n\n\t{uv}"""\n )\n', (422, 569), False, 'from colour.utilities import message_box\n'), ((686, 761), 'colour.utilities.message_box', 'message_box', (['"""Faster computation with 3 iterations but a lot less precise."""'], {}), "('Faster computation with 3 iterations but a lot less precise.')\n", (697, 761), False, 'from colour.utilities import message_box\n'), ((903, 1066), 'colour.utilities.message_box', 'message_box', (['f"""Converting to "CCT" and "D_uv" from given "CIE UCS" colourspace "uv" chromaticity coordinates using "Robertson (1968)" method:\n\n\t{uv}"""'], {}), '(\n f"""Converting to "CCT" and "D_uv" from given "CIE UCS" colourspace "uv" chromaticity coordinates using "Robertson (1968)" method:\n\n\t{uv}"""\n )\n', (914, 1066), False, 'from colour.utilities import message_box\n'), ((1230, 1394), 'colour.utilities.message_box', 'message_box', (['f"""Converting to "CIE UCS" colourspace "uv" chromaticity coordinates from given "CCT" and "D_uv" using "Ohno (2013)" method:\n\n\t{CCT_D_uv}"""'], {}), '(\n f"""Converting to "CIE UCS" colourspace "uv" chromaticity coordinates from given "CCT" and "D_uv" using "Ohno (2013)" method:\n\n\t{CCT_D_uv}"""\n )\n', (1241, 1394), False, 'from colour.utilities import message_box\n'), ((1523, 1692), 'colour.utilities.message_box', 'message_box', (['f"""Converting to "CIE UCS" colourspace "uv" chromaticity coordinates from given "CCT" and "D_uv" 
using "Robertson (1968)" method:\n\n\t{CCT_D_uv}"""'], {}), '(\n f"""Converting to "CIE UCS" colourspace "uv" chromaticity coordinates from given "CCT" and "D_uv" using "Robertson (1968)" method:\n\n\t{CCT_D_uv}"""\n )\n', (1534, 1692), False, 'from colour.utilities import message_box\n'), ((1849, 2002), 'colour.utilities.message_box', 'message_box', (['f"""Converting to "CIE UCS" colourspace "uv" chromaticity coordinates from given "CCT" using "Krystek (1985)" method:\n\n\t({CCT})"""'], {}), '(\n f"""Converting to "CIE UCS" colourspace "uv" chromaticity coordinates from given "CCT" using "Krystek (1985)" method:\n\n\t({CCT})"""\n )\n', (1860, 2002), False, 'from colour.utilities import message_box\n'), ((2199, 2330), 'colour.utilities.message_box', 'message_box', (['f"""Converting to "CCT" from given "CIE xy" chromaticity coordinates using "McCamy (1992)" method:\n\n\t{xy}"""'], {}), '(\n f"""Converting to "CCT" from given "CIE xy" chromaticity coordinates using "McCamy (1992)" method:\n\n\t{xy}"""\n )\n', (2210, 2330), False, 'from colour.utilities import message_box\n'), ((2449, 2601), 'colour.utilities.message_box', 'message_box', (['f"""Converting to "CCT" from given "CIE xy" chromaticity coordinates using "Hernandez-<NAME> and Romero (1999)" method:\n\n\t{xy}"""'], {}), '(\n f"""Converting to "CCT" from given "CIE xy" chromaticity coordinates using "Hernandez-<NAME> and Romero (1999)" method:\n\n\t{xy}"""\n )\n', (2460, 2601), False, 'from colour.utilities import message_box\n'), ((2746, 2894), 'colour.utilities.message_box', 'message_box', (['f"""Converting to "CIE xy" chromaticity coordinates from given "CCT" using "<NAME>, <NAME> and Kim (2002)" method:\n\n\t{CCT}"""'], {}), '(\n f"""Converting to "CIE xy" chromaticity coordinates from given "CCT" using "<NAME>, <NAME> and Kim (2002)" method:\n\n\t{CCT}"""\n )\n', (2757, 2894), False, 'from colour.utilities import message_box\n'), ((3011, 3153), 'colour.utilities.message_box', 'message_box', 
(['f"""Converting to "CIE xy" chromaticity coordinates from given "CCT" using "CIE Illuminant D Series" method:\n\n\t{CCT}"""'], {}), '(\n f"""Converting to "CIE xy" chromaticity coordinates from given "CCT" using "CIE Illuminant D Series" method:\n\n\t{CCT}"""\n )\n', (3022, 3153), False, 'from colour.utilities import message_box\n'), ((579, 610), 'colour.uv_to_CCT', 'colour.uv_to_CCT', (['uv'], {'cmfs': 'cmfs'}), '(uv, cmfs=cmfs)\n', (595, 610), False, 'import colour\n'), ((618, 670), 'colour.temperature.uv_to_CCT_Ohno2013', 'colour.temperature.uv_to_CCT_Ohno2013', (['uv'], {'cmfs': 'cmfs'}), '(uv, cmfs=cmfs)\n', (655, 670), False, 'import colour\n'), ((768, 813), 'colour.uv_to_CCT', 'colour.uv_to_CCT', (['uv'], {'cmfs': 'cmfs', 'iterations': '(3)'}), '(uv, cmfs=cmfs, iterations=3)\n', (784, 813), False, 'import colour\n'), ((821, 887), 'colour.temperature.uv_to_CCT_Ohno2013', 'colour.temperature.uv_to_CCT_Ohno2013', (['uv'], {'cmfs': 'cmfs', 'iterations': '(3)'}), '(uv, cmfs=cmfs, iterations=3)\n', (858, 887), False, 'import colour\n'), ((1076, 1121), 'colour.uv_to_CCT', 'colour.uv_to_CCT', (['uv'], {'method': '"""Robertson 1968"""'}), "(uv, method='Robertson 1968')\n", (1092, 1121), False, 'import colour\n'), ((1129, 1175), 'colour.temperature.uv_to_CCT_Robertson1968', 'colour.temperature.uv_to_CCT_Robertson1968', (['uv'], {}), '(uv)\n', (1171, 1175), False, 'import colour\n'), ((1404, 1441), 'colour.CCT_to_uv', 'colour.CCT_to_uv', (['CCT_D_uv'], {'cmfs': 'cmfs'}), '(CCT_D_uv, cmfs=cmfs)\n', (1420, 1441), False, 'import colour\n'), ((1449, 1507), 'colour.temperature.CCT_to_uv_Ohno2013', 'colour.temperature.CCT_to_uv_Ohno2013', (['CCT_D_uv'], {'cmfs': 'cmfs'}), '(CCT_D_uv, cmfs=cmfs)\n', (1486, 1507), False, 'import colour\n'), ((1702, 1753), 'colour.CCT_to_uv', 'colour.CCT_to_uv', (['CCT_D_uv'], {'method': '"""Robertson 1968"""'}), "(CCT_D_uv, method='Robertson 1968')\n", (1718, 1753), False, 'import colour\n'), ((1761, 1813), 
'colour.temperature.CCT_to_uv_Robertson1968', 'colour.temperature.CCT_to_uv_Robertson1968', (['CCT_D_uv'], {}), '(CCT_D_uv)\n', (1803, 1813), False, 'import colour\n'), ((2012, 2056), 'colour.CCT_to_uv', 'colour.CCT_to_uv', (['CCT'], {'method': '"""Krystek 1985"""'}), "(CCT, method='Krystek 1985')\n", (2028, 2056), False, 'import colour\n'), ((2064, 2109), 'colour.temperature.CCT_to_uv_Krystek1985', 'colour.temperature.CCT_to_uv_Krystek1985', (['CCT'], {}), '(CCT)\n', (2104, 2109), False, 'import colour\n'), ((2340, 2382), 'colour.xy_to_CCT', 'colour.xy_to_CCT', (['xy'], {'method': '"""McCamy 1992"""'}), "(xy, method='McCamy 1992')\n", (2356, 2382), False, 'import colour\n'), ((2390, 2433), 'colour.temperature.xy_to_CCT_McCamy1992', 'colour.temperature.xy_to_CCT_McCamy1992', (['xy'], {}), '(xy)\n', (2429, 2433), False, 'import colour\n'), ((2611, 2656), 'colour.xy_to_CCT', 'colour.xy_to_CCT', (['xy'], {'method': '"""Hernandez 1999"""'}), "(xy, method='Hernandez 1999')\n", (2627, 2656), False, 'import colour\n'), ((2664, 2710), 'colour.temperature.xy_to_CCT_Hernandez1999', 'colour.temperature.xy_to_CCT_Hernandez1999', (['xy'], {}), '(xy)\n', (2706, 2710), False, 'import colour\n'), ((2904, 2945), 'colour.CCT_to_xy', 'colour.CCT_to_xy', (['CCT'], {'method': '"""Kang 2002"""'}), "(CCT, method='Kang 2002')\n", (2920, 2945), False, 'import colour\n'), ((2953, 2995), 'colour.temperature.CCT_to_xy_Kang2002', 'colour.temperature.CCT_to_xy_Kang2002', (['CCT'], {}), '(CCT)\n', (2990, 2995), False, 'import colour\n'), ((3163, 3218), 'colour.CCT_to_xy', 'colour.CCT_to_xy', (['CCT'], {'method': '"""CIE Illuminant D Series"""'}), "(CCT, method='CIE Illuminant D Series')\n", (3179, 3218), False, 'import colour\n'), ((3226, 3265), 'colour.temperature.CCT_to_xy_CIE_D', 'colour.temperature.CCT_to_xy_CIE_D', (['CCT'], {}), '(CCT)\n', (3260, 3265), False, 'import colour\n'), ((306, 340), 'colour.sd_to_XYZ', 'colour.sd_to_XYZ', (['illuminant', 'cmfs'], {}), '(illuminant, cmfs)\n', 
(322, 340), False, 'import colour\n'), ((388, 408), 'colour.xy_to_XYZ', 'colour.xy_to_XYZ', (['xy'], {}), '(xy)\n', (404, 408), False, 'import colour\n')] |
#!/usr/bin/env python
import drmaa
import shlex
from optparse import OptionParser
from sys import stderr, stdin, exit
from datetime import datetime
import traceback
def Stop(pool, jt, info):
    '''Job failure function that stops synchronization.

    Records the failed template, clears the overall success flag and asks
    the pool to abort the synchronization loop.
    '''
    pool.failed_jobs.append(jt)
    pool.all_done = False
    pool.shall_stop = True
def Proceed(pool, jt, info):
    '''Job failure function that proceeds with the remaining jobs.

    Records the failed template and clears the overall success flag, but
    leaves the synchronization loop running.
    '''
    pool.failed_jobs.append(jt)
    pool.all_done = False
def Resubmit(max_tries, fail):
    '''Job failure function factory that resubmits a failed job.

    :Parameters:
    max_tries: maximum number of submissions for a job.
    fail: failure function to call if the maximum number of tries has been reached.
    '''
    def resubmit_function(pool, jt, info):
        if jt.failures >= max_tries:
            # Retry budget exhausted: delegate to the wrapped failure handler.
            fail(pool, jt, info)
            return
        new_id = pool.session.runJob(jt)
        jt.jobid = new_id
        pool.log('job specified at ' + jt.source + ' resubmitted with id ' + new_id)
        pool.current_jobs[new_id] = jt
    return resubmit_function
class JobPool:
    '''
    A pool of jobs.

    :Members:
    session: DRMAA session
    logfile: file where actions and status are written
    current_jobs: maps job id -> job template for submitted, unfinished jobs
    all_done: True iff every finished job was successful
    shall_stop: True iff this object should stop the synchronization
    failed_jobs: job templates of the jobs that failed
    '''
    def __init__(self, session, logfile):
        self.session = session
        self.logfile = logfile
        self.current_jobs = {}
        self.all_done = True
        self.shall_stop = False
        self.failed_jobs = []

    def log(self, msg=''):
        '''Writes a timestamped message to the log file and flushes it.'''
        d = datetime.now()
        self.logfile.write('[' + d.strftime('%Y-%m-%d %H:%M:%S') + '] ' + msg + '\n')
        self.logfile.flush()

    def createJobTemplate(self):
        '''Creates a job template (delegates to self.session).'''
        return self.session.createJobTemplate()

    def runJob(self, jt):
        '''Submits a job and starts tracking it.

        :Parameters:
        jt: job template, with a member 'source' indicating where this template was specified
        :Return value:
        the DRMAA job id assigned to the submitted job.
        '''
        jt.jobid = self.session.runJob(jt)
        if jt.source is None:
            # Fall back to the job id as the source label.
            # BUG FIX: the original assigned the undefined name 'jobid' here,
            # raising NameError whenever a template had no source.
            jt.source = jt.jobid
        jt.failures = 0
        self.log('job specified at ' + jt.source + ' submitted with id ' + jt.jobid)
        self.current_jobs[jt.jobid] = jt
        return jt.jobid

    def waitall(self, fail=Proceed, interval=60):
        '''Waits for all submitted jobs to finish.

        :Parameters:
        fail: function called in case of failure; it must accept 3 parameters: this object, the JobTemplate object and the DRMAA JobInfo object.
        interval: check for job status every number of seconds.
        '''
        start = datetime.now()
        running = 0
        while self.current_jobs:
            joblist = list(self.current_jobs.keys()) # create fresh list to work around Python 3 iterator
            try:
                self.log('synchronizing %d jobs (%d running), see you in %d seconds' % (len(joblist), running, interval))
                self.session.synchronize(joblist, interval, False)
            except drmaa.errors.ExitTimeoutException:
                # Not all jobs finished within 'interval'; poll statuses anyway.
                pass
            running = 0
            for jobid in joblist:
                status = self.session.jobStatus(jobid)
                if status == drmaa.JobState.DONE:
                    try:
                        info = self.session.wait(jobid, drmaa.Session.TIMEOUT_NO_WAIT)
                    except drmaa.errors.ExitTimeoutException:
                        # BUG FIX: the original swallowed this exception and
                        # then read the undefined 'info'; retry this job on
                        # the next pass instead.
                        continue
                    if info.wasAborted:
                        self.log('job specified at %s with id %s aborted' % (self.current_jobs[jobid].source, jobid))
                        self._failed(jobid, fail, info)
                    elif info.hasSignal:
                        self.log('job specified at %s with id %s aborted received signal %d' % (self.current_jobs[jobid].source, jobid, info.terminatedSignal))
                        self._failed(jobid, fail, info)
                    elif info.exitStatus != 0:
                        self.log('job specified at %s with id %s aborted exited with status %d' % (self.current_jobs[jobid].source, jobid, info.exitStatus))
                        self._failed(jobid, fail, info)
                    else:
                        self.log('job specified at %s with id %s is done' % (self.current_jobs[jobid].source, jobid))
                        del self.current_jobs[jobid]
                elif status == drmaa.JobState.FAILED:
                    self.log('job specified at %s with id %s failed somehow' % (self.current_jobs[jobid].source, jobid))
                    self._failed(jobid, fail, None)
                elif status == drmaa.JobState.RUNNING:
                    running += 1
            if self.shall_stop:
                break
        if self.all_done:
            delta = datetime.now() - start
            self.log('all jobs completed successfully in ' + str(delta) + ', you\'re welcome')
        else:
            self.log('sorry, the following jobs have failed:')
            for job in self.failed_jobs:
                self.log(job.source + ' with id ' + str(job.jobid))

    def _failed(self, jobid, fail, info):
        '''Records one failure for jobid and delegates to the failure function.'''
        jt = self.current_jobs[jobid]
        jt.failures += 1
        del self.current_jobs[jobid]
        fail(self, jt, info)

    def runall(self, jobs, fail=Proceed, interval=60):
        '''Submits jobs and waits for them to finish.

        :Parameters:
        jobs: a sequence of job templates
        fail: job failure function
        interval: job status check interval in seconds
        :Return value:
        True if all jobs finished successfully, False otherwise.
        '''
        for jt in jobs:
            self.runJob(jt)
        self.waitall(fail, interval)
        return self.all_done

    def terminate(self):
        '''Terminates all remaining jobs.'''
        self.log('terminating remaining jobs')
        self.session.control(drmaa.Session.JOB_IDS_SESSION_ALL, drmaa.JobControlAction.TERMINATE)
        self.current_jobs = {}
class QSyncBase:
    '''Template for qsync drivers: subclasses must provide the jobs to run.'''
    def __init__(self):
        pass

    def create_jobs(self, session):
        '''Yields the job templates to submit; must be overridden.'''
        # BUG FIX: the original did 'raise NotImplemented()', which actually
        # raises TypeError ('NotImplementedType' object is not callable)
        # instead of the intended exception.
        raise NotImplementedError()

    def go(self, interval=60, force_interval=False, fail=Proceed, logfile=stderr):
        '''Creates a DRMAA session, submits all jobs and waits for them.

        :Parameters:
        interval: job status poll interval in seconds; must be >= 1, and
        values <= 10 additionally require force_interval=True.
        force_interval: accept unwisely small poll intervals.
        fail: job failure function (Stop, Proceed or a Resubmit wrapper).
        logfile: file object receiving the log messages.
        :Return value:
        True if all jobs finished successfully, False if some failed
        (None if an exception interrupted the run).
        '''
        if interval < 1:
            raise Exception('illegal interval: %d' % interval)
        if interval <= 10 and not force_interval:
            # BUG FIX: the original never applied '% interval' to this format
            # string and left the parenthesis unclosed.
            raise Exception('unwise interval: %d (use force interval if you want this anyway)' % interval)
        session = drmaa.Session()
        session.initialize()
        jobs = self.create_jobs(session)
        pool = JobPool(session, logfile)
        try:
            r = pool.runall(jobs, fail, interval)
            if not r:
                # At least one job failed; make sure nothing is left running.
                pool.terminate()
            return r
        except BaseException:
            pool.log('wow, some exception here...')
            traceback.print_exc()
            pool.terminate()
        finally:
            session.exit()
class QSync(OptionParser, QSyncBase):
    '''Command-line front-end: reads job specifications from files or stdin.'''
    def __init__(self):
        OptionParser.__init__(self, usage='Usage: %prog [OPTIONS] [FILE...]')
        self.set_defaults(fail=Proceed)
        self.add_option('-s', '--stop-on-failure', action='store_const', const=Stop, dest='fail', help='if one job fails, stop synchronization and terminate all remaining jobs')
        self.add_option('-p', '--proceed-on-failure', action='store_const', const=Proceed, dest='fail', help='continue running jobs even if some fail (default behaviour)')
        self.add_option('-r', '--resubmit-on-failure', action='store', type='int', dest='resubmit', help='resubmit failed jobs at most N times each', metavar='N')
        self.add_option('-l', '--log-file', action='store', type='string', dest='logfile', default=None, help='write log into FILE (default: stderr)', metavar='FILE')
        self.add_option('-i', '--interval', action='store', type='int', dest='interval', default=60, help='wait T seconds before polling job status, values below 10 require --force-interval (default: %default)', metavar='T')
        self.add_option('--force-interval', action='store_true', dest='force_interval', default=False, help='accept poll intervals below 10 seconds')

    def run(self):
        '''Parses the command line and runs all jobs.

        :Return value:
        True if all jobs finished successfully (see QSyncBase.go).
        '''
        options, self.filenames = self.parse_args()
        fail = options.fail
        if options.resubmit:
            if options.resubmit < 1:
                raise Exception('illegal number of resubmissions: %d' % options.resubmit)
            fail = Resubmit(options.resubmit, fail)
        logfile = stderr
        if options.logfile:
            logfile = open(options.logfile, 'w')
        # BUG FIX: the original dropped go()'s result, so the
        # "if not QSync().run()" check in __main__ always exited with 1.
        return self.go(interval=options.interval, force_interval=options.force_interval, fail=fail, logfile=logfile)

    @staticmethod
    def _create_job(session, filename, f):
        '''Yields one job template per line of f.

        A line has the form "[native-spec --] command [args...]": the
        optional part before "--" is passed verbatim as the DRMAA native
        specification.
        '''
        for n, line in enumerate(f):
            jt = session.createJobTemplate()
            b, dd, a = line.partition('--')
            if dd != '':
                jt.nativeSpecification = b
                line = a
            args = shlex.split(line)
            jt.remoteCommand = args[0]
            jt.args = args[1:]
            # Record where the template came from for log messages.
            jt.source = '%s:%d' % (filename, n + 1)
            yield jt

    def create_jobs(self, session):
        '''Yields job templates from the given files, or stdin if none.'''
        if self.filenames:
            for filename in self.filenames:
                f = open(filename)
                for p in QSync._create_job(session, filename, f):
                    yield p
                f.close()
        else:
            for p in QSync._create_job(session, '<stdin>', stdin):
                yield p
if __name__ == '__main__':
    # Exit with status 1 when the run was not fully successful.
    # NOTE(review): QSync.run() currently returns None (it drops go()'s
    # result), so this check always exits 1; run() should return the
    # success flag for this to work as intended.
    if not QSync().run():
        exit(1)
| [
"shlex.split",
"optparse.OptionParser.__init__",
"datetime.datetime.now",
"sys.exit",
"traceback.print_exc",
"drmaa.Session"
] | [((1738, 1752), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1750, 1752), False, 'from datetime import datetime\n'), ((2930, 2944), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2942, 2944), False, 'from datetime import datetime\n'), ((6816, 6831), 'drmaa.Session', 'drmaa.Session', ([], {}), '()\n', (6829, 6831), False, 'import drmaa\n'), ((7348, 7417), 'optparse.OptionParser.__init__', 'OptionParser.__init__', (['self'], {'usage': '"""Usage: %prog [OPTIONS] [FILE...]"""'}), "(self, usage='Usage: %prog [OPTIONS] [FILE...]')\n", (7369, 7417), False, 'from optparse import OptionParser\n'), ((9932, 9939), 'sys.exit', 'exit', (['(1)'], {}), '(1)\n', (9936, 9939), False, 'from sys import stderr, stdin, exit\n'), ((9333, 9350), 'shlex.split', 'shlex.split', (['line'], {}), '(line)\n', (9344, 9350), False, 'import shlex\n'), ((5148, 5162), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5160, 5162), False, 'from datetime import datetime\n'), ((7181, 7202), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (7200, 7202), False, 'import traceback\n')] |
import sys
from collections import defaultdict
import torch
from varclr.utils.infer import MockArgs
from varclr.data.preprocessor import CodePreprocessor
if __name__ == "__main__":
ret = torch.load(sys.argv[2])
vars, embs = ret["vars"], ret["embs"]
embs /= embs.norm(dim=1, keepdim=True)
embs = embs.cuda()
var2idx = dict([(var, idx) for idx, var in enumerate(vars)])
processor = CodePreprocessor(MockArgs())
Ks = [1, 5, 10, 25, 50, 100, 250, 500, 1000]
topk_succ = defaultdict(int)
tot = 0
with open(sys.argv[1], "r") as f:
for line in f:
try:
var1, var2 = line.strip().split()
except ValueError:
print("skpped: ", line)
def canon(var):
return "".join(
[
word.capitalize() if idx > 0 else word
for idx, word in enumerate(processor(var).split())
]
)
var1, var2 = canon(var1), canon(var2)
if var1 not in var2idx or var2 not in var2idx:
print(f"variable {var1} or {var2} not found")
continue
tot += 1
for k in Ks:
result = torch.topk(embs @ embs[var2idx[var1]], k=k + 1)
topk_succ[k] += var2 in [vars[idx] for idx in result.indices][1:]
print(f"Total {tot} variable pairs")
for k in Ks:
print(f"Recall@{k} = {100 * topk_succ[k] / tot:.1f}")
| [
"torch.topk",
"torch.load",
"collections.defaultdict",
"varclr.utils.infer.MockArgs"
] | [((194, 217), 'torch.load', 'torch.load', (['sys.argv[2]'], {}), '(sys.argv[2])\n', (204, 217), False, 'import torch\n'), ((501, 517), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (512, 517), False, 'from collections import defaultdict\n'), ((424, 434), 'varclr.utils.infer.MockArgs', 'MockArgs', ([], {}), '()\n', (432, 434), False, 'from varclr.utils.infer import MockArgs\n'), ((1258, 1305), 'torch.topk', 'torch.topk', (['(embs @ embs[var2idx[var1]])'], {'k': '(k + 1)'}), '(embs @ embs[var2idx[var1]], k=k + 1)\n', (1268, 1305), False, 'import torch\n')] |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/models/models.dnn.ipynb (unless otherwise specified).
__all__ = ['Multi_Layer_Perceptron', 'CollabFNet']
# Cell
import torch
import torch.nn as nn
import torch.nn.functional as F
# Cell
class Multi_Layer_Perceptron(nn.Module):
    """MLP recommender scorer for (user, item) index pairs.

    User and item IDs are embedded, concatenated and pushed through a ReLU
    MLP; a final affine layer plus sigmoid yields a score in (0, 1).

    Note: ``args.layers[0]`` must equal ``2 * args.factor_num`` because the
    first linear layer consumes the concatenated embeddings.
    """

    def __init__(self, args, num_users, num_items):
        """
        Args:
            args: configuration exposing ``factor_num`` (embedding size) and
                ``layers`` (MLP widths, first entry being the input width).
            num_users: number of distinct users (embedding rows).
            num_items: number of distinct items (embedding rows).
        """
        super(Multi_Layer_Perceptron, self).__init__()
        self.num_users = num_users
        self.num_items = num_items
        self.factor_num = args.factor_num
        self.layers = args.layers
        self.embedding_user = nn.Embedding(num_embeddings=self.num_users, embedding_dim=self.factor_num)
        self.embedding_item = nn.Embedding(num_embeddings=self.num_items, embedding_dim=self.factor_num)
        # One Linear per consecutive pair of widths in self.layers.
        self.fc_layers = nn.ModuleList(
            [nn.Linear(in_size, out_size)
             for in_size, out_size in zip(self.layers[:-1], self.layers[1:])]
        )
        self.affine_output = nn.Linear(in_features=self.layers[-1], out_features=1)
        self.logistic = nn.Sigmoid()

    def forward(self, user_indices, item_indices):
        """Return a (batch, 1) tensor of sigmoid scores for the given IDs."""
        user_embedding = self.embedding_user(user_indices)
        item_embedding = self.embedding_item(item_indices)
        vector = torch.cat([user_embedding, item_embedding], dim=-1)  # the concat latent vector
        for layer in self.fc_layers:
            # F.relu instead of the original nn.ReLU()(...), which allocated
            # a fresh module object on every call of every layer.
            vector = F.relu(layer(vector))
        logits = self.affine_output(vector)
        rating = self.logistic(logits)
        return rating

    def init_weight(self):
        """Placeholder for custom weight initialisation (defaults are used)."""
        pass
# Cell
class CollabFNet(nn.Module):
    """Two-tower collaborative-filtering MLP.

    Embeds user and item IDs, concatenates the two embeddings and scores
    the pair with a small feed-forward network (dropout on the fused
    features, one hidden ReLU layer, linear output).
    """

    def __init__(self, num_users, num_items, emb_size=100, n_hidden=10):
        super(CollabFNet, self).__init__()
        self.user_emb = nn.Embedding(num_users, emb_size)
        self.item_emb = nn.Embedding(num_items, emb_size)
        self.lin1 = nn.Linear(emb_size * 2, n_hidden)
        self.lin2 = nn.Linear(n_hidden, 1)
        self.drop1 = nn.Dropout(0.1)

    def forward(self, u, v):
        # Fuse the two embeddings into one feature vector per pair.
        features = torch.cat([self.user_emb(u), self.item_emb(v)], dim=1)
        activated = self.drop1(F.relu(features))
        hidden = F.relu(self.lin1(activated))
        return self.lin2(hidden)
"torch.nn.Sigmoid",
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.nn.ModuleList",
"torch.cat",
"torch.nn.Linear",
"torch.nn.Embedding"
] | [((562, 636), 'torch.nn.Embedding', 'nn.Embedding', ([], {'num_embeddings': 'self.num_users', 'embedding_dim': 'self.factor_num'}), '(num_embeddings=self.num_users, embedding_dim=self.factor_num)\n', (574, 636), True, 'import torch.nn as nn\n'), ((667, 741), 'torch.nn.Embedding', 'nn.Embedding', ([], {'num_embeddings': 'self.num_items', 'embedding_dim': 'self.factor_num'}), '(num_embeddings=self.num_items, embedding_dim=self.factor_num)\n', (679, 741), True, 'import torch.nn as nn\n'), ((768, 783), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (781, 783), True, 'import torch.nn as nn\n'), ((969, 1023), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'self.layers[-1]', 'out_features': '(1)'}), '(in_features=self.layers[-1], out_features=1)\n', (978, 1023), True, 'import torch.nn as nn\n'), ((1048, 1060), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (1058, 1060), True, 'import torch.nn as nn\n'), ((1248, 1299), 'torch.cat', 'torch.cat', (['[user_embedding, item_embedding]'], {'dim': '(-1)'}), '([user_embedding, item_embedding], dim=-1)\n', (1257, 1299), False, 'import torch\n'), ((1897, 1930), 'torch.nn.Embedding', 'nn.Embedding', (['num_users', 'emb_size'], {}), '(num_users, emb_size)\n', (1909, 1930), True, 'import torch.nn as nn\n'), ((1955, 1988), 'torch.nn.Embedding', 'nn.Embedding', (['num_items', 'emb_size'], {}), '(num_items, emb_size)\n', (1967, 1988), True, 'import torch.nn as nn\n'), ((2009, 2042), 'torch.nn.Linear', 'nn.Linear', (['(emb_size * 2)', 'n_hidden'], {}), '(emb_size * 2, n_hidden)\n', (2018, 2042), True, 'import torch.nn as nn\n'), ((2061, 2083), 'torch.nn.Linear', 'nn.Linear', (['n_hidden', '(1)'], {}), '(n_hidden, 1)\n', (2070, 2083), True, 'import torch.nn as nn\n'), ((2105, 2120), 'torch.nn.Dropout', 'nn.Dropout', (['(0.1)'], {}), '(0.1)\n', (2115, 2120), True, 'import torch.nn as nn\n'), ((2228, 2252), 'torch.cat', 'torch.cat', (['[U, V]'], {'dim': '(1)'}), '([U, V], dim=1)\n', (2237, 2252), False, 'import 
torch\n'), ((909, 937), 'torch.nn.Linear', 'nn.Linear', (['in_size', 'out_size'], {}), '(in_size, out_size)\n', (918, 937), True, 'import torch.nn as nn\n'), ((1459, 1468), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1466, 1468), True, 'import torch.nn as nn\n')] |
import csv
import operator
if __name__ == "__main__":
# We use try-except statements to properly handle errors, as in case the
# input file does not exist in the directory.
try:
# We open the .csv file loading the filds separated by a ; delimiter:
csv_archivo_locales = open("Locales.csv", encoding="utf8", errors='ignore')
locales = csv.reader(csv_archivo_locales, delimiter=";")
csv_archivo_terrazas = open("Terrazas.csv", encoding="utf8", errors='ignore')
terrazas = csv.reader(csv_archivo_terrazas, delimiter=";")
# We skip the first line before saving the results into a list for later
# processing by using the next() statement. The reason why we do not use
# the line_num function is because we would need to include it in a loop
# and read file line by line whereas this way, with just two instructions
# we get the same result:
next(locales, None)
lista_locales = list(locales)
next(terrazas, None)
lista_terrazas = list(terrazas)
# When we read the fields from the CSV, they are stored as strings, so we
# need to explicitely convert the ID to int to be able to sort them:
for i in lista_locales:
i[0] = int(i[0])
for j in lista_terrazas:
j[0] = int(j[0])
# We sort the lists taking into account the ID column which is in the
# first position to get it with the itemergetter function:
sorted_lista_locales = sorted(lista_locales, key=operator.itemgetter(0), reverse = False)
sorted_lista_terrazas = sorted(lista_terrazas, key=operator.itemgetter(0), reverse = False)
# For each entry in lista_terrazas, we check where is its corresponding
# entry in lista_locales. As they are sorted ascendently, all the entries
# before the wanted one are included in the result list as they do not
# appear in lista_terrazas. Moreover, we keep the index counter to just
# traverse the lista_locales once.
index = 0
no_terrazas = []
for terraza in sorted_lista_terrazas:
while (terraza[0] > sorted_lista_locales[index][0]):
no_terrazas.append(sorted_lista_locales[index])
index += 1
# It is important to perform this step once we have reached the local
# entry from the lista_locales, as if not, the next entry from
# lista_terrazas will be greater than the found one in lista_locales
# and will wrongly include it in the result list:
if (terraza[0] == sorted_lista_locales[index][0]):
index += 1
# We open the output file to store the data retrieved:
csvFileObj = open("NoTerrazas.csv", "w")
csvWriter = csv.writer(csvFileObj, delimiter=";")
# For each row in the final list, we write it to the CSV output file:
for row in no_terrazas:
csvWriter.writerow(row)
print ("The file has been successfully created.")
# We close the opened CSV file:
csvFileObj.close()
# NOTE: we have realized that a few error occurs when comparing the total
# amount of entries in NoTerrazas.csv with the entries in Locales.csv and
# Terrazas.csv, as it is expected to have NoTerrazas = Locales - Terrazas
# However, as the exercice statement indicates that some inconsistency
# may exist in the files, we assume the error is due to this unsteadiness.
except IOError:
print("The file does not exist.")
| [
"operator.itemgetter",
"csv.writer",
"csv.reader"
] | [((371, 417), 'csv.reader', 'csv.reader', (['csv_archivo_locales'], {'delimiter': '""";"""'}), "(csv_archivo_locales, delimiter=';')\n", (381, 417), False, 'import csv\n'), ((523, 570), 'csv.reader', 'csv.reader', (['csv_archivo_terrazas'], {'delimiter': '""";"""'}), "(csv_archivo_terrazas, delimiter=';')\n", (533, 570), False, 'import csv\n'), ((2828, 2865), 'csv.writer', 'csv.writer', (['csvFileObj'], {'delimiter': '""";"""'}), "(csvFileObj, delimiter=';')\n", (2838, 2865), False, 'import csv\n'), ((1553, 1575), 'operator.itemgetter', 'operator.itemgetter', (['(0)'], {}), '(0)\n', (1572, 1575), False, 'import operator\n'), ((1653, 1675), 'operator.itemgetter', 'operator.itemgetter', (['(0)'], {}), '(0)\n', (1672, 1675), False, 'import operator\n')] |
#!/usr/bin/env python
import os
if not os.path.isfile("/proc/sys/fs/binfmt_misc/WSLInterop"):
# windows
LOGFILE = os.environ['LOCALAPPDATA'] + "\\AGS\\New World\\Game.log"
else:
# wsl
LOGFILE = os.popen('cmd.exe /c "echo %LocalAppData%"').read().strip() + "\\AGS\\New World\\Game.log"
LOGFILE = os.popen("wslpath '{}'".format(LOGFILE)).read().strip()
# INSTRUCTIONS
# 1. Uncomment the names of the plugins you wish to use
# 2. Configure the relevant variables below
# Need help? Check out the wiki for a sample config file - https://github.com/Rawr0/nwwatch/wiki
###############################
## PLUGIN SETTINGS ##
###############################
# It is recommended you leave these two as default, no testing has been performed with different values
NW_FILE_CHECK_FREQUENCY = 60 # How frequently the script checks your queue position. Default: 60
NW_ALERT_AT_QUEUE_POSITION = 25 # Send a notification when you are at this position in queue (or less). It is recommended this be 25 or greater.
PLUGINS_ENABLED = { # Remove the hash (#) before a line to enable it
#"NotifyByPushover",
#"NotifyBySMS",
#"NotifyByDiscord"
}
# Want to test your notifications? Enable it above and then set "TEST_MODE" to True. A notification will be triggered as soon as the script starts
TEST_MODE = True
###############################
## PLUGIN SPECIFIC VARIABLES ##
###############################
# Plugin: NotifyByPushover
PUSHOVER_TOKEN = "<VALUEHERE>"
PUSHOVER_USER = "<VALUEHERE>"
PUSHOVER_DEVICE = "<VALUEHERE>"
PUSHOVER_HIGHPRIORITY = True
# Plugin: NotifyBySMS (sinch.com) (Note: Paid service)
# Note: The SMS provider is currently going through a rebranding and, as a result, the APIs below could stop working.
# If any issues are encountered, please raise an issue on Github
SMS_PLAN_ID = ""
SMS_TOKEN = ""
SMS_SOURCE = "New World" # Source phone number, including country code (eg. +15551231234) or alphanumeric string
SMS_TARGET = "" # Destination phone number, including country code (eg. +15555551234 or +61411000000)
# Plugin: NotifyByDiscord
# In a Discord server you own/manage, navigate to Server Settings -> Integrations -> Webooks -> New Webhook. Click "Copy Webhook URL" and paste it below
DISCORD_WEBHOOKURL = "https://discord.com/api/webhooks/xxxxxxxxxxxxxx/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
DISCORD_TTS = False # Use Discord "Text to speech" to speak the announcement
###############################
## INTERNAL VARIABLES ##
###############################
# Don't change these unless you're having issues
NW_LOGFILE_CHECK_LENGTH = 100 # Number of lines to monitor in the logfile
NW_SEARCH_REGEX = ".*Waiting in login queue.*Position \((.*)\)$"
NW_SEARCH_REGEX_INDEX = 1
| [
"os.path.isfile",
"os.popen"
] | [((40, 93), 'os.path.isfile', 'os.path.isfile', (['"""/proc/sys/fs/binfmt_misc/WSLInterop"""'], {}), "('/proc/sys/fs/binfmt_misc/WSLInterop')\n", (54, 93), False, 'import os\n'), ((200, 244), 'os.popen', 'os.popen', (['"""cmd.exe /c "echo %LocalAppData%\\""""'], {}), '(\'cmd.exe /c "echo %LocalAppData%"\')\n', (208, 244), False, 'import os\n')] |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import argparse
from azure.cli.command_modules.monitor.util import get_aggregation_map, get_operator_map
def period_type(value):
import re
def _get_substring(indices):
if indices == tuple([-1, -1]):
return ''
return value[indices[0]: indices[1]]
regex = r'(p)?(\d+y)?(\d+m)?(\d+d)?(t)?(\d+h)?(\d+m)?(\d+s)?'
match = re.match(regex, value.lower())
match_len = match.regs[0]
if match_len != tuple([0, len(value)]):
raise ValueError
# simply return value if a valid ISO8601 string is supplied
if match.regs[1] != tuple([-1, -1]) and match.regs[5] != tuple([-1, -1]):
return value
# if shorthand is used, only support days, minutes, hours, seconds
# ensure M is interpretted as minutes
days = _get_substring(match.regs[4])
minutes = _get_substring(match.regs[6]) or _get_substring(match.regs[3])
hours = _get_substring(match.regs[7])
seconds = _get_substring(match.regs[8])
return 'P{}T{}{}{}'.format(days, minutes, hours, seconds).upper()
# pylint: disable=too-few-public-methods
class ConditionAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
from azure.mgmt.monitor.models import ThresholdRuleCondition, RuleMetricDataSource
# get default description if not specified
if namespace.description is None:
namespace.description = ' '.join(values)
if len(values) == 1:
# workaround because CMD.exe eats > character... Allows condition to be
# specified as a quoted expression
values = values[0].split(' ')
if len(values) < 5:
from knack.util import CLIError
raise CLIError('usage error: --condition METRIC {>,>=,<,<=} THRESHOLD {avg,min,max,total,last} DURATION')
metric_name = ' '.join(values[:-4])
operator = get_operator_map()[values[-4]]
threshold = int(values[-3])
aggregation = get_aggregation_map()[values[-2].lower()]
window = period_type(values[-1])
metric = RuleMetricDataSource(None, metric_name) # target URI will be filled in later
condition = ThresholdRuleCondition(operator, threshold, metric, window, aggregation)
namespace.condition = condition
# pylint: disable=protected-access
class AlertAddAction(argparse._AppendAction):
def __call__(self, parser, namespace, values, option_string=None):
action = self.get_action(values, option_string)
super(AlertAddAction, self).__call__(parser, namespace, action, option_string)
def get_action(self, values, option_string): # pylint: disable=no-self-use
from knack.util import CLIError
_type = values[0].lower()
if _type == 'email':
from azure.mgmt.monitor.models import RuleEmailAction
return RuleEmailAction(custom_emails=values[1:])
elif _type == 'webhook':
from azure.mgmt.monitor.models import RuleWebhookAction
uri = values[1]
try:
properties = dict(x.split('=', 1) for x in values[2:])
except ValueError:
raise CLIError('usage error: {} webhook URI [KEY=VALUE ...]'.format(option_string))
return RuleWebhookAction(uri, properties)
raise CLIError('usage error: {} TYPE KEY [ARGS]'.format(option_string))
class AlertRemoveAction(argparse._AppendAction):
def __call__(self, parser, namespace, values, option_string=None):
action = self.get_action(values, option_string)
super(AlertRemoveAction, self).__call__(parser, namespace, action, option_string)
def get_action(self, values, option_string): # pylint: disable=no-self-use
# TYPE is artificially enforced to create consistency with the --add-action argument
# but it could be enhanced to do additional validation in the future.
from knack.util import CLIError
_type = values[0].lower()
if _type not in ['email', 'webhook']:
raise CLIError('usage error: {} TYPE KEY [KEY ...]'.format(option_string))
return values[1:]
class MultiObjectsDeserializeAction(argparse._AppendAction): # pylint: disable=protected-access
def __call__(self, parser, namespace, values, option_string=None):
type_name = values[0]
type_properties = values[1:]
try:
super(MultiObjectsDeserializeAction, self).__call__(parser,
namespace,
self.get_deserializer(type_name)(*type_properties),
option_string)
except KeyError:
raise ValueError('usage error: the type "{}" is not recognizable.'.format(type_name))
except TypeError:
raise ValueError(
'usage error: Failed to parse "{}" as object of type "{}".'.format(' '.join(values), type_name))
except ValueError as ex:
raise ValueError(
'usage error: Failed to parse "{}" as object of type "{}". {}'.format(
' '.join(values), type_name, str(ex)))
def get_deserializer(self, type_name):
raise NotImplementedError()
class ActionGroupReceiverParameterAction(MultiObjectsDeserializeAction):
def get_deserializer(self, type_name):
from azure.mgmt.monitor.models import EmailReceiver, SmsReceiver, WebhookReceiver
return {'email': EmailReceiver, 'sms': SmsReceiver, 'webhook': WebhookReceiver}[type_name]
| [
"azure.mgmt.monitor.models.RuleMetricDataSource",
"azure.cli.command_modules.monitor.util.get_operator_map",
"azure.cli.command_modules.monitor.util.get_aggregation_map",
"azure.mgmt.monitor.models.RuleWebhookAction",
"azure.mgmt.monitor.models.ThresholdRuleCondition",
"knack.util.CLIError",
"azure.mgmt... | [((2428, 2467), 'azure.mgmt.monitor.models.RuleMetricDataSource', 'RuleMetricDataSource', (['None', 'metric_name'], {}), '(None, metric_name)\n', (2448, 2467), False, 'from azure.mgmt.monitor.models import ThresholdRuleCondition, RuleMetricDataSource\n'), ((2526, 2598), 'azure.mgmt.monitor.models.ThresholdRuleCondition', 'ThresholdRuleCondition', (['operator', 'threshold', 'metric', 'window', 'aggregation'], {}), '(operator, threshold, metric, window, aggregation)\n', (2548, 2598), False, 'from azure.mgmt.monitor.models import ThresholdRuleCondition, RuleMetricDataSource\n'), ((2076, 2185), 'knack.util.CLIError', 'CLIError', (['"""usage error: --condition METRIC {>,>=,<,<=} THRESHOLD {avg,min,max,total,last} DURATION"""'], {}), "(\n 'usage error: --condition METRIC {>,>=,<,<=} THRESHOLD {avg,min,max,total,last} DURATION'\n )\n", (2084, 2185), False, 'from knack.util import CLIError\n'), ((2239, 2257), 'azure.cli.command_modules.monitor.util.get_operator_map', 'get_operator_map', ([], {}), '()\n', (2255, 2257), False, 'from azure.cli.command_modules.monitor.util import get_aggregation_map, get_operator_map\n'), ((2328, 2349), 'azure.cli.command_modules.monitor.util.get_aggregation_map', 'get_aggregation_map', ([], {}), '()\n', (2347, 2349), False, 'from azure.cli.command_modules.monitor.util import get_aggregation_map, get_operator_map\n'), ((3205, 3246), 'azure.mgmt.monitor.models.RuleEmailAction', 'RuleEmailAction', ([], {'custom_emails': 'values[1:]'}), '(custom_emails=values[1:])\n', (3220, 3246), False, 'from azure.mgmt.monitor.models import RuleEmailAction\n'), ((3614, 3648), 'azure.mgmt.monitor.models.RuleWebhookAction', 'RuleWebhookAction', (['uri', 'properties'], {}), '(uri, properties)\n', (3631, 3648), False, 'from azure.mgmt.monitor.models import RuleWebhookAction\n')] |
# !/usr/bin/env python
# -*- coding = utf-8 -*-
# @Author:wanghui
# @Time:
# @File:getStartActivityConfig.py
import configparser
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
class GetStartActivityConfig(object):
def __init__(self):
self.config = configparser.ConfigParser()
self.read_config()
def read_config(self):
"""读取配置文件"""
try:
self.config.read(os.path.join(BASE_DIR, ''))
except Exception as e:
print(f'配置文件不正确: ', e)
return None
def get_set_up(self):
"""获取setUp的信息"""
return self.config.items('SETUP')
def get_tear_down(self):
"""获取setUp的信息"""
return self.config.items('TEARDOWN')
def get_sections(self):
""" 获取所有的section """
return self.config.sections()
def get_option(self, section):
"""获取当前section下的所有options"""
return self.config.options(section)
def get_section_items(self, section):
"""获取当前section下的所有键值对"""
return self.config.items(section)
def get_section_password(self, section, option):
"""获取当前option对应的值"""
return self.config.get(section, option)
def check_config(self, *arg):
"""检查配置文件信息是否正确"""
try:
self.read_config()
if len(arg) == 1: # 判断是否有section
return self.config.has_section(arg[0])
elif len(arg) == 3: # 判断section下 option是否正确
if self.config[arg[0]][arg[1]] == arg[2]:
return True
else:
return False
else:
return False
except Exception as e:
return False
def main():
config = GetStartActivityConfig()
print(config.get_set_up())
# print(config.get_section_items('360'))
# print(config.get_section_password('<PASSWORD>', 'platformVersion'))
if __name__ == '__main__':
main()
| [
"os.path.abspath",
"os.path.join",
"configparser.ConfigParser"
] | [((169, 194), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (184, 194), False, 'import os\n'), ((283, 310), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (308, 310), False, 'import configparser\n'), ((429, 455), 'os.path.join', 'os.path.join', (['BASE_DIR', '""""""'], {}), "(BASE_DIR, '')\n", (441, 455), False, 'import os\n')] |
"""Decorators used for adding functionality to the library."""
from deserialize.exceptions import NoDefaultSpecifiedException
def default(key_name, default_value):
"""A decorator function for mapping default values to key names."""
def store_defaults_map(class_reference):
"""Store the defaults map."""
if not hasattr(class_reference, "__deserialize_defaults_map__"):
setattr(class_reference, "__deserialize_defaults_map__", {})
class_reference.__deserialize_defaults_map__[key_name] = default_value
return class_reference
return store_defaults_map
def _has_default(class_reference, key_name):
"""Returns True if this key has a default, False otherwise.
:returns: True if this key has a default, False otherwise.
"""
if not hasattr(class_reference, "__deserialize_defaults_map__"):
return False
return key_name in class_reference.__deserialize_defaults_map__
def _get_default(class_reference, key_name):
"""Get the default value for the given class and key name.
:raises NoDefaultSpecifiedException: If a default hasn't been specified
"""
if not hasattr(class_reference, "__deserialize_defaults_map__"):
raise NoDefaultSpecifiedException()
if key_name in class_reference.__deserialize_defaults_map__:
return class_reference.__deserialize_defaults_map__[key_name]
raise NoDefaultSpecifiedException()
| [
"deserialize.exceptions.NoDefaultSpecifiedException"
] | [((1412, 1441), 'deserialize.exceptions.NoDefaultSpecifiedException', 'NoDefaultSpecifiedException', ([], {}), '()\n', (1439, 1441), False, 'from deserialize.exceptions import NoDefaultSpecifiedException\n'), ((1235, 1264), 'deserialize.exceptions.NoDefaultSpecifiedException', 'NoDefaultSpecifiedException', ([], {}), '()\n', (1262, 1264), False, 'from deserialize.exceptions import NoDefaultSpecifiedException\n')] |
import configparser
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
config = configparser.ConfigParser()
config.read('alembic.ini')
connection_url = config['alembic']['sqlalchemy.url']
Engine = create_engine(connection_url, connect_args={'check_same_thread': False})
Session = sessionmaker(bind=Engine)
| [
"sqlalchemy.orm.sessionmaker",
"sqlalchemy.create_engine",
"configparser.ConfigParser"
] | [((107, 134), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (132, 134), False, 'import configparser\n'), ((225, 297), 'sqlalchemy.create_engine', 'create_engine', (['connection_url'], {'connect_args': "{'check_same_thread': False}"}), "(connection_url, connect_args={'check_same_thread': False})\n", (238, 297), False, 'from sqlalchemy import create_engine\n'), ((308, 333), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', ([], {'bind': 'Engine'}), '(bind=Engine)\n', (320, 333), False, 'from sqlalchemy.orm import sessionmaker\n')] |
# Copyright 2014-2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#pylint: disable=E1101,W0201
import os
import re
from wlauto import Workload, Parameter, Executable
from wlauto.common.resources import File
from wlauto.exceptions import ConfigError
regex_map = {
"Richards": (re.compile(r'Richards: (\d+.*)')),
"DeltaBlue": (re.compile(r'DeltaBlue: (\d+.*)')),
"Crypto": (re.compile(r'Crypto: (\d+.*)')),
"RayTrace": (re.compile(r'RayTrace: (\d+.*)')),
"EarleyBoyer": (re.compile(r'EarleyBoyer: (\d+.*)')),
"RegExp": (re.compile(r'RegExp: (\d+.*)')),
"Splay": (re.compile(r'Splay: (\d+.*)')),
"SplayLatency": (re.compile(r'SplayLatency: (\d+.*)')),
"NavierStokes": (re.compile(r'NavierStokes: (\d+.*)')),
"PdfJS": (re.compile(r'PdfJS: (\d+.*)')),
"Mandreel": (re.compile(r'Mandreel: (\d+.*)')),
"MandreelLatency": (re.compile(r'MandreelLatency: (\d+.*)')),
"Gameboy": (re.compile(r'Gameboy: (\d+.*)')),
"CodeLoad": (re.compile(r'CodeLoad: (\d+.*)')),
"Box2D": (re.compile(r'Box2D: (\d+.*)')),
"zlib": (re.compile(r'zlib: (\d+.*)')),
"Score": (re.compile(r'Score .*: (\d+.*)'))
}
class Octaned8(Workload):
name = 'octaned8'
description = """
Runs the Octane d8 benchmark.
This workload runs d8 binaries built from source and placed in the dependencies folder along
with test assets from https://github.com/chromium/octane which also need to be placed in an
assets folder within the dependencies folder.
Original source from::
https://github.com/v8/v8/wiki/D8%20on%20Android
"""
parameters = [
Parameter('run_timeout', kind=int, default=180,
description='Timeout, in seconds, for the test execution.'),
]
supported_platforms = ['android']
executables = ['d8', 'natives_blob.bin', 'snapshot_blob.bin']
def initialize(self, context): # pylint: disable=no-self-use
assets_dir = self.device.path.join(self.device.working_directory, 'assets')
self.device.execute('mkdir -p {}'.format(assets_dir))
assets_tar = 'octaned8-assets.tar'
fpath = context.resolver.get(File(self, assets_tar))
self.device.push_file(fpath, assets_dir, timeout=300)
self.command = 'cd {}; {} busybox tar -x -f {}'.format(assets_dir, self.device.busybox, assets_tar)
self.output = self.device.execute(self.command, timeout=self.run_timeout)
for f in self.executables:
binFile = context.resolver.get(Executable(self, self.device.abi, f))
self.device_exe = self.device.install(binFile)
def setup(self, context):
self.logger.info('Copying d8 binaries to device')
assets_dir = self.device.path.join(self.device.working_directory, 'assets')
device_file = self.device.path.join(self.device.working_directory, 'octaned8.output')
self.command = 'cd {}; {}/d8 ./run.js >> {} 2>&1'.format(assets_dir, self.device.binaries_directory, device_file)
def run(self, context):
self.logger.info('Starting d8 tests')
self.output = self.device.execute(self.command, timeout=self.run_timeout)
def update_result(self, context):
host_file = os.path.join(context.output_directory, 'octaned8.output')
device_file = self.device.path.join(self.device.working_directory, 'octaned8.output')
self.device.pull_file(device_file, host_file)
with open(os.path.join(host_file)) as octaned8_file:
for line in octaned8_file:
for label, regex in regex_map.iteritems():
match = regex.search(line)
if match:
context.result.add_metric(label, float(match.group(1)))
self.device.execute('rm {}'.format(device_file))
def finalize(self, context):
for f in self.executables:
self.device.uninstall_executable(f)
self.device.execute('rm {}'.format(self.device.path.join(self.device.working_directory, f)))
assets_dir = self.device.path.join(self.device.working_directory, 'assets')
self.device.execute('rm -rf {}'.format(assets_dir))
| [
"wlauto.common.resources.File",
"wlauto.Executable",
"re.compile",
"os.path.join",
"wlauto.Parameter"
] | [((804, 836), 're.compile', 're.compile', (['"""Richards: (\\\\d+.*)"""'], {}), "('Richards: (\\\\d+.*)')\n", (814, 836), False, 'import re\n'), ((857, 890), 're.compile', 're.compile', (['"""DeltaBlue: (\\\\d+.*)"""'], {}), "('DeltaBlue: (\\\\d+.*)')\n", (867, 890), False, 'import re\n'), ((908, 938), 're.compile', 're.compile', (['"""Crypto: (\\\\d+.*)"""'], {}), "('Crypto: (\\\\d+.*)')\n", (918, 938), False, 'import re\n'), ((958, 990), 're.compile', 're.compile', (['"""RayTrace: (\\\\d+.*)"""'], {}), "('RayTrace: (\\\\d+.*)')\n", (968, 990), False, 'import re\n'), ((1013, 1048), 're.compile', 're.compile', (['"""EarleyBoyer: (\\\\d+.*)"""'], {}), "('EarleyBoyer: (\\\\d+.*)')\n", (1023, 1048), False, 'import re\n'), ((1066, 1096), 're.compile', 're.compile', (['"""RegExp: (\\\\d+.*)"""'], {}), "('RegExp: (\\\\d+.*)')\n", (1076, 1096), False, 'import re\n'), ((1113, 1142), 're.compile', 're.compile', (['"""Splay: (\\\\d+.*)"""'], {}), "('Splay: (\\\\d+.*)')\n", (1123, 1142), False, 'import re\n'), ((1166, 1202), 're.compile', 're.compile', (['"""SplayLatency: (\\\\d+.*)"""'], {}), "('SplayLatency: (\\\\d+.*)')\n", (1176, 1202), False, 'import re\n'), ((1226, 1262), 're.compile', 're.compile', (['"""NavierStokes: (\\\\d+.*)"""'], {}), "('NavierStokes: (\\\\d+.*)')\n", (1236, 1262), False, 'import re\n'), ((1279, 1308), 're.compile', 're.compile', (['"""PdfJS: (\\\\d+.*)"""'], {}), "('PdfJS: (\\\\d+.*)')\n", (1289, 1308), False, 'import re\n'), ((1328, 1360), 're.compile', 're.compile', (['"""Mandreel: (\\\\d+.*)"""'], {}), "('Mandreel: (\\\\d+.*)')\n", (1338, 1360), False, 'import re\n'), ((1387, 1426), 're.compile', 're.compile', (['"""MandreelLatency: (\\\\d+.*)"""'], {}), "('MandreelLatency: (\\\\d+.*)')\n", (1397, 1426), False, 'import re\n'), ((1445, 1476), 're.compile', 're.compile', (['"""Gameboy: (\\\\d+.*)"""'], {}), "('Gameboy: (\\\\d+.*)')\n", (1455, 1476), False, 'import re\n'), ((1496, 1528), 're.compile', 're.compile', (['"""CodeLoad: 
(\\\\d+.*)"""'], {}), "('CodeLoad: (\\\\d+.*)')\n", (1506, 1528), False, 'import re\n'), ((1545, 1574), 're.compile', 're.compile', (['"""Box2D: (\\\\d+.*)"""'], {}), "('Box2D: (\\\\d+.*)')\n", (1555, 1574), False, 'import re\n'), ((1590, 1618), 're.compile', 're.compile', (['"""zlib: (\\\\d+.*)"""'], {}), "('zlib: (\\\\d+.*)')\n", (1600, 1618), False, 'import re\n'), ((1635, 1667), 're.compile', 're.compile', (['"""Score .*: (\\\\d+.*)"""'], {}), "('Score .*: (\\\\d+.*)')\n", (1645, 1667), False, 'import re\n'), ((2144, 2256), 'wlauto.Parameter', 'Parameter', (['"""run_timeout"""'], {'kind': 'int', 'default': '(180)', 'description': '"""Timeout, in seconds, for the test execution."""'}), "('run_timeout', kind=int, default=180, description=\n 'Timeout, in seconds, for the test execution.')\n", (2153, 2256), False, 'from wlauto import Workload, Parameter, Executable\n'), ((3734, 3791), 'os.path.join', 'os.path.join', (['context.output_directory', '"""octaned8.output"""'], {}), "(context.output_directory, 'octaned8.output')\n", (3746, 3791), False, 'import os\n'), ((2677, 2699), 'wlauto.common.resources.File', 'File', (['self', 'assets_tar'], {}), '(self, assets_tar)\n', (2681, 2699), False, 'from wlauto.common.resources import File\n'), ((3032, 3068), 'wlauto.Executable', 'Executable', (['self', 'self.device.abi', 'f'], {}), '(self, self.device.abi, f)\n', (3042, 3068), False, 'from wlauto import Workload, Parameter, Executable\n'), ((3959, 3982), 'os.path.join', 'os.path.join', (['host_file'], {}), '(host_file)\n', (3971, 3982), False, 'import os\n')] |
'''
n-gram은 이와 같은 확률적 언어 모델의 대표적인 것으로서,
n개 단어의 연쇄를 확률적으로 표현해 두면 실제로
발성된 문장의 기록을 계산할 수 있다.
'''
# Step 1 : Bag of Words
from nltk.corpus import reuters
from collections import Counter, defaultdict
counts = Counter(reuters.words())
total_count = len(reuters.words())
# 공통적으로 가장 많이 나타나는 20개의 단어
print(counts.most_common(n=20))
# 빈도 비율 계산
for word in counts:
counts[word] /= float(total_count)
# 빈도 비율의 총합 계산
print(sum(counts.values()))
import random
# 100개의 단어 생성
text = []
for _ in range(100) :
r = random.random()
accumulator = 0
for word, freq in counts.items():
accumulator += freq
if accumulator >= r:
text.append(word)
break
print(" ".join(text))
# 텍스트의 확률 계산
from operator import mul
from functools import reduce
print(reduce(mul, [counts[w] for w in text], 1.0))
# Step 2 : Bi-gram & Tri-gram
from nltk import bigrams, trigrams
first_sentence = reuters.sents()[0]
print(first_sentence)
### Bi-gram 결과
print("\n----- Bi-gram 결과 확 확인인 -----")
print(list(bigrams(first_sentence)))
### Bi-gram 결과
print("\n----- 패딩된 Bi-gram 결과 확인 -----")
print((list(bigrams(first_sentence, pad_left=True, pad_right=True))))
### Bi-gram 결과
print("\n----- Tri-gram 결과 확인 -----")
print(list(trigrams(first_sentence)))
### Bi-gram 결과
print("\n----- 패딩된 Tri-gram 결과 확인 -----")
print((list(trigrams(first_sentence, pad_left=True, pad_right=True))))
### Reuters 데이터(말뭉치)를 이용한 Tri-gram 모델 생성
print("\n----- Tri-gram 모델 생성 -----")
model = defaultdict(lambda : defaultdict(lambda : 0))
for sentence in reuters.sents() :
print("\n문장 : ", sentence)
for w1, w2, w3 in trigrams(sentence, pad_right=True, pad_left=True):
model[(w1,w2)][w3] += 1
# 'what the' 다음에 'economists'가 나오는 것이 2개 존재
print(model['what','the']['economists'])
for w1_w2 in model:
total_count = float(sum(model[w1_w2].values()))
for w3 in model[w1_w2]:
model[w1_w2][w3] /= total_count
print(model['what', 'the']['economists'])
### Language Model을 이용해 텍스트 생성하기
print("\n----- 언어 모델을 이용해 텍스트 생성하기 -----")
import random
text = [None, None]
prob = 1.0
sentence_finished = False
while not sentence_finished:
r = random.random()
accumulator = .0
for word in model[tuple(text[-2:])].keys():
accumulator += model[tuple(text[-2:])][word]
if accumulator >= r:
prob *= model[tuple(text[-2:])][word]
text.append(word)
print("aa : " ,text)
break
if text[-2:] == [None, None]:
sentence_finished = True
print("텍스트의 확률 : ", prob)
print(" ".join([t for t in text if t])) | [
"nltk.corpus.reuters.sents",
"nltk.corpus.reuters.words",
"functools.reduce",
"collections.defaultdict",
"nltk.bigrams",
"random.random",
"nltk.trigrams"
] | [((1550, 1565), 'nltk.corpus.reuters.sents', 'reuters.sents', ([], {}), '()\n', (1563, 1565), False, 'from nltk.corpus import reuters\n'), ((217, 232), 'nltk.corpus.reuters.words', 'reuters.words', ([], {}), '()\n', (230, 232), False, 'from nltk.corpus import reuters\n'), ((252, 267), 'nltk.corpus.reuters.words', 'reuters.words', ([], {}), '()\n', (265, 267), False, 'from nltk.corpus import reuters\n'), ((514, 529), 'random.random', 'random.random', ([], {}), '()\n', (527, 529), False, 'import random\n'), ((792, 835), 'functools.reduce', 'reduce', (['mul', '[counts[w] for w in text]', '(1.0)'], {}), '(mul, [counts[w] for w in text], 1.0)\n', (798, 835), False, 'from functools import reduce\n'), ((920, 935), 'nltk.corpus.reuters.sents', 'reuters.sents', ([], {}), '()\n', (933, 935), False, 'from nltk.corpus import reuters\n'), ((1621, 1670), 'nltk.trigrams', 'trigrams', (['sentence'], {'pad_right': '(True)', 'pad_left': '(True)'}), '(sentence, pad_right=True, pad_left=True)\n', (1629, 1670), False, 'from nltk import bigrams, trigrams\n'), ((2163, 2178), 'random.random', 'random.random', ([], {}), '()\n', (2176, 2178), False, 'import random\n'), ((1028, 1051), 'nltk.bigrams', 'bigrams', (['first_sentence'], {}), '(first_sentence)\n', (1035, 1051), False, 'from nltk import bigrams, trigrams\n'), ((1122, 1176), 'nltk.bigrams', 'bigrams', (['first_sentence'], {'pad_left': '(True)', 'pad_right': '(True)'}), '(first_sentence, pad_left=True, pad_right=True)\n', (1129, 1176), False, 'from nltk import bigrams, trigrams\n'), ((1244, 1268), 'nltk.trigrams', 'trigrams', (['first_sentence'], {}), '(first_sentence)\n', (1252, 1268), False, 'from nltk import bigrams, trigrams\n'), ((1340, 1395), 'nltk.trigrams', 'trigrams', (['first_sentence'], {'pad_left': '(True)', 'pad_right': '(True)'}), '(first_sentence, pad_left=True, pad_right=True)\n', (1348, 1395), False, 'from nltk import bigrams, trigrams\n'), ((1508, 1531), 'collections.defaultdict', 'defaultdict', (['(lambda : 
0)'], {}), '(lambda : 0)\n', (1519, 1531), False, 'from collections import Counter, defaultdict\n')] |
import os
import requests
import urllib3.util.retry
import logging
import subprocess
import re
import pprint
from datetime import datetime, timedelta
import pygit2
from github import Github
from github.GithubException import UnknownObjectException
# Create logger with logging level set to all
LOGGER = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def get_project_and_feedstock_repos(github_client, repo_name):
    """Look up a project repository and its conda-forge style feedstock.

    The feedstock is assumed to live at ``<repo_name>-feedstock``.

    Parameters
    ----------
    github_client : github.MainClass.Github
        Authenticated PyGithub client used for both lookups.
    repo_name : str
        Full name (``owner/name``) of the project repository.

    Returns
    -------
    project_repo : github.Repository.Repository or None
        The project repository.
    feedstock_repo : github.Repository.Repository or None
        The feedstock repository, or ``None`` if it does not exist.
    feedstock_repo_name : str or None
        Full name of the feedstock repository, or ``None`` on failure.
    """
    feedstock_full_name = f"{repo_name}-feedstock"
    project_repo = github_client.get_repo(repo_name)
    try:
        feedstock_repo = github_client.get_repo(feedstock_full_name)
    except UnknownObjectException:
        # A missing feedstock is a hard error for the dispatch flow; signal
        # it with a (None, None, None) triple rather than raising.
        LOGGER.error(
            "repository_dispatch event: feedstock repository of '%s' not found" % repo_name)
        return None, None, None
    return project_repo, feedstock_repo, feedstock_full_name
def get_project_version(repo_dir, VERSION_PEP440=re.compile(r'(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)(\.(?P<release>[a-z]+)(?P<dev>\d+))?')):
    """Read and normalize the project version from the ``version`` file.

    Parameters
    ----------
    repo_dir : str
        Path to a checkout containing a ``version`` file in its root.
    VERSION_PEP440 : re.Pattern
        Compiled regex matching a PEP 440-like version; the default is
        evaluated once at definition time (safe: a compiled pattern is
        immutable).

    Returns
    -------
    str or None
        The normalized version string, or ``None`` if the file contents
        do not match the expected pattern.
    """
    version_path = os.path.join(repo_dir, "version")
    with open(version_path, 'r') as fp:
        version = fp.read().rstrip("\n")
    match = VERSION_PEP440.match(version)
    if not match:
        LOGGER.error(
            "repository_dispatch event: could not parse version")
        return None
    parts = match.groupdict()
    # Rebuild the version from its components so any stray suffix the regex
    # ignored is dropped; pre-releases keep their ".<release><dev>" tail.
    if parts["release"]:
        version = "{major}.{minor}.{patch}.{release}{dev}".format(**parts)
    else:
        version = "{major}.{minor}.{patch}".format(**parts)
    LOGGER.info("version: %s", version)
    LOGGER.info("major: %s", parts["major"])
    LOGGER.info("minor: %s", parts["minor"])
    LOGGER.info("patch: %s", parts["patch"])
    LOGGER.info("release: %s", parts["release"])
    LOGGER.info("dev: %s", parts["dev"])
    return version
def get_commit_tags(repo, commit_hash, supported_tags=["ci", "rerender"]):
    """Extract a ``[tag]`` marker from a commit message and rewrite the message.

    Parameters
    ----------
    repo : github.Repository.Repository
        Repository to fetch the commit from.
    commit_hash : str
        SHA of the commit to inspect.
    supported_tags : list[str]
        Tags that are recognized (never mutated, so the mutable default is
        harmless here).

    Returns
    -------
    tags : dict[str, bool]
        Maps each supported tag to whether it was the one found.
    commit_message : str or None
        Message with the bracketed tag removed, whitespace collapsed, and
        ``TAG:`` prepended; ``None`` when no supported tag was found.
    """
    raw_message = repo.get_commit(sha=commit_hash).raw_data["commit"]["message"]
    # Default result when nothing usable is found.
    no_tags = {candidate: False for candidate in supported_tags}
    bracket_match = re.search(r'\[(.*?)\]', raw_message)
    if not bracket_match:
        LOGGER.info("no tag detected")
        return no_tags, None
    tag = bracket_match.group(1)
    LOGGER.info("tag: %s", tag)
    if tag.lower() not in supported_tags:
        LOGGER.info(
            "no supported tag detected (was '%s', supported are %s" % (tag, supported_tags)
        )
        return no_tags, None
    # Strip the bracketed tag, collapse runs of whitespace, and lead with
    # the upper-cased tag instead.
    stripped = raw_message.replace(f'[{tag}]', "")
    stripped = re.sub(r'\s+', " ", stripped).strip()
    cleaned_message = "%s: %s" % (tag.upper(), stripped)
    found = {candidate: candidate == tag.lower() for candidate in supported_tags}
    return found, cleaned_message
def was_branch_last_commit_recent(repo, branch_name, time_treshold=timedelta(hours=24)):
    """
    Check if the last commit of a branch is recent.

    Parameters
    ----------
    repo : github.Repository.Repository
        The repository.
    branch_name : str
        The branch name.
    time_treshold : datetime.timedelta
        The time threshold under which the last commit will be considered
        recent.  (Parameter name keeps the historical "treshold" spelling so
        keyword callers are not broken.)

    Returns
    -------
    bool
        True if the last commit is recent, False otherwise.
    """
    # Get info of latest commit for given branch
    branch = repo.get_branch(branch_name)
    date_string = branch.commit.raw_data["commit"]["author"]["date"]
    # GitHub timestamps end in "Z", i.e. they are UTC; strptime yields a
    # *naive* datetime in UTC.
    last_commit_time = datetime.strptime(date_string, "%Y-%m-%dT%H:%M:%SZ")
    # BUGFIX: compare against naive UTC "now" as well.  The previous code
    # used datetime.now() (local time), which shifted the effective window
    # by the machine's UTC offset.
    if last_commit_time > datetime.utcnow() - time_treshold:
        LOGGER.info(
            "since is within specified time, nightly release will follow")
        return True
    return False
def push_all_to_github(repo, branch_name, directory, commit_message):
    """
    Commit every change under ``directory`` and force-push it to GitHub.

    Parameters
    ----------
    repo : github.Repository.Repository
        The repository (used only to build the authenticated push URL).
    branch_name : str
        Branch whose tags are pushed.
    directory : str
        Working tree to stage, commit and push.
    commit_message : str
        Message for the new commit.
    """
    # Stage everything, then record the commit with the given message.
    for git_command in (["git", "add", "."],
                        ["git", "commit", "-m", commit_message]):
        subprocess.run(git_command, cwd=directory)
    # Build an authenticated remote URL from the CI token.
    repo_auth_url = "https://%s@github.com/%s.git" % (os.environ["GH_TOKEN"], repo)
    # Force-push all branches, then push the branch's tags.
    subprocess.run(["git", "push", "--all", "-f", repo_auth_url], cwd=directory)
    subprocess.run(["git", "push", repo_auth_url, branch_name, "--tags"], cwd=directory)
def create_api_sessions(github_token):
    """Create API sessions for GitHub.

    Parameters
    ----------
    github_token : str
        The GitHub access token.

    Returns
    -------
    session : requests.Session
        A `requests` session w/ the beta `check_run` API configured.
    gh : github.MainClass.Github
        A `Github` object from the PyGithub package.
    """
    # Adapted (with many edits) from
    # https://alexwlchan.net/2019/03/creating-a-github-action-to-auto-merge-pull-requests/
    session = requests.Session()
    accept_types = [
        "application/vnd.github.v3+json",
        # special beta api for the check_suites endpoint
        "application/vnd.github.antiope-preview+json",
    ]
    session.headers = {
        "Accept": "; ".join(accept_types),
        "Authorization": f"token {github_token}",
        "User-Agent": f"GitHub Actions script in {__file__}"
    }

    def raise_for_status(resp, *args, **kwargs):
        # Print the response body before re-raising, so HTTP failures are debuggable.
        try:
            resp.raise_for_status()
        except Exception as e:
            print('ERROR:', resp.text)
            raise e

    session.hooks["response"].append(raise_for_status)
    # PyGithub client with retries to ride out transient network errors.
    gh = Github(
        github_token,
        retry=urllib3.util.retry.Retry(total=10, backoff_factor=0.1))
    return session, gh
def clone_repo(clone_url, clone_path, branch, auth_token):
    """Clone ``clone_url`` into ``clone_path`` and check out ``branch``.

    Returns the pygit2 repository object and the reference of the
    checked-out branch.
    """
    # With a GitHub App PEM-key token the username is 'x-access-token'.
    # With a personal access token it would be 'x-oauth-basic' AND the
    # username/token parameters would be reversed.
    credentials = pygit2.UserPass('x-access-token', auth_token)
    callbacks = pygit2.RemoteCallbacks(credentials)
    pygit2_repo = pygit2.clone_repository(clone_url, clone_path,
                                           callbacks=callbacks)
    # Resolve the remote-tracking branch and move the work tree onto it.
    remote_branch = pygit2_repo.branches['origin/' + branch]
    pygit2_ref = pygit2_repo.lookup_reference(remote_branch.name)
    pygit2_repo.checkout(pygit2_ref)
    # Also make the git CLI state point at the branch.
    subprocess.run(["git", "checkout", branch], cwd=clone_path)
    return pygit2_repo, pygit2_ref
def get_var_values(var_retrieve, root=''):
    """Extract variable values from files using regular expressions.

    Parameters
    ----------
    var_retrieve : iterable of (str, str, Pattern)
        Triples of variable name, file path (relative to ``root``) and a
        compiled regex whose first capture group holds the value.
    root : str
        Directory the file paths are relative to.

    Returns
    -------
    dict
        Mapping of each variable name to its captured string value.
    """
    values = {}
    for name, filename, pattern in var_retrieve:
        with open(os.path.join(root, filename), 'r') as handle:
            contents = handle.read()
        # The first capture group of the regex is the value of interest.
        values[name] = pattern.search(contents).group(1)
    return values
def update_var_values(var_retrieved, version_tag, git_rev=None, root=''):
    """Compute updated values for the recognized release variables.

    Parameters
    ----------
    var_retrieved : dict
        Current variable values; must contain a ``'version'`` key whenever
        a ``'build'`` key is present.
    version_tag : str
        The new version string.
    git_rev : str or None
        Git revision to record; defaults to ``version_tag`` when None.
    root : str
        Unused here; kept for interface compatibility with callers.

    Returns
    -------
    dict
        New mapping with ``version``, ``git_rev`` and ``build`` updated and
        all other keys copied unchanged.
    """
    if git_rev is None:
        git_rev = version_tag
    updated = {}
    for name, value in var_retrieved.items():
        if name == 'version':
            updated[name] = version_tag
        elif name == 'git_rev':
            updated[name] = git_rev
        elif name == 'build':
            # NOTE(Geoffrey): we are only bumping build number at the moment.
            # It resets to zero whenever the version itself changes.
            same_version = var_retrieved['version'] == version_tag
            updated[name] = int(value) + 1 if same_version else 0
        else:
            updated[name] = value
    return updated
def substitute_vars_in_file(vars_substitute, directory):
    """Apply regex substitutions to files under ``directory``.

    Parameters
    ----------
    vars_substitute : iterable of (str, Pattern, str, object)
        Tuples of file name, compiled regex to replace, substitution
        template containing ``{}``, and the value filling the template.
    directory : str
        Directory containing the files, rewritten in place.
    """
    for filename, pattern, template, value in vars_substitute:
        target = os.path.join(directory, filename)
        # Read the whole file, substitute, then write it back.
        with open(target, "r") as handle:
            text = handle.read()
        text = pattern.sub(template.replace("{}", str(value)), text)
        with open(target, "w") as handle:
            handle.write(text)
"logging.getLogger",
"logging.basicConfig",
"requests.Session",
"re.compile",
"datetime.datetime.strptime",
"pygit2.UserPass",
"subprocess.run",
"os.path.join",
"datetime.datetime.now",
"re.sub",
"datetime.timedelta",
"pygit2.clone_repository",
"re.search"
] | [((305, 332), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (322, 332), False, 'import logging\n'), ((333, 372), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (352, 372), False, 'import logging\n'), ((1484, 1597), 're.compile', 're.compile', (['"""(?P<major>\\\\d+)\\\\.(?P<minor>\\\\d+)\\\\.(?P<patch>\\\\d+)(\\\\.(?P<release>[a-z]+)(?P<dev>\\\\d+))?"""'], {}), "(\n '(?P<major>\\\\d+)\\\\.(?P<minor>\\\\d+)\\\\.(?P<patch>\\\\d+)(\\\\.(?P<release>[a-z]+)(?P<dev>\\\\d+))?'\n )\n", (1494, 1597), False, 'import re\n'), ((3685, 3718), 're.search', 're.search', (['"""\\\\[(.*?)\\\\]"""', 'message'], {}), "('\\\\[(.*?)\\\\]', message)\n", (3694, 3718), False, 'import re\n'), ((4878, 4897), 'datetime.timedelta', 'timedelta', ([], {'hours': '(24)'}), '(hours=24)\n', (4887, 4897), False, 'from datetime import datetime, timedelta\n'), ((5506, 5558), 'datetime.datetime.strptime', 'datetime.strptime', (['date_string', '"""%Y-%m-%dT%H:%M:%SZ"""'], {}), "(date_string, '%Y-%m-%dT%H:%M:%SZ')\n", (5523, 5558), False, 'from datetime import datetime, timedelta\n'), ((6239, 6289), 'subprocess.run', 'subprocess.run', (["['git', 'add', '.']"], {'cwd': 'directory'}), "(['git', 'add', '.'], cwd=directory)\n", (6253, 6289), False, 'import subprocess\n'), ((6335, 6405), 'subprocess.run', 'subprocess.run', (["['git', 'commit', '-m', commit_message]"], {'cwd': 'directory'}), "(['git', 'commit', '-m', commit_message], cwd=directory)\n", (6349, 6405), False, 'import subprocess\n'), ((6549, 6625), 'subprocess.run', 'subprocess.run', (["['git', 'push', '--all', '-f', repo_auth_url]"], {'cwd': 'directory'}), "(['git', 'push', '--all', '-f', repo_auth_url], cwd=directory)\n", (6563, 6625), False, 'import subprocess\n'), ((6630, 6719), 'subprocess.run', 'subprocess.run', (["['git', 'push', repo_auth_url, branch_name, '--tags']"], {'cwd': 'directory'}), "(['git', 'push', repo_auth_url, branch_name, '--tags'], 
cwd=\n directory)\n", (6644, 6719), False, 'import subprocess\n'), ((7251, 7269), 'requests.Session', 'requests.Session', ([], {}), '()\n', (7267, 7269), False, 'import requests\n'), ((8457, 8524), 'pygit2.clone_repository', 'pygit2.clone_repository', (['clone_url', 'clone_path'], {'callbacks': 'callbacks'}), '(clone_url, clone_path, callbacks=callbacks)\n', (8480, 8524), False, 'import pygit2\n'), ((8765, 8824), 'subprocess.run', 'subprocess.run', (["['git', 'checkout', branch]"], {'cwd': 'clone_path'}), "(['git', 'checkout', branch], cwd=clone_path)\n", (8779, 8824), False, 'import subprocess\n'), ((8397, 8437), 'pygit2.UserPass', 'pygit2.UserPass', (['auth_method', 'auth_token'], {}), '(auth_method, auth_token)\n', (8412, 8437), False, 'import pygit2\n'), ((9862, 9891), 'os.path.join', 'os.path.join', (['directory', 'file'], {}), '(directory, file)\n', (9874, 9891), False, 'import os\n'), ((1961, 1994), 'os.path.join', 'os.path.join', (['repo_dir', '"""version"""'], {}), "(repo_dir, 'version')\n", (1973, 1994), False, 'import os\n'), ((5660, 5674), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5672, 5674), False, 'from datetime import datetime, timedelta\n'), ((8977, 9001), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (8989, 9001), False, 'import os\n'), ((4019, 4054), 're.sub', 're.sub', (['"""\\\\s+"""', '""" """', 'commit_message'], {}), "('\\\\s+', ' ', commit_message)\n", (4025, 4054), False, 'import re\n')] |
from django.core.validators import MinLengthValidator
from django.contrib.auth.models import User
from django.contrib.auth.validators import UnicodeUsernameValidator
from django.db import models
from django.db.models.signals import pre_save
from django.dispatch import receiver
from django.utils.encoding import iri_to_uri
from mongo.objectid import ObjectId
from mur.commonmark import commonmark
def _objectid():
    """Generate a fresh Mongo-style ObjectId rendered as a string.

    Used as the default factory for ``Post.objectid`` (a 24-char CharField).
    """
    oid = ObjectId()
    return str(oid)
class Post(models.Model):
    """A user's post, addressed by ``/<user>/<path>/``."""
    # Path must be at least 6 chars and restricted to the username alphabet
    # (letters, digits, @/./+/-/_ via Django's username validator).
    path_validators = [MinLengthValidator(6), UnicodeUsernameValidator()]
    # Stable external identifier, generated once at creation.
    objectid = models.CharField(max_length=24, default=_objectid, editable=False, unique=True)
    # PROTECT: a user with posts cannot be deleted.
    user = models.ForeignKey(User, on_delete=models.PROTECT, related_name='posts')
    path = models.CharField(max_length=127, validators=path_validators)
    # Raw source text of the post (CommonMark, judging by the pre_save hook).
    contents = models.TextField()
    # Rendered HTML of ``contents``; filled in by the pre_save signal handler.
    contents_html = models.TextField(default='', editable=False)
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)
    def get_absolute_url(self):
        # The string returned from get_absolute_url() must contain only ASCII characters.
        return iri_to_uri(f'/{self.user}/{self.path}/')
    def __str__(self):
        return f'Post({self.user}/{self.path})'
    class Meta:
        # One path per user; together they form the post's address.
        unique_together = ('user', 'path')
@receiver(pre_save, sender=Post)
def update_html(sender, instance, update_fields, **kwargs):
    """Render the post's CommonMark source to HTML before every save.

    Skips the (re-)render only when an explicit ``update_fields`` set is
    given and it does not include ``contents``.
    """
    if not update_fields or 'contents' in update_fields:
        instance.contents_html = commonmark(instance.contents)
| [
"django.db.models.TextField",
"django.db.models.ForeignKey",
"mur.commonmark.commonmark",
"django.db.models.DateTimeField",
"mongo.objectid.ObjectId",
"django.core.validators.MinLengthValidator",
"django.utils.encoding.iri_to_uri",
"django.contrib.auth.validators.UnicodeUsernameValidator",
"django.d... | [((1314, 1345), 'django.dispatch.receiver', 'receiver', (['pre_save'], {'sender': 'Post'}), '(pre_save, sender=Post)\n', (1322, 1345), False, 'from django.dispatch import receiver\n'), ((562, 641), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(24)', 'default': '_objectid', 'editable': '(False)', 'unique': '(True)'}), '(max_length=24, default=_objectid, editable=False, unique=True)\n', (578, 641), False, 'from django.db import models\n'), ((653, 724), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.PROTECT', 'related_name': '"""posts"""'}), "(User, on_delete=models.PROTECT, related_name='posts')\n", (670, 724), False, 'from django.db import models\n'), ((736, 796), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(127)', 'validators': 'path_validators'}), '(max_length=127, validators=path_validators)\n', (752, 796), False, 'from django.db import models\n'), ((812, 830), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (828, 830), False, 'from django.db import models\n'), ((851, 895), 'django.db.models.TextField', 'models.TextField', ([], {'default': '""""""', 'editable': '(False)'}), "(default='', editable=False)\n", (867, 895), False, 'from django.db import models\n'), ((910, 949), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (930, 949), False, 'from django.db import models\n'), ((964, 999), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (984, 999), False, 'from django.db import models\n'), ((1508, 1537), 'mur.commonmark.commonmark', 'commonmark', (['instance.contents'], {}), '(instance.contents)\n', (1518, 1537), False, 'from mur.commonmark import commonmark\n'), ((432, 442), 'mongo.objectid.ObjectId', 'ObjectId', ([], {}), '()\n', (440, 442), False, 'from mongo.objectid import ObjectId\n'), ((495, 516), 
'django.core.validators.MinLengthValidator', 'MinLengthValidator', (['(6)'], {}), '(6)\n', (513, 516), False, 'from django.core.validators import MinLengthValidator\n'), ((518, 544), 'django.contrib.auth.validators.UnicodeUsernameValidator', 'UnicodeUsernameValidator', ([], {}), '()\n', (542, 544), False, 'from django.contrib.auth.validators import UnicodeUsernameValidator\n'), ((1138, 1178), 'django.utils.encoding.iri_to_uri', 'iri_to_uri', (['f"""/{self.user}/{self.path}/"""'], {}), "(f'/{self.user}/{self.path}/')\n", (1148, 1178), False, 'from django.utils.encoding import iri_to_uri\n')] |
# Import external modules.
from google.appengine.ext import ndb
import logging
# Import local modules.
from configAutocomplete import const as conf
from constants import Constants
class Survey(ndb.Model):
    """Datastore record for a survey and its ordered list of question ids."""
    surveyId = ndb.StringProperty()   # Primary key
    title = ndb.StringProperty()
    introduction = ndb.StringProperty()
    creator = ndb.StringProperty()  # presumably the creating user's id -- TODO confirm against callers
    allowEdit = ndb.BooleanProperty()
    freezeUserInput = ndb.BooleanProperty( default=False )
    hideReasons = ndb.BooleanProperty( default=False )   # Experimental option
    questionIds = ndb.StringProperty( repeated=True )   # Ordered
| [
"google.appengine.ext.ndb.BooleanProperty",
"google.appengine.ext.ndb.StringProperty"
] | [((223, 243), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {}), '()\n', (241, 243), False, 'from google.appengine.ext import ndb\n'), ((273, 293), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {}), '()\n', (291, 293), False, 'from google.appengine.ext import ndb\n'), ((313, 333), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {}), '()\n', (331, 333), False, 'from google.appengine.ext import ndb\n'), ((348, 368), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {}), '()\n', (366, 368), False, 'from google.appengine.ext import ndb\n'), ((385, 406), 'google.appengine.ext.ndb.BooleanProperty', 'ndb.BooleanProperty', ([], {}), '()\n', (404, 406), False, 'from google.appengine.ext import ndb\n'), ((429, 463), 'google.appengine.ext.ndb.BooleanProperty', 'ndb.BooleanProperty', ([], {'default': '(False)'}), '(default=False)\n', (448, 463), False, 'from google.appengine.ext import ndb\n'), ((484, 518), 'google.appengine.ext.ndb.BooleanProperty', 'ndb.BooleanProperty', ([], {'default': '(False)'}), '(default=False)\n', (503, 518), False, 'from google.appengine.ext import ndb\n'), ((562, 595), 'google.appengine.ext.ndb.StringProperty', 'ndb.StringProperty', ([], {'repeated': '(True)'}), '(repeated=True)\n', (580, 595), False, 'from google.appengine.ext import ndb\n')] |
from django.contrib.auth import models as auth_models
from django.db import models
from django.urls import reverse
class User(auth_models.AbstractUser):
    """Extends default model to add project specific fields."""
    # Extra employee details on top of Django's stock AbstractUser fields.
    birth_date = models.DateField(help_text="Employee Date of birth.")
    address = models.TextField(help_text="Employee permanent address")
    # Only the flag is stored here; enforcement is presumably handled by
    # callers elsewhere -- not visible in this model.
    locked = models.BooleanField(
        default=False,
        help_text="Whether the account has been locked by Store owner.",
    )
| [
"django.db.models.DateField",
"django.db.models.TextField",
"django.db.models.BooleanField"
] | [((237, 290), 'django.db.models.DateField', 'models.DateField', ([], {'help_text': '"""Employee Date of birth."""'}), "(help_text='Employee Date of birth.')\n", (253, 290), False, 'from django.db import models\n'), ((305, 361), 'django.db.models.TextField', 'models.TextField', ([], {'help_text': '"""Employee permanent address"""'}), "(help_text='Employee permanent address')\n", (321, 361), False, 'from django.db import models\n'), ((375, 479), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'help_text': '"""Whether the account has been locked by Store owner."""'}), "(default=False, help_text=\n 'Whether the account has been locked by Store owner.')\n", (394, 479), False, 'from django.db import models\n')] |
# test builtin sorted
# Feature probe: some minimal builds omit sorted()/set entirely.
try:
    sorted
    set
except:
    import sys
    print("SKIP")
    sys.exit()

numbers = set(range(100))
# plain ascending sort of a set
print(sorted(numbers))
# custom key: even values keep their rank, odd ones are shifted past them
print(sorted(numbers, key=lambda n: n + 100 * (n % 2)))

# the key must be passed as a keyword argument
try:
    sorted([], None)
except TypeError:
    print("TypeError")
| [
"sys.exit"
] | [((91, 101), 'sys.exit', 'sys.exit', ([], {}), '()\n', (99, 101), False, 'import sys\n')] |
# -*- coding: utf-8 -*-
"""Serializer tests for the Mendeley addon."""
import pytest
from addons.base.tests.serializers import CitationAddonSerializerTestSuiteMixin
from addons.base.tests.utils import MockFolder
from addons.mendeley.tests.factories import MendeleyAccountFactory
from addons.mendeley.serializer import MendeleySerializer
from tests.base import OsfTestCase
# Give every test in this module access to the test database.
pytestmark = pytest.mark.django_db
class TestMendeleySerializer(CitationAddonSerializerTestSuiteMixin, OsfTestCase):
    """Runs the shared citation-addon serializer suite against Mendeley."""
    # Hooks consumed by CitationAddonSerializerTestSuiteMixin.
    addon_short_name = 'mendeley'
    Serializer = MendeleySerializer
    ExternalAccountFactory = MendeleyAccountFactory
    folder = MockFolder()
| [
"addons.base.tests.utils.MockFolder"
] | [((630, 642), 'addons.base.tests.utils.MockFolder', 'MockFolder', ([], {}), '()\n', (640, 642), False, 'from addons.base.tests.utils import MockFolder\n')] |
# -*- coding: utf-8 -*-
r"""
Information-set decoding for linear codes
Information-set decoding is a probabilistic decoding strategy that
essentially tries to guess `k` correct positions in the received word,
where `k` is the dimension of the code. A codeword agreeing with the
received word on the guessed position can easily be computed, and their
difference is one possible error vector. A "correct" guess is assumed when
this error vector has low Hamming weight.
This simple algorithm is not very efficient in itself, but there are numerous
refinements to the strategy that make it very capable over rather large codes.
Still, the decoding algorithm is exponential in dimension of the code and the
log of the field size.
The ISD strategy requires choosing how many errors is deemed acceptable. One
choice could be `d/2`, where `d` is the minimum distance of the code, but
sometimes `d` is not known, or sometimes more errors are expected. If one
chooses anything above `d/2`, the algorithm does not guarantee to return a
nearest codeword.
AUTHORS:
- <NAME>, <NAME>, <NAME> (2016-02, 2017-06): initial
version
"""
#******************************************************************************
# Copyright (C) 2017 <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# http://www.gnu.org/licenses/
#******************************************************************************
from sage.all import ZZ, Integer, vector, SageObject, binomial
from .decoder import Decoder
def _format_decoding_interval(decoding_interval):
r"""
Format the decoding interval of an ISD decoder when calling ``_repr_`` or
``_latex_``.
EXAMPLES::
sage: from sage.coding.information_set_decoder import _format_decoding_interval
sage: _format_decoding_interval((0,3))
'up to 3'
sage: _format_decoding_interval((2,3))
'between 2 and 3'
sage: _format_decoding_interval((3,3))
'exactly 3'
"""
if decoding_interval[0] == 0:
return "up to {0}".format(decoding_interval[1])
if decoding_interval[0] == decoding_interval[1]:
return "exactly {0}".format(decoding_interval[0])
return "between {0} and {1}".format(decoding_interval[0], decoding_interval[1])
class InformationSetAlgorithm(SageObject):
    r"""
    Abstract class for algorithms for
    :class:`sage.coding.information_set_decoder.LinearCodeInformationSetDecoder`.

    To sub-class this class, override ``decode`` and ``calibrate``, and call the
    super constructor from ``__init__``.

    INPUT:

    - ``code`` -- A linear code for which to decode.

    - ``number_errors`` -- an integer, the maximal number of errors to accept as
      correct decoding. An interval can also be specified by giving a pair of
      integers, where both end values are taken to be in the interval.

    - ``algorithm_name`` -- A name for the specific ISD algorithm used (used for
      printing).

    - ``parameters`` -- (optional) A dictionary for setting the parameters of
      this ISD algorithm. Note that sanity checking this dictionary for the
      individual sub-classes should be done in the sub-class constructor.

    EXAMPLES::

        sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
        sage: LeeBrickellISDAlgorithm(codes.GolayCode(GF(2)), (0,4))
        ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 4 errors

    A minimal working example of how to sub-class::

        sage: from sage.coding.information_set_decoder import InformationSetAlgorithm
        sage: from sage.coding.decoder import DecodingError
        sage: class MinimalISD(InformationSetAlgorithm):
        ....:     def __init__(self, code, decoding_interval):
        ....:         super(MinimalISD, self).__init__(code, decoding_interval, "MinimalISD")
        ....:     def calibrate(self):
        ....:         self._parameters = { } # calibrate parameters here
        ....:         self._time_estimate = 10.0 # calibrated time estimate
        ....:     def decode(self, r):
        ....:         # decoding algorithm here
        ....:         raise DecodingError("I failed")
        sage: MinimalISD(codes.GolayCode(GF(2)), (0,4))
        ISD Algorithm (MinimalISD) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 4 errors
    """
    def __init__(self, code, decoding_interval, algorithm_name, parameters = None):
        r"""
        TESTS::

            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: LeeBrickellISDAlgorithm(codes.GolayCode(GF(2)), (0,4))
            ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 4 errors
        """
        self._code = code
        self._decoding_interval = decoding_interval
        self._algorithm_name = algorithm_name
        # Remember whether the parameters came from the caller, so that
        # ``calibrate()`` knows not to overwrite an explicit choice later.
        if parameters:
            self._parameters = parameters
            self._parameters_specified = True
        else:
            self._parameters_specified = False
    def name(self):
        r"""
        Return the name of this ISD algorithm.

        EXAMPLES::

            sage: C = codes.GolayCode(GF(2))
            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: A = LeeBrickellISDAlgorithm(C, (0,2))
            sage: A.name()
            'Lee-Brickell'
        """
        return self._algorithm_name
    def decode(self, r):
        r"""
        Decode a received word using this ISD decoding algorithm.

        Must be overridden by sub-classes.

        EXAMPLES::

            sage: M = matrix(GF(2), [[1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0],\
                                     [0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1],\
                                     [0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0],\
                                     [0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1],\
                                     [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1]])
            sage: C = codes.LinearCode(M)
            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: A = LeeBrickellISDAlgorithm(C, (2,2))
            sage: r = vector(GF(2), [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
            sage: A.decode(r)
            (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
        """
        raise NotImplementedError
    def time_estimate(self):
        """
        Estimate for how long this ISD algorithm takes to perform a single decoding.

        The estimate is for a received word whose number of errors is within the
        decoding interval of this ISD algorithm.

        EXAMPLES::

            sage: C = codes.GolayCode(GF(2))
            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: A = LeeBrickellISDAlgorithm(C, (0,2))
            sage: A.time_estimate() #random
            0.0008162108571427874
        """
        # Calibrate lazily on first access; ``calibrate()`` is expected to
        # set ``self._time_estimate``.
        if not hasattr(self, "_time_estimate"):
            self.calibrate()
        return self._time_estimate
    def calibrate(self):
        """
        Uses test computations to estimate optimal values for any parameters
        this ISD algorithm may take.

        Must be overridden by sub-classes.

        If ``self._parameters_specified`` is ``False``, this method shall set
        ``self._parameters`` to the best parameters estimated. It shall always
        set ``self._time_estimate`` to the time estimate of using
        ``self._parameters``.

        EXAMPLES::

            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: C = codes.GolayCode(GF(2))
            sage: A = LeeBrickellISDAlgorithm(C, (0,3))
            sage: A.calibrate()
            sage: A.parameters() #random
            {'search_size': 1}
        """
        raise NotImplementedError
    def code(self):
        r"""
        Return the code associated to this ISD algorithm.

        EXAMPLES::

            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: C = codes.GolayCode(GF(2))
            sage: A = LeeBrickellISDAlgorithm(C, (0,3))
            sage: A.code()
            [24, 12, 8] Extended Golay code over GF(2)
        """
        return self._code
    def decoding_interval(self):
        r"""
        A pair of integers specifying the interval of number of errors this
        ISD algorithm will attempt to correct.

        The interval includes both end values.

        EXAMPLES::

            sage: C = codes.GolayCode(GF(2))
            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: A = LeeBrickellISDAlgorithm(C, (0,2))
            sage: A.decoding_interval()
            (0, 2)
        """
        return self._decoding_interval
    def parameters(self):
        """
        Return any parameters this ISD algorithm uses.

        If the parameters have not already been set, efficient values will first
        be calibrated and returned.

        EXAMPLES::

            sage: C = codes.GolayCode(GF(2))
            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: A = LeeBrickellISDAlgorithm(C, (0,4), search_size=3)
            sage: A.parameters()
            {'search_size': 3}

        If not set, calibration will determine a sensible value::

            sage: A = LeeBrickellISDAlgorithm(C, (0,4))
            sage: A.parameters() #random
            {'search_size': 1}
        """
        # Lazily calibrate, mirroring ``time_estimate()``.
        if not hasattr(self, "_parameters"):
            self.calibrate()
        return self._parameters
    def __eq__(self, other):
        r"""
        Tests equality between ISD algorithm objects.

        EXAMPLES::

            sage: C = codes.GolayCode(GF(2))
            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: A = LeeBrickellISDAlgorithm(C, (0,4))
            sage: A == LeeBrickellISDAlgorithm(C, (0,4))
            True
            sage: A == LeeBrickellISDAlgorithm(C, (0,5))
            False
            sage: other_search = 1 if A.parameters()['search_size'] != 1 else 2
            sage: A == LeeBrickellISDAlgorithm(C, (0,4), search_size=other_search)
            False

        ISD Algorithm objects can be equal only if they have both calibrated
        the parameters, or if they both had it set and to the same value::

            sage: A2 = LeeBrickellISDAlgorithm(C, (0,4), search_size=A.parameters()['search_size'])
            sage: A == A2
            False
            sage: A2 == LeeBrickellISDAlgorithm(C, (0,4), search_size=A.parameters()['search_size'])
            True
        """
        # Note: comparing parameters only when both were user-specified keeps
        # a calibrated object from equalling an explicitly-configured one.
        return isinstance(other, self.__class__)\
                and self.code() == other.code()\
                and self.decoding_interval() == other.decoding_interval()\
                and self._parameters_specified == other._parameters_specified\
                and (not self._parameters_specified or self.parameters() == other.parameters())
    def __hash__(self):
        r"""
        Returns the hash value of ``self``.

        EXAMPLES::

            sage: C = codes.GolayCode(GF(2))
            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: A = LeeBrickellISDAlgorithm(C, (0,4))
            sage: hash(A) #random
            5884357732955478461
            sage: C2 = codes.GolayCode(GF(3))
            sage: A2 = LeeBrickellISDAlgorithm(C2, (0,4))
            sage: hash(A) != hash(A2)
            True
        """
        # Hash is derived from the printed representation, so it depends on
        # the exact ``_repr_`` format below.
        return hash(str(self))
    def _repr_(self):
        r"""
        Returns a string representation of this ISD algorithm.

        EXAMPLES::

            sage: C = codes.GolayCode(GF(2))
            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: A = LeeBrickellISDAlgorithm(C, (0,4))
            sage: A
            ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 4 errors
        """
        # NOTE(review): the format string ends with a trailing space -- is
        # that intentional? ``__hash__`` depends on it, so do not change
        # without checking.
        return "ISD Algorithm ({}) for {} decoding {} errors ".format(self._algorithm_name, self.code(), _format_decoding_interval(self.decoding_interval()))
    def _latex_(self):
        r"""
        Returns a latex representation of this ISD algorithm.

        EXAMPLES::

            sage: C = codes.GolayCode(GF(2))
            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: A = LeeBrickellISDAlgorithm(C, (0,4))
            sage: latex(A)
            \textnormal{ISD Algorithm (Lee-Brickell) for }[24, 12, 8] \textnormal{ Extended Golay Code over } \Bold{F}_{2} \textnormal{decoding up to 4 errors}
        """
        return "\\textnormal{{ISD Algorithm ({}) for }}{} \\textnormal{{decoding {} errors}}".format(self._algorithm_name, self.code()._latex_(), _format_decoding_interval(self.decoding_interval()))
class LeeBrickellISDAlgorithm(InformationSetAlgorithm):
r"""
The Lee-Brickell algorithm for information-set decoding.
For a description of the information-set decoding paradigm (ISD), see
:class:`sage.coding.information_set_decoder.LinearCodeInformationSetDecoder`.
This implements the Lee-Brickell variant of ISD, see [LB1988]_ for the
original binary case, and [Pet2010]_ for the `q`-ary extension.
Let `C` be a `[n, k]`-linear code over `GF(q)`, and let `r \in GF(q)^{n}` be
a received word in a transmission. We seek the codeword whose Hamming
distance from `r` is minimal. Let `p` and `w` be integers, such that `0\leq
p\leq w`, Let `G` be a generator matrix of `C`, and for any set of indices
`I`, we write `G_{I}` for the matrix formed by the columns of `G` indexed by
`I`. The Lee-Brickell ISD loops the following until it is successful:
1. Choose an information set `I` of `C`.
2. Compute `r' = r - r_{I}\times G_I^{-1} \times G`
3. Consider every size-`p` subset of `I`, `\{a_1, \dots, a_p\}`.
For each `m = (m_1, \dots, m_p) \in GF(q)^{p}`, compute
the error vector `e = r' - \sum_{i=1}^{p} m_i\times g_{a_i}`,
4. If `e` has a Hamming weight at most `w`, return `r-e`.
INPUT:
- ``code`` -- A linear code for which to decode.
- ``decoding_interval`` -- a pair of integers specifying an interval of
number of errors to correct. Includes both end values.
- ``search_size`` -- (optional) the size of subsets to use on step 3 of the
algorithm as described above. Usually a small number. It has to be at most
the largest allowed number of errors. A good choice will be approximated
if this option is not set; see
:meth:`sage.coding.LeeBrickellISDAlgorithm.calibrate`
for details.
EXAMPLES::
sage: C = codes.GolayCode(GF(2))
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: A = LeeBrickellISDAlgorithm(C, (0,4)); A
ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 4 errors
sage: C = codes.GolayCode(GF(2))
sage: A = LeeBrickellISDAlgorithm(C, (2,3)); A
ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding between 2 and 3 errors
"""
    def __init__(self, code, decoding_interval, search_size = None):
        r"""
        TESTS:

        If ``search_size`` is not a positive integer, or is bigger than the
        decoding radius, an error will be raised::

            sage: C = codes.GolayCode(GF(2))
            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: LeeBrickellISDAlgorithm(C, (1, 3), search_size=-1)
            Traceback (most recent call last):
            ...
            ValueError: The search size parameter has to be a positive integer

            sage: LeeBrickellISDAlgorithm(C, (1, 3), search_size=4)
            Traceback (most recent call last):
            ...
            ValueError: The search size parameter has to be at most the maximal number of allowed errors
        """
        if search_size is not None:
            # Validate the user-supplied search size before committing to it.
            if not isinstance(search_size, (Integer, int)) or search_size < 0:
                raise ValueError("The search size parameter has to be a positive integer")
            if search_size > decoding_interval[1]:
                raise ValueError("The search size parameter has to be at most"
                                 " the maximal number of allowed errors")
            super(LeeBrickellISDAlgorithm, self).__init__(code, decoding_interval, "Lee-Brickell",
                parameters={ 'search_size': search_size })
            self._parameters_specified = True
        else:
            # No explicit search size: ``calibrate()`` will choose one lazily.
            self._parameters_specified = False
            super(LeeBrickellISDAlgorithm, self).__init__(code, decoding_interval, "Lee-Brickell")
    def decode(self, r):
        r"""
        The Lee-Brickell algorithm as described in the class doc.

        Note that either parameters must be given at construction time or
        :meth:`sage.coding.information_set_decoder.InformationSetAlgorithm.calibrate()`
        should be called before calling this method.

        INPUT:

        - `r` -- a received word, i.e. a vector in the ambient space of
          :meth:`decoder.Decoder.code`.

        OUTPUT: A codeword whose distance to `r` satisfies ``self.decoding_interval()``.

        EXAMPLES::

            sage: M = matrix(GF(2), [[1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0],\
                                     [0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1],\
                                     [0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0],\
                                     [0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1],\
                                     [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1]])
            sage: C = codes.LinearCode(M)
            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: A = LeeBrickellISDAlgorithm(C, (2,2))
            sage: c = C.random_element()
            sage: Chan = channels.StaticErrorRateChannel(C.ambient_space(), 2)
            sage: r = Chan(c)
            sage: c_out = A.decode(r)
            sage: (r - c).hamming_weight() == 2
            True
        """
        import itertools
        from sage.misc.prandom import sample
        C = self.code()
        n, k = C.length(), C.dimension()
        tau = self.decoding_interval()
        p = self.parameters()['search_size']
        F = C.base_ring()
        G = C.generator_matrix()
        Fstar = F.list()[1:]  # the nonzero field elements
        # Randomized search: each iteration guesses an information set and
        # explores error patterns of weight up to ``p`` on it (steps 1-3 of
        # the class documentation). Loops until a suitable codeword is found.
        while True:
            # step 1.
            I = sample(range(n), k)
            Gi = G.matrix_from_columns(I)
            try:
                Gi_inv = Gi.inverse()
            except ZeroDivisionError:
                # I was not an information set
                continue
            Gt = Gi_inv * G
            #step 2.
            y = r - vector([r[i] for i in I]) * Gt
            g = Gt.rows()
            #step 3.
            for pi in range(p+1):
                for A in itertools.combinations(range(k), pi):
                    for m in itertools.product(Fstar, repeat=pi):
                        e = y - sum(m[i]*g[A[i]] for i in range(pi))
                        errs = e.hamming_weight()
                        # Accept only error weights inside the decoding interval.
                        if errs >= tau[0] and errs <= tau[1]:
                            return r - e
def calibrate(self):
    r"""
    Run some test computations to estimate the optimal search size.

    Let `p` be the search size. We should simply choose `p` such that the
    average expected time is minimal. The algorithm succeeds when it chooses
    an information set with at least `k - p` correct positions, where `k` is
    the dimension of the code and `p` the search size. The expected number
    of trials we need before this occurs is:

    .. MATH::

        \binom{n}{k}/(\rho \sum_{i=0}^p \binom{n-\tau}{k-i} \binom{\tau}{i})

    Here `\rho` is the fraction of `k` subsets of indices which are
    information sets. If `T` is the average time for steps 1 and 2
    (including selecting `I` until an information set is found), while `P(i)`
    is the time for the body of the ``for``-loop in step 3 for `m` of weight
    `i`, then each information set trial takes roughly time `T +
    \sum_{i=0}^{p} P(i) \binom{k}{i} (q-1)^i`, where `\GF{q}` is the base
    field.

    The values `T` and `P` are here estimated by running a few test
    computations similar to those done by the decoding algorithm.
    We don't explicitly estimate `\rho`.

    OUTPUT: Does not output anything but sets private fields used by
    :meth:`sage.coding.information_set_decoder.InformationSetAlgorithm.parameters()`
    and
    :meth:`sage.coding.information_set_decoder.InformationSetAlgorithm.time_estimate()``.

    EXAMPLES::

        sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
        sage: C = codes.GolayCode(GF(2))
        sage: A = LeeBrickellISDAlgorithm(C, (0,3)); A
        ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 3 errors
        sage: A.calibrate()
        sage: A.parameters() #random
        {'search_size': 1}
        sage: A.time_estimate() #random
        0.0008162108571427874

    If we specify the parameter at construction time, calibrate does not override this choice::

        sage: A = LeeBrickellISDAlgorithm(C, (0,3), search_size=2); A
        ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 3 errors
        sage: A.parameters()
        {'search_size': 2}
        sage: A.calibrate()
        sage: A.parameters()
        {'search_size': 2}
        sage: A.time_estimate() #random
        0.0008162108571427874
    """
    from sage.matrix.special import random_matrix
    from sage.misc.prandom import sample, randint
    from sage.modules.free_module_element import random_vector
    from time import process_time
    C = self.code()
    G = C.generator_matrix()
    n, k = C.length(), C.dimension()
    # Only the upper end of the decoding interval matters for the estimate.
    tau = self.decoding_interval()[1]
    F = C.base_ring()
    q = F.cardinality()
    Fstar = F.list()[1:]

    def time_information_set_steps():
        # Time one successful information-set draw plus the column inversion
        # (decoder steps 1 and 2); failed (singular) draws are retried and
        # thus included in the measured time.
        before = process_time()
        while True:
            I = sample(range(n), k)
            Gi = G.matrix_from_columns(I)
            try:
                Gi_inv = Gi.inverse()
            except ZeroDivisionError:
                continue
            return process_time() - before

    def time_search_loop(p):
        # Average cost of one weight-p inner-loop body (decoder step 3),
        # measured over 100 random scalar combinations.
        y = random_vector(F, n)
        g = random_matrix(F, p, n).rows()
        scalars = [ [ Fstar[randint(0,q-2)] for i in range(p) ]
                    for s in range(100) ]
        before = process_time()
        for m in scalars:
            e = y - sum(m[i]*g[i] for i in range(p))
        return (process_time() - before) / 100.
    # T: mean cost of steps 1-2 over 5 samples; P[i]: cost of a weight-i body.
    T = sum([ time_information_set_steps() for s in range(5) ]) / 5.
    P = [ time_search_loop(p) for p in range(tau+1) ]

    def compute_estimate(p):
        # Expected total time for search size p: expected number of
        # information-set trials times the cost of a single trial.
        # NOTE(review): `binomial` is not imported in this method --
        # presumably a sage.all global; confirm at module level.
        iters = 1.* binomial(n, k)/ \
            sum( binomial(n-tau, k-i)*binomial(tau,i) for i in range(p+1) )
        estimate = iters*(T + \
            sum(P[pi] * (q-1)**pi * binomial(k, pi) for pi in range(p+1) ))
        return estimate
    if self._parameters_specified:
        self._time_estimate = compute_estimate(self._parameters['search_size'])
    else:
        self._calibrate_select([ compute_estimate(p) for p in range(tau+1) ])
def _calibrate_select(self, estimates):
r"""
Internal method used by ``self.calibrate()``.
Given the timing estimates, select the best parameter and set the
appropriate private fields.
INPUT:
- `estimates` - list of time estimates, for the search size set to the
index of the list entry.
OUTPUT: None, but sets the private fields `self._parameters` and
`self._time_estimate`.
TESTS::
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: C = codes.GolayCode(GF(2))
sage: A = LeeBrickellISDAlgorithm(C, (0,3)); A
ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 3 errors
sage: A._calibrate_select([ 1.0, 2.0, 3.0, 0.5, 0.6, 1.0 ])
sage: A._time_estimate
0.500000000000000
sage: A._parameters
{'search_size': 3}
"""
search_size = 0
for p in range(1, len(estimates)):
if estimates[p] < estimates[search_size]:
search_size = p
self._parameters = { 'search_size': search_size }
self._time_estimate = estimates[search_size]
class LinearCodeInformationSetDecoder(Decoder):
    r"""
    Information-set decoder for any linear code.

    Information-set decoding is a probabilistic decoding strategy that
    essentially tries to guess `k` correct positions in the received word,
    where `k` is the dimension of the code. A codeword agreeing with the
    received word on the guessed position can easily be computed, and their
    difference is one possible error vector. A "correct" guess is assumed when
    this error vector has low Hamming weight.

    The ISD strategy requires choosing how many errors is deemed acceptable. One
    choice could be `d/2`, where `d` is the minimum distance of the code, but
    sometimes `d` is not known, or sometimes more errors are expected. If one
    chooses anything above `d/2`, the algorithm does not guarantee to return a
    nearest codeword.

    This simple algorithm is not very efficient in itself, but there are numerous
    refinements to the strategy. Specifying which strategy to use among those
    that Sage knows is done using the ``algorithm`` keyword. If this is not set,
    an efficient choice will be made for you.

    The various ISD algorithms all need to select a number of parameters. If you
    choose a specific algorithm to use, you can pass these parameters as named
    parameters directly to this class' constructor. If you don't, efficient
    choices will be calibrated for you.

    .. WARNING::

        If there is no codeword within the specified decoding distance, then the
        decoder may never terminate, or it may raise a
        :exc:`sage.coding.decoder.DecodingError` exception, depending on the ISD
        algorithm used.

    INPUT:

    - ``code`` -- A linear code for which to decode.

    - ``number_errors`` -- an integer, the maximal number of errors to accept as
      correct decoding. An interval can also be specified by giving a pair of
      integers, where both end values are taken to be in the interval.

    - ``algorithm`` -- (optional) the string name of the ISD algorithm to
      employ. If this is not set, an appropriate one will be chosen.
      A constructed
      :class:`sage.coding.information_set_decoder.InformationSetAlgorithm`
      object may also be given. In this case ``number_errors`` must match that
      of the passed algorithm.

    - ``**kwargs`` -- (optional) any number of named arguments passed on to the
      ISD algorithm. Such are usually not required, and they can only be set if
      ``algorithm`` is set to a specific algorithm. See the documentation for
      each individual ISD algorithm class for information on any named arguments
      they may accept. The easiest way to access this documentation is to first
      construct the decoder without passing any named arguments, then accessing
      the ISD algorithm using
      :meth:`sage.coding.information_set_decoder.LinearCodeInformationSetDecoder.algorithm`,
      and then reading the `?` help on the constructed object.

    EXAMPLES:

    The principal way to access this class is through the
    :meth:`sage.code.linear_code.AbstractLinearCode.decoder` method::

        sage: C = codes.GolayCode(GF(3))
        sage: D = C.decoder("InformationSet", 2); D
        Information-set decoder (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors

    You can specify which algorithm you wish to use, and you should do so in
    order to pass special parameters to it::

        sage: C = codes.GolayCode(GF(3))
        sage: D2 = C.decoder("InformationSet", 2, algorithm="Lee-Brickell", search_size=2); D2
        Information-set decoder (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors
        sage: D2.algorithm()
        ISD Algorithm (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors
        sage: D2.algorithm().parameters()
        {'search_size': 2}

    If you specify an algorithm which is not known, you get a friendly error message::

        sage: C.decoder("InformationSet", 2, algorithm="NoSuchThing")
        Traceback (most recent call last):
        ...
        ValueError: Unknown ISD algorithm 'NoSuchThing'. The known algorithms are ['Lee-Brickell'].

    You can also construct an ISD algorithm separately and pass that. This is
    mostly useful if you write your own ISD algorithms::

        sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
        sage: A = LeeBrickellISDAlgorithm(C, (0, 2))
        sage: D = C.decoder("InformationSet", 2, algorithm=A); D
        Information-set decoder (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors

    When passing an already constructed ISD algorithm, you can't also pass
    parameters to the ISD algorithm when constructing the decoder::

        sage: C.decoder("InformationSet", 2, algorithm=A, search_size=2)
        Traceback (most recent call last):
        ...
        ValueError: ISD algorithm arguments are not allowed when supplying a constructed ISD algorithm

    We can also information-set decode non-binary codes::

        sage: C = codes.GolayCode(GF(3))
        sage: D = C.decoder("InformationSet", 2); D
        Information-set decoder (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors

    There are two other ways to access this class::

        sage: D = codes.decoders.LinearCodeInformationSetDecoder(C, 2); D
        Information-set decoder (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors
        sage: from sage.coding.information_set_decoder import LinearCodeInformationSetDecoder
        sage: D = LinearCodeInformationSetDecoder(C, 2); D
        Information-set decoder (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors
    """
    def __init__(self, code, number_errors, algorithm=None, **kwargs):
        r"""
        TESTS:

        ``number_errors`` has to be either a list of Integers/ints, a tuple of Integers/ints,
        or an Integer/int::

            sage: C = codes.GolayCode(GF(2))
            sage: D = C.decoder("InformationSet", "aa")
            Traceback (most recent call last):
            ...
            ValueError: number_errors should be an integer or a pair of integers

        If ``number_errors`` is passed as a list/tuple, it has to contain only
        two values, the first one being at most the second one::

            sage: C = codes.GolayCode(GF(2))
            sage: D = C.decoder("InformationSet", (4, 2))
            Traceback (most recent call last):
            ...
            ValueError: number_errors should be a positive integer or a valid interval within the positive integers

        You cannot ask the decoder to correct more errors than the code length::

            sage: D = C.decoder("InformationSet", 25)
            Traceback (most recent call last):
            ...
            ValueError: The provided number of errors should be at most the code's length

        If ``algorithm`` is not set, additional parameters cannot be passed to
        the ISD algorithm::

            sage: D = C.decoder("InformationSet", 2, search_size=2)
            Traceback (most recent call last):
            ...
            ValueError: Additional arguments to an information-set decoder algorithm are only allowed if a specific algorithm is selected by setting the algorithm keyword

        If ``algorithm`` is set to a constructed ISD algorithm, additional
        parameters cannot be passed to the ISD algorithm::

            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: A = LeeBrickellISDAlgorithm(C, (0, 2))
            sage: D = C.decoder("InformationSet", 2, A, search_size=3)
            Traceback (most recent call last):
            ...
            ValueError: ISD algorithm arguments are not allowed when supplying a constructed ISD algorithm

        If ``algorithm`` is set to a constructed
        :class:`sage.coding.information_set_decoder.InformationSetAlgorithm`,
        then ``number_errors`` must match that of the algorithm::

            sage: C = codes.GolayCode(GF(2))
            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: A = LeeBrickellISDAlgorithm(C, (0, 2))
            sage: D = C.decoder("InformationSet", 2, A); D
            Information-set decoder (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 2 errors
            sage: D = C.decoder("InformationSet", (0,2), A); D
            Information-set decoder (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 2 errors
            sage: D = C.decoder("InformationSet", 3, A); D
            Traceback (most recent call last):
            ...
            ValueError: number_errors must match that of the passed ISD algorithm
        """
        # Normalize a single integer bound into the interval (0, bound).
        if isinstance(number_errors, (Integer, int)):
            number_errors = (0, number_errors)
        if isinstance(number_errors, (tuple, list)) and len(number_errors) == 2 \
            and number_errors[0] in ZZ and number_errors[1] in ZZ:
            if 0 > number_errors[0] or number_errors[0] > number_errors[1]:
                raise ValueError(
                        "number_errors should be a positive integer or"
                        " a valid interval within the positive integers")
            if number_errors[1] > code.length():
                raise ValueError("The provided number of errors should be at"
                                 " most the code's length")
        else:
            raise ValueError("number_errors should be an integer or a pair of integers")
        self._number_errors = number_errors
        super(LinearCodeInformationSetDecoder, self).__init__(
            code, code.ambient_space(), code._default_encoder_name)
        if algorithm is None:
            if kwargs:
                raise ValueError("Additional arguments to an information-set decoder"
                                 " algorithm are only allowed if a specific"
                                 " algorithm is selected by setting the algorithm"
                                 " keyword")
            algorithm = "Lee-Brickell"
        algorithm_names = LinearCodeInformationSetDecoder.known_algorithms(dictionary=True)
        if isinstance(algorithm, InformationSetAlgorithm):
            # A pre-built algorithm object: no extra kwargs allowed, and its
            # decoding interval must agree with number_errors.
            if kwargs:
                raise ValueError("ISD algorithm arguments are not allowed when"
                                 " supplying a constructed ISD algorithm")
            if number_errors != algorithm.decoding_interval():
                raise ValueError("number_errors must match that of the passed"
                                 " ISD algorithm")
            self._algorithm = algorithm
        elif algorithm in algorithm_names:
            self._algorithm = algorithm_names[algorithm](code, number_errors, **kwargs)
        else:
            raise ValueError("Unknown ISD algorithm '{}'."
                             " The known algorithms are {}."\
                             .format(algorithm, sorted(algorithm_names)))

    # Registry of the ISD algorithms this decoder can dispatch to, keyed by
    # the string accepted by the ``algorithm`` keyword.
    _known_algorithms = {
        "Lee-Brickell": LeeBrickellISDAlgorithm
    }

    @staticmethod
    def known_algorithms(dictionary=False):
        r"""
        Return the list of ISD algorithms that Sage knows.

        Passing any of these to the constructor of
        :class:`sage.coding.information_set_decoder.LinearCodeInformationSetDecoder`
        will make the ISD decoder use that algorithm.

        INPUT:

        - ``dictionary`` - optional. If set to ``True``, return a ``dict``
          mapping decoding algorithm name to its class.

        OUTPUT: a list of strings or a ``dict`` from string to ISD algorithm class.

        EXAMPLES::

            sage: from sage.coding.information_set_decoder import LinearCodeInformationSetDecoder
            sage: sorted(LinearCodeInformationSetDecoder.known_algorithms())
            ['Lee-Brickell']
        """
        if dictionary:
            return LinearCodeInformationSetDecoder._known_algorithms
        else:
            return LinearCodeInformationSetDecoder._known_algorithms.keys()

    def algorithm(self):
        r"""
        Return the ISD algorithm used by this ISD decoder.

        EXAMPLES::

            sage: C = codes.GolayCode(GF(2))
            sage: D = C.decoder("InformationSet", (2,4), "Lee-Brickell")
            sage: D.algorithm()
            ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding between 2 and 4 errors
        """
        return self._algorithm

    def decode_to_code(self, r):
        r"""
        Decodes a received word with respect to the associated code of this decoder.

        .. WARNING::

            If there is no codeword within the decoding radius of this decoder, this
            method may never terminate, or it may raise a
            :exc:`sage.coding.decoder.DecodingError` exception, depending on the ISD
            algorithm used.

        INPUT:

        - ``r`` -- a vector in the ambient space of :meth:`decoder.Decoder.code`.

        OUTPUT: a codeword of :meth:`decoder.Decoder.code`.

        EXAMPLES::

            sage: M = matrix(GF(2), [[1,0,0,0,0,0,1,0,1,0,1,1,0,0,1],\
                                     [0,1,0,0,0,1,1,1,1,0,0,0,0,1,1],\
                                     [0,0,1,0,0,0,0,1,0,1,1,1,1,1,0],\
                                     [0,0,0,1,0,0,1,0,1,0,0,0,1,1,0],\
                                     [0,0,0,0,1,0,0,0,1,0,1,1,0,1,0]])
            sage: C = LinearCode(M)
            sage: c = C.random_element()
            sage: Chan = channels.StaticErrorRateChannel(C.ambient_space(), 2)
            sage: r = Chan(c)
            sage: D = C.decoder('InformationSet', 2)
            sage: c == D.decode_to_code(r)
            True

        Information-set decoding a non-binary code::

            sage: C = codes.GolayCode(GF(3)); C
            [12, 6, 6] Extended Golay code over GF(3)
            sage: c = C.random_element()
            sage: Chan = channels.StaticErrorRateChannel(C.ambient_space(), 2)
            sage: r = Chan(c)
            sage: D = C.decoder('InformationSet', 2)
            sage: c == D.decode_to_code(r)
            True

        Let's take a bigger example, for which syndrome decoding or
        nearest-neighbor decoding would be infeasible: the `[59, 30]` Quadratic
        Residue code over `\GF{3}` has true minimum distance 17, so we can
        correct 8 errors::

            sage: C = codes.QuadraticResidueCode(59, GF(3))
            sage: c = C.random_element()
            sage: Chan = channels.StaticErrorRateChannel(C.ambient_space(), 2)
            sage: r = Chan(c)
            sage: D = C.decoder('InformationSet', 8)
            sage: c == D.decode_to_code(r)  # long time
            True
        """
        C = self.code()
        # Short-circuit: a received word that is already a codeword needs no
        # (potentially expensive, probabilistic) decoding.
        if r in C:
            return r
        return self.algorithm().decode(r)

    def decoding_radius(self):
        r"""
        Return the maximal number of errors this decoder can decode.

        EXAMPLES::

            sage: C = codes.GolayCode(GF(2))
            sage: D = C.decoder("InformationSet", 2)
            sage: D.decoding_radius()
            2
        """
        return self._number_errors[1]

    def decoding_interval(self):
        r"""
        A pair of integers specifying the interval of number of errors this
        decoder will attempt to correct.

        The interval includes both end values.

        EXAMPLES::

            sage: C = codes.GolayCode(GF(2))
            sage: D = C.decoder("InformationSet", 2)
            sage: D.decoding_interval()
            (0, 2)
        """
        return self._number_errors

    def _repr_(self):
        r"""
        Returns a string representation of this decoding algorithm.

        EXAMPLES::

            sage: C = codes.GolayCode(GF(2))
            sage: D = C.decoder("InformationSet", 2)
            sage: D
            Information-set decoder (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 2 errors
        """
        # Fixed: the format string previously ended with "errors " (trailing
        # space), which contradicted the doctest output above and the style
        # used by _latex_.
        return "Information-set decoder ({}) for {} decoding {} errors".format(
            self.algorithm().name(), self.code(),
            _format_decoding_interval(self.decoding_interval()))

    def _latex_(self):
        r"""
        Returns a latex representation of this decoding algorithm.

        EXAMPLES::

            sage: C = codes.GolayCode(GF(2))
            sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
            sage: D = C.decoder("InformationSet", 2)
            sage: latex(D)
            \textnormal{Information-set decoder (Lee-Brickell) for }[24, 12, 8] \textnormal{ Extended Golay Code over } \Bold{F}_{2} \textnormal{decoding up to 2 errors}
        """
        return "\\textnormal{{Information-set decoder ({}) for }}{} \\textnormal{{decoding {} errors}}".format(self.algorithm().name(), self.code()._latex_(), _format_decoding_interval(self.decoding_interval()))
# Behaviour tags for this decoder -- presumably consumed by Sage's decoder
# framework to classify decoders; confirm against sage.coding.decoder.Decoder.
LinearCodeInformationSetDecoder._decoder_type = {"hard-decision",
    "probabilistic", "not-always-closest", "bounded-distance", "might-fail"}
| [
"sage.all.vector",
"sage.misc.prandom.randint",
"itertools.product",
"sage.matrix.special.random_matrix",
"sage.all.binomial",
"time.process_time",
"sage.modules.free_module_element.random_vector"
] | [((22867, 22881), 'time.process_time', 'process_time', ([], {}), '()\n', (22879, 22881), False, 'from time import process_time\n'), ((23222, 23241), 'sage.modules.free_module_element.random_vector', 'random_vector', (['F', 'n'], {}), '(F, n)\n', (23235, 23241), False, 'from sage.modules.free_module_element import random_vector\n'), ((23429, 23443), 'time.process_time', 'process_time', ([], {}), '()\n', (23441, 23443), False, 'from time import process_time\n'), ((19349, 19374), 'sage.all.vector', 'vector', (['[r[i] for i in I]'], {}), '([r[i] for i in I])\n', (19355, 19374), False, 'from sage.all import ZZ, Integer, vector, SageObject, binomial\n'), ((19553, 19588), 'itertools.product', 'itertools.product', (['Fstar'], {'repeat': 'pi'}), '(Fstar, repeat=pi)\n', (19570, 19588), False, 'import itertools\n'), ((23149, 23163), 'time.process_time', 'process_time', ([], {}), '()\n', (23161, 23163), False, 'from time import process_time\n'), ((23258, 23280), 'sage.matrix.special.random_matrix', 'random_matrix', (['F', 'p', 'n'], {}), '(F, p, n)\n', (23271, 23280), False, 'from sage.matrix.special import random_matrix\n'), ((23551, 23565), 'time.process_time', 'process_time', ([], {}), '()\n', (23563, 23565), False, 'from time import process_time\n'), ((23772, 23786), 'sage.all.binomial', 'binomial', (['n', 'k'], {}), '(n, k)\n', (23780, 23786), False, 'from sage.all import ZZ, Integer, vector, SageObject, binomial\n'), ((23321, 23338), 'sage.misc.prandom.randint', 'randint', (['(0)', '(q - 2)'], {}), '(0, q - 2)\n', (23328, 23338), False, 'from sage.misc.prandom import sample, randint\n'), ((23811, 23835), 'sage.all.binomial', 'binomial', (['(n - tau)', '(k - i)'], {}), '(n - tau, k - i)\n', (23819, 23835), False, 'from sage.all import ZZ, Integer, vector, SageObject, binomial\n'), ((23832, 23848), 'sage.all.binomial', 'binomial', (['tau', 'i'], {}), '(tau, i)\n', (23840, 23848), False, 'from sage.all import ZZ, Integer, vector, SageObject, binomial\n'), ((23946, 
23961), 'sage.all.binomial', 'binomial', (['k', 'pi'], {}), '(k, pi)\n', (23954, 23961), False, 'from sage.all import ZZ, Integer, vector, SageObject, binomial\n')] |
import math
import tensorflow as tf
from mayo.log import log
from mayo.util import (
Percent, memoize_method, memoize_property, object_from_params)
from mayo.session.base import SessionBase
class Train(SessionBase):
    """Training session.

    Builds the training graph (per-tower losses, tower-averaged gradients,
    optimizer and update ops) and drives the epoch loop with periodic
    checkpoint saving.
    """
    mode = 'train'

    def __init__(self, config):
        super().__init__(config)
        # When False, .once() skips the training ops and only reads counters.
        self._run_train_ops = True
        self._setup_train_operation()
        self._init()
        # Epoch at which the last periodic checkpoint was written
        # ('' means no checkpoint has been saved by this session yet).
        self._checkpoint_epoch = ''

    @memoize_property
    def learning_rate(self):
        """Learning-rate tensor built from the ``train.learning_rate`` config."""
        params = self.config.train.learning_rate
        lr_class, params = object_from_params(params)
        if lr_class is tf.train.piecewise_constant:
            # `tf.train.piecewise_constant` uses argument name 'x' instead
            # just to make life more difficult
            step_name = 'x'
        else:
            step_name = 'global_step'
        # The schedule is driven by the (fractional) epoch count.
        params[step_name] = self.num_epochs
        log.debug(
            'Using learning rate {!r} with params {}.'
            .format(lr_class.__name__, params))
        return lr_class(**params)

    @memoize_property
    def optimizer(self):
        """Optimizer instance built from the ``train.optimizer`` config."""
        params = self.config.train.optimizer
        optimizer_class, params = object_from_params(params)
        log.debug('Using optimizer {!r}.'.format(optimizer_class.__name__))
        return optimizer_class(self.learning_rate, **params)

    @staticmethod
    def _average_gradients(tower_grads):
        """Average per-variable gradients across GPU towers.

        `tower_grads` is an iterable of per-tower (gradient, variable) pair
        lists; returns one such list with gradients averaged element-wise.
        """
        tower_grads = list(tower_grads)
        # Single tower: nothing to average.
        if len(tower_grads) == 1:
            return tower_grads[0]
        average_grads = []
        for grad_and_vars in zip(*tower_grads):
            grads = []
            for g, v in grad_and_vars:
                # add 0 dimension to the gradients to represent the tower
                if g is None:
                    raise ValueError(
                        'Gradient for variable {} is None, please check '
                        'connection.'.format(v))
                g = tf.expand_dims(g, 0)
                grads.append(g)
            # average over the 'tower' dimension.
            grad = tf.concat(axis=0, values=grads)
            grad = tf.reduce_mean(grad, 0)
            # simply return the first tower's pointer to the Variable
            v = grad_and_vars[0][1]
            grad_and_var = (grad, v)
            average_grads.append(grad_and_var)
        return average_grads

    @staticmethod
    def _loss_formatter(key, name):
        """Return an estimator formatter rendering '<name>: mean±std' for `key`."""
        def formatter(estimator):
            loss_mean, loss_std = estimator.get_mean_std(key)
            # A NaN loss means training has diverged; fail loudly.
            if math.isnan(loss_mean):
                raise ValueError('Model diverged with a nan-valued loss.')
            loss_std = '±{}'.format(Percent(loss_std / loss_mean))
            return '{}: {:10f}{:5}'.format(name, loss_mean, loss_std)
        return formatter

    @memoize_method
    def _losses_and_gradients(self):
        """Build per-tower losses and the tower-averaged gradients."""
        formatter = self._loss_formatter('regularization', 'regu')
        regularization = self.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES, first_gpu=True)
        if regularization:
            self.estimator.register(
                tf.add_n(regularization), 'regularization',
                formatter=formatter)
        def gradient(net, prediction, truth):
            # Total loss is the task loss plus any regularization terms.
            loss = [self.task.train(net, prediction, truth)] + regularization
            loss = tf.add_n(loss)
            return loss, self.optimizer.compute_gradients(loss)
        tower_losses, tower_grads = zip(*self.task.map(gradient))
        return tower_losses, self._average_gradients(tower_grads)

    def _setup_train_operation(self):
        """Assemble the dict of training ops run on every step."""
        ops = {}
        self._losses, gradients = self._losses_and_gradients()
        self._mean_loss = tf.reduce_mean(self._losses)
        ops['app_grad'] = self.optimizer.apply_gradients(gradients)
        # update ops gathered from the graph (e.g. moving statistics --
        # exact contents depend on the model; confirm)
        update_ops = list(self.get_collection(tf.GraphKeys.UPDATE_OPS))
        ops['update'] = tf.group(*update_ops, name='update')
        log.debug('Using update operations: {}'.format(update_ops))
        log.debug('Using training operations: {}'.format(ops))
        if self.extra_train_ops:
            ops['extra'] = self.extra_train_ops
        self._train_op = ops

    def _init(self):
        """Restore from checkpoint and register the mean-loss estimator."""
        self.load_checkpoint(self.config.system.checkpoint.load)
        formatter = self._loss_formatter('loss', 'loss')
        self.estimator.register(self._mean_loss, 'loss', formatter=formatter)

    def reset_num_epochs(self):
        """Reset the epoch counter and the related change trackers."""
        log.info('Reseting number of training epochs of the model...')
        self.run(self.imgs_seen.initializer)
        self.change.reset('checkpoint.epoch')
        self.change.reset('step')

    def once(self):
        """Run one training step; return the current (fractional) epoch."""
        train_op = self._train_op if self._run_train_ops else []
        tasks = [train_op, self.num_epochs]
        _, num_epochs = self.run(tasks, batch=True)
        return num_epochs

    def overriders_assign(self):
        """Assign overridden parameter values back to the parameters."""
        log.info('Assigning overridden values of parameters to parameters...')
        self._overriders_call('assign')

    def overriders_update(self):
        """Update overrider internal variables."""
        log.info('Updating overrider internal variables...')
        self._overriders_call('update')

    def overriders_reset(self):
        """Reset overrider internal variables."""
        log.info('Resetting overriders internal variables...')
        self._overriders_call('reset')

    def _iteration(self, max_epochs=None):
        """Run one step and handle checkpointing.

        Returns True to continue training, False once `max_epochs` (or the
        configured system.max_epochs) has been reached.
        """
        system = self.config.system
        epoch = self.once()
        floor_epoch = math.floor(epoch)
        cp_interval = system.checkpoint.get('save.interval', 0)
        # Save a checkpoint whenever we cross into a new save interval.
        if self.change.every('checkpoint.epoch', floor_epoch, cp_interval):
            log.info(
                'Saving checkpoint at epoch {}...'.format(epoch), update=True)
            with log.demote():
                self.save_checkpoint(floor_epoch)
            self._checkpoint_epoch = floor_epoch
        max_epochs = max_epochs or system.max_epochs
        if max_epochs and epoch >= max_epochs:
            log.info(
                'Maximum epoch count {} reached.'.format(max_epochs))
            # Write a final checkpoint if we trained past the last saved one.
            if self._checkpoint_epoch and floor_epoch > self._checkpoint_epoch:
                log.info('Saving final checkpoint...')
                self.save_checkpoint(floor_epoch)
            return False
        return True

    def train(self, max_epochs=None):
        """Run the training loop until `max_epochs` or a keyboard interrupt."""
        # final debug outputs
        lr = self.run(self.learning_rate)
        log.info('Training start with a learning rate {}.'.format(lr))
        try:
            # train iterations
            while self._iteration(max_epochs=max_epochs):
                pass
        except KeyboardInterrupt:
            log.info('Stopped.')
        save = self.config.system.checkpoint.get('save', {})
        if save:
            countdown = save.get('countdown', 0)
            # Give the user a chance to cancel the final save after Ctrl-C.
            if log.countdown('Saving checkpoint', countdown):
                self.save_checkpoint('latest')
| [
"tensorflow.expand_dims",
"math.floor",
"mayo.log.log.countdown",
"mayo.util.Percent",
"tensorflow.group",
"mayo.util.object_from_params",
"mayo.log.log.info",
"tensorflow.concat",
"tensorflow.add_n",
"mayo.log.log.demote",
"tensorflow.reduce_mean",
"math.isnan"
] | [((567, 593), 'mayo.util.object_from_params', 'object_from_params', (['params'], {}), '(params)\n', (585, 593), False, 'from mayo.util import Percent, memoize_method, memoize_property, object_from_params\n'), ((1175, 1201), 'mayo.util.object_from_params', 'object_from_params', (['params'], {}), '(params)\n', (1193, 1201), False, 'from mayo.util import Percent, memoize_method, memoize_property, object_from_params\n'), ((3668, 3696), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self._losses'], {}), '(self._losses)\n', (3682, 3696), True, 'import tensorflow as tf\n'), ((3882, 3918), 'tensorflow.group', 'tf.group', (['*update_ops'], {'name': '"""update"""'}), "(*update_ops, name='update')\n", (3890, 3918), True, 'import tensorflow as tf\n'), ((4423, 4485), 'mayo.log.log.info', 'log.info', (['"""Reseting number of training epochs of the model..."""'], {}), "('Reseting number of training epochs of the model...')\n", (4431, 4485), False, 'from mayo.log import log\n'), ((4861, 4931), 'mayo.log.log.info', 'log.info', (['"""Assigning overridden values of parameters to parameters..."""'], {}), "('Assigning overridden values of parameters to parameters...')\n", (4869, 4931), False, 'from mayo.log import log\n'), ((5014, 5066), 'mayo.log.log.info', 'log.info', (['"""Updating overrider internal variables..."""'], {}), "('Updating overrider internal variables...')\n", (5022, 5066), False, 'from mayo.log import log\n'), ((5148, 5202), 'mayo.log.log.info', 'log.info', (['"""Resetting overriders internal variables..."""'], {}), "('Resetting overriders internal variables...')\n", (5156, 5202), False, 'from mayo.log import log\n'), ((5372, 5389), 'math.floor', 'math.floor', (['epoch'], {}), '(epoch)\n', (5382, 5389), False, 'import math\n'), ((2051, 2082), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(0)', 'values': 'grads'}), '(axis=0, values=grads)\n', (2060, 2082), True, 'import tensorflow as tf\n'), ((2102, 2125), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['grad', 
'(0)'], {}), '(grad, 0)\n', (2116, 2125), True, 'import tensorflow as tf\n'), ((2511, 2532), 'math.isnan', 'math.isnan', (['loss_mean'], {}), '(loss_mean)\n', (2521, 2532), False, 'import math\n'), ((3311, 3325), 'tensorflow.add_n', 'tf.add_n', (['loss'], {}), '(loss)\n', (3319, 3325), True, 'import tensorflow as tf\n'), ((1929, 1949), 'tensorflow.expand_dims', 'tf.expand_dims', (['g', '(0)'], {}), '(g, 0)\n', (1943, 1949), True, 'import tensorflow as tf\n'), ((2646, 2675), 'mayo.util.Percent', 'Percent', (['(loss_std / loss_mean)'], {}), '(loss_std / loss_mean)\n', (2653, 2675), False, 'from mayo.util import Percent, memoize_method, memoize_property, object_from_params\n'), ((3086, 3110), 'tensorflow.add_n', 'tf.add_n', (['regularization'], {}), '(regularization)\n', (3094, 3110), True, 'import tensorflow as tf\n'), ((5648, 5660), 'mayo.log.log.demote', 'log.demote', ([], {}), '()\n', (5658, 5660), False, 'from mayo.log import log\n'), ((6049, 6087), 'mayo.log.log.info', 'log.info', (['"""Saving final checkpoint..."""'], {}), "('Saving final checkpoint...')\n", (6057, 6087), False, 'from mayo.log import log\n'), ((6534, 6554), 'mayo.log.log.info', 'log.info', (['"""Stopped."""'], {}), "('Stopped.')\n", (6542, 6554), False, 'from mayo.log import log\n'), ((6713, 6758), 'mayo.log.log.countdown', 'log.countdown', (['"""Saving checkpoint"""', 'countdown'], {}), "('Saving checkpoint', countdown)\n", (6726, 6758), False, 'from mayo.log import log\n')] |
def is_lazy_user(user):
    """Return True if the passed user is a lazy user.

    A user counts as lazy when the lazy signup backend authenticated them,
    or (fallback) when a ``LazyUser`` row exists for them in the database.
    """
    # Anonymous users are not lazy.
    if user.is_anonymous:
        return False
    # Check the user backend. If the lazy signup backend
    # authenticated them, then the user is lazy.
    backend = getattr(user, 'backend', None)
    if backend == 'lazysignup.backends.LazySignupBackend':
        return True
    # Otherwise, we have to fall back to checking the database.
    # Imported lazily -- presumably to avoid an import cycle at module
    # load time; confirm.
    from lazysignup.models import LazyUser
    # exists() already returns a bool and lets the database stop at the
    # first match, instead of counting every row as count() > 0 did.
    return LazyUser.objects.filter(user=user).exists()
| [
"lazysignup.models.LazyUser.objects.filter"
] | [((522, 556), 'lazysignup.models.LazyUser.objects.filter', 'LazyUser.objects.filter', ([], {'user': 'user'}), '(user=user)\n', (545, 556), False, 'from lazysignup.models import LazyUser\n')] |
# run.py
"""
Script for running a specific pipeline from a given yaml config file
"""
import os
import argparse
import yaml
from importlib import import_module
import numpy as np
import time
import pandas as pd
def import_from_path(path_to_module, obj_name = None):
    """
    Import an object from a module based on the filepath of
    the module and the string name of the object.

    If obj_name is None, return the module instead.

    Parameters
    ----------
    path_to_module : str
        Filesystem path of the module, e.g. "pkg/module.py".
    obj_name : str or None
        Name of the attribute to fetch from the module; None returns
        the module itself.
    """
    # BUG FIX: the previous `.strip(".py")` removed any leading/trailing
    # '.', 'p' or 'y' CHARACTERS (e.g. "copy.py" -> "co", "yaml.py" ->
    # "aml"), not the ".py" suffix. Remove the suffix explicitly instead.
    if path_to_module.endswith(".py"):
        path_to_module = path_to_module[:-3]
    module_name = path_to_module.replace("/", ".")
    module = import_module(module_name)
    if obj_name is None:
        return module
    return getattr(module, obj_name)
if __name__ == "__main__":
    # Command line: -c/--config is the pipeline YAML, -o/--output (optional)
    # enables writing a submission CSV under the 'submissions' directory.
    parser = argparse.ArgumentParser(description = __doc__)
    parser.add_argument("-c", "--config", help = "File path to the config file")
    parser.add_argument("-o", "--output", help = "Path to the output file")
    args = parser.parse_args()
    with open(args.config) as config_file:
        config = yaml.safe_load(config_file)
    if args.output != None:
        output = True
        out_csv = args.output
        dfs = []
    else:
        output = False
    # Importing pipeline elements
    # Each element is located dynamically from "filepath"/"class" entries in
    # the config; the splitter is instantiated immediately with its parameters.
    ds_splitter = import_from_path(config["split"]["filepath"],
                         config["split"]["class"]) (**config["split"]["parameters"])
    preprocess = import_from_path(config["preprocess"]["filepath"])
    model_params = config["model"]["parameters"]
    if "kernel" in model_params:
        # Replace the kernel config entry with a closure binding its
        # hyper-parameters, so the model sees a plain callable kernel(X, Y).
        kernel_func = import_from_path(model_params["kernel"]["filepath"],
                              model_params["kernel"]["class"])
        kernel_params = model_params["kernel"]["parameters"]
        model_params["kernel"] = lambda X, Y: kernel_func(X,Y,**kernel_params)
    model = import_from_path(config["model"]["filepath"],
                 config["model"]["class"])(**config["model"]["parameters"])
    evaluation = import_from_path(config["evaluation"]["filepath"])
    # Evaluation output directory
    out_dir = 'submissions'
    if output and not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    # Lists filling information for the output dataframe
    datasets = []
    metrics = []
    values = []
    # Applying pipeline
    # Iterate over datasets
    for i, dataset in enumerate(config["datasets"]):
        time_beg = time.time()
        print("Working on dataset ", i)
        # Read dataset
        X = pd.read_csv(dataset["X"]["filepath"],
                    **dataset["X"]["parameters"])
        ## It is currently very important to drop Id before splitting or preprocessing
        y = pd.read_csv(dataset["y"]["filepath"],
                    **dataset["y"]["parameters"]).drop("Id", axis = 1)
        if output:
            test = pd.read_csv(dataset["test"]["filepath"],
                        **dataset["test"]["parameters"])
        # Split dataset
        ds_splitter.generate_idx(y)
        X_train, X_test = ds_splitter.split(X)
        y_train, y_test = ds_splitter.split(y)
        # Preprocess dataset
        # Transforms are looked up by name on the preprocess module and
        # applied identically to train and test splits.
        for transform in config["preprocess"]["X"]:
            X_train = getattr(preprocess, transform["transform"])(X_train, **transform["parameters"])
            X_test = getattr(preprocess, transform["transform"])(X_test, **transform["parameters"])
        for transform in config["preprocess"]["y"]:
            y_train = getattr(preprocess, transform["transform"])(y_train, **transform["parameters"])
            y_test = getattr(preprocess, transform["transform"])(y_test, **transform["parameters"])
        if output:
            for transform in config["preprocess"]["X"]:
                test = getattr(preprocess, transform["transform"])(test, **transform["parameters"])
        # Fit model
        model.fit(X_train, y_train)
        y_pred = model.predict(X_test)
        if output:
            y_pred_test = model.predict(test)
            # Map predictions from {-1, +1} to {0, 1} for the submission
            # format — presumably labels are +/-1; verify against the model.
            y_pred_test = (y_pred_test + 1)/2
            # Each dataset occupies a contiguous block of 1000 Ids.
            id = np.arange(1000*i, 1000*(i + 1))
            dic = {'Id': id, 'Bound': y_pred_test}
            df = pd.DataFrame(data = dic)
            dfs.append(df)
        # Evaluate model
        for metric in config["evaluation"]["metrics"]:
            datasets.append(dataset["name"])
            metrics.append(metric)
            values.append(getattr(evaluation, metric)(y_pred, y_test))
        print("Done ! In {} s".format(time.time() - time_beg))
    if output:
        df = pd.concat(dfs).astype('int32')
        df.to_csv(os.path.join(out_dir, out_csv), index = False)
    results = {"datasets": datasets, "metrics": metrics, "values": values}
print(pd.DataFrame.from_dict(results)) | [
"importlib.import_module",
"os.makedirs",
"argparse.ArgumentParser",
"pandas.read_csv",
"os.path.join",
"pandas.DataFrame.from_dict",
"yaml.safe_load",
"os.path.isdir",
"pandas.concat",
"pandas.DataFrame",
"time.time",
"numpy.arange"
] | [((525, 551), 'importlib.import_module', 'import_module', (['module_name'], {}), '(module_name)\n', (538, 551), False, 'from importlib import import_module\n'), ((691, 735), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (714, 735), False, 'import argparse\n'), ((987, 1014), 'yaml.safe_load', 'yaml.safe_load', (['config_file'], {}), '(config_file)\n', (1001, 1014), False, 'import yaml\n'), ((2127, 2147), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (2138, 2147), False, 'import os\n'), ((2382, 2393), 'time.time', 'time.time', ([], {}), '()\n', (2391, 2393), False, 'import time\n'), ((2469, 2536), 'pandas.read_csv', 'pd.read_csv', (["dataset['X']['filepath']"], {}), "(dataset['X']['filepath'], **dataset['X']['parameters'])\n", (2480, 2536), True, 'import pandas as pd\n'), ((4725, 4756), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['results'], {}), '(results)\n', (4747, 4756), True, 'import pandas as pd\n'), ((2095, 2117), 'os.path.isdir', 'os.path.isdir', (['out_dir'], {}), '(out_dir)\n', (2108, 2117), False, 'import os\n'), ((2820, 2893), 'pandas.read_csv', 'pd.read_csv', (["dataset['test']['filepath']"], {}), "(dataset['test']['filepath'], **dataset['test']['parameters'])\n", (2831, 2893), True, 'import pandas as pd\n'), ((4033, 4068), 'numpy.arange', 'np.arange', (['(1000 * i)', '(1000 * (i + 1))'], {}), '(1000 * i, 1000 * (i + 1))\n', (4042, 4068), True, 'import numpy as np\n'), ((4133, 4155), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'dic'}), '(data=dic)\n', (4145, 4155), True, 'import pandas as pd\n'), ((4584, 4614), 'os.path.join', 'os.path.join', (['out_dir', 'out_csv'], {}), '(out_dir, out_csv)\n', (4596, 4614), False, 'import os\n'), ((2660, 2727), 'pandas.read_csv', 'pd.read_csv', (["dataset['y']['filepath']"], {}), "(dataset['y']['filepath'], **dataset['y']['parameters'])\n", (2671, 2727), True, 'import pandas as pd\n'), ((4535, 4549), 
'pandas.concat', 'pd.concat', (['dfs'], {}), '(dfs)\n', (4544, 4549), True, 'import pandas as pd\n'), ((4481, 4492), 'time.time', 'time.time', ([], {}), '()\n', (4490, 4492), False, 'import time\n')] |
import pygame
from pygame.sprite import Group
from button import Button
from game_stats import GameStats
from settings import Settings
from ship import Ship
from scoreboard import Scoreboard
import game_funcitons as gf
def run_game():
    """Initialise pygame, create all game objects and enter the main loop."""
    pygame.init()
    pygame.mixer.init()
    settings = Settings()
    surface = pygame.display.set_mode(
        (settings.screen_width, settings.screen_height))
    pygame.display.set_caption("Alien Invasion")
    play_btn = Button(settings, surface, 'PLAY')
    stats = GameStats(settings)
    board = Scoreboard(settings, surface, stats)
    # Two copies of the background image scroll vertically in tandem;
    # one sits a full image height (1024 px) above the other.
    background = pygame.image.load("images/map.jpg").convert()
    background_copy = background.copy()
    y_first = -1024
    y_second = 0
    player = Ship(settings, surface)
    aliens = Group()
    bullets = Group()
    enemy_bullets = Group()
    gf.create_fleet(settings, surface, aliens, enemy_bullets)
    # Background music.
    gf.play_music('bgm')
    clock = pygame.time.Clock()
    while True:
        # Keyboard / mouse events.
        gf.check_events(settings, surface, stats, board, play_btn, player, aliens, bullets, enemy_bullets)
        gf.update_bullets(settings, surface, stats, board, aliens, bullets, enemy_bullets)
        elapsed = clock.tick()
        if stats.game_active:
            stats.increase_time(elapsed)
            # Ship / bullet updates.
            player.update()
            # Alien positions.
            gf.update_aliens(settings, stats, board, surface, player, aliens, bullets, enemy_bullets, elapsed)
        # Scroll the background: blit both copies, advance them, and wrap
        # each one back once it has scrolled a full image height.
        surface.blit(background, (0, y_first))
        surface.blit(background_copy, (0, y_second))
        y_first += settings.bg_roll_speed_factor
        y_second += settings.bg_roll_speed_factor
        if y_first > 0:
            y_first = -1024
        if y_second > 1024:
            y_second = 0
        gf.update_screen(settings, surface, stats, board, player, aliens, bullets, enemy_bullets, play_btn, elapsed)
run_game() | [
"ship.Ship",
"game_funcitons.create_fleet",
"pygame.init",
"game_stats.GameStats",
"game_funcitons.update_aliens",
"game_funcitons.play_music",
"pygame.display.set_mode",
"pygame.sprite.Group",
"button.Button",
"pygame.time.Clock",
"scoreboard.Scoreboard",
"game_funcitons.check_events",
"gam... | [((241, 254), 'pygame.init', 'pygame.init', ([], {}), '()\n', (252, 254), False, 'import pygame\n'), ((259, 278), 'pygame.mixer.init', 'pygame.mixer.init', ([], {}), '()\n', (276, 278), False, 'import pygame\n'), ((297, 307), 'settings.Settings', 'Settings', ([], {}), '()\n', (305, 307), False, 'from settings import Settings\n'), ((321, 399), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(ai_settings.screen_width, ai_settings.screen_height)'], {}), '((ai_settings.screen_width, ai_settings.screen_height))\n', (344, 399), False, 'import pygame\n'), ((414, 458), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Alien Invasion"""'], {}), "('Alien Invasion')\n", (440, 458), False, 'import pygame\n'), ((477, 512), 'button.Button', 'Button', (['ai_settings', 'screen', '"""PLAY"""'], {}), "(ai_settings, screen, 'PLAY')\n", (483, 512), False, 'from button import Button\n'), ((531, 553), 'game_stats.GameStats', 'GameStats', (['ai_settings'], {}), '(ai_settings)\n', (540, 553), False, 'from game_stats import GameStats\n'), ((571, 609), 'scoreboard.Scoreboard', 'Scoreboard', (['ai_settings', 'screen', 'stats'], {}), '(ai_settings, screen, stats)\n', (581, 609), False, 'from scoreboard import Scoreboard\n'), ((750, 775), 'ship.Ship', 'Ship', (['ai_settings', 'screen'], {}), '(ai_settings, screen)\n', (754, 775), False, 'from ship import Ship\n'), ((790, 797), 'pygame.sprite.Group', 'Group', ([], {}), '()\n', (795, 797), False, 'from pygame.sprite import Group\n'), ((812, 819), 'pygame.sprite.Group', 'Group', ([], {}), '()\n', (817, 819), False, 'from pygame.sprite import Group\n'), ((840, 847), 'pygame.sprite.Group', 'Group', ([], {}), '()\n', (845, 847), False, 'from pygame.sprite import Group\n'), ((853, 912), 'game_funcitons.create_fleet', 'gf.create_fleet', (['ai_settings', 'screen', 'aliens', 'alien_bullets'], {}), '(ai_settings, screen, aliens, alien_bullets)\n', (868, 912), True, 'import game_funcitons as gf\n'), ((928, 948), 
'game_funcitons.play_music', 'gf.play_music', (['"""bgm"""'], {}), "('bgm')\n", (941, 948), True, 'import game_funcitons as gf\n'), ((962, 981), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (979, 981), False, 'import pygame\n'), ((1022, 1132), 'game_funcitons.check_events', 'gf.check_events', (['ai_settings', 'screen', 'stats', 'scoreboard', 'play_button', 'ship', 'aliens', 'bullets', 'alien_bullets'], {}), '(ai_settings, screen, stats, scoreboard, play_button, ship,\n aliens, bullets, alien_bullets)\n', (1037, 1132), True, 'import game_funcitons as gf\n'), ((1137, 1230), 'game_funcitons.update_bullets', 'gf.update_bullets', (['ai_settings', 'screen', 'stats', 'scoreboard', 'aliens', 'bullets', 'alien_bullets'], {}), '(ai_settings, screen, stats, scoreboard, aliens, bullets,\n alien_bullets)\n', (1154, 1230), True, 'import game_funcitons as gf\n'), ((1838, 1962), 'game_funcitons.update_screen', 'gf.update_screen', (['ai_settings', 'screen', 'stats', 'scoreboard', 'ship', 'aliens', 'bullets', 'alien_bullets', 'play_button', 'time_passed'], {}), '(ai_settings, screen, stats, scoreboard, ship, aliens,\n bullets, alien_bullets, play_button, time_passed)\n', (1854, 1962), True, 'import game_funcitons as gf\n'), ((624, 659), 'pygame.image.load', 'pygame.image.load', (['"""images/map.jpg"""'], {}), "('images/map.jpg')\n", (641, 659), False, 'import pygame\n'), ((1418, 1529), 'game_funcitons.update_aliens', 'gf.update_aliens', (['ai_settings', 'stats', 'scoreboard', 'screen', 'ship', 'aliens', 'bullets', 'alien_bullets', 'time_passed'], {}), '(ai_settings, stats, scoreboard, screen, ship, aliens,\n bullets, alien_bullets, time_passed)\n', (1434, 1529), True, 'import game_funcitons as gf\n')] |
from enum import Enum, auto
from string import Template
class MessageRegister:
    """Maps arbitrary keys to handler callables via a decorator API."""

    def __init__(self, dispatch_table=None):
        # Default to a fresh dict so instances never share a table.
        if dispatch_table is None:
            dispatch_table = {}
        self.dispatch = dispatch_table

    def register(self, ref):
        """Return a decorator that files the wrapped callable under *ref*."""
        def decorator(func):
            self.dispatch[ref] = func
            return func
        return decorator

    def get(self, ref):
        """Look up *ref*, returning None when nothing is registered."""
        return self.dispatch.get(ref)

    def __getitem__(self, ref):
        return self.dispatch[ref]
messages = MessageRegister()
class MessageBank(Enum):
    """Identifiers for the canned reply messages handled below."""
    code_block_needed = auto()
    inline_code_misuse = auto()
class MessageStorage:
    """Raw reddit-markdown bodies for the bot's canned replies.

    NOTE(review): these are runtime strings sent verbatim — do not
    reformat them. "code bock" in inline_code is a typo in the live
    message; fixing it is a behavior change, not a cleanup.
    """
    # Reply for posts whose code is not wrapped in a code block.
    code_block = '''Looks like your Batch file code isn’t wrapped in a code block.
To format code correctly on **new.reddit.com**, highlight the code and select *‘Code Block’* in the editing toolbar.
If you’re on **old.reddit.com**, separate the code from your text with a blank line and precede each line of code with **4 spaces** or a **tab**.
---
^(*Beep-boop. I am a bot.*)
'''
    # Same reply, with a worked before/after formatting example.
    code_block_with_example = '''Looks like your Batch file code isn’t wrapped in a code block.
To format code correctly on **new.reddit.com**, highlight the code and select *‘Code Block’* in the editing toolbar.
If you’re on **old.reddit.com**, separate the code from your text with a blank line and precede each line of code with **4 spaces** or a **tab**. E.g.,
    This is normal text.
    @echo off
    echo This is code!
> This is normal text.
>
> @echo off
> echo This is code!
---
^(*Beep-boop. I am a bot.*)
'''
    # Reply for inline-code styling misused on a block of code.
    inline_code = '''Looks like you used *inline code* formatting where a **code block** should have been used.
The inline code text styling is for use in paragraphs of text. For larger sequences of code, consider using a code bock. This can be done by selecting your code then clicking the *‘Code Block’* button.
---
^(*Beep-boop. I am a bot.*)
'''
class MessageData:
    """Handlers that resolve each MessageBank entry to its reply text."""

    @messages.register(MessageBank.code_block_needed)
    def code_block_needed(example=False, **kws):
        # Prefer the worked-example variant when the caller asks for it.
        if example:
            return MessageStorage.code_block_with_example
        return MessageStorage.code_block

    @messages.register(MessageBank.inline_code_misuse)
    def inline_code_misuse(**kws):
        return MessageStorage.inline_code
return MessageStorage.inline_code
| [
"enum.auto"
] | [((496, 502), 'enum.auto', 'auto', ([], {}), '()\n', (500, 502), False, 'from enum import Enum, auto\n'), ((525, 531), 'enum.auto', 'auto', ([], {}), '()\n', (529, 531), False, 'from enum import Enum, auto\n')] |
import os
import shutil
import pychemia
import tempfile
import unittest
class MyTestCase(unittest.TestCase):
    """Integration tests for pychemia's VASP file I/O helpers.

    NOTE(review): these read fixtures under tests/data/, so they must be
    run from the repository root (hence the os.getcwd() print below).
    """
    def test_incar(self):
        """
        Test (pychemia.code.vasp) [INCAR parsing and writing] :
        """
        print(os.getcwd())
        iv = pychemia.code.vasp.read_incar('tests/data/vasp_01/INCAR')
        self.assertEqual(len(iv), 12)
        self.assertEqual(iv.EDIFF, 1E-7)
        # Round-trip: the parsed INCAR can be written back out.
        wf = tempfile.NamedTemporaryFile()
        iv.write(wf.name)
        wf.close()
        # Passing a directory should locate the INCAR inside it.
        iv4dir = pychemia.code.vasp.read_incar('tests/data/vasp_01')
        self.assertEqual(iv, iv4dir)
        self.assertRaises(ValueError, pychemia.code.vasp.read_incar, 'tests/data')
        iv3 = pychemia.code.vasp.VaspInput(variables={'EDIFF': 1E-6})
        self.assertEqual(iv3['EDIFF'], 1E-6)
        iv = pychemia.code.vasp.read_incar('tests/data/vasp_02')
        iv.EDIFF *= 1.3
        td = tempfile.mkdtemp()
        pychemia.code.vasp.write_incar(iv, td)
        # Misspelled keys must be rejected.
        self.assertRaises(ValueError, iv.write_key, 'EDIF')
        shutil.rmtree(td)
    def test_bad_outcar(self):
        """
        Test (pychemia.code.vasp) [corrupted VASP OUTCAR] :
        """
        # Even a corrupted OUTCAR fixture should parse as finished.
        vo = pychemia.code.vasp.VaspOutput('tests/data/vasp_04/OUTCAR')
        self.assertTrue(vo.is_finished)
    def test_encut_setup(self):
        """
        Test (pychemia.code.vasp) [ENCUT setup] :
        """
        iv = pychemia.code.vasp.read_incar('tests/data/vasp_06')
        # ENCUT given as a factor (1.2) is scaled using the POTCAR's ENMAX.
        iv.set_encut(ENCUT=1.2, POTCAR='tests/data/vasp_06/POTCAR')
        self.assertEqual(iv.ENCUT, 307)
        iv.set_rough_relaxation()
        self.assertEqual(iv.EDIFFG, -1E-2)
        iv.set_mit_settings()
    def test_vaspjob(self):
        """
        Test (pychemia.code.vasp) [VaspJob] :
        """
        td = tempfile.mkdtemp()
        st = pychemia.code.vasp.read_poscar('tests/data/vasp_06')
        kp = pychemia.code.vasp.read_kpoints('tests/data/vasp_06')
        self.assertEqual(kp.number_of_kpoints, 693)
        iv = pychemia.code.vasp.read_incar('tests/data/vasp_06')
        vj = pychemia.code.vasp.VaspJob(workdir=td,)
        vj.initialize(st, kpoints=kp)
        vj.set_input_variables(iv)
        # Only checks the input files can be written without error.
        vj.write_poscar()
        vj.write_kpoints()
        vj.write_incar()
        shutil.rmtree(td)
    def test_outcar(self):
        """
        Test (pychemia.code.vasp) [outcar] :
        """
        vo = pychemia.code.vasp.VaspOutput('tests/data/vasp_06/OUTCAR')
        self.assertEqual(vo.get_memory_used()['grid'], (1028.0, 'kBytes'))
        self.assertAlmostEqual(vo.to_dict['energy'], -19.67192646)
        print(vo)
        self.assertTrue(vo.has_forces_stress_energy())
    def test_poscar(self):
        """
        Test (pychemia.code.vasp) [poscar] :
        """
        # Temporal directory for outputs
        tmpdir = tempfile.mkdtemp()
        # Read a POSCAR by directory
        st = pychemia.code.vasp.read_poscar('tests/data/vasp_06')
        self.assertEqual(st.natom, 4)
        # Opening old format POSCAR without POTCAR
        with self.assertRaises(ValueError) as context:
            st = pychemia.code.vasp.read_poscar('tests/data/vasp_07/POSCAR')
        st = pychemia.code.vasp.read_poscar('tests/data/vasp_08/POSCAR_old')
        self.assertEqual(st.natom, 2)
        st = pychemia.code.vasp.read_poscar('tests/data/vasp_08/POSCAR_new')
        self.assertEqual(st.natom, 2)
        # POTCAR writing must fail for bad pseudopotential base paths.
        with self.assertRaises(ValueError) as context:
            pychemia.code.vasp.write_potcar(st, filepath=tmpdir + os.sep + 'POTCAR', basepsp='/no/existing/path')
        with self.assertRaises(ValueError) as context:
            pychemia.code.vasp.write_potcar(st, filepath=tmpdir + os.sep + 'POTCAR', basepsp='tests/data')
        # Reading a relative POSCAR path works from inside the directory.
        cwd = os.getcwd()
        os.chdir('tests/data/vasp_07')
        st = pychemia.code.vasp.read_poscar('POSCAR_new')
        os.chdir(cwd)
        self.assertEqual(st.natom, 44)
        st = pychemia.code.vasp.read_poscar('tests/data/vasp_07/POSCAR_alt')
        # Round-trip in all three output flavours: direct (default),
        # cartesian (direct=False) and the legacy format (newformat=False);
        # all must preserve volume and symmetry.
        pychemia.code.vasp.write_poscar(st, tmpdir + os.sep + 'POSCAR1')
        pychemia.code.vasp.write_poscar(st, tmpdir + os.sep + 'POSCAR2', direct=False)
        pychemia.code.vasp.write_poscar(st, tmpdir + os.sep + 'POSCAR3', newformat=False)
        st = pychemia.code.vasp.read_poscar(tmpdir + os.sep + 'POSCAR1')
        self.assertAlmostEqual(st.volume, 584.47161926043907)
        sym = pychemia.crystal.CrystalSymmetry(st)
        self.assertEqual(sym.symbol(), 'C2/c')
        st = pychemia.code.vasp.read_poscar(tmpdir + os.sep + 'POSCAR2')
        self.assertAlmostEqual(st.volume, 584.47161926043907)
        sym = pychemia.crystal.CrystalSymmetry(st)
        self.assertEqual(sym.symbol(), 'C2/c')
        st = pychemia.code.vasp.read_poscar(tmpdir + os.sep + 'POSCAR3')
        self.assertAlmostEqual(st.volume, 584.47161926043907)
        sym = pychemia.crystal.CrystalSymmetry(st)
        self.assertEqual(sym.symbol(), 'C2/c')
        pychemia.code.vasp.write_potcar(st, filepath=tmpdir + os.sep + 'POTCAR', basepsp='tests/data')
        pychemia.code.vasp.get_potcar_info(tmpdir + os.sep + 'POTCAR')
        shutil.rmtree(tmpdir)
| [
"pychemia.code.vasp.write_potcar",
"pychemia.code.vasp.VaspJob",
"pychemia.code.vasp.read_kpoints",
"pychemia.code.vasp.read_poscar",
"pychemia.code.vasp.read_incar",
"pychemia.code.vasp.VaspInput",
"shutil.rmtree",
"os.getcwd",
"os.chdir",
"pychemia.crystal.CrystalSymmetry",
"tempfile.mkdtemp",... | [((272, 329), 'pychemia.code.vasp.read_incar', 'pychemia.code.vasp.read_incar', (['"""tests/data/vasp_01/INCAR"""'], {}), "('tests/data/vasp_01/INCAR')\n", (301, 329), False, 'import pychemia\n'), ((422, 451), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (449, 451), False, 'import tempfile\n'), ((514, 565), 'pychemia.code.vasp.read_incar', 'pychemia.code.vasp.read_incar', (['"""tests/data/vasp_01"""'], {}), "('tests/data/vasp_01')\n", (543, 565), False, 'import pychemia\n'), ((700, 756), 'pychemia.code.vasp.VaspInput', 'pychemia.code.vasp.VaspInput', ([], {'variables': "{'EDIFF': 1e-06}"}), "(variables={'EDIFF': 1e-06})\n", (728, 756), False, 'import pychemia\n'), ((814, 865), 'pychemia.code.vasp.read_incar', 'pychemia.code.vasp.read_incar', (['"""tests/data/vasp_02"""'], {}), "('tests/data/vasp_02')\n", (843, 865), False, 'import pychemia\n'), ((903, 921), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (919, 921), False, 'import tempfile\n'), ((930, 968), 'pychemia.code.vasp.write_incar', 'pychemia.code.vasp.write_incar', (['iv', 'td'], {}), '(iv, td)\n', (960, 968), False, 'import pychemia\n'), ((1037, 1054), 'shutil.rmtree', 'shutil.rmtree', (['td'], {}), '(td)\n', (1050, 1054), False, 'import shutil\n'), ((1194, 1252), 'pychemia.code.vasp.VaspOutput', 'pychemia.code.vasp.VaspOutput', (['"""tests/data/vasp_04/OUTCAR"""'], {}), "('tests/data/vasp_04/OUTCAR')\n", (1223, 1252), False, 'import pychemia\n'), ((1433, 1484), 'pychemia.code.vasp.read_incar', 'pychemia.code.vasp.read_incar', (['"""tests/data/vasp_06"""'], {}), "('tests/data/vasp_06')\n", (1462, 1484), False, 'import pychemia\n'), ((1836, 1854), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (1852, 1854), False, 'import tempfile\n'), ((1868, 1920), 'pychemia.code.vasp.read_poscar', 'pychemia.code.vasp.read_poscar', (['"""tests/data/vasp_06"""'], {}), "('tests/data/vasp_06')\n", (1898, 1920), False, 'import pychemia\n'), 
((1934, 1987), 'pychemia.code.vasp.read_kpoints', 'pychemia.code.vasp.read_kpoints', (['"""tests/data/vasp_06"""'], {}), "('tests/data/vasp_06')\n", (1965, 1987), False, 'import pychemia\n'), ((2053, 2104), 'pychemia.code.vasp.read_incar', 'pychemia.code.vasp.read_incar', (['"""tests/data/vasp_06"""'], {}), "('tests/data/vasp_06')\n", (2082, 2104), False, 'import pychemia\n'), ((2118, 2156), 'pychemia.code.vasp.VaspJob', 'pychemia.code.vasp.VaspJob', ([], {'workdir': 'td'}), '(workdir=td)\n', (2144, 2156), False, 'import pychemia\n'), ((2317, 2334), 'shutil.rmtree', 'shutil.rmtree', (['td'], {}), '(td)\n', (2330, 2334), False, 'import shutil\n'), ((2470, 2528), 'pychemia.code.vasp.VaspOutput', 'pychemia.code.vasp.VaspOutput', (['"""tests/data/vasp_06/OUTCAR"""'], {}), "('tests/data/vasp_06/OUTCAR')\n", (2499, 2528), False, 'import pychemia\n'), ((2924, 2942), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (2940, 2942), False, 'import tempfile\n'), ((2994, 3046), 'pychemia.code.vasp.read_poscar', 'pychemia.code.vasp.read_poscar', (['"""tests/data/vasp_06"""'], {}), "('tests/data/vasp_06')\n", (3024, 3046), False, 'import pychemia\n'), ((3283, 3346), 'pychemia.code.vasp.read_poscar', 'pychemia.code.vasp.read_poscar', (['"""tests/data/vasp_08/POSCAR_old"""'], {}), "('tests/data/vasp_08/POSCAR_old')\n", (3313, 3346), False, 'import pychemia\n'), ((3399, 3462), 'pychemia.code.vasp.read_poscar', 'pychemia.code.vasp.read_poscar', (['"""tests/data/vasp_08/POSCAR_new"""'], {}), "('tests/data/vasp_08/POSCAR_new')\n", (3429, 3462), False, 'import pychemia\n'), ((3849, 3860), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3858, 3860), False, 'import os\n'), ((3869, 3899), 'os.chdir', 'os.chdir', (['"""tests/data/vasp_07"""'], {}), "('tests/data/vasp_07')\n", (3877, 3899), False, 'import os\n'), ((3913, 3957), 'pychemia.code.vasp.read_poscar', 'pychemia.code.vasp.read_poscar', (['"""POSCAR_new"""'], {}), "('POSCAR_new')\n", (3943, 3957), False, 'import pychemia\n'), 
((3966, 3979), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (3974, 3979), False, 'import os\n'), ((4033, 4096), 'pychemia.code.vasp.read_poscar', 'pychemia.code.vasp.read_poscar', (['"""tests/data/vasp_07/POSCAR_alt"""'], {}), "('tests/data/vasp_07/POSCAR_alt')\n", (4063, 4096), False, 'import pychemia\n'), ((4106, 4170), 'pychemia.code.vasp.write_poscar', 'pychemia.code.vasp.write_poscar', (['st', "(tmpdir + os.sep + 'POSCAR1')"], {}), "(st, tmpdir + os.sep + 'POSCAR1')\n", (4137, 4170), False, 'import pychemia\n'), ((4179, 4257), 'pychemia.code.vasp.write_poscar', 'pychemia.code.vasp.write_poscar', (['st', "(tmpdir + os.sep + 'POSCAR2')"], {'direct': '(False)'}), "(st, tmpdir + os.sep + 'POSCAR2', direct=False)\n", (4210, 4257), False, 'import pychemia\n'), ((4266, 4352), 'pychemia.code.vasp.write_poscar', 'pychemia.code.vasp.write_poscar', (['st', "(tmpdir + os.sep + 'POSCAR3')"], {'newformat': '(False)'}), "(st, tmpdir + os.sep + 'POSCAR3', newformat=\n False)\n", (4297, 4352), False, 'import pychemia\n'), ((4362, 4421), 'pychemia.code.vasp.read_poscar', 'pychemia.code.vasp.read_poscar', (["(tmpdir + os.sep + 'POSCAR1')"], {}), "(tmpdir + os.sep + 'POSCAR1')\n", (4392, 4421), False, 'import pychemia\n'), ((4498, 4534), 'pychemia.crystal.CrystalSymmetry', 'pychemia.crystal.CrystalSymmetry', (['st'], {}), '(st)\n', (4530, 4534), False, 'import pychemia\n'), ((4595, 4654), 'pychemia.code.vasp.read_poscar', 'pychemia.code.vasp.read_poscar', (["(tmpdir + os.sep + 'POSCAR2')"], {}), "(tmpdir + os.sep + 'POSCAR2')\n", (4625, 4654), False, 'import pychemia\n'), ((4731, 4767), 'pychemia.crystal.CrystalSymmetry', 'pychemia.crystal.CrystalSymmetry', (['st'], {}), '(st)\n', (4763, 4767), False, 'import pychemia\n'), ((4828, 4887), 'pychemia.code.vasp.read_poscar', 'pychemia.code.vasp.read_poscar', (["(tmpdir + os.sep + 'POSCAR3')"], {}), "(tmpdir + os.sep + 'POSCAR3')\n", (4858, 4887), False, 'import pychemia\n'), ((4964, 5000), 'pychemia.crystal.CrystalSymmetry', 
'pychemia.crystal.CrystalSymmetry', (['st'], {}), '(st)\n', (4996, 5000), False, 'import pychemia\n'), ((5056, 5154), 'pychemia.code.vasp.write_potcar', 'pychemia.code.vasp.write_potcar', (['st'], {'filepath': "(tmpdir + os.sep + 'POTCAR')", 'basepsp': '"""tests/data"""'}), "(st, filepath=tmpdir + os.sep + 'POTCAR',\n basepsp='tests/data')\n", (5087, 5154), False, 'import pychemia\n'), ((5159, 5221), 'pychemia.code.vasp.get_potcar_info', 'pychemia.code.vasp.get_potcar_info', (["(tmpdir + os.sep + 'POTCAR')"], {}), "(tmpdir + os.sep + 'POTCAR')\n", (5193, 5221), False, 'import pychemia\n'), ((5231, 5252), 'shutil.rmtree', 'shutil.rmtree', (['tmpdir'], {}), '(tmpdir)\n', (5244, 5252), False, 'import shutil\n'), ((246, 257), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (255, 257), False, 'import os\n'), ((3209, 3268), 'pychemia.code.vasp.read_poscar', 'pychemia.code.vasp.read_poscar', (['"""tests/data/vasp_07/POSCAR"""'], {}), "('tests/data/vasp_07/POSCAR')\n", (3239, 3268), False, 'import pychemia\n'), ((3569, 3674), 'pychemia.code.vasp.write_potcar', 'pychemia.code.vasp.write_potcar', (['st'], {'filepath': "(tmpdir + os.sep + 'POTCAR')", 'basepsp': '"""/no/existing/path"""'}), "(st, filepath=tmpdir + os.sep + 'POTCAR',\n basepsp='/no/existing/path')\n", (3600, 3674), False, 'import pychemia\n'), ((3739, 3837), 'pychemia.code.vasp.write_potcar', 'pychemia.code.vasp.write_potcar', (['st'], {'filepath': "(tmpdir + os.sep + 'POTCAR')", 'basepsp': '"""tests/data"""'}), "(st, filepath=tmpdir + os.sep + 'POTCAR',\n basepsp='tests/data')\n", (3770, 3837), False, 'import pychemia\n')] |
import os
class Material:
    """A named material with a tint colour and the object types to generate.

    Attributes:
        name: lowercase material identifier used in generated file names.
        color: 24-bit RGB integer used to tint the grayscale base textures.
        outputs: tuple of object-type tags (e.g. "DUST", "ORE") to generate.
    """
    def __init__(self, name, color, outputs):
        self.name = name
        self.color = color
        self.outputs = outputs

    def __repr__(self):
        # Helpful when debugging the generation loops below.
        return 'Material(name={!r}, color={:#08x}, outputs={!r})'.format(
            self.name, self.color, self.outputs)
# Full material catalogue with per-material tint colours and outputs.
materials = [Material("dilithium", 0xddcecb, ("DUST", "GEM")),
             Material("iron", 0xafafaf, ("SHEET", "STICK", "DUST", "PLATE")),
             Material("gold", 0xffff5d, ("DUST", "COIL", "PLATE")),
             Material("silicon", 0x2c2c2b, ("INGOT", "DUST", "BOULE", "NUGGET", "PLATE")),
             Material("copper", 0xd55e28, ("ORE", "COIL", "BLOCK", "STICK", "INGOT", "NUGGET", "DUST", "PLATE", "SHEET")),
             Material("tin", 0xcdd5d8, ("ORE", "BLOCK", "PLATE", "INGOT", "NUGGET", "DUST")),
             Material("steel", 0x55555d, ("BLOCK", "FAN", "PLATE", "INGOT", "NUGGET", "DUST", "STICK", "GEAR", "SHEET")),
             Material("titanium", 0xb2669e, ("PLATE", "COIL", "INGOT", "NUGGET", "DUST", "STICK", "BLOCK", "GEAR", "SHEET")),
             Material("rutile", 0xbf936a, ("ORE",)),
             Material("aluminum", 0xb3e4dc, ("ORE", "COIL", "BLOCK", "INGOT", "PLATE", "SHEET", "DUST", "NUGGET", "SHEET")),
             Material("iridium", 0xdedcce, ("ORE", "COIL", "BLOCK", "DUST", "INGOT", "NUGGET", "PLATE", "STICK"))]
# NOTE(review): this rebinding replaces the full list above, so only the
# three materials below are actually processed — presumably an intentional
# toggle to regenerate just the newest additions; confirm before cleanup.
materials = [Material("dilithium", 0xddcecb, ("DUST", "GEM")),
             Material("titaniumaluminide", 0xaec2de, ("GEAR", "COIL", "BLOCK", "INGOT", "PLATE", "SHEET", "DUST", "NUGGET", "SHEET")),
             Material("titaniumiridium", 0xd7dfe4, ("GEAR", "COIL", "BLOCK", "INGOT", "PLATE", "SHEET", "DUST", "NUGGET", "SHEET"))]
blockTypes = ['COIL', 'BLOCK', 'ORE']
coilTypes = ['COIL']
noIconGenTypes = ['ORE']
itemSample = '{\n "parent": "item/generated",\n "textures": {\n "layer0": "libvulpes:items/@TYPE@@MATERIAL@"\n }\n}'
blockItemSample = '{\n "parent": "libvulpes:block/@TYPE@@MATERIAL@"\n}'
blockSample = '{\n "parent": "minecraft:block/cube_all",\n "textures": {\n "all": "libvulpes:blocks/@TYPE@@MATERIAL@"\n }\n}'
coilSample = '{\n "parent": "libvulpes:block/tintedcubecolumn",\n "textures": {\n "end": "libvulpes:blocks/@TYPE@@MATERIAL@top",\n "side": "libvulpes:blocks/@TYPE@@MATERIAL@side"\n }\n}'
blockStateSample = '{\n "variants": {\n "": { "model": "libvulpes:block/@TYPE@@MATERIAL@" }\n }\n}'
itemDir = 'src/main/resources/assets/libvulpes/models/item/'
blockDir = 'src/main/resources/assets/libvulpes/models/block/'
blockStateDir = 'src/main/resources/assets/libvulpes/blockstates/'
itemIconDir = 'src/main/resources/assets/libvulpes/textures/items/'
blockIconDir = 'src/main/resources/assets/libvulpes/textures/blocks/'
blockTagPath = "src/main/resources/data/forge/tags/blocks/"
itemTagPath = "src/main/resources/data/forge/tags/items/"
blockTagSample = '{\n "replace": false,\n "values": [@BLOCKLIST@]\n}'
def getMatrix(color):
    """Return an ImageMagick color-matrix string scaling RGB by *color*.

    Each diagonal entry is the corresponding channel of the 24-bit RGB
    *color*, normalised to [0, 1].
    """
    red, green, blue = (((color >> shift) & 0xff) / 0xff for shift in (16, 8, 0))
    return '{} 0 0 0 {} 0 0 0 {}'.format(red, green, blue)

def getCommand(inputFile, outputFile, color):
    """Build the `convert` command tinting *inputFile* into *outputFile*."""
    matrix = getMatrix(color)
    return 'convert {} -color-matrix "{}" {}'.format(inputFile, matrix, outputFile)
def genItem(mat, objType):
    """Write the item model JSON for *mat*/*objType* and tint its icon.

    Block types get an item model that just parents the block model;
    everything else gets a plain generated-item model plus a recoloured
    icon produced with ImageMagick.
    """
    if objType not in blockTypes:
        output = itemSample.replace('@MATERIAL@', mat.name).replace('@TYPE@', objType.lower())
    else:
        output = blockItemSample.replace('@MATERIAL@', mat.name).replace('@TYPE@', objType.lower())

    filename = itemDir + objType.lower() + mat.name + '.json'
    # with-statement closes the file deterministically; the original
    # leaked the handle (open without close).
    with open(filename, 'w') as f:
        f.write(output)

    # generate the icon now (block items reuse the block texture instead)
    if objType not in blockTypes:
        inputFile = itemIconDir + objType.lower() + '.png'
        outputFile = itemIconDir + objType.lower() + mat.name + '.png'
        cmd = getCommand(inputFile, outputFile, mat.color)
        os.system(cmd)
def genBlock(mat, objType):
    """Write block model + blockstate JSON for *mat*/*objType*, tint icons.

    Coils use the two-texture column template; other blocks use cube_all.
    Types in noIconGenTypes (e.g. ORE) skip icon generation entirely.
    """
    if objType in coilTypes:
        output = coilSample.replace('@MATERIAL@', mat.name).replace('@TYPE@', objType.lower())
    else:
        output = blockSample.replace('@MATERIAL@', mat.name).replace('@TYPE@', objType.lower())

    filename = blockDir + objType.lower() + mat.name + '.json'
    # with-statement closes the file deterministically; the original
    # leaked both handles in this function (open without close).
    with open(filename, 'w') as f:
        f.write(output)

    # generate the blockState pointing at the model written above
    output = blockStateSample.replace('@MATERIAL@', mat.name).replace('@TYPE@', objType.lower())
    filename = blockStateDir + objType.lower() + mat.name + '.json'
    with open(filename, 'w') as f:
        f.write(output)

    # Generate the icon now
    if objType not in noIconGenTypes:
        if objType in coilTypes:
            # Coils have separate top ("pole") and side textures.
            inputFile = blockIconDir + objType.lower() + 'pole.png'
            outputFile = blockIconDir + objType.lower() + mat.name + 'top.png'
            cmd = getCommand(inputFile, outputFile, mat.color)
            os.system(cmd)
            inputFile = blockIconDir + objType.lower() + 'side.png'
            outputFile = blockIconDir + objType.lower() + mat.name + 'side.png'
            cmd = getCommand(inputFile, outputFile, mat.color)
            os.system(cmd)
        else:
            inputFile = blockIconDir + objType.lower() + '.png'
            outputFile = blockIconDir + objType.lower() + mat.name + '.png'
            cmd = getCommand(inputFile, outputFile, mat.color)
            os.system(cmd)
def printEnLang(mat, objType, block):
    """Print the en_us lang-file entry for the given material/object pair."""
    # Capitalise only the first letter of each part for display.
    human_mat = mat.name[0].upper() + mat.name[1:]
    lowered = objType.lower()
    human_type = lowered[0].upper() + lowered[1:]
    prefix = 'block' if block else 'item'
    print('  "{}.libvulpes.{}{}": "{} {}",'.format(prefix, lowered, mat.name, human_mat, human_type))
def generateTag(tagPath, mat, objType):
    """Write a single-entry forge tag JSON for *mat*/*objType* under *tagPath*."""
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists() guard and is a no-op when the directory exists.
    os.makedirs(tagPath + objType.lower(), exist_ok=True)
    filename = tagPath + objType.lower() + '/' + mat.name + '.json'
    contents = blockTagSample.replace('@BLOCKLIST@', '    "libvulpes:' + objType.lower() + mat.name + '"')
    # with-statement guarantees the handle is closed even if write fails.
    with open(filename, 'w') as f:
        f.write(contents)
# Main generation pass: walk every (material, object type) pair, write the
# per-material forge tags, and collect materials per type for combined tags.
objTypeToMaterialMap = {}
for mat in materials:
    for objType in mat.outputs:
        if objType not in objTypeToMaterialMap:
            objTypeToMaterialMap[objType] = []
        # NOTE(review): the append below is commented out, so the combined
        # tag files written in the second loop get empty value lists —
        # presumably a deliberate toggle while only per-material tags are
        # being regenerated; confirm before re-enabling.
        #objTypeToMaterialMap[objType].append(mat)
        #genItem(mat, objType)
        if objType in blockTypes:
            # genBlock(mat, objType)
            generateTag(blockTagPath, mat, objType)
        generateTag(itemTagPath, mat, objType)
        #printEnLang(mat, objType, objType in blockTypes)
# Combined tag per object type, listing every material registered above.
for objType in objTypeToMaterialMap.keys():
    contentString = []
    for mat in objTypeToMaterialMap[objType]:
        contentString.append('    "libvulpes:' + objType.lower() + mat.name + '"')
    contents = blockTagSample.replace('@BLOCKLIST@', ',\n'.join(contentString))
    f = None
    try:
        if objType in blockTypes:
            f = open(blockTagPath + objType.lower() + '.json', 'w')
        else:
            f = open(itemTagPath + objType.lower() + '.json', 'w')
        f.write(contents)
        f.close()
    # Missing output directory: skip this tag rather than abort the run.
    except FileNotFoundError:
        pass
| [
"os.system"
] | [((3652, 3666), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (3661, 3666), False, 'import os\n'), ((4616, 4630), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (4625, 4630), False, 'import os\n'), ((4854, 4868), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (4863, 4868), False, 'import os\n'), ((5098, 5112), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (5107, 5112), False, 'import os\n')] |
# -*- coding: utf-8 -*-
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""
import mock
import pytest
from google.cloud import monitoring_v3
from google.cloud.monitoring_v3.proto import service_pb2
from google.cloud.monitoring_v3.proto import service_service_pb2
from google.protobuf import empty_pb2
class MultiCallableStub(object):
"""Stub for the grpc.UnaryUnaryMultiCallable interface."""
def __init__(self, method, channel_stub):
self.method = method
self.channel_stub = channel_stub
def __call__(self, request, timeout=None, metadata=None, credentials=None):
self.channel_stub.requests.append((self.method, request))
response = None
if self.channel_stub.responses:
response = self.channel_stub.responses.pop()
if isinstance(response, Exception):
raise response
if response:
return response
class ChannelStub(object):
"""Stub for the grpc.Channel interface."""
def __init__(self, responses=[]):
self.responses = responses
self.requests = []
def unary_unary(self, method, request_serializer=None, response_deserializer=None):
return MultiCallableStub(method, self)
class CustomException(Exception):
pass
class TestServiceMonitoringServiceClient(object):
def test_create_service(self):
# Setup Expected Response
name = "name3373707"
display_name = "displayName1615086568"
expected_response = {"name": name, "display_name": display_name}
expected_response = service_pb2.Service(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
parent = client.project_path("[PROJECT]")
service = {}
response = client.create_service(parent, service)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_service_pb2.CreateServiceRequest(
parent=parent, service=service
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_service_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
parent = client.project_path("[PROJECT]")
service = {}
with pytest.raises(CustomException):
client.create_service(parent, service)
def test_get_service(self):
# Setup Expected Response
name_2 = "name2-1052831874"
display_name = "displayName1615086568"
expected_response = {"name": name_2, "display_name": display_name}
expected_response = service_pb2.Service(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
name = client.service_path("[PROJECT]", "[SERVICE]")
response = client.get_service(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_service_pb2.GetServiceRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_service_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
name = client.service_path("[PROJECT]", "[SERVICE]")
with pytest.raises(CustomException):
client.get_service(name)
def test_list_services(self):
# Setup Expected Response
next_page_token = ""
services_element = {}
services = [services_element]
expected_response = {"next_page_token": next_page_token, "services": services}
expected_response = service_service_pb2.ListServicesResponse(
**expected_response
)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
parent = client.project_path("[PROJECT]")
paged_list_response = client.list_services(parent)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.services[0] == resources[0]
assert len(channel.requests) == 1
expected_request = service_service_pb2.ListServicesRequest(parent=parent)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_services_exception(self):
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
parent = client.project_path("[PROJECT]")
paged_list_response = client.list_services(parent)
with pytest.raises(CustomException):
list(paged_list_response)
def test_update_service(self):
# Setup Expected Response
name = "name3373707"
display_name = "displayName1615086568"
expected_response = {"name": name, "display_name": display_name}
expected_response = service_pb2.Service(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
service = {}
response = client.update_service(service)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_service_pb2.UpdateServiceRequest(service=service)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_update_service_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
service = {}
with pytest.raises(CustomException):
client.update_service(service)
def test_delete_service(self):
channel = ChannelStub()
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
name = client.service_path("[PROJECT]", "[SERVICE]")
client.delete_service(name)
assert len(channel.requests) == 1
expected_request = service_service_pb2.DeleteServiceRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_delete_service_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
name = client.service_path("[PROJECT]", "[SERVICE]")
with pytest.raises(CustomException):
client.delete_service(name)
def test_create_service_level_objective(self):
# Setup Expected Response
name = "name3373707"
display_name = "displayName1615086568"
goal = 317825.0
expected_response = {"name": name, "display_name": display_name, "goal": goal}
expected_response = service_pb2.ServiceLevelObjective(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
parent = client.service_path("[PROJECT]", "[SERVICE]")
service_level_objective = {}
response = client.create_service_level_objective(
parent, service_level_objective
)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_service_pb2.CreateServiceLevelObjectiveRequest(
parent=parent, service_level_objective=service_level_objective
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_service_level_objective_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
parent = client.service_path("[PROJECT]", "[SERVICE]")
service_level_objective = {}
with pytest.raises(CustomException):
client.create_service_level_objective(parent, service_level_objective)
def test_get_service_level_objective(self):
# Setup Expected Response
name_2 = "name2-1052831874"
display_name = "displayName1615086568"
goal = 317825.0
expected_response = {"name": name_2, "display_name": display_name, "goal": goal}
expected_response = service_pb2.ServiceLevelObjective(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
name = client.service_level_objective_path(
"[PROJECT]", "[SERVICE]", "[SERVICE_LEVEL_OBJECTIVE]"
)
response = client.get_service_level_objective(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_service_pb2.GetServiceLevelObjectiveRequest(
name=name
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_service_level_objective_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
name = client.service_level_objective_path(
"[PROJECT]", "[SERVICE]", "[SERVICE_LEVEL_OBJECTIVE]"
)
with pytest.raises(CustomException):
client.get_service_level_objective(name)
def test_list_service_level_objectives(self):
# Setup Expected Response
next_page_token = ""
service_level_objectives_element = {}
service_level_objectives = [service_level_objectives_element]
expected_response = {
"next_page_token": next_page_token,
"service_level_objectives": service_level_objectives,
}
expected_response = service_service_pb2.ListServiceLevelObjectivesResponse(
**expected_response
)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
parent = client.service_path("[PROJECT]", "[SERVICE]")
paged_list_response = client.list_service_level_objectives(parent)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.service_level_objectives[0] == resources[0]
assert len(channel.requests) == 1
expected_request = service_service_pb2.ListServiceLevelObjectivesRequest(
parent=parent
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_service_level_objectives_exception(self):
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
parent = client.service_path("[PROJECT]", "[SERVICE]")
paged_list_response = client.list_service_level_objectives(parent)
with pytest.raises(CustomException):
list(paged_list_response)
def test_update_service_level_objective(self):
# Setup Expected Response
name = "name3373707"
display_name = "displayName1615086568"
goal = 317825.0
expected_response = {"name": name, "display_name": display_name, "goal": goal}
expected_response = service_pb2.ServiceLevelObjective(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
service_level_objective = {}
response = client.update_service_level_objective(service_level_objective)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_service_pb2.UpdateServiceLevelObjectiveRequest(
service_level_objective=service_level_objective
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_update_service_level_objective_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
service_level_objective = {}
with pytest.raises(CustomException):
client.update_service_level_objective(service_level_objective)
def test_delete_service_level_objective(self):
channel = ChannelStub()
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
name = client.service_level_objective_path(
"[PROJECT]", "[SERVICE]", "[SERVICE_LEVEL_OBJECTIVE]"
)
client.delete_service_level_objective(name)
assert len(channel.requests) == 1
expected_request = service_service_pb2.DeleteServiceLevelObjectiveRequest(
name=name
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_delete_service_level_objective_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
name = client.service_level_objective_path(
"[PROJECT]", "[SERVICE]", "[SERVICE_LEVEL_OBJECTIVE]"
)
with pytest.raises(CustomException):
client.delete_service_level_objective(name)
| [
"google.cloud.monitoring_v3.proto.service_service_pb2.ListServiceLevelObjectivesRequest",
"google.cloud.monitoring_v3.proto.service_service_pb2.CreateServiceRequest",
"mock.patch",
"google.cloud.monitoring_v3.proto.service_service_pb2.DeleteServiceLevelObjectiveRequest",
"google.cloud.monitoring_v3.proto.se... | [((2109, 2149), 'google.cloud.monitoring_v3.proto.service_pb2.Service', 'service_pb2.Service', ([], {}), '(**expected_response)\n', (2128, 2149), False, 'from google.cloud.monitoring_v3.proto import service_pb2\n'), ((2260, 2317), 'mock.patch', 'mock.patch', (['"""google.api_core.grpc_helpers.create_channel"""'], {}), "('google.api_core.grpc_helpers.create_channel')\n", (2270, 2317), False, 'import mock\n'), ((2744, 2816), 'google.cloud.monitoring_v3.proto.service_service_pb2.CreateServiceRequest', 'service_service_pb2.CreateServiceRequest', ([], {'parent': 'parent', 'service': 'service'}), '(parent=parent, service=service)\n', (2784, 2816), False, 'from google.cloud.monitoring_v3.proto import service_service_pb2\n'), ((3092, 3149), 'mock.patch', 'mock.patch', (['"""google.api_core.grpc_helpers.create_channel"""'], {}), "('google.api_core.grpc_helpers.create_channel')\n", (3102, 3149), False, 'import mock\n'), ((3752, 3792), 'google.cloud.monitoring_v3.proto.service_pb2.Service', 'service_pb2.Service', ([], {}), '(**expected_response)\n', (3771, 3792), False, 'from google.cloud.monitoring_v3.proto import service_pb2\n'), ((3903, 3960), 'mock.patch', 'mock.patch', (['"""google.api_core.grpc_helpers.create_channel"""'], {}), "('google.api_core.grpc_helpers.create_channel')\n", (3913, 3960), False, 'import mock\n'), ((4363, 4411), 'google.cloud.monitoring_v3.proto.service_service_pb2.GetServiceRequest', 'service_service_pb2.GetServiceRequest', ([], {'name': 'name'}), '(name=name)\n', (4400, 4411), False, 'from google.cloud.monitoring_v3.proto import service_service_pb2\n'), ((4662, 4719), 'mock.patch', 'mock.patch', (['"""google.api_core.grpc_helpers.create_channel"""'], {}), "('google.api_core.grpc_helpers.create_channel')\n", (4672, 4719), False, 'import mock\n'), ((5326, 5387), 'google.cloud.monitoring_v3.proto.service_service_pb2.ListServicesResponse', 'service_service_pb2.ListServicesResponse', ([], {}), 
'(**expected_response)\n', (5366, 5387), False, 'from google.cloud.monitoring_v3.proto import service_service_pb2\n'), ((5520, 5577), 'mock.patch', 'mock.patch', (['"""google.api_core.grpc_helpers.create_channel"""'], {}), "('google.api_core.grpc_helpers.create_channel')\n", (5530, 5577), False, 'import mock\n'), ((6082, 6136), 'google.cloud.monitoring_v3.proto.service_service_pb2.ListServicesRequest', 'service_service_pb2.ListServicesRequest', ([], {'parent': 'parent'}), '(parent=parent)\n', (6121, 6136), False, 'from google.cloud.monitoring_v3.proto import service_service_pb2\n'), ((6357, 6414), 'mock.patch', 'mock.patch', (['"""google.api_core.grpc_helpers.create_channel"""'], {}), "('google.api_core.grpc_helpers.create_channel')\n", (6367, 6414), False, 'import mock\n'), ((7036, 7076), 'google.cloud.monitoring_v3.proto.service_pb2.Service', 'service_pb2.Service', ([], {}), '(**expected_response)\n', (7055, 7076), False, 'from google.cloud.monitoring_v3.proto import service_pb2\n'), ((7187, 7244), 'mock.patch', 'mock.patch', (['"""google.api_core.grpc_helpers.create_channel"""'], {}), "('google.api_core.grpc_helpers.create_channel')\n", (7197, 7244), False, 'import mock\n'), ((7613, 7670), 'google.cloud.monitoring_v3.proto.service_service_pb2.UpdateServiceRequest', 'service_service_pb2.UpdateServiceRequest', ([], {'service': 'service'}), '(service=service)\n', (7653, 7670), False, 'from google.cloud.monitoring_v3.proto import service_service_pb2\n'), ((7924, 7981), 'mock.patch', 'mock.patch', (['"""google.api_core.grpc_helpers.create_channel"""'], {}), "('google.api_core.grpc_helpers.create_channel')\n", (7934, 7981), False, 'import mock\n'), ((8357, 8414), 'mock.patch', 'mock.patch', (['"""google.api_core.grpc_helpers.create_channel"""'], {}), "('google.api_core.grpc_helpers.create_channel')\n", (8367, 8414), False, 'import mock\n'), ((8764, 8815), 'google.cloud.monitoring_v3.proto.service_service_pb2.DeleteServiceRequest', 
'service_service_pb2.DeleteServiceRequest', ([], {'name': 'name'}), '(name=name)\n', (8804, 8815), False, 'from google.cloud.monitoring_v3.proto import service_service_pb2\n'), ((9069, 9126), 'mock.patch', 'mock.patch', (['"""google.api_core.grpc_helpers.create_channel"""'], {}), "('google.api_core.grpc_helpers.create_channel')\n", (9079, 9126), False, 'import mock\n'), ((9756, 9810), 'google.cloud.monitoring_v3.proto.service_pb2.ServiceLevelObjective', 'service_pb2.ServiceLevelObjective', ([], {}), '(**expected_response)\n', (9789, 9810), False, 'from google.cloud.monitoring_v3.proto import service_pb2\n'), ((9921, 9978), 'mock.patch', 'mock.patch', (['"""google.api_core.grpc_helpers.create_channel"""'], {}), "('google.api_core.grpc_helpers.create_channel')\n", (9931, 9978), False, 'import mock\n'), ((10488, 10610), 'google.cloud.monitoring_v3.proto.service_service_pb2.CreateServiceLevelObjectiveRequest', 'service_service_pb2.CreateServiceLevelObjectiveRequest', ([], {'parent': 'parent', 'service_level_objective': 'service_level_objective'}), '(parent=parent,\n service_level_objective=service_level_objective)\n', (10542, 10610), False, 'from google.cloud.monitoring_v3.proto import service_service_pb2\n'), ((10898, 10955), 'mock.patch', 'mock.patch', (['"""google.api_core.grpc_helpers.create_channel"""'], {}), "('google.api_core.grpc_helpers.create_channel')\n", (10908, 10955), False, 'import mock\n'), ((11673, 11727), 'google.cloud.monitoring_v3.proto.service_pb2.ServiceLevelObjective', 'service_pb2.ServiceLevelObjective', ([], {}), '(**expected_response)\n', (11706, 11727), False, 'from google.cloud.monitoring_v3.proto import service_pb2\n'), ((11838, 11895), 'mock.patch', 'mock.patch', (['"""google.api_core.grpc_helpers.create_channel"""'], {}), "('google.api_core.grpc_helpers.create_channel')\n", (11848, 11895), False, 'import mock\n'), ((12381, 12443), 'google.cloud.monitoring_v3.proto.service_service_pb2.GetServiceLevelObjectiveRequest', 
'service_service_pb2.GetServiceLevelObjectiveRequest', ([], {'name': 'name'}), '(name=name)\n', (12432, 12443), False, 'from google.cloud.monitoring_v3.proto import service_service_pb2\n'), ((12732, 12789), 'mock.patch', 'mock.patch', (['"""google.api_core.grpc_helpers.create_channel"""'], {}), "('google.api_core.grpc_helpers.create_channel')\n", (12742, 12789), False, 'import mock\n'), ((13610, 13685), 'google.cloud.monitoring_v3.proto.service_service_pb2.ListServiceLevelObjectivesResponse', 'service_service_pb2.ListServiceLevelObjectivesResponse', ([], {}), '(**expected_response)\n', (13664, 13685), False, 'from google.cloud.monitoring_v3.proto import service_service_pb2\n'), ((13818, 13875), 'mock.patch', 'mock.patch', (['"""google.api_core.grpc_helpers.create_channel"""'], {}), "('google.api_core.grpc_helpers.create_channel')\n", (13828, 13875), False, 'import mock\n'), ((14425, 14493), 'google.cloud.monitoring_v3.proto.service_service_pb2.ListServiceLevelObjectivesRequest', 'service_service_pb2.ListServiceLevelObjectivesRequest', ([], {'parent': 'parent'}), '(parent=parent)\n', (14478, 14493), False, 'from google.cloud.monitoring_v3.proto import service_service_pb2\n'), ((14752, 14809), 'mock.patch', 'mock.patch', (['"""google.api_core.grpc_helpers.create_channel"""'], {}), "('google.api_core.grpc_helpers.create_channel')\n", (14762, 14809), False, 'import mock\n'), ((15514, 15568), 'google.cloud.monitoring_v3.proto.service_pb2.ServiceLevelObjective', 'service_pb2.ServiceLevelObjective', ([], {}), '(**expected_response)\n', (15547, 15568), False, 'from google.cloud.monitoring_v3.proto import service_pb2\n'), ((15679, 15736), 'mock.patch', 'mock.patch', (['"""google.api_core.grpc_helpers.create_channel"""'], {}), "('google.api_core.grpc_helpers.create_channel')\n", (15689, 15736), False, 'import mock\n'), ((16153, 16261), 'google.cloud.monitoring_v3.proto.service_service_pb2.UpdateServiceLevelObjectiveRequest', 
'service_service_pb2.UpdateServiceLevelObjectiveRequest', ([], {'service_level_objective': 'service_level_objective'}), '(service_level_objective\n =service_level_objective)\n', (16207, 16261), False, 'from google.cloud.monitoring_v3.proto import service_service_pb2\n'), ((16548, 16605), 'mock.patch', 'mock.patch', (['"""google.api_core.grpc_helpers.create_channel"""'], {}), "('google.api_core.grpc_helpers.create_channel')\n", (16558, 16605), False, 'import mock\n'), ((17045, 17102), 'mock.patch', 'mock.patch', (['"""google.api_core.grpc_helpers.create_channel"""'], {}), "('google.api_core.grpc_helpers.create_channel')\n", (17055, 17102), False, 'import mock\n'), ((17535, 17600), 'google.cloud.monitoring_v3.proto.service_service_pb2.DeleteServiceLevelObjectiveRequest', 'service_service_pb2.DeleteServiceLevelObjectiveRequest', ([], {'name': 'name'}), '(name=name)\n', (17589, 17600), False, 'from google.cloud.monitoring_v3.proto import service_service_pb2\n'), ((17892, 17949), 'mock.patch', 'mock.patch', (['"""google.api_core.grpc_helpers.create_channel"""'], {}), "('google.api_core.grpc_helpers.create_channel')\n", (17902, 17949), False, 'import mock\n'), ((2427, 2473), 'google.cloud.monitoring_v3.ServiceMonitoringServiceClient', 'monitoring_v3.ServiceMonitoringServiceClient', ([], {}), '()\n', (2471, 2473), False, 'from google.cloud import monitoring_v3\n'), ((3259, 3305), 'google.cloud.monitoring_v3.ServiceMonitoringServiceClient', 'monitoring_v3.ServiceMonitoringServiceClient', ([], {}), '()\n', (3303, 3305), False, 'from google.cloud import monitoring_v3\n'), ((3416, 3446), 'pytest.raises', 'pytest.raises', (['CustomException'], {}), '(CustomException)\n', (3429, 3446), False, 'import pytest\n'), ((4070, 4116), 'google.cloud.monitoring_v3.ServiceMonitoringServiceClient', 'monitoring_v3.ServiceMonitoringServiceClient', ([], {}), '()\n', (4114, 4116), False, 'from google.cloud import monitoring_v3\n'), ((4829, 4875), 
'google.cloud.monitoring_v3.ServiceMonitoringServiceClient', 'monitoring_v3.ServiceMonitoringServiceClient', ([], {}), '()\n', (4873, 4875), False, 'from google.cloud import monitoring_v3\n'), ((4976, 5006), 'pytest.raises', 'pytest.raises', (['CustomException'], {}), '(CustomException)\n', (4989, 5006), False, 'import pytest\n'), ((5687, 5733), 'google.cloud.monitoring_v3.ServiceMonitoringServiceClient', 'monitoring_v3.ServiceMonitoringServiceClient', ([], {}), '()\n', (5731, 5733), False, 'from google.cloud import monitoring_v3\n'), ((6524, 6570), 'google.cloud.monitoring_v3.ServiceMonitoringServiceClient', 'monitoring_v3.ServiceMonitoringServiceClient', ([], {}), '()\n', (6568, 6570), False, 'from google.cloud import monitoring_v3\n'), ((6719, 6749), 'pytest.raises', 'pytest.raises', (['CustomException'], {}), '(CustomException)\n', (6732, 6749), False, 'import pytest\n'), ((7354, 7400), 'google.cloud.monitoring_v3.ServiceMonitoringServiceClient', 'monitoring_v3.ServiceMonitoringServiceClient', ([], {}), '()\n', (7398, 7400), False, 'from google.cloud import monitoring_v3\n'), ((8091, 8137), 'google.cloud.monitoring_v3.ServiceMonitoringServiceClient', 'monitoring_v3.ServiceMonitoringServiceClient', ([], {}), '()\n', (8135, 8137), False, 'from google.cloud import monitoring_v3\n'), ((8198, 8228), 'pytest.raises', 'pytest.raises', (['CustomException'], {}), '(CustomException)\n', (8211, 8228), False, 'import pytest\n'), ((8524, 8570), 'google.cloud.monitoring_v3.ServiceMonitoringServiceClient', 'monitoring_v3.ServiceMonitoringServiceClient', ([], {}), '()\n', (8568, 8570), False, 'from google.cloud import monitoring_v3\n'), ((9236, 9282), 'google.cloud.monitoring_v3.ServiceMonitoringServiceClient', 'monitoring_v3.ServiceMonitoringServiceClient', ([], {}), '()\n', (9280, 9282), False, 'from google.cloud import monitoring_v3\n'), ((9383, 9413), 'pytest.raises', 'pytest.raises', (['CustomException'], {}), '(CustomException)\n', (9396, 9413), False, 'import 
pytest\n'), ((10088, 10134), 'google.cloud.monitoring_v3.ServiceMonitoringServiceClient', 'monitoring_v3.ServiceMonitoringServiceClient', ([], {}), '()\n', (10132, 10134), False, 'from google.cloud import monitoring_v3\n'), ((11065, 11111), 'google.cloud.monitoring_v3.ServiceMonitoringServiceClient', 'monitoring_v3.ServiceMonitoringServiceClient', ([], {}), '()\n', (11109, 11111), False, 'from google.cloud import monitoring_v3\n'), ((11251, 11281), 'pytest.raises', 'pytest.raises', (['CustomException'], {}), '(CustomException)\n', (11264, 11281), False, 'import pytest\n'), ((12005, 12051), 'google.cloud.monitoring_v3.ServiceMonitoringServiceClient', 'monitoring_v3.ServiceMonitoringServiceClient', ([], {}), '()\n', (12049, 12051), False, 'from google.cloud import monitoring_v3\n'), ((12899, 12945), 'google.cloud.monitoring_v3.ServiceMonitoringServiceClient', 'monitoring_v3.ServiceMonitoringServiceClient', ([], {}), '()\n', (12943, 12945), False, 'from google.cloud import monitoring_v3\n'), ((13113, 13143), 'pytest.raises', 'pytest.raises', (['CustomException'], {}), '(CustomException)\n', (13126, 13143), False, 'import pytest\n'), ((13985, 14031), 'google.cloud.monitoring_v3.ServiceMonitoringServiceClient', 'monitoring_v3.ServiceMonitoringServiceClient', ([], {}), '()\n', (14029, 14031), False, 'from google.cloud import monitoring_v3\n'), ((14919, 14965), 'google.cloud.monitoring_v3.ServiceMonitoringServiceClient', 'monitoring_v3.ServiceMonitoringServiceClient', ([], {}), '()\n', (14963, 14965), False, 'from google.cloud import monitoring_v3\n'), ((15143, 15173), 'pytest.raises', 'pytest.raises', (['CustomException'], {}), '(CustomException)\n', (15156, 15173), False, 'import pytest\n'), ((15846, 15892), 'google.cloud.monitoring_v3.ServiceMonitoringServiceClient', 'monitoring_v3.ServiceMonitoringServiceClient', ([], {}), '()\n', (15890, 15892), False, 'from google.cloud import monitoring_v3\n'), ((16715, 16761), 
'google.cloud.monitoring_v3.ServiceMonitoringServiceClient', 'monitoring_v3.ServiceMonitoringServiceClient', ([], {}), '()\n', (16759, 16761), False, 'from google.cloud import monitoring_v3\n'), ((16838, 16868), 'pytest.raises', 'pytest.raises', (['CustomException'], {}), '(CustomException)\n', (16851, 16868), False, 'import pytest\n'), ((17212, 17258), 'google.cloud.monitoring_v3.ServiceMonitoringServiceClient', 'monitoring_v3.ServiceMonitoringServiceClient', ([], {}), '()\n', (17256, 17258), False, 'from google.cloud import monitoring_v3\n'), ((18059, 18105), 'google.cloud.monitoring_v3.ServiceMonitoringServiceClient', 'monitoring_v3.ServiceMonitoringServiceClient', ([], {}), '()\n', (18103, 18105), False, 'from google.cloud import monitoring_v3\n'), ((18273, 18303), 'pytest.raises', 'pytest.raises', (['CustomException'], {}), '(CustomException)\n', (18286, 18303), False, 'import pytest\n')] |
from typing import List
import torch
from detectron2.structures import ImageList, Boxes, Instances, pairwise_iou
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.modeling.roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads
from detectron2.modeling.roi_heads.cascade_rcnn import CascadeROIHeads
from .utils import get_aligned_pooler, label_and_sample_proposals
from .lazy_fast_rcnn import LazyFastRCNNOutputLayers
@ROI_HEADS_REGISTRY.register()
class LazyRoIHeads(StandardROIHeads):
    """StandardROIHeads variant wired for lazy (sparse) supervision.

    Swaps in LazyFastRCNNOutputLayers as the box predictor and delegates
    proposal labeling/sampling to the shared helper from .utils.
    """
    @torch.no_grad()
    def label_and_sample_proposals(
        self, proposals: List[Instances], targets: List[Instances]
    ) -> List[Instances]:
        # Delegate to the module-level helper (shadows the method name on purpose).
        return label_and_sample_proposals(self, proposals, targets)
    @classmethod
    def _init_box_head(cls, cfg, input_shape):
        """Build the box head from config, replacing predictor/pooler with lazy variants."""
        ret = super()._init_box_head(cfg, input_shape)
        ret["box_predictor"] = LazyFastRCNNOutputLayers(
            cfg, ret["box_head"].output_shape,
            # The loss weight is set as Cascade RPN
            loss_weight={
                "loss_cls": 1.5,
                "loss_box_reg": cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT
            },
        )
        # Pool from the same feature maps the RPN consumes.
        ret["box_in_features"] = cfg.MODEL.RPN.IN_FEATURES
        ret["box_pooler"] = get_aligned_pooler(
            cfg.MODEL.RPN, input_shape,
            output_size=cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION,
            sampling_ratio=cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO,
        )
        return ret
@ROI_HEADS_REGISTRY.register()
class LazyCascadeRoIHeads(CascadeROIHeads):
    """CascadeROIHeads variant using lazy output layers at every cascade stage."""
    @torch.no_grad()
    def label_and_sample_proposals(
        self, proposals: List[Instances], targets: List[Instances]
    ) -> List[Instances]:
        # Delegate to the module-level helper (shadows the method name on purpose).
        return label_and_sample_proposals(self, proposals, targets)
    @classmethod
    def _init_box_head(cls, cfg, input_shape):
        """Build per-stage lazy box predictors and a pooler aligned with the RPN."""
        ret = super()._init_box_head(cfg, input_shape)
        cascade_bbox_reg_weights = cfg.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS
        box_predictors = []
        # One predictor per cascade stage, each with its own box regression weights.
        for bbox_reg_weights in cascade_bbox_reg_weights:
            box_predictors.append(
                LazyFastRCNNOutputLayers(
                    cfg, ret["box_heads"][0].output_shape,
                    box2box_transform=Box2BoxTransform(weights=bbox_reg_weights),
                    # Loss weights chosen to match Cascade RPN.
                    loss_weight={
                        "loss_cls": 1.5,
                        "loss_box_reg": cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT
                    },
                )
            )
        ret["box_predictors"] = box_predictors
        # Pool from the same feature maps the RPN consumes.
        ret["box_in_features"] = cfg.MODEL.RPN.IN_FEATURES
        ret["box_pooler"] = get_aligned_pooler(
            cfg.MODEL.RPN, input_shape,
            output_size=cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION,
            sampling_ratio=cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO,
        )
        return ret
def _match_and_label_boxes(self, proposals, stage, targets):
return label_and_sample_proposals(self, proposals, targets, False, False, stage) | [
"torch.no_grad",
"detectron2.modeling.roi_heads.ROI_HEADS_REGISTRY.register",
"detectron2.modeling.box_regression.Box2BoxTransform"
] | [((451, 480), 'detectron2.modeling.roi_heads.ROI_HEADS_REGISTRY.register', 'ROI_HEADS_REGISTRY.register', ([], {}), '()\n', (478, 480), False, 'from detectron2.modeling.roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads\n'), ((1495, 1524), 'detectron2.modeling.roi_heads.ROI_HEADS_REGISTRY.register', 'ROI_HEADS_REGISTRY.register', ([], {}), '()\n', (1522, 1524), False, 'from detectron2.modeling.roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads\n'), ((524, 539), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (537, 539), False, 'import torch\n'), ((1574, 1589), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1587, 1589), False, 'import torch\n'), ((2254, 2296), 'detectron2.modeling.box_regression.Box2BoxTransform', 'Box2BoxTransform', ([], {'weights': 'bbox_reg_weights'}), '(weights=bbox_reg_weights)\n', (2270, 2296), False, 'from detectron2.modeling.box_regression import Box2BoxTransform\n')] |
# coding=utf-8
# tensorflow tf.contrib.data api test
import tensorflow as tf
# file path
filename = ''
batch_size = 100
# Build the input pipeline: read lines from the file, parse each line as
# tab-separated CSV with three string columns, shuffle with a 1000-element
# buffer, then group records into batches.
# Fix: Dataset has no `batch_size` method — batching is done with `.batch()`.
aa = (tf.contrib.data.TextLineDataset(filename)
      .map(lambda line: tf.decode_csv(line, record_defaults=[['1'], ['1'], ['1']], field_delim='\t'))
      .shuffle(buffer_size=1000)
      .batch(batch_size)
      )
| [
"tensorflow.decode_csv",
"tensorflow.contrib.data.TextLineDataset"
] | [((131, 172), 'tensorflow.contrib.data.TextLineDataset', 'tf.contrib.data.TextLineDataset', (['filename'], {}), '(filename)\n', (162, 172), True, 'import tensorflow as tf\n'), ((198, 274), 'tensorflow.decode_csv', 'tf.decode_csv', (['line'], {'record_defaults': "[['1'], ['1'], ['1']]", 'field_delim': '"""\t"""'}), "(line, record_defaults=[['1'], ['1'], ['1']], field_delim='\\t')\n", (211, 274), True, 'import tensorflow as tf\n')] |
'''
Experiment: audio playback
Version: v1.0
Date: 2020.12
Author: 01Studio
Description: plays an MP3/WAV audio file; playback and volume are
controlled with physical buttons.
'''
# Import the required modules
import audio,time
from pyb import Switch
from machine import Pin
# Build the audio codec object (WM8978 driver)
wm=audio.WM8978()
vol = 80 #initial volume: 80
######################
# Play: USR button
######################
play_flag = 0
def music_play():
    # Switch callback: keep the handler short — just raise the flag that
    # the main loop polls to start playback.
    global play_flag
    play_flag = 1
sw =Switch()
sw.callback(music_play)
######################
# Volume up: A0 button
######################
VOL_U = Pin('A0',Pin.IN,Pin.PULL_UP) #construct button A0
vol_up_flag = 0
def vol_up(VOL_U):
    # IRQ handler: raise the volume by 10, clamped to 100.
    global vol
    #debounce the button press
    if VOL_U.value() == 0:
        time.sleep_ms(10)
        if VOL_U.value() == 0:
            vol=vol+10
            if vol > 100:
                vol = 100
            wm.volume(vol)
VOL_U.irq(vol_up,Pin.IRQ_FALLING, hard=1) #attach interrupt, falling-edge trigger
######################
# Volume down: E3 button
######################
VOL_D = Pin('E3',Pin.IN,Pin.PULL_UP) #construct button E3
vol_down_flag = 0
def vol_down(VOL_D):
    # IRQ handler: lower the volume by 10, clamped to 10.
    global vol
    #debounce the button press
    if VOL_D.value() == 0:
        time.sleep_ms(10)
        if VOL_D.value() == 0:
            vol=vol-10
            if vol < 10:
                vol = 10
            wm.volume(vol)
VOL_D.irq(vol_down,Pin.IRQ_FALLING, hard=1) #attach interrupt, falling-edge trigger
#load the music file
wm.load('/flash/music/Seasons In The Sun.mp3')
while True:
    #start playback when the button flag is set
    if play_flag == 1:
        wm.play()
        play_flag = 0
| [
"time.sleep_ms",
"audio.WM8978",
"machine.Pin",
"pyb.Switch"
] | [((161, 175), 'audio.WM8978', 'audio.WM8978', ([], {}), '()\n', (173, 175), False, 'import audio, time\n'), ((331, 339), 'pyb.Switch', 'Switch', ([], {}), '()\n', (337, 339), False, 'from pyb import Switch\n'), ((430, 460), 'machine.Pin', 'Pin', (['"""A0"""', 'Pin.IN', 'Pin.PULL_UP'], {}), "('A0', Pin.IN, Pin.PULL_UP)\n", (433, 460), False, 'from machine import Pin\n'), ((841, 871), 'machine.Pin', 'Pin', (['"""E3"""', 'Pin.IN', 'Pin.PULL_UP'], {}), "('E3', Pin.IN, Pin.PULL_UP)\n", (844, 871), False, 'from machine import Pin\n'), ((568, 585), 'time.sleep_ms', 'time.sleep_ms', (['(10)'], {}), '(10)\n', (581, 585), False, 'import audio, time\n'), ((983, 1000), 'time.sleep_ms', 'time.sleep_ms', (['(10)'], {}), '(10)\n', (996, 1000), False, 'import audio, time\n')] |
#!/usr/bin/env python
import rospy
from nav_msgs.msg import Odometry
# Running extrema of the observed twist, updated by every odometry message.
lin_min = 0.0
lin_max = 0.0
ang_min = 0.0
ang_max = 0.0
def odom_cb(msg):
    # Odometry callback: track the min/max of linear.x and angular.z seen
    # so far and log the current extrema.
    global lin_min, lin_max, ang_min, ang_max
    if lin_min > msg.twist.twist.linear.x:
        lin_min = msg.twist.twist.linear.x
    if lin_max < msg.twist.twist.linear.x:
        lin_max = msg.twist.twist.linear.x
    if ang_min > msg.twist.twist.angular.z:
        ang_min = msg.twist.twist.angular.z
    if ang_max < msg.twist.twist.angular.z:
        ang_max = msg.twist.twist.angular.z
    rospy.loginfo('linear: [%f, %f] angular: [%f, %f]', lin_min, lin_max,
            ang_min, ang_max)
def main():
    """Initialize the node, subscribe to odometry, and spin until shutdown."""
    rospy.init_node('min_max_finder', anonymous=True)
    rospy.Subscriber('odom_comb', Odometry, odom_cb)
    # Fixed typo in the log message ("min_max_finde" -> "min_max_finder").
    rospy.loginfo('min_max_finder node ready and listening. now use teleop to move your robot to the limits!')
    rospy.spin()
# Script entry point.
if __name__ == '__main__':
    main()
| [
"rospy.init_node",
"rospy.Subscriber",
"rospy.loginfo",
"rospy.spin"
] | [((546, 639), 'rospy.loginfo', 'rospy.loginfo', (['"""linear: [%f, %f] angular: [%f, %f]"""', 'lin_min', 'lin_max', 'ang_min', 'ang_max'], {}), "('linear: [%f, %f] angular: [%f, %f]', lin_min, lin_max,\n ang_min, ang_max)\n", (559, 639), False, 'import rospy\n'), ((672, 721), 'rospy.init_node', 'rospy.init_node', (['"""min_max_finder"""'], {'anonymous': '(True)'}), "('min_max_finder', anonymous=True)\n", (687, 721), False, 'import rospy\n'), ((726, 774), 'rospy.Subscriber', 'rospy.Subscriber', (['"""odom_comb"""', 'Odometry', 'odom_cb'], {}), "('odom_comb', Odometry, odom_cb)\n", (742, 774), False, 'import rospy\n'), ((779, 894), 'rospy.loginfo', 'rospy.loginfo', (['"""min_max_finde node ready and listening. now use teleop to move your robot to the limits!"""'], {}), "(\n 'min_max_finde node ready and listening. now use teleop to move your robot to the limits!'\n )\n", (792, 894), False, 'import rospy\n'), ((889, 901), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (899, 901), False, 'import rospy\n')] |
# this file is to store all custom classes
import tkinter as tk
# class to store tkinter window properties
# font: tk font dictionary {family, size, weight, slant, underline, overstrike}
# font color: string
# nrows: the number of rows of lyric displayed (integer greater than 0)
# width: window width (int greater than 0)
# transparency: window transparency level (0.2 to 1)
# bg_color: [transparency: 1 or 2, background color (string), transparent color (string)]
# bd: border width in integers
# on_top: whether window is kept on top of all other windows (Boolean)
# x_pos, y_pos: window's x and y coordinates in pixels
class WindowProperties:
    """Container for the lyric window's persisted display settings.

    Holds the tk font dict, foreground colors, lyric row count, window
    width, transparency level, background color spec, border width, the
    always-on-top flag, and the on-screen position.
    """

    def __init__(self, font, font_color, font_color_bg, nrows, width,
                 transparency, bg_color, bd, on_top, x_pos, y_pos):
        self.font = font
        self.font_color = font_color
        self.font_color_bg = font_color_bg
        self.nrows = nrows
        self.width = width
        self.transparency = transparency
        self.bg_color = bg_color
        self.bd = bd
        self.on_top = on_top
        self.x_pos = x_pos
        self.y_pos = y_pos

    def save(self, file_path):
        """Write every setting to *file_path*, one value per line.

        The order matches what the loader expects: the six tk font
        fields first, then the remaining attributes.
        """
        fields = [
            self.font['family'],
            self.font['size'],
            self.font['weight'],
            self.font['slant'],
            self.font['underline'],
            self.font['overstrike'],
            self.font_color,
            self.font_color_bg,
            self.nrows,
            self.width,
            self.transparency,
            self.bg_color,
            self.bd,
            self.on_top,
            self.x_pos,
            self.y_pos,
        ]
        with open(file_path, 'w') as f:
            f.writelines(str(value) + '\n' for value in fields)
# Helper for the custom title bar's close button: persist the current
# window settings, then destroy the window.
def close_root(root, win_properties):
    # Save first so position/size survive across sessions.
    win_properties.save('cache/user_setting.txt')
    root.destroy()
# custom title bar class (replacement for the OS window decoration)
class TitleBar:
    # initialization takes x starting position, y starting position, the
    # window, and the persisted window properties
    def __init__(self, last_click_x, last_click_y, root, win_properties):
        # initialize title_bar frame, close button, and title label
        self.title_bar = tk.Frame(root, bg='#2e2e2e', relief='groove', bd=0, highlightthickness=0)
        # Close button saves settings before destroying the window (see close_root).
        self.close_button = tk.Button(self.title_bar, text='×', bg="#2e2e2e", padx=5, activebackground='red',
                                      bd=0, font="bold", fg='white', command=lambda: close_root(root, win_properties))
        self.close_button.grid(row=0, column=1, sticky='E')
        self.title_text = tk.Label(self.title_bar, text='', bg='#2e2e2e', padx=5, fg='white')
        self.title_text.grid(row=0, column=0, sticky='W')
        self.title_bar.grid_columnconfigure(0, weight=1)
        # bind click (to record drag origin) and drag motion on both the bar
        # and the label so the whole strip is draggable
        self.last_click_x = last_click_x
        self.last_click_y = last_click_y
        self.title_bar.bind('<Button-1>', self.save_last_click)
        self.title_bar.bind('<B1-Motion>', lambda event: self.drag(event, root, win_properties))
        self.title_text.bind('<Button-1>', self.save_last_click)
        self.title_text.bind('<B1-Motion>', lambda event: self.drag(event, root, win_properties))
    # update the displayed title text
    def title(self, title_text):
        self.title_text.config(text=title_text)
    # remember where the drag started (coordinates relative to the bar)
    def save_last_click(self, event):
        self.last_click_x = event.x
        self.last_click_y = event.y
    # move the window with the mouse and persist the new position
    def drag(self, event, root, win_properties):
        x, y = event.x - self.last_click_x + root.winfo_x(), event.y - self.last_click_y + root.winfo_y()
        root.geometry("+%s+%s" % (x, y))
        win_properties.x_pos = x
        win_properties.y_pos = y
# class to store song information
# song: Spotify current song data (see spotify_func.py for format)
# lyric: current lyric used (lrc file in string format)
# search_result: list of lyric results scraped from website ([[song, link, singer]..])
# lyric_offset: number of ms to offset lyrics by when displayed (integer)
# nlyric: the lyric currently being used from search_result (integer between 0 and len(search_result) - 1)
# dynamic lyric position: to track which letter should be highlighted a different color, integer
# dynamic lyric duration: to track how frequent the lyric update function should be refreshed, in ms
# lyric_original: original lyric (not formatted) to be saved to cache
class SongProperties:
    """Holds everything known about the currently playing song.

    Attributes:
        song: Spotify current-song data.
        lyric: formatted lrc lyric text currently in use.
        lyric_original: unformatted lyric kept for caching.
        search_result: scraped lyric candidates ([[song, link, singer], ...]).
        lyric_offset: milliseconds to shift lyric display by.
        nlyric: index into search_result of the lyric currently used.
        dynamic_lyric_pos: index of the letter currently highlighted.
        dynamic_lyric_duration: refresh period of the lyric updater, in ms.
    """

    def __init__(self, songx, lyric_f, lyric_o, search_resultx, lyric_offsetx, nlyricx):
        self.song = songx
        self.lyric = lyric_f
        self.lyric_original = lyric_o
        self.search_result = search_resultx
        self.lyric_offset = lyric_offsetx
        self.nlyric = nlyricx
        # Display-state defaults: highlight starts at the first letter and
        # the updater refreshes every 100 ms.
        self.dynamic_lyric_pos = 0
        self.dynamic_lyric_duration = 100
| [
"tkinter.Frame",
"tkinter.Label"
] | [((2419, 2492), 'tkinter.Frame', 'tk.Frame', (['root'], {'bg': '"""#2e2e2e"""', 'relief': '"""groove"""', 'bd': '(0)', 'highlightthickness': '(0)'}), "(root, bg='#2e2e2e', relief='groove', bd=0, highlightthickness=0)\n", (2427, 2492), True, 'import tkinter as tk\n'), ((2805, 2872), 'tkinter.Label', 'tk.Label', (['self.title_bar'], {'text': '""""""', 'bg': '"""#2e2e2e"""', 'padx': '(5)', 'fg': '"""white"""'}), "(self.title_bar, text='', bg='#2e2e2e', padx=5, fg='white')\n", (2813, 2872), True, 'import tkinter as tk\n')] |
"""
Unit test utilities.
"""
import textwrap
def clean_multiline_string( multiline_string, sep='\n' ):
    """
    Dedent, split on *sep*, drop a leading/trailing empty entry, and rejoin
    with a newline appended to every remaining line.

    Fix: guard the first/last-entry checks against an empty list — the
    previous version raised IndexError for an empty (or all-separator)
    input because stripping the leading empty entry left nothing to index
    with ``string_list[-1]``.
    """
    multiline_string = textwrap.dedent( multiline_string )
    string_list = multiline_string.split( sep )
    # Drop at most one empty entry at each end (artifacts of a leading or
    # trailing separator), but only while the list is non-empty.
    if string_list and not string_list[0]:
        string_list = string_list[1:]
    if string_list and not string_list[-1]:
        string_list = string_list[:-1]
    return ''.join([ ( s + '\n' ) for s in string_list ])
# Public API of this helper module.
__all__ = (
    "clean_multiline_string",
)
| [
"textwrap.dedent"
] | [((206, 239), 'textwrap.dedent', 'textwrap.dedent', (['multiline_string'], {}), '(multiline_string)\n', (221, 239), False, 'import textwrap\n')] |
import asyncio
import json
def test_chunked_messages(plugin, read):
    """A request split across several reads is reassembled before dispatch."""
    request = {
        "jsonrpc": "2.0",
        "method": "install_game",
        "params": {
            "game_id": "3"
        }
    }
    # Split the newline-terminated message mid-stream; the trailing b"" marks EOF.
    message = json.dumps(request).encode() + b"\n"
    read.side_effect = [message[:5], message[5:], b""]
    asyncio.run(plugin.run())
    plugin.install_game.assert_called_with(game_id="3")
def test_joined_messages(plugin, read):
    """Two requests arriving in a single read are both dispatched."""
    requests = [
        {
            "jsonrpc": "2.0",
            "method": "install_game",
            "params": {
                "game_id": "3"
            }
        },
        {
            "jsonrpc": "2.0",
            "method": "launch_game",
            "params": {
                "game_id": "3"
            }
        }
    ]
    # Concatenate both newline-terminated messages into one buffer.
    data = b"".join([json.dumps(request).encode() + b"\n" for request in requests])
    read.side_effect = [data, b""]
    asyncio.run(plugin.run())
    plugin.install_game.assert_called_with(game_id="3")
    plugin.launch_game.assert_called_with(game_id="3")
def test_not_finished(plugin, read):
    """A message without the newline terminator is never dispatched."""
    request = {
        "jsonrpc": "2.0",
        "method": "install_game",
        "params": {
            "game_id": "3"
        }
    }
    message = json.dumps(request).encode()  # deliberately no trailing newline
    read.side_effect = [message, b""]
    asyncio.run(plugin.run())
    plugin.install_game.assert_not_called()
| [
"json.dumps"
] | [((1232, 1251), 'json.dumps', 'json.dumps', (['request'], {}), '(request)\n', (1242, 1251), False, 'import json\n'), ((223, 242), 'json.dumps', 'json.dumps', (['request'], {}), '(request)\n', (233, 242), False, 'import json\n'), ((800, 819), 'json.dumps', 'json.dumps', (['request'], {}), '(request)\n', (810, 819), False, 'import json\n')] |
import os
import unittest
from ....BaseTestCase import BaseTestCase
from kombi.Crawler import Crawler
from kombi.Crawler.Fs.Image import ImageCrawler
from kombi.Crawler.PathHolder import PathHolder
class ImageCrawlerTest(BaseTestCase):
    """Test Image crawler."""
    # Fixture paths: one standalone frame and one member of a numbered sequence.
    __singleFile = os.path.join(BaseTestCase.dataTestsDirectory(), "test.dpx")
    __sequenceFile = os.path.join(BaseTestCase.dataTestsDirectory(), "testSeq.0001.exr")
    def testSingleImage(self):
        """
        Test that the crawler created for a single image is based on the image crawler.
        """
        crawler = Crawler.create(PathHolder(self.__singleFile))
        self.assertIsInstance(crawler, ImageCrawler)
    def testSequenceImage(self):
        """
        Test that the crawler created for a sequence image is based on the image crawler.
        """
        crawler = Crawler.create(PathHolder(self.__sequenceFile))
        self.assertIsInstance(crawler, ImageCrawler)
    def testGroupTagSequence(self):
        """
        Test that the tag group has been assigned to the image sequence crawler.
        """
        crawler = Crawler.create(PathHolder(self.__sequenceFile))
        self.assertIn('group', crawler.tagNames())
        # Frame number collapses into a padded '####' placeholder.
        self.assertEqual(crawler.tag('group'), "testSeq.####.exr")
    def testGroupSprintfTagSequence(self):
        """
        Test that the tag groupSprintf has been assigned to the image sequence crawler.
        """
        crawler = Crawler.create(PathHolder(self.__sequenceFile))
        self.assertIn('groupSprintf', crawler.tagNames())
        # Same group name, but as a printf-style pattern.
        self.assertEqual(crawler.tag('groupSprintf'), "testSeq.%04d.exr")
    def testGroupTagSingle(self):
        """
        Test that the tag group has not been assigned to a single image crawler.
        """
        crawler = Crawler.create(PathHolder(self.__singleFile))
        self.assertNotIn('group', crawler.tagNames())
    def testGroupSprintfTagSingle(self):
        """
        Test that the tag groupSprintf has not been assigned to a single image crawler.
        """
        crawler = Crawler.create(PathHolder(self.__singleFile))
        self.assertNotIn('groupSprintf', crawler.tagNames())
    def testIsSequence(self):
        """
        Test if a crawler is a sequence.
        """
        crawler = Crawler.create(PathHolder(self.__singleFile))
        self.assertNotIn('groupSprintf', crawler.tagNames())
    def testIsSequence(self):
        """
        Test if a crawler is a sequence.
        """
        singleCrawler = Crawler.create(PathHolder(self.__singleFile))
        sequenceCrawler = Crawler.create(PathHolder(self.__sequenceFile))
        self.assertEqual(singleCrawler.isSequence(), False)
        self.assertEqual(singleCrawler.var("imageType"), "single")
        self.assertEqual(sequenceCrawler.isSequence(), True)
        self.assertEqual(sequenceCrawler.var("imageType"), "sequence")
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| [
"unittest.main",
"kombi.Crawler.PathHolder.PathHolder"
] | [((2715, 2730), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2728, 2730), False, 'import unittest\n'), ((613, 642), 'kombi.Crawler.PathHolder.PathHolder', 'PathHolder', (['self.__singleFile'], {}), '(self.__singleFile)\n', (623, 642), False, 'from kombi.Crawler.PathHolder import PathHolder\n'), ((878, 909), 'kombi.Crawler.PathHolder.PathHolder', 'PathHolder', (['self.__sequenceFile'], {}), '(self.__sequenceFile)\n', (888, 909), False, 'from kombi.Crawler.PathHolder import PathHolder\n'), ((1139, 1170), 'kombi.Crawler.PathHolder.PathHolder', 'PathHolder', (['self.__sequenceFile'], {}), '(self.__sequenceFile)\n', (1149, 1170), False, 'from kombi.Crawler.PathHolder import PathHolder\n'), ((1480, 1511), 'kombi.Crawler.PathHolder.PathHolder', 'PathHolder', (['self.__sequenceFile'], {}), '(self.__sequenceFile)\n', (1490, 1511), False, 'from kombi.Crawler.PathHolder import PathHolder\n'), ((1818, 1847), 'kombi.Crawler.PathHolder.PathHolder', 'PathHolder', (['self.__singleFile'], {}), '(self.__singleFile)\n', (1828, 1847), False, 'from kombi.Crawler.PathHolder import PathHolder\n'), ((2090, 2119), 'kombi.Crawler.PathHolder.PathHolder', 'PathHolder', (['self.__singleFile'], {}), '(self.__singleFile)\n', (2100, 2119), False, 'from kombi.Crawler.PathHolder import PathHolder\n'), ((2317, 2346), 'kombi.Crawler.PathHolder.PathHolder', 'PathHolder', (['self.__singleFile'], {}), '(self.__singleFile)\n', (2327, 2346), False, 'from kombi.Crawler.PathHolder import PathHolder\n'), ((2389, 2420), 'kombi.Crawler.PathHolder.PathHolder', 'PathHolder', (['self.__sequenceFile'], {}), '(self.__sequenceFile)\n', (2399, 2420), False, 'from kombi.Crawler.PathHolder import PathHolder\n')] |
#!/usr/bin/env python
import unittest, asyncio, asynctest, websockets, json
from remote_params import HttpServer, Params, Server, Remote, create_sync_params, schema_list
from remote_params.WebsocketServer import WebsocketServer
class MockSocket:
    """Minimal websocket stand-in recording close() calls and sent messages."""

    def __init__(self):
        # How many times close() has been invoked.
        self.close_count = 0
        # Messages passed to send(), in order of sending.
        self.msgs = []

    def close(self):
        self.close_count = self.close_count + 1

    async def send(self, msg):
        self.msgs.append(msg)
class TestWebsocketServer(asynctest.TestCase):
  """Integration tests for WebsocketServer against a real local socket."""
  def setUp(self):
    # One int param, server created but NOT started so each test controls startup.
    self.params = params = Params()
    self.p1 = params.int('some_int')
    self.p1.set(0)
    self.wss = WebsocketServer(Server(self.params), start=False)
  def tearDown(self):
    self.wss.stop()
  def test_default_port(self):
    self.assertEqual(self.wss.port, 8081)
  async def test_connects_only_one_remote(self):
    # A single Remote is shared by all websocket connections.
    self.assertEqual(len(self.wss.server.connected_remotes), 0)
    await self.wss.start_async()
    self.assertEqual(len(self.wss.server.connected_remotes), 1)
    uri = f'ws://localhost:{self.wss.port}'
    async with websockets.connect(uri) as websocket:
      self.assertEqual(len(self.wss.server.connected_remotes), 1)
      async with websockets.connect(uri) as websocket:
        self.assertEqual(len(self.wss.server.connected_remotes), 1)
        self.assertEqual(len(self.wss.server.connected_remotes), 1)
      self.assertEqual(len(self.wss.server.connected_remotes), 1)
    self.wss.stop()
    self.assertEqual(len(self.wss.server.connected_remotes), 0)
  async def test_incoming_value(self):
    # POST messages only take effect while the server is running.
    await self.wss._onMessage(f'POST /some_int?value={3}', None)
    self.assertEqual(self.p1.value, 0) # server not started
    await self.wss.start_async()
    await self.wss._onMessage(f'POST /some_int?value={4}', None)
    self.assertEqual(self.p1.value, 4) # param changed
    await self.wss._onMessage(f'POST /wrong_int?value={5}', None)
    self.assertEqual(self.p1.value, 4) # wrong url
    self.wss.stop()
    await self.wss._onMessage(f'POST /wrong_int?value={6}', None)
    self.assertEqual(self.p1.value, 4) # server stopped
  async def test_stop_message(self):
    # A 'stop' message closes the sending socket.
    mocksock = MockSocket()
    await self.wss._onMessage('stop', mocksock)
    self.assertEqual(mocksock.close_count, 1)
  async def test_responds_to_schema_request_with_schema_json(self):
    mocksocket = MockSocket()
    await self.wss._onMessage(f'GET schema.json', mocksocket)
    # verify responded with schema json
    self.assertEqual(mocksocket.msgs, [
      f'POST schema.json?schema={json.dumps(schema_list(self.params))}'
    ])
  async def test_broadcasts_value_changes(self):
    await self.wss.start_async()
    # connect client
    uri = f'ws://127.0.0.1:{self.wss.port}'
    async with websockets.connect(uri) as ws:
      # receive welcome message
      msg = await ws.recv()
      self.assertEqual(msg, 'welcome to pyRemoteParams websockets')
      # change parameter value
      self.p1.set(2)
      # receive broadcast parameter value change
      msg = await ws.recv()
      self.assertEqual(msg, 'POST /some_int?value=2')
  async def test_broadcasts_schema_change(self):
    await self.wss.start_async()
    # connect client
    uri = f'ws://127.0.0.1:{self.wss.port}'
    async with websockets.connect(uri) as ws:
      # receive welcome message
      msg = await ws.recv()
      self.assertEqual(msg, 'welcome to pyRemoteParams websockets')
      # change schema layout by adding a new param
      self.params.string('name')
      # receive broadcast schema change
      msg = await ws.recv()
      self.assertEqual(msg, f'POST schema.json?schema={json.dumps(schema_list(self.params))}')
# run just the tests in this file when executed directly
if __name__ == '__main__':
    unittest.main()
| [
"remote_params.Params",
"remote_params.schema_list",
"websockets.connect",
"unittest.main",
"remote_params.Server"
] | [((3672, 3687), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3685, 3687), False, 'import unittest, asyncio, asynctest, websockets, json\n'), ((513, 521), 'remote_params.Params', 'Params', ([], {}), '()\n', (519, 521), False, 'from remote_params import HttpServer, Params, Server, Remote, create_sync_params, schema_list\n'), ((610, 629), 'remote_params.Server', 'Server', (['self.params'], {}), '(self.params)\n', (616, 629), False, 'from remote_params import HttpServer, Params, Server, Remote, create_sync_params, schema_list\n'), ((1036, 1059), 'websockets.connect', 'websockets.connect', (['uri'], {}), '(uri)\n', (1054, 1059), False, 'import unittest, asyncio, asynctest, websockets, json\n'), ((2718, 2741), 'websockets.connect', 'websockets.connect', (['uri'], {}), '(uri)\n', (2736, 2741), False, 'import unittest, asyncio, asynctest, websockets, json\n'), ((3215, 3238), 'websockets.connect', 'websockets.connect', (['uri'], {}), '(uri)\n', (3233, 3238), False, 'import unittest, asyncio, asynctest, websockets, json\n'), ((1158, 1181), 'websockets.connect', 'websockets.connect', (['uri'], {}), '(uri)\n', (1176, 1181), False, 'import unittest, asyncio, asynctest, websockets, json\n'), ((2519, 2543), 'remote_params.schema_list', 'schema_list', (['self.params'], {}), '(self.params)\n', (2530, 2543), False, 'from remote_params import HttpServer, Params, Server, Remote, create_sync_params, schema_list\n'), ((3577, 3601), 'remote_params.schema_list', 'schema_list', (['self.params'], {}), '(self.params)\n', (3588, 3601), False, 'from remote_params import HttpServer, Params, Server, Remote, create_sync_params, schema_list\n')] |
import warnings
import pandas as pd
from nltk.sentiment.vader import SentimentIntensityAnalyzer
warnings.filterwarnings("ignore")
def classify_comments(text_file, page_name):
    """
    Read a comment-per-line text file, clean each comment, score it with
    NLTK's VADER sentiment analyzer, and label it positive (1),
    negative (0), or neutral ("N").

    Args:
        text_file: path to a text file with one comment per line
        page_name: page whose ./data/<page_name>/ directory receives the
            classified output file

    Returns:
        DataFrame: two columns, "comments" and "labels"
    """
    # nltk.download("vader_lexicon")
    df = pd.read_csv("%s" % text_file, names=["comments"], sep="\t")
    # Clean emails, numbers, URLs and special characters to improve accuracy.
    # NOTE(review): these patterns rely on pandas str.replace's historical
    # regex-by-default behavior; on newer pandas a literal "*" pattern with
    # regex=True is invalid — verify the pinned pandas version.
    df["comments"] = df["comments"].str.replace("^\d+\s|\s\d+\s|\s\d+$", " ")
    df["comments"] = df["comments"].str.replace('"', "")
    df["comments"] = df["comments"].str.replace("*", "")
    df["comments"] = df["comments"].str.replace("/[^@\s]*@[^@\s]*\.[^@\s]*/", "")
    df["comments"] = df["comments"].str.replace(
        '"/[a-zA-Z]*[:\/\/]*[A-Za-z0-9\-_]+\.+[A-Za-z0-9\.\/%&=\?\-_]+/i"', ""
    )
    df["comments"] = df["comments"].str.replace(
        "http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))",
        "",
    )
    df["comments"] = df["comments"].str.replace("https://", "")
    df["comments"] = df["comments"].str.replace(r"\d+(\.\d+)?", "")
    sid = SentimentIntensityAnalyzer()
    # Domain-specific lexicon tweaks layered on top of VADER's defaults.
    new_words = {
        "over": -0.5,
        "garbage": -2.0,
        "dumpster": -3.1,
        ":(": -1,
        "refuses": -1,
        "down": -1,
        "crashed": -2,
        "Amen": 1,
        "Available": 1,
        "#Save": 1,
        "always": 0.5,
    }
    sid.lexicon.update(new_words)
    # Create new columns with VADER's positive/negative/neutral/compound scores
    df["impactPers"] = df["comments"].apply(
        lambda comments: sid.polarity_scores(comments)
    )
    df["posPers"] = df["impactPers"].apply(lambda score_dict: score_dict["pos"])
    df["negPers"] = df["impactPers"].apply(lambda score_dict: score_dict["neg"])
    df["neuPers"] = df["impactPers"].apply(lambda score_dict: score_dict["neu"])
    df["comPers"] = df["impactPers"].apply(lambda score_dict: score_dict["compound"])
    # Label the data based on the compound score
    def label_race(row):
        """
        Map a row's compound score to a label: 1 (positive) when
        >= 0.02, 0 (negative) when <= -0.02, otherwise "N" (neutral).

        Args:
            row: DataFrame row with a "comPers" column
        Returns:
            1, 0, or the string "N"
        """
        if row["comPers"] >= 0.02:
            return 1
        elif row["comPers"] <= -0.02:
            return 0
        else:
            return "N"
    # Create a new column for the final labels
    df["labels"] = df.apply(lambda row: label_race(row), axis=1)
    # Write a new file containing only the two relevant columns
    new_df = df[["comments", "labels"]]
    create_dir(page_name)
    new_df.to_csv("./data/%s/classified_comments.txt" % page_name)
    return new_df
def create_dir(page_name):
    """
    Create the data directory for a page if it does not exist yet.

    Args:
        page_name (str): page whose data directory should exist; the
            directory name is the lowercased page name under ./data/.

    Returns:
        bool: True when the directory was missing and has been created,
        False when it already existed.
    """
    import os

    target = "./data/%s" % page_name.lower()
    if os.path.isdir(target):
        return False
    os.mkdir(target)
    return True
| [
"nltk.sentiment.vader.SentimentIntensityAnalyzer",
"pandas.read_csv",
"os.path.isdir",
"os.mkdir",
"warnings.filterwarnings"
] | [((98, 131), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (121, 131), False, 'import warnings\n'), ((668, 727), 'pandas.read_csv', 'pd.read_csv', (["('%s' % text_file)"], {'names': "['comments']", 'sep': '"""\t"""'}), "('%s' % text_file, names=['comments'], sep='\\t')\n", (679, 727), True, 'import pandas as pd\n'), ((1522, 1550), 'nltk.sentiment.vader.SentimentIntensityAnalyzer', 'SentimentIntensityAnalyzer', ([], {}), '()\n', (1548, 1550), False, 'from nltk.sentiment.vader import SentimentIntensityAnalyzer\n'), ((3488, 3511), 'os.path.isdir', 'os.path.isdir', (['dir_path'], {}), '(dir_path)\n', (3501, 3511), False, 'import os\n'), ((3521, 3539), 'os.mkdir', 'os.mkdir', (['dir_path'], {}), '(dir_path)\n', (3529, 3539), False, 'import os\n')] |
import copy
import logging
import warnings
from kolibri.plugins.registry import registered_plugins
logger = logging.getLogger(__name__)
def __validate_config_option(
    section, name, base_config_spec, plugin_specs, module_path
):
    """Validate one plugin-contributed option and record who defined it.

    Raises ValueError if the plugin tries to redefine a core Kolibri
    option, and warns (while still recording) when another plugin already
    registered the same section/name. ``plugin_specs`` maps
    section -> name -> list of defining plugin module paths and is
    mutated in place.
    """
    # Raise an error if someone tries to overwrite a base option
    # except for the default value.
    if section in base_config_spec:
        if name in base_config_spec[section]:
            raise ValueError("Cannot overwrite a core Kolibri options spec option")
    # Warn if a plugin tries to add an option that another plugin has already added
    if section in plugin_specs:
        if name in plugin_specs[section]:
            warnings.warn(
                "{plugin} set an option {option} in section {section} but {plugins} had already set it".format(
                    plugin=module_path,
                    plugins=", ".join(plugin_specs[section][name]),
                    option=name,
                    section=section,
                )
            )
            plugin_specs[section][name].append(module_path)
        else:
            # If not create the list for this option name
            # to track this and future modifications
            plugin_specs[section][name] = [module_path]
    else:
        # If not create the dict for the section
        # and the list for this option name
        plugin_specs[section] = {name: [module_path]}
def __process_config_spec(
    option_spec, base_config_spec, plugin_specs, module_path, final_spec
):
    """Merge a plugin's option_spec into final_spec after validation.

    Each (section, name) pair is first checked against the base spec and
    against options other plugins already registered, then written into
    final_spec (mutated in place).
    """
    for section in option_spec:
        for name, attrs in option_spec[section].items():
            __validate_config_option(
                section, name, base_config_spec, plugin_specs, module_path
            )
            final_spec.setdefault(section, {})[name] = attrs
def __validate_option_default(section, name, plugin_default_overrides, module_path):
    """Record that *module_path* overrides the default for section/name.

    Warns when another plugin already overrode the same default.
    ``plugin_default_overrides`` maps section -> name -> list of plugin
    module paths and is mutated in place.
    """
    # Warn if a plugin tries to add an option that another plugin has already added
    if section in plugin_default_overrides:
        if name in plugin_default_overrides[section]:
            warnings.warn(
                "{plugin} set an option default {option} in section {section} but {plugins} had already set it".format(
                    plugin=module_path,
                    plugins=", ".join(plugin_default_overrides[section][name]),
                    option=name,
                    section=section,
                )
            )
            plugin_default_overrides[section][name].append(module_path)
        else:
            # If not create the list for this option name
            # to track this and future modifications
            plugin_default_overrides[section][name] = [module_path]
    else:
        # If not create the dict for the section
        # and the list for this option name
        plugin_default_overrides[section] = {name: [module_path]}
def __process_option_defaults(
    option_defaults, base_config_spec, plugin_default_overrides, module_path, final_spec
):
    """Apply a plugin's default-value overrides onto final_spec.

    Only existing section/option pairs can receive a new default; unknown
    sections or options are logged as errors and skipped.
    """
    for section, opts in option_defaults.items():
        for name, default in opts.items():
            __validate_option_default(
                section, name, plugin_default_overrides, module_path
            )
            if section not in final_spec:
                logger.error(
                    "Tried to set a new default in section {}, but this is not a valid section".format(
                        section
                    )
                )
                continue
            if name in final_spec[section]:
                # This is valid, so set a default
                # Note that we do no validation here for now,
                # so it is up to the user to ensure the default value
                # is kosher.
                final_spec[section][name]["default"] = default
            else:
                logger.error(
                    "Tried to set a new default in section {}, for option {} but this is not a valid option".format(
                        section, name
                    )
                )
def extend_config_spec(base_config_spec):
    """Return the base options spec extended by all registered plugins.

    First merges every plugin's ``option_spec`` additions, then applies
    every plugin's ``option_defaults`` overrides (second pass so plugins
    can override defaults contributed by other plugins). The base spec is
    deep-copied and never mutated.
    """
    plugin_specs = {}
    final_spec = copy.deepcopy(base_config_spec)
    # First process options config spec additions
    for plugin_instance in registered_plugins:
        plugin_options = plugin_instance.options_module
        if plugin_options and hasattr(plugin_options, "option_spec"):
            module_path = plugin_instance.module_path
            option_spec = plugin_options.option_spec
            __process_config_spec(
                option_spec, base_config_spec, plugin_specs, module_path, final_spec
            )
    # Now process default value overrides, do this second in order to allow plugins
    # to override default values for other plugins!
    plugin_default_overrides = {}
    for plugin_instance in registered_plugins:
        plugin_options = plugin_instance.option_defaults_module
        if plugin_options and hasattr(plugin_options, "option_defaults"):
            module_path = plugin_instance.module_path
            option_defaults = plugin_options.option_defaults
            __process_option_defaults(
                option_defaults,
                base_config_spec,
                plugin_default_overrides,
                module_path,
                final_spec,
            )
    return final_spec
| [
"logging.getLogger",
"copy.deepcopy"
] | [((110, 137), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (127, 137), False, 'import logging\n'), ((4187, 4218), 'copy.deepcopy', 'copy.deepcopy', (['base_config_spec'], {}), '(base_config_spec)\n', (4200, 4218), False, 'import copy\n')] |
from io import open
import time
import math
import torch
import torch.nn.functional as F
from config import MAX_LENGTH
from config import SOS_token
from config import EOS_token
from config import device
class Lang:
def __init__(self, name):
self.name = name
self.word2index = {}
self.word2count = {}
self.index2word = {0: "SOS", 1: "EOS"}#首尾符号
self.n_words = 2 # Count SOS and EOS
def addSentence(self, sentence):
for word in sentence.split(' '):
self.addWord(word)
def addWord(self, word):
if word not in self.word2index:
self.word2index[word] = self.n_words
self.word2count[word] = 1
self.index2word[self.n_words] = word
self.n_words += 1
else:
self.word2count[word] += 1
def readLangs(lang1, lang2, reverse=False):
# 读取数据放入列表
lines = open('data/chatdata_all.txt', encoding='utf-8').\
read().strip().split('\n')
# 数据处理,处理成一对对的样本
pairs = [[s for s in l.split('@@')] for l in lines]
print(pairs)
# Reverse
if reverse:
pairs = [list(reversed(p)) for p in pairs]
input_lang = Lang(lang2)
output_lang = Lang(lang1)
else:
input_lang = Lang(lang1)
output_lang = Lang(lang2)
return input_lang, output_lang, pairs
def filterPair(p):
return len(p[0].split(' ')) < MAX_LENGTH and \
len(p[1].split(' ')) < MAX_LENGTH
def filterPairs(pairs):
return [pair for pair in pairs if filterPair(pair)]
def prepareData(lang1, lang2, reverse=False):
input_lang, output_lang, pairs = readLangs(lang1, lang2, reverse)
print("Read %s sentence pairs" % len(pairs))
pairs = filterPairs(pairs)
print("Trimmed to %s sentence pairs" % len(pairs))
print("Counting words...")
for pair in pairs:
input_lang.addSentence(pair[0])
output_lang.addSentence(pair[1])
print("Counted words:")
print(input_lang.name, input_lang.n_words)
print(output_lang.name, output_lang.n_words)
return input_lang, output_lang, pairs
#句子转index
def indexesFromSentence(lang, sentence):
return [lang.word2index[word] for word in sentence.split(' ')]
#句子转tensor
def tensorFromSentence(lang, sentence):
indexes = indexesFromSentence(lang, sentence)
indexes.append(EOS_token)
return torch.tensor(indexes, dtype=torch.long, device=device).view(-1, 1)
#句子对转index
def tensorsFromPair(pair, input_lang, output_lang):
input_tensor = tensorFromSentence(input_lang, pair[0])
target_tensor = tensorFromSentence(output_lang, pair[1])
return (input_tensor, target_tensor)
def asMinutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def timeSince(since, percent):
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return '%s (- %s)' % (asMinutes(s), asMinutes(rs)) | [
"torch.tensor",
"time.time",
"io.open",
"math.floor"
] | [((2676, 2694), 'math.floor', 'math.floor', (['(s / 60)'], {}), '(s / 60)\n', (2686, 2694), False, 'import math\n'), ((2784, 2795), 'time.time', 'time.time', ([], {}), '()\n', (2793, 2795), False, 'import time\n'), ((2357, 2411), 'torch.tensor', 'torch.tensor', (['indexes'], {'dtype': 'torch.long', 'device': 'device'}), '(indexes, dtype=torch.long, device=device)\n', (2369, 2411), False, 'import torch\n'), ((898, 945), 'io.open', 'open', (['"""data/chatdata_all.txt"""'], {'encoding': '"""utf-8"""'}), "('data/chatdata_all.txt', encoding='utf-8')\n", (902, 945), False, 'from io import open\n')] |
import os
import sys
import json
import argparse
import progressbar
from pathlib import Path
from random import shuffle
from time import time
import torch
from cpc.dataset import findAllSeqs
from cpc.feature_loader import buildFeature, FeatureModule, loadModel, buildFeature_batch
from cpc.criterion.clustering import kMeanCluster
#from cpc.criterion.research.clustering import kMeanCluster
def readArgs(pathArgs):
print(f"Loading args from {pathArgs}")
with open(pathArgs, 'r') as file:
args = argparse.Namespace(**json.load(file))
return args
def loadClusterModule(pathCheckpoint, norm_vec_len=False):
print(f"Loading ClusterModule at {pathCheckpoint}")
state_dict = torch.load(pathCheckpoint)
if "state_dict" in state_dict: #kmeans
clusterModule = kMeanCluster(torch.zeros(1, state_dict["n_clusters"], state_dict["dim"]), norm_vec_len)
clusterModule.load_state_dict(state_dict["state_dict"])
else: #dpmeans
clusterModule = kMeanCluster(state_dict["mu"])
clusterModule = clusterModule.cuda()
return clusterModule
def parseArgs(argv):
# Run parameters
parser = argparse.ArgumentParser(description='Quantize audio files using CPC Clustering Module.')
parser.add_argument('pathCheckpoint', type=str,
help='Path to the clustering checkpoint.')
parser.add_argument('pathDB', type=str,
help='Path to the dataset that we want to quantize.')
parser.add_argument('pathOutput', type=str,
help='Path to the output directory.')
parser.add_argument('--pathSeq', type=str,
help='Path to the sequences (file names) to be included used.')
parser.add_argument('--split', type=str, default=None,
help="If you want to divide the dataset in small splits, specify it "
"with idxSplit-numSplits (idxSplit > 0), eg. --split 1-20.")
parser.add_argument('--file_extension', type=str, default=".flac",
help="Extension of the audio files in the dataset (default: .flac).")
parser.add_argument('--max_size_seq', type=int, default=10240,
help='Maximal number of frames to consider '
'when computing a batch of features (defaut: 10240).')
parser.add_argument('--batch_size', type=int, default=8,
help='Batch size used to compute features '
'when computing each file (defaut: 8).')
parser.add_argument('--strict', type=bool, default=True,
help='If activated, each batch of feature '
'will contain exactly max_size_seq frames (defaut: True).')
parser.add_argument('--debug', action='store_true',
help="Load only a very small amount of files for "
"debugging purposes.")
parser.add_argument('--nobatch', action='store_true',
help="Don't use batch implementation of when building features."
"NOTE: This can have better quantized units as we can set "
"model.gAR.keepHidden = True (line 162), but the quantization"
"will be a bit longer.")
parser.add_argument('--recursionLevel', type=int, default=1,
help='Speaker level in pathDB (defaut: 1). This is only helpful'
'when --separate-speaker is activated.')
parser.add_argument('--separate-speaker', action='store_true',
help="Separate each speaker with a different output file.")
parser.add_argument('--norm_vec_len', action='store_true',
help="Normalize vector lengths.")
return parser.parse_args(argv)
def main(argv):
# Args parser
args = parseArgs(argv)
print("=============================================================")
print(f"Quantizing data from {args.pathDB}")
print("=============================================================")
# Check if directory exists
if not os.path.exists(args.pathOutput):
print("")
print(f"Creating the output directory at {args.pathOutput}")
Path(args.pathOutput).mkdir(parents=True, exist_ok=True)
# Get splits
if args.split:
assert len(args.split.split("-"))==2 and int(args.split.split("-")[1]) >= int(args.split.split("-")[0]) >= 1, \
"SPLIT must be under the form idxSplit-numSplits (numSplits >= idxSplit >= 1), eg. --split 1-20"
idx_split, num_splits = args.split.split("-")
idx_split = int(idx_split)
num_splits = int(num_splits)
# Find all sequences
print("")
print(f"Looking for all {args.file_extension} files in {args.pathDB} with speakerLevel {args.recursionLevel}")
seqNames, speakers = findAllSeqs(args.pathDB,
speaker_level=args.recursionLevel,
extension=args.file_extension,
loadCache=True)
if args.pathSeq:
with open(args.pathSeq, 'r') as f:
seqs = set([x.strip() for x in f])
filtered = []
for s in seqNames:
if s[1].split('/')[-1].split('.')[0] in seqs:
filtered.append(s)
seqNames = filtered
print(f"Done! Found {len(seqNames)} files and {len(speakers)} speakers!")
if args.separate_speaker:
seqNames_by_speaker = {}
for seq in seqNames:
speaker = seq[1].split("/")[args.recursionLevel-1]
if speaker not in seqNames_by_speaker:
seqNames_by_speaker[speaker] = []
seqNames_by_speaker[speaker].append(seq)
# Check if output file exists
if not args.split:
nameOutput = "quantized_outputs.txt"
else:
nameOutput = f"quantized_outputs_split_{idx_split}-{num_splits}.txt"
if args.separate_speaker is False:
outputFile = os.path.join(args.pathOutput, nameOutput)
assert not os.path.exists(outputFile), \
f"Output file {outputFile} already exists !!!"
# Get splits
if args.split:
startIdx = len(seqNames) // num_splits * (idx_split-1)
if idx_split == num_splits:
endIdx = len(seqNames)
else:
endIdx = min(len(seqNames) // num_splits * idx_split, len(seqNames))
seqNames = seqNames[startIdx:endIdx]
print("")
print(f"Quantizing split {idx_split} out of {num_splits} splits, with {len(seqNames)} files (idx in range({startIdx}, {endIdx})).")
# Debug mode
if args.debug:
nsamples=20
print("")
print(f"Debug mode activated, only load {nsamples} samples!")
# shuffle(seqNames)
seqNames = seqNames[:nsamples]
# Load Clustering args
assert args.pathCheckpoint[-3:] == ".pt"
if os.path.exists(args.pathCheckpoint[:-3] + "_args.json"):
pathConfig = args.pathCheckpoint[:-3] + "_args.json"
elif os.path.exists(os.path.join(os.path.dirname(args.pathCheckpoint), "checkpoint_args.json")):
pathConfig = os.path.join(os.path.dirname(args.pathCheckpoint), "checkpoint_args.json")
else:
assert False, \
f"Args file not found in the directory {os.path.dirname(args.pathCheckpoint)}"
clustering_args = readArgs(pathConfig)
print("")
print(f"Clutering args:\n{json.dumps(vars(clustering_args), indent=4, sort_keys=True)}")
print('-' * 50)
# Load CluterModule
clusterModule = loadClusterModule(args.pathCheckpoint, norm_vec_len=args.norm_vec_len)
clusterModule.cuda()
# Load FeatureMaker
print("")
print("Loading CPC FeatureMaker")
if 'level_gru' in vars(clustering_args) and clustering_args.level_gru is not None:
updateConfig = argparse.Namespace(nLevelsGRU=clustering_args.level_gru)
else:
updateConfig = None
model = loadModel([clustering_args.pathCheckpoint], updateConfig=updateConfig)[0]
## If we don't apply batch implementation, we can set LSTM model to keep hidden units
## making the quality of the quantized units better
if args.nobatch:
model.gAR.keepHidden = True
featureMaker = FeatureModule(model, clustering_args.encoder_layer)
if clustering_args.dimReduction is not None:
dimRed = loadDimReduction(clustering_args.dimReduction, clustering_args.centroidLimits)
featureMaker = torch.nn.Sequential(featureMaker, dimRed)
if not clustering_args.train_mode:
featureMaker.eval()
featureMaker.cuda()
def feature_function(x):
if args.nobatch is False:
res0 = buildFeature_batch(featureMaker, x,
seqNorm=False,
strict=args.strict,
maxSizeSeq=args.max_size_seq,
batch_size=args.batch_size)
if args.norm_vec_len:
# [!] we actually used CPC_audio/scripts/quantize_audio.py for that in the end
res0Lengths = torch.sqrt((res0*res0).sum(2))
res0 = res0 / res0Lengths.view(*(res0Lengths.shape), 1)
return res0
else:
res0 = buildFeature(featureMaker, x,
seqNorm=False,
strict=args.strict)
if args.norm_vec_len:
# [!] we actually used CPC_audio/scripts/quantize_audio.py for that in the end
res0Lengths = torch.sqrt((res0*res0).sum(2))
res0 = res0 / res0Lengths.view(*(res0Lengths.shape), 1)
return res0
print("CPC FeatureMaker loaded!")
# Quantization of files
print("")
print(f"Quantizing audio files...")
seqQuantLines = []
bar = progressbar.ProgressBar(maxval=len(seqNames))
bar.start()
start_time = time()
for index, vals in enumerate(seqNames):
bar.update(index)
file_path = vals[1]
file_path = os.path.join(args.pathDB, file_path)
# Get features & quantizing
cFeatures = feature_function(file_path).cuda()
nGroups = cFeatures.size(-1)//clusterModule.Ck.size(-1)
cFeatures = cFeatures.view(1, -1, clusterModule.Ck.size(-1))
if len(vals) > 2 and int(vals[-1]) > 9400000: # Librilight, to avoid OOM
clusterModule = clusterModule.cpu()
cFeatures = cFeatures.cpu()
qFeatures = torch.argmin(clusterModule(cFeatures), dim=-1)
clusterModule = clusterModule.cuda()
else:
qFeatures = torch.argmin(clusterModule(cFeatures), dim=-1)
qFeatures = qFeatures[0].detach().cpu().numpy()
# Transform to quantized line
quantLine = ",".join(["-".join([str(i) for i in item]) for item in qFeatures.reshape(-1, nGroups)])
seqQuantLines.append(quantLine)
bar.finish()
print(f"...done {len(seqQuantLines)} files in {time()-start_time} seconds.")
# Saving outputs
print("")
print(f"Saving outputs to {outputFile}")
outLines = []
for vals, quantln in zip(seqNames, seqQuantLines):
file_path = vals[1]
file_name = os.path.splitext(os.path.basename(file_path))[0]
outLines.append("\t".join([file_name, quantln]))
with open(outputFile, "w") as f:
f.write("\n".join(outLines))
if __name__ == "__main__":
args = sys.argv[1:]
main(args)
| [
"os.path.exists",
"cpc.dataset.findAllSeqs",
"cpc.feature_loader.buildFeature_batch",
"argparse.ArgumentParser",
"pathlib.Path",
"torch.nn.Sequential",
"cpc.criterion.clustering.kMeanCluster",
"torch.load",
"os.path.join",
"cpc.feature_loader.FeatureModule",
"os.path.dirname",
"argparse.Namesp... | [((710, 736), 'torch.load', 'torch.load', (['pathCheckpoint'], {}), '(pathCheckpoint)\n', (720, 736), False, 'import torch\n'), ((1152, 1245), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Quantize audio files using CPC Clustering Module."""'}), "(description=\n 'Quantize audio files using CPC Clustering Module.')\n", (1175, 1245), False, 'import argparse\n'), ((4894, 5005), 'cpc.dataset.findAllSeqs', 'findAllSeqs', (['args.pathDB'], {'speaker_level': 'args.recursionLevel', 'extension': 'args.file_extension', 'loadCache': '(True)'}), '(args.pathDB, speaker_level=args.recursionLevel, extension=args.\n file_extension, loadCache=True)\n', (4905, 5005), False, 'from cpc.dataset import findAllSeqs\n'), ((6936, 6991), 'os.path.exists', 'os.path.exists', (["(args.pathCheckpoint[:-3] + '_args.json')"], {}), "(args.pathCheckpoint[:-3] + '_args.json')\n", (6950, 6991), False, 'import os\n'), ((8277, 8328), 'cpc.feature_loader.FeatureModule', 'FeatureModule', (['model', 'clustering_args.encoder_layer'], {}), '(model, clustering_args.encoder_layer)\n', (8290, 8328), False, 'from cpc.feature_loader import buildFeature, FeatureModule, loadModel, buildFeature_batch\n'), ((10021, 10027), 'time.time', 'time', ([], {}), '()\n', (10025, 10027), False, 'from time import time\n'), ((999, 1029), 'cpc.criterion.clustering.kMeanCluster', 'kMeanCluster', (["state_dict['mu']"], {}), "(state_dict['mu'])\n", (1011, 1029), False, 'from cpc.criterion.clustering import kMeanCluster\n'), ((4137, 4168), 'os.path.exists', 'os.path.exists', (['args.pathOutput'], {}), '(args.pathOutput)\n', (4151, 4168), False, 'import os\n'), ((6021, 6062), 'os.path.join', 'os.path.join', (['args.pathOutput', 'nameOutput'], {}), '(args.pathOutput, nameOutput)\n', (6033, 6062), False, 'import os\n'), ((7874, 7930), 'argparse.Namespace', 'argparse.Namespace', ([], {'nLevelsGRU': 'clustering_args.level_gru'}), '(nLevelsGRU=clustering_args.level_gru)\n', (7892, 
7930), False, 'import argparse\n'), ((7981, 8051), 'cpc.feature_loader.loadModel', 'loadModel', (['[clustering_args.pathCheckpoint]'], {'updateConfig': 'updateConfig'}), '([clustering_args.pathCheckpoint], updateConfig=updateConfig)\n', (7990, 8051), False, 'from cpc.feature_loader import buildFeature, FeatureModule, loadModel, buildFeature_batch\n'), ((8497, 8538), 'torch.nn.Sequential', 'torch.nn.Sequential', (['featureMaker', 'dimRed'], {}), '(featureMaker, dimRed)\n', (8516, 8538), False, 'import torch\n'), ((10147, 10183), 'os.path.join', 'os.path.join', (['args.pathDB', 'file_path'], {}), '(args.pathDB, file_path)\n', (10159, 10183), False, 'import os\n'), ((817, 876), 'torch.zeros', 'torch.zeros', (['(1)', "state_dict['n_clusters']", "state_dict['dim']"], {}), "(1, state_dict['n_clusters'], state_dict['dim'])\n", (828, 876), False, 'import torch\n'), ((6082, 6108), 'os.path.exists', 'os.path.exists', (['outputFile'], {}), '(outputFile)\n', (6096, 6108), False, 'import os\n'), ((8713, 8845), 'cpc.feature_loader.buildFeature_batch', 'buildFeature_batch', (['featureMaker', 'x'], {'seqNorm': '(False)', 'strict': 'args.strict', 'maxSizeSeq': 'args.max_size_seq', 'batch_size': 'args.batch_size'}), '(featureMaker, x, seqNorm=False, strict=args.strict,\n maxSizeSeq=args.max_size_seq, batch_size=args.batch_size)\n', (8731, 8845), False, 'from cpc.feature_loader import buildFeature, FeatureModule, loadModel, buildFeature_batch\n'), ((9369, 9433), 'cpc.feature_loader.buildFeature', 'buildFeature', (['featureMaker', 'x'], {'seqNorm': '(False)', 'strict': 'args.strict'}), '(featureMaker, x, seqNorm=False, strict=args.strict)\n', (9381, 9433), False, 'from cpc.feature_loader import buildFeature, FeatureModule, loadModel, buildFeature_batch\n'), ((534, 549), 'json.load', 'json.load', (['file'], {}), '(file)\n', (543, 549), False, 'import json\n'), ((4265, 4286), 'pathlib.Path', 'Path', (['args.pathOutput'], {}), '(args.pathOutput)\n', (4269, 4286), False, 'from pathlib 
import Path\n'), ((7091, 7127), 'os.path.dirname', 'os.path.dirname', (['args.pathCheckpoint'], {}), '(args.pathCheckpoint)\n', (7106, 7127), False, 'import os\n'), ((7189, 7225), 'os.path.dirname', 'os.path.dirname', (['args.pathCheckpoint'], {}), '(args.pathCheckpoint)\n', (7204, 7225), False, 'import os\n'), ((11347, 11374), 'os.path.basename', 'os.path.basename', (['file_path'], {}), '(file_path)\n', (11363, 11374), False, 'import os\n'), ((7337, 7373), 'os.path.dirname', 'os.path.dirname', (['args.pathCheckpoint'], {}), '(args.pathCheckpoint)\n', (7352, 7373), False, 'import os\n'), ((11098, 11104), 'time.time', 'time', ([], {}), '()\n', (11102, 11104), False, 'from time import time\n')] |
import glob
import os
import os.path
import sys
import shutil
import cPickle
from types import StringType, UnicodeType
from distutils.core import setup
from distutils.extension import Extension
from distutils.command.install import install
PY3K = sys.version_info[0] > 2
with open('README.rst') as inp:
long_description = inp.read()
__version__ = ''
inp = open('prody/__init__.py')
for line in inp:
if (line.startswith('__version__')):
exec(line.strip())
break
inp.close()
def isInstalled(module_name):
"""Check if a required package is installed, by trying to import it."""
try:
return __import__(module_name)
except ImportError:
return False
else:
return True
if not isInstalled('numpy'):
print("""NumPy is not installed. This package is required for main ProDy
features and needs to be installed before you can use ProDy.
You can find NumPy at: http://numpy.scipy.org""")
PACKAGES = ['prody', 'prody.atomic', 'prody.dynamics', 'prody.ensemble',
'prody.measure', 'prody.proteins', 'prody.trajectory',
'prody.routines', 'prody.utilities']
PACKAGE_DATA = {}
if sys.version_info[:2] > (2,6):
PACKAGES.extend(['prody.tests', 'prody.tests.test_kdtree',
'prody.tests.test_measure'])
PACKAGE_DATA['prody.tests'] = ['data/pdb*.pdb', 'data/*.dat',
'data/*.coo', 'data/dcd*.dcd']
EXTENSIONS = []
if os.name != 'java' and sys.version_info[0] == 2:
pairwise2 = ['cpairwise2.c', 'pairwise2.py']
if all([os.path.isfile(os.path.join('prody', 'proteins', fn))
for fn in pairwise2]):
EXTENSIONS.append(
Extension('prody.proteins.cpairwise2',
['prody/proteins/cpairwise2.c'],
include_dirs=["prody"]
))
else:
raise Exception('one or more pairwise2 module files are missing')
if isInstalled('numpy'):
import numpy
kdtree_files = ['__init__.py', 'KDTree.c', 'KDTree.h',
'KDTreemodule.c', 'Neighbor.h', 'kdtree.py']
if all([os.path.isfile(os.path.join('prody', 'kdtree', fn))
for fn in kdtree_files]):
EXTENSIONS.append(
Extension('prody.kdtree._CKDTree',
['prody/kdtree/KDTree.c',
'prody/kdtree/KDTreemodule.c'],
include_dirs=[numpy.get_include()],
))
else:
raise Exception('one or more kdtree module files are missing')
PACKAGES.append('prody.kdtree')
elif isInstalled('numpy'):
raise ImportError('numpy is not installed')
SCRIPTS = ['scripts/prody']
setup(
name='ProDy',
version=__version__,
author='<NAME>',
author_email='ahb12 at pitt dot edu',
description='A Python Package for Protein Dynamics Analysis',
long_description=long_description,
url='http://www.csb.pitt.edu/ProDy',
packages=PACKAGES,
package_data=PACKAGE_DATA,
ext_modules=EXTENSIONS,
license='GPLv3',
keywords=('protein, dynamics, elastic network model, '
'Gaussian network model, anisotropic network model, '
'essential dynamics analysis, principal component analysis, '
'Protein Data Bank, PDB, GNM, ANM, PCA'),
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Chemistry',
],
scripts=SCRIPTS,
requires=['NumPy', ],
provides=['ProDy({0:s})'.format(__version__)]
)
| [
"os.path.join",
"distutils.extension.Extension",
"numpy.get_include"
] | [((1721, 1820), 'distutils.extension.Extension', 'Extension', (['"""prody.proteins.cpairwise2"""', "['prody/proteins/cpairwise2.c']"], {'include_dirs': "['prody']"}), "('prody.proteins.cpairwise2', ['prody/proteins/cpairwise2.c'],\n include_dirs=['prody'])\n", (1730, 1820), False, 'from distutils.extension import Extension\n'), ((1590, 1627), 'os.path.join', 'os.path.join', (['"""prody"""', '"""proteins"""', 'fn'], {}), "('prody', 'proteins', fn)\n", (1602, 1627), False, 'import os\n'), ((2183, 2218), 'os.path.join', 'os.path.join', (['"""prody"""', '"""kdtree"""', 'fn'], {}), "('prody', 'kdtree', fn)\n", (2195, 2218), False, 'import os\n'), ((2496, 2515), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (2513, 2515), False, 'import numpy\n')] |
import io
import json
from google.auth import compute_engine
from google.oauth2 import service_account
def gcp_credentials(service_account_file):
if service_account_file:
with io.open(service_account_file, 'r', encoding='utf-8') as json_fi:
credentials_info = json.load(json_fi)
credentials = service_account.Credentials.from_service_account_info(credentials_info)
else:
# Explicitly use Compute Engine credentials. These credentials are
# available on Compute Engine, App Engine Flexible, and Container Engine.
credentials = compute_engine.Credentials()
return credentials
| [
"google.oauth2.service_account.Credentials.from_service_account_info",
"json.load",
"google.auth.compute_engine.Credentials",
"io.open"
] | [((328, 399), 'google.oauth2.service_account.Credentials.from_service_account_info', 'service_account.Credentials.from_service_account_info', (['credentials_info'], {}), '(credentials_info)\n', (381, 399), False, 'from google.oauth2 import service_account\n'), ((589, 617), 'google.auth.compute_engine.Credentials', 'compute_engine.Credentials', ([], {}), '()\n', (615, 617), False, 'from google.auth import compute_engine\n'), ((191, 243), 'io.open', 'io.open', (['service_account_file', '"""r"""'], {'encoding': '"""utf-8"""'}), "(service_account_file, 'r', encoding='utf-8')\n", (198, 243), False, 'import io\n'), ((287, 305), 'json.load', 'json.load', (['json_fi'], {}), '(json_fi)\n', (296, 305), False, 'import json\n')] |
import requests
import tweepy
import random
import time
import os
import bs4
from bs4 import BeautifulSoup
from pybooru import Moebooru
siteurl='https://www.sakugabooru.com/post/show/'
header = {'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'}
client = Moebooru(site_url='https://www.sakugabooru.com')
files = client.post_list(tags="order:random")
api_keys = open("token.txt") #Create your own token.txt file with your API Keys from Twitter
lines = api_keys.readlines()
consumer_key = lines[1].rstrip()
consumer_secret= lines[4].rstrip()
access_token = lines[7].rstrip()
access_token_secret=lines[10].rstrip()
def main():
try:
files = client.post_list(tags="order:random") #Random Post
choice = random.choice(files) #Select 1 Random Post from Query
boorurl=choice['file_url'] #File URL
tags = choice['tags'] #Post Tags
verdict=filetypechecker(boorurl) #Checker if .mp4 file or not
if(verdict):
posturl = siteurl+"{0}".format(choice['id']) #POST URL from SakugaBooru
animatorname=artistgrabber(posturl)
animename=animegrabber(posturl)
time.sleep(5)
data = requests.get(boorurl,headers=header)
print("data:",data.status_code)
with open("C:/Users/Admin/Documents/PersonalFiles/Repositories/sakugabooru-video-files/{}".format(choice['id'])+".mp4",'wb') as file: #Customize Directory
file.write(data.content)
#params="Animator Name: {}\nTags: {}\nPost URL: {}\n".format(animatorname,tags,posturl)
#BETA TESTING
params="Animator Name: {}\nListed Anime Name: {}\nTags: {}\nPost URL: {}\n".format(animatorname,animename,tags,posturl)
#print(params)
time.sleep(5)
mediapost(params)
except Exception as e:
print("Main() Error:",e)
def artistgrabber(posturl):
r = requests.get(posturl,headers=header)
print("artistgrabber:",r.status_code)
soup = bs4.BeautifulSoup(r.text,'lxml')
'''
for div in soup.find_all(class_="sidebar"):
artist=div.find(class_="tag-type-artist").text
artistname=(artist.strip("? "))
'''
for div in soup.find_all(class_="tag-type-artist"):
atags = div.find_all('a')
for artists in atags:
artiststr=artists.text
print(artiststr)
return artiststr
#BETA TESTING
def animegrabber(posturl):
r = requests.get(posturl,headers=header)
print("animegrabber:",r.status_code)
soup = bs4.BeautifulSoup(r.text,'lxml')
for div in soup.find_all(class_="tag-type-copyright"):
atags = div.find_all('a')
for anime in atags:
animestr=anime.text
print(animestr)
return animestr
def filetypechecker(boorurl):
if boorurl.find('/'):
if ".mp4" in (boorurl.rsplit('/',1)[1]):
return True
else:
return False
def mediapost(params):
try:
auth = tweepy.OAuthHandler(consumer_key,consumer_secret)
auth.set_access_token(access_token,access_token_secret)
api = tweepy.API(auth)
except Exception as e:
print (e)
try:
file_path=[]
directory_name='C:/Users/Admin/Documents/PersonalFiles/Repositories/sakugabooru-video-files' #Customize Directory
media_list=filter(lambda x: os.path.isfile(os.path.join(directory_name,x)),os.listdir(directory_name))
media_list=sorted(media_list,key=lambda x: os.path.getmtime(os.path.join(directory_name,x)),reverse=True)
for media in media_list:
file_path.append(os.path.join(directory_name,media))
media=file_path[0]
print(media)
upload_media=api.media_upload(media, media_category='tweet_video')
api.update_status(status=params, media_ids=[upload_media.media_id_string])
except Exception as e:
print("Mediapost() Error:",e)
if __name__ == '__main__':
main() | [
"random.choice",
"os.listdir",
"pybooru.Moebooru",
"os.path.join",
"requests.get",
"time.sleep",
"bs4.BeautifulSoup",
"tweepy.API",
"tweepy.OAuthHandler"
] | [((336, 384), 'pybooru.Moebooru', 'Moebooru', ([], {'site_url': '"""https://www.sakugabooru.com"""'}), "(site_url='https://www.sakugabooru.com')\n", (344, 384), False, 'from pybooru import Moebooru\n'), ((2143, 2180), 'requests.get', 'requests.get', (['posturl'], {'headers': 'header'}), '(posturl, headers=header)\n', (2155, 2180), False, 'import requests\n'), ((2233, 2266), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['r.text', '"""lxml"""'], {}), "(r.text, 'lxml')\n", (2250, 2266), False, 'import bs4\n'), ((2662, 2699), 'requests.get', 'requests.get', (['posturl'], {'headers': 'header'}), '(posturl, headers=header)\n', (2674, 2699), False, 'import requests\n'), ((2751, 2784), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['r.text', '"""lxml"""'], {}), "(r.text, 'lxml')\n", (2768, 2784), False, 'import bs4\n'), ((819, 839), 'random.choice', 'random.choice', (['files'], {}), '(files)\n', (832, 839), False, 'import random\n'), ((3217, 3267), 'tweepy.OAuthHandler', 'tweepy.OAuthHandler', (['consumer_key', 'consumer_secret'], {}), '(consumer_key, consumer_secret)\n', (3236, 3267), False, 'import tweepy\n'), ((3345, 3361), 'tweepy.API', 'tweepy.API', (['auth'], {}), '(auth)\n', (3355, 3361), False, 'import tweepy\n'), ((1271, 1284), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1281, 1284), False, 'import time\n'), ((1325, 1362), 'requests.get', 'requests.get', (['boorurl'], {'headers': 'header'}), '(boorurl, headers=header)\n', (1337, 1362), False, 'import requests\n'), ((1978, 1991), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1988, 1991), False, 'import time\n'), ((3651, 3677), 'os.listdir', 'os.listdir', (['directory_name'], {}), '(directory_name)\n', (3661, 3677), False, 'import os\n'), ((3856, 3891), 'os.path.join', 'os.path.join', (['directory_name', 'media'], {}), '(directory_name, media)\n', (3868, 3891), False, 'import os\n'), ((3619, 3650), 'os.path.join', 'os.path.join', (['directory_name', 'x'], {}), '(directory_name, x)\n', (3631, 3650), 
False, 'import os\n'), ((3747, 3778), 'os.path.join', 'os.path.join', (['directory_name', 'x'], {}), '(directory_name, x)\n', (3759, 3778), False, 'import os\n')] |
"""
"""
from membership.web.urls import membership_urls
from public.web.urls import error_urls, public_urls, static_urls
from public.web.views import home
from wheezy.routing import url
locale_pattern = "{locale:(en|ru)}/"
locale_defaults = {"locale": "en"}
locale_urls = public_urls + membership_urls
locale_urls.append(("error/", error_urls))
all_urls = [
url("", home, locale_defaults, name="default"),
(locale_pattern, locale_urls, locale_defaults),
]
all_urls += static_urls
| [
"wheezy.routing.url"
] | [((365, 411), 'wheezy.routing.url', 'url', (['""""""', 'home', 'locale_defaults'], {'name': '"""default"""'}), "('', home, locale_defaults, name='default')\n", (368, 411), False, 'from wheezy.routing import url\n')] |
from stheno import (
B, # Linear algebra backend
Graph, # Graph that keep track of the graphical model
GP, # Gaussian process
EQ, # Squared-exponential kernel
Matern12, # Matern-1/2 kernel
Matern52, # Matern-5/2 kernel
Delta, # Noise kernel
Normal, # Gaussian distribution
Diagonal, # Diagonal matrix
dense, # Convert matrix objects to regular matrices
)
__all__ = ['model', 'project', 'objective', 'predict']
def model(vs, m):
    """Construct the latent-process model.

    Args:
        vs (:class:`varz.Vars`): Variable container.
        m (int): Number of latent processes.

    Returns:
        tuple: Tuple containing a list of the latent processes, the
            observation noise, and the noises on the latent processes.
    """
    graph = Graph()

    # Observation noise:
    noise_obs = vs.bnd(0.1, name='noise_obs')

    def _latent(index):
        # Long-term trend: a smooth, slowly-varying component.
        long_var = vs.bnd(0.9, name=f'{index}/long_term/var')
        long_scale = vs.bnd(2 * 30, name=f'{index}/long_term/scale')
        k = long_var * EQ().stretch(long_scale)
        # Short-term trend: a rougher, quickly-varying component.
        short_var = vs.bnd(0.1, name=f'{index}/short_term/var')
        short_scale = vs.bnd(20, name=f'{index}/short_term/scale')
        k = k + short_var * Matern12().stretch(short_scale)
        return GP(k, graph=graph)

    # Latent processes:
    xs = [_latent(i) for i in range(m)]

    # Latent noises:
    noises_latent = vs.bnd(0.1 * B.ones(m), name='noises_latent')

    return xs, noise_obs, noises_latent
def project(vs, m, y_data, locs):
    """Project the data onto the latent processes.

    Args:
        vs (:class:`varz.Vars`): Variable container.
        m (int): Number of latent processes.
        y_data (tensor): Observations.
        locs (tensor): Spatial locations of observations.

    Returns:
        tuple: Tuple containing the projected outputs, the mixing matrix,
            S from the mixing matrix, and the observation noises.
    """
    _, noise_obs, noises_latent = model(vs, m)

    # Build the mixing matrix from a spatial kernel over the locations.
    scales = vs.bnd(B.ones(2), name='scales')
    k_spatial = dense(Matern52().stretch(scales)(locs))
    u, s, _ = B.svd(k_spatial)
    # Keep only the m largest singular values.
    s = s[:m]
    mixing = u[:, :m] * s[None, :] ** .5
    proj = B.transpose(u[:, :m]) / s[:, None] ** .5

    # Project the data and unstack over the latent processes.
    y_proj = B.unstack(B.matmul(proj, y_data, tr_b=True))

    # Observation noises, one per output:
    p = B.shape(y_data)[1]
    noises_obs = noise_obs * B.ones(B.dtype(noise_obs), p)

    return y_proj, mixing, s, noises_obs
def objective(vs, m, x_data, y_data, locs):
    """NLML objective.
    Args:
        vs (:class:`varz.Vars`): Variable container.
        m (int): Number of latent processes.
        x_data (tensor): Time stamps of the observations.
        y_data (tensor): Observations.
        locs (tensor): Spatial locations of observations.
    Returns:
        scalar: Negative log-marginal likelihood.
    """
    y_proj, _, S, noises_obs = project(vs, m, y_data, locs)
    xs, noise_obs, noises_latent = model(vs, m)
    # Add contribution of latent processes.
    lml = 0
    for i, (x, y) in enumerate(zip(xs, y_proj)):
        # Likelihood of the projected data under signal plus (projected)
        # observation noise and latent-process noise ...
        e_signal = GP((noise_obs / S[i] + noises_latent[i]) * Delta(),
                      graph=x.graph)
        lml += (x + e_signal)(x_data).logpdf(y)
        # ... corrected by subtracting the noise-only likelihood.
        e_noise = GP(noise_obs / S[i] * Delta(), graph=x.graph)
        lml -= e_noise(x_data).logpdf(y)
    # Add regularisation contribution.
    lml += B.sum(Normal(Diagonal(noises_obs)).logpdf(B.transpose(y_data)))
    # Return negative the evidence, normalised by the number of data points.
    n, p = B.shape(y_data)
    return -lml / (n * p)
def predict(vs, m, x_data, y_data, locs, x_pred):
    """Make predictions.
    Args:
        vs (:class:`varz.Vars`): Variable container.
        m (int): Number of latent processes.
        x_data (tensor): Time stamps of the observations.
        y_data (tensor): Observations.
        locs (tensor): Spatial locations of observations.
        x_pred (tensor): Time stamps to predict at.
    Returns:
        tuple: Tuple containing the predictions for the latent processes and
            predictions for the observations.
    """
    # Construct model and project data for prediction.
    xs, noise_obs, noises_latent = model(vs, m)
    y_proj, H, S, noises_obs = project(vs, m, y_data, locs)
    # Total noise on each projected latent process.
    L = noise_obs / S + noises_latent
    # Condition latent processes.
    xs_posterior = []
    for x, noise, y in zip(xs, L, y_proj):
        e = GP(noise * Delta(), graph=x.graph)
        xs_posterior.append(x | ((x + e)(x_data), y))
    xs = xs_posterior
    # Extract posterior means and variances of the latent processes.
    x_means, x_vars = zip(*[(x.mean(x_pred)[:, 0],
                             x.kernel.elwise(x_pred)[:, 0]) for x in xs])
    # Construct predictions for latent processes (mean and central
    # two-standard-deviation bounds, noise included).
    lat_preds = [B.to_numpy(mean,
                            mean - 2 * (var + L[i]) ** .5,
                            mean + 2 * (var + L[i]) ** .5)
                 for i, (mean, var) in enumerate(zip(x_means, x_vars))]
    # Pull means through mixing matrix.
    x_means = B.stack(*x_means, axis=0)
    y_means = B.matmul(H, x_means)
    # Pull variances through mixing matrix and add noise.
    x_vars = B.stack(*x_vars, axis=0)
    y_vars = B.matmul(H ** 2, x_vars + noises_latent[:, None]) + noise_obs
    # Construct predictions for observations.
    obs_preds = [(mean, mean - 2 * var ** .5, mean + 2 * var ** .5)
                 for mean, var in zip(y_means, y_vars)]
    return lat_preds, obs_preds
| [
"stheno.Delta",
"stheno.Diagonal",
"stheno.B.matmul",
"stheno.Graph",
"stheno.B.transpose",
"stheno.B.stack",
"stheno.B.dtype",
"stheno.Matern12",
"stheno.B.ones",
"stheno.B.shape",
"stheno.B.svd",
"stheno.EQ",
"stheno.Matern52",
"stheno.B.to_numpy",
"stheno.GP"
] | [((783, 790), 'stheno.Graph', 'Graph', ([], {}), '()\n', (788, 790), False, 'from stheno import B, Graph, GP, EQ, Matern12, Matern52, Delta, Normal, Diagonal, dense\n'), ((2162, 2170), 'stheno.B.svd', 'B.svd', (['K'], {}), '(K)\n', (2167, 2170), False, 'from stheno import B, Graph, GP, EQ, Matern12, Matern52, Delta, Normal, Diagonal, dense\n'), ((3605, 3620), 'stheno.B.shape', 'B.shape', (['y_data'], {}), '(y_data)\n', (3612, 3620), False, 'from stheno import B, Graph, GP, EQ, Matern12, Matern52, Delta, Normal, Diagonal, dense\n'), ((5134, 5159), 'stheno.B.stack', 'B.stack', (['*x_means'], {'axis': '(0)'}), '(*x_means, axis=0)\n', (5141, 5159), False, 'from stheno import B, Graph, GP, EQ, Matern12, Matern52, Delta, Normal, Diagonal, dense\n'), ((5174, 5194), 'stheno.B.matmul', 'B.matmul', (['H', 'x_means'], {}), '(H, x_means)\n', (5182, 5194), False, 'from stheno import B, Graph, GP, EQ, Matern12, Matern52, Delta, Normal, Diagonal, dense\n'), ((5267, 5291), 'stheno.B.stack', 'B.stack', (['*x_vars'], {'axis': '(0)'}), '(*x_vars, axis=0)\n', (5274, 5291), False, 'from stheno import B, Graph, GP, EQ, Matern12, Matern52, Delta, Normal, Diagonal, dense\n'), ((1305, 1324), 'stheno.GP', 'GP', (['kernel'], {'graph': 'g'}), '(kernel, graph=g)\n', (1307, 1324), False, 'from stheno import B, Graph, GP, EQ, Matern12, Matern52, Delta, Normal, Diagonal, dense\n'), ((2074, 2083), 'stheno.B.ones', 'B.ones', (['(2)'], {}), '(2)\n', (2080, 2083), False, 'from stheno import B, Graph, GP, EQ, Matern12, Matern52, Delta, Normal, Diagonal, dense\n'), ((2229, 2250), 'stheno.B.transpose', 'B.transpose', (['U[:, :m]'], {}), '(U[:, :m])\n', (2240, 2250), False, 'from stheno import B, Graph, GP, EQ, Matern12, Matern52, Delta, Normal, Diagonal, dense\n'), ((2348, 2378), 'stheno.B.matmul', 'B.matmul', (['T', 'y_data'], {'tr_b': '(True)'}), '(T, y_data, tr_b=True)\n', (2356, 2378), False, 'from stheno import B, Graph, GP, EQ, Matern12, Matern52, Delta, Normal, Diagonal, dense\n'), ((4872, 
4957), 'stheno.B.to_numpy', 'B.to_numpy', (['mean', '(mean - 2 * (var + L[i]) ** 0.5)', '(mean + 2 * (var + L[i]) ** 0.5)'], {}), '(mean, mean - 2 * (var + L[i]) ** 0.5, mean + 2 * (var + L[i]) ** 0.5\n )\n', (4882, 4957), False, 'from stheno import B, Graph, GP, EQ, Matern12, Matern52, Delta, Normal, Diagonal, dense\n'), ((5305, 5354), 'stheno.B.matmul', 'B.matmul', (['(H ** 2)', '(x_vars + noises_latent[:, None])'], {}), '(H ** 2, x_vars + noises_latent[:, None])\n', (5313, 5354), False, 'from stheno import B, Graph, GP, EQ, Matern12, Matern52, Delta, Normal, Diagonal, dense\n'), ((1457, 1466), 'stheno.B.ones', 'B.ones', (['m'], {}), '(m)\n', (1463, 1466), False, 'from stheno import B, Graph, GP, EQ, Matern12, Matern52, Delta, Normal, Diagonal, dense\n'), ((2443, 2461), 'stheno.B.dtype', 'B.dtype', (['noise_obs'], {}), '(noise_obs)\n', (2450, 2461), False, 'from stheno import B, Graph, GP, EQ, Matern12, Matern52, Delta, Normal, Diagonal, dense\n'), ((3494, 3513), 'stheno.B.transpose', 'B.transpose', (['y_data'], {}), '(y_data)\n', (3505, 3513), False, 'from stheno import B, Graph, GP, EQ, Matern12, Matern52, Delta, Normal, Diagonal, dense\n'), ((2463, 2478), 'stheno.B.shape', 'B.shape', (['y_data'], {}), '(y_data)\n', (2470, 2478), False, 'from stheno import B, Graph, GP, EQ, Matern12, Matern52, Delta, Normal, Diagonal, dense\n'), ((3201, 3208), 'stheno.Delta', 'Delta', ([], {}), '()\n', (3206, 3208), False, 'from stheno import B, Graph, GP, EQ, Matern12, Matern52, Delta, Normal, Diagonal, dense\n'), ((3336, 3343), 'stheno.Delta', 'Delta', ([], {}), '()\n', (3341, 3343), False, 'from stheno import B, Graph, GP, EQ, Matern12, Matern52, Delta, Normal, Diagonal, dense\n'), ((4509, 4516), 'stheno.Delta', 'Delta', ([], {}), '()\n', (4514, 4516), False, 'from stheno import B, Graph, GP, EQ, Matern12, Matern52, Delta, Normal, Diagonal, dense\n'), ((1069, 1073), 'stheno.EQ', 'EQ', ([], {}), '()\n', (1071, 1073), False, 'from stheno import B, Graph, GP, EQ, Matern12, 
Matern52, Delta, Normal, Diagonal, dense\n'), ((1263, 1273), 'stheno.Matern12', 'Matern12', ([], {}), '()\n', (1271, 1273), False, 'from stheno import B, Graph, GP, EQ, Matern12, Matern52, Delta, Normal, Diagonal, dense\n'), ((2114, 2124), 'stheno.Matern52', 'Matern52', ([], {}), '()\n', (2122, 2124), False, 'from stheno import B, Graph, GP, EQ, Matern12, Matern52, Delta, Normal, Diagonal, dense\n'), ((3465, 3485), 'stheno.Diagonal', 'Diagonal', (['noises_obs'], {}), '(noises_obs)\n', (3473, 3485), False, 'from stheno import B, Graph, GP, EQ, Matern12, Matern52, Delta, Normal, Diagonal, dense\n')] |
from os import listdir
from os.path import join
import os, errno
def getImageNum(rootDir):
    """Return the number of entries directly inside *rootDir*.

    Args:
        rootDir (str): Path of the directory to count entries in.

    Returns:
        int: Number of files/subdirectories in *rootDir* (non-recursive).
    """
    # `join(rootDir)` with a single argument was a no-op; list the
    # directory directly.
    return len(listdir(rootDir))
def safeMkdir(path: str):
    """Create *path* (including parents), ignoring 'already exists' errors."""
    try:
        os.makedirs(path)
    except OSError as exc:
        # Only an already-existing path is acceptable; re-raise anything else.
        if exc.errno == errno.EEXIST:
            return
        raise
"os.path.join",
"os.makedirs"
] | [((175, 192), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (186, 192), False, 'import os, errno\n'), ((115, 128), 'os.path.join', 'join', (['rootDir'], {}), '(rootDir)\n', (119, 128), False, 'from os.path import join\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import shutil
from os.path import basename, exists, isdir, splitext
from sfzparser import SFZParser
def main(args=None):
    """Prefix bare sample paths in an SFZ file with its base-name directory."""
    sfz_path = args[0]
    prefix = splitext(basename(sfz_path))[0]
    parser = SFZParser(sfz_path)

    changed = False
    for name, sect in parser.sections:
        # Fix sample filenames that lack a directory prefix.
        if name != 'region' or 'sample' not in sect:
            continue
        if isdir(prefix) and '/' not in sect['sample']:
            print("Setting prefix for sample '{}' to '{}'.".format(sect['sample'], prefix))
            sect['sample'] = prefix + '/' + sect['sample']
            changed = True

    if not changed:
        print("Nothing to fix.")
        return

    # Keep one backup of the original file before rewriting it.
    if not exists(sfz_path + '.bak'):
        shutil.copy(sfz_path, sfz_path + '.bak')
    with open(sfz_path, 'w') as sfz:
        for name, sect in parser.sections:
            if name == 'comment':
                sfz.write(sect + '\n')
            else:
                sfz.write("<{}>\n".format(name))
                for key, value in sorted(sect.items()):
                    sfz.write("  {}={}\n".format(key, value))
if __name__ == '__main__':
    import sys
    # NOTE(review): `sys.argv[1:] or 0` passes 0 when no arguments are given,
    # which `main` would then index with `args[0]` and fail — presumably a
    # quirk; confirm the intended no-argument behavior.
    sys.exit(main(sys.argv[1:] or 0))
| [
"os.path.exists",
"os.path.isdir",
"os.path.basename",
"sfzparser.SFZParser",
"shutil.copy"
] | [((236, 249), 'sfzparser.SFZParser', 'SFZParser', (['fn'], {}), '(fn)\n', (245, 249), False, 'from sfzparser import SFZParser\n'), ((206, 218), 'os.path.basename', 'basename', (['fn'], {}), '(fn)\n', (214, 218), False, 'from os.path import basename, exists, isdir, splitext\n'), ((416, 425), 'os.path.isdir', 'isdir', (['bn'], {}), '(bn)\n', (421, 425), False, 'from os.path import basename, exists, isdir, splitext\n'), ((655, 674), 'os.path.exists', 'exists', (["(fn + '.bak')"], {}), "(fn + '.bak')\n", (661, 674), False, 'from os.path import basename, exists, isdir, splitext\n'), ((688, 716), 'shutil.copy', 'shutil.copy', (['fn', "(fn + '.bak')"], {}), "(fn, fn + '.bak')\n", (699, 716), False, 'import shutil\n')] |
import unittest
from huobi.rest.client import HuobiRestClient
from huobi.rest.error import (
HuobiRestiApiError
)
import os
from os.path import join, dirname
from dotenv import load_dotenv
# Load API credentials from the .env file three directory levels above this
# module's directory (presumably the project root — verify against layout).
dotenv_path = join(dirname(dirname(dirname(dirname(__file__)))), '.env')
load_dotenv(dotenv_path)
class TestCommonEndpoint(unittest.TestCase):
    """Base fixture: builds an authenticated client from environment keys."""

    def setUp(self):
        self.client = HuobiRestClient(
            access_key=os.environ['ACCESS_KEY'],
            secret_key=os.environ['SECRET_KEY'],
        )

    def tearDown(self):
        self.client.close()
class TestCommonSymbols(TestCommonEndpoint):
    """Tests for the public /symbols endpoint."""

    def test_success(self):
        response = self.client.symbols()
        self.assertEqual(response.res.status_code, 200)
        self.assertIn('data', response.data)
        self.assertIsInstance(response.data['data'], list)

    def test_authentication_fail(self):
        # Bogus credentials must raise on an authenticated endpoint.
        bad_client = HuobiRestClient(access_key='1', secret_key='2')
        with self.assertRaises(HuobiRestiApiError):
            bad_client.accounts()
class TestCommonCurrencies(TestCommonEndpoint):
    """Tests for the public /currencies endpoint and its alias."""

    def test_success(self):
        response = self.client.currencies()
        self.assertEqual(response.res.status_code, 200)

    def test_alias(self):
        response = self.client.currencys()
        self.assertEqual(response.res.status_code, 200)
class TestCommonTimestamp(TestCommonEndpoint):
    """Tests for the public /timestamp endpoint."""

    def test_success(self):
        response = self.client.timestamp()
        self.assertEqual(response.res.status_code, 200)
| [
"os.path.dirname",
"huobi.rest.client.HuobiRestClient",
"dotenv.load_dotenv"
] | [((268, 292), 'dotenv.load_dotenv', 'load_dotenv', (['dotenv_path'], {}), '(dotenv_path)\n', (279, 292), False, 'from dotenv import load_dotenv\n'), ((476, 537), 'huobi.rest.client.HuobiRestClient', 'HuobiRestClient', ([], {'access_key': 'access_key', 'secret_key': 'secret_key'}), '(access_key=access_key, secret_key=secret_key)\n', (491, 537), False, 'from huobi.rest.client import HuobiRestClient\n'), ((919, 966), 'huobi.rest.client.HuobiRestClient', 'HuobiRestClient', ([], {'access_key': '"""1"""', 'secret_key': '"""2"""'}), "(access_key='1', secret_key='2')\n", (934, 966), False, 'from huobi.rest.client import HuobiRestClient\n'), ((238, 255), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (245, 255), False, 'from os.path import join, dirname\n')] |
#
# Copyright 2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import requests
from flask import Blueprint, current_app, flash, Markup, render_template, \
request, redirect, url_for
from gerritclient import client
from gerritclient import error as client_error
from gerritviewer import common
from .forms import CreateUserAccountForm, EditContactInfoForm, \
QueryUserAccountForm
accounts = Blueprint('accounts', __name__)
@accounts.route('/accounts', methods=['GET', 'POST'])
def fetch():
    """List/search Gerrit user accounts.

    GET renders the search form; POST runs the query entered in the form
    and flashes a summary of the result.
    """
    form = QueryUserAccountForm()
    gerrit_accounts = None
    account_client = client.get_client('account',
                                      connection=common.get_connection())
    try:
        if form.validate_on_submit():
            # Query accounts matching the submitted search string.
            gerrit_accounts = account_client.get_all(
                form.query_string.data, detailed=form.details.data)
            flash(Markup("Search results for <strong>'{}'</strong>: {}".format(
                form.query_string.data,
                "Nothing Found" if not gerrit_accounts else '')),
                category='note')
    except (requests.ConnectionError, client_error.HTTPError) as error:
        # Log the failure and surface it to the user via a flashed message.
        current_app.logger.error(error)
        flash(error, category='error')
    return render_template('accounts/accounts.html',
                           gerrit_url=common.get_gerrit_url(),
                           gerrit_version=common.get_version(),
                           entry_category='accounts',
                           entries=gerrit_accounts,
                           form=form)
@accounts.route('/accounts/<account_id>')
def fetch_single(account_id):
    """Show a single account's profile; optionally enable/disable it.

    Query parameters:
        details: when truthy, fetch the detailed account representation.
        action: 'enable' or 'disable' to toggle the account's active state.
    """
    account = {}
    account_client = client.get_client('account',
                                      connection=common.get_connection())
    try:
        account = account_client.get_by_id(
            account_id, detailed=request.args.get('details', False))
        account['is_active'] = account_client.is_active(account_id)
        account['membership'] = account_client.get_membership(account_id)
        # NOTE(review): the account state is mutated via a GET query
        # parameter; confirm whether this should require a POST instead.
        action = request.args.get('action')
        if action:
            account_actions = {'enable': account_client.enable,
                               'disable': account_client.disable}
            account_actions[action](account_id)
            flash(Markup("Account with <strong>ID={}</strong> was "
                         "successfully <strong>{}d</strong>".format(
                          account_id, action)), category='note')
            # Redirect so a refresh does not repeat the action.
            return redirect(url_for('accounts.fetch_single',
                                    account_id=account_id))
    except (requests.ConnectionError, client_error.HTTPError) as error:
        current_app.logger.error(error)
        flash(error, category='error')
    return render_template('accounts/profile.html',
                           gerrit_url=common.get_gerrit_url(),
                           gerrit_version=common.get_version(),
                           entry_category='accounts',
                           entry_item=account,
                           entry_item_name=account.get('name'))
@accounts.route('/accounts/contact/<account_id>', methods=['GET', 'POST'])
def edit_contact_info(account_id):
    """Edit an account's full name, username and status.

    Only fields whose submitted value differs from the current one are sent
    to Gerrit; a summary of the applied changes is flashed afterwards.
    """
    form = EditContactInfoForm()
    account = {}
    account_client = client.get_client('account',
                                      connection=common.get_connection())
    try:
        account = account_client.get_by_id(account_id, detailed=False)
        current_status = get_account_status(account_id)
        if form.validate_on_submit():
            fullname, username = form.fullname.data, form.username.data
            status = form.status.data
            response = {}
            # Only push fields that actually changed.
            if account.get('name') != fullname:
                response['full name'] = account_client.set_name(account_id,
                                                                fullname)
            if username and account.get('username') != username:
                response['username'] = account_client.set_username(account_id,
                                                                   username)
            if status != current_status:
                # set_status may return None; fall back to '' so the summary
                # below still renders an entry for the change.
                response['status'] = account_client.set_status(account_id,
                                                               status) or ''
            if response:
                flash(Markup("The following parameters were successfully "
                             "updated: {0}".format(", ".join(
                              ":: ".join(_) for _ in response.items()))),
                      category='note')
            return redirect(url_for('accounts.fetch_single',
                                    account_id=account_id))
    except (requests.ConnectionError, client_error.HTTPError) as error:
        current_app.logger.error(error)
        flash(error, category='error')
    return render_template('accounts/contacts.html',
                           gerrit_url=common.get_gerrit_url(),
                           gerrit_version=common.get_version(),
                           entry_category='accounts',
                           entry_item=account,
                           entry_item_name=account.get('name'),
                           form=form)
@accounts.route('/accounts/ssh/<account_id>')
def ssh(account_id):
    """Show the SSH keys registered for a single user account."""
    gerrit = client.get_client('account', connection=common.get_connection())
    account = {}
    ssh_keys = []
    try:
        account = gerrit.get_by_id(account_id, detailed=False)
        ssh_keys = gerrit.get_ssh_keys(account_id)
    except (requests.ConnectionError, client_error.HTTPError) as error:
        # Log the failure and surface it to the user via a flashed message.
        current_app.logger.error(error)
        flash(error, category='error')
    return render_template('accounts/ssh.html',
                           gerrit_url=common.get_gerrit_url(),
                           gerrit_version=common.get_version(),
                           entry_category='accounts',
                           entry_item=account,
                           entry_item_name=account.get('name'),
                           entries=ssh_keys)
@accounts.route('/accounts/create', methods=['GET', 'POST'])
def create():
    """Create a new Gerrit user account from the submitted form data."""
    form = CreateUserAccountForm()
    if form.validate_on_submit():
        account_client = client.get_client('account',
                                          connection=common.get_connection())
        # Only include the optional fields that were actually filled in.
        data = {k: v for k, v in (('username', form.username.data),
                                  ('name', form.fullname.data),
                                  ('email', form.email.data)) if v}
        try:
            response = account_client.create(form.username.data, data=data)
            msg = Markup("A new user account '<strong>{0}</strong>' "
                         "with ID={1} was successfully created.".format(
                          response['username'], response['_account_id']))
            flash(msg, category='note')
            return redirect(url_for('accounts.fetch_single',
                                    account_id=response['_account_id']))
        except (requests.ConnectionError, client_error.HTTPError) as error:
            current_app.logger.error(error)
            flash(error, category='error')
    return render_template('accounts/create.html',
                           gerrit_url=common.get_gerrit_url(),
                           gerrit_version=common.get_version(),
                           form=form)
# Status of account is only available since gerrit 2.14,
# so we have to fetch it in a proper way for all versions
def get_account_status(account_id):
    """Return the account's status string, or None when unavailable."""
    gerrit = client.get_client('account', connection=common.get_connection())
    try:
        return gerrit.get_status(account_id)
    except client_error.HTTPError:
        # Older Gerrit versions have no status endpoint.
        return None
| [
"flask.request.args.get",
"flask.flash",
"gerritviewer.common.get_gerrit_url",
"gerritviewer.common.get_connection",
"gerritviewer.common.get_version",
"flask.url_for",
"flask.Blueprint",
"flask.current_app.logger.error"
] | [((943, 974), 'flask.Blueprint', 'Blueprint', (['"""accounts"""', '__name__'], {}), "('accounts', __name__)\n", (952, 974), False, 'from flask import Blueprint, current_app, flash, Markup, render_template, request, redirect, url_for\n'), ((2600, 2626), 'flask.request.args.get', 'request.args.get', (['"""action"""'], {}), "('action')\n", (2616, 2626), False, 'from flask import Blueprint, current_app, flash, Markup, render_template, request, redirect, url_for\n'), ((1205, 1228), 'gerritviewer.common.get_connection', 'common.get_connection', ([], {}), '()\n', (1226, 1228), False, 'from gerritviewer import common\n'), ((1708, 1739), 'flask.current_app.logger.error', 'current_app.logger.error', (['error'], {}), '(error)\n', (1732, 1739), False, 'from flask import Blueprint, current_app, flash, Markup, render_template, request, redirect, url_for\n'), ((1748, 1778), 'flask.flash', 'flash', (['error'], {'category': '"""error"""'}), "(error, category='error')\n", (1753, 1778), False, 'from flask import Blueprint, current_app, flash, Markup, render_template, request, redirect, url_for\n'), ((1870, 1893), 'gerritviewer.common.get_gerrit_url', 'common.get_gerrit_url', ([], {}), '()\n', (1891, 1893), False, 'from gerritviewer import common\n'), ((1937, 1957), 'gerritviewer.common.get_version', 'common.get_version', ([], {}), '()\n', (1955, 1957), False, 'from gerritviewer import common\n'), ((2294, 2317), 'gerritviewer.common.get_connection', 'common.get_connection', ([], {}), '()\n', (2315, 2317), False, 'from gerritviewer import common\n'), ((3227, 3258), 'flask.current_app.logger.error', 'current_app.logger.error', (['error'], {}), '(error)\n', (3251, 3258), False, 'from flask import Blueprint, current_app, flash, Markup, render_template, request, redirect, url_for\n'), ((3267, 3297), 'flask.flash', 'flash', (['error'], {'category': '"""error"""'}), "(error, category='error')\n", (3272, 3297), False, 'from flask import Blueprint, current_app, flash, Markup, 
render_template, request, redirect, url_for\n'), ((3388, 3411), 'gerritviewer.common.get_gerrit_url', 'common.get_gerrit_url', ([], {}), '()\n', (3409, 3411), False, 'from gerritviewer import common\n'), ((3455, 3475), 'gerritviewer.common.get_version', 'common.get_version', ([], {}), '()\n', (3473, 3475), False, 'from gerritviewer import common\n'), ((3904, 3927), 'gerritviewer.common.get_connection', 'common.get_connection', ([], {}), '()\n', (3925, 3927), False, 'from gerritviewer import common\n'), ((5327, 5358), 'flask.current_app.logger.error', 'current_app.logger.error', (['error'], {}), '(error)\n', (5351, 5358), False, 'from flask import Blueprint, current_app, flash, Markup, render_template, request, redirect, url_for\n'), ((5367, 5397), 'flask.flash', 'flash', (['error'], {'category': '"""error"""'}), "(error, category='error')\n", (5372, 5397), False, 'from flask import Blueprint, current_app, flash, Markup, render_template, request, redirect, url_for\n'), ((5489, 5512), 'gerritviewer.common.get_gerrit_url', 'common.get_gerrit_url', ([], {}), '()\n', (5510, 5512), False, 'from gerritviewer import common\n'), ((5556, 5576), 'gerritviewer.common.get_version', 'common.get_version', ([], {}), '()\n', (5574, 5576), False, 'from gerritviewer import common\n'), ((5950, 5973), 'gerritviewer.common.get_connection', 'common.get_connection', ([], {}), '()\n', (5971, 5973), False, 'from gerritviewer import common\n'), ((6225, 6256), 'flask.current_app.logger.error', 'current_app.logger.error', (['error'], {}), '(error)\n', (6249, 6256), False, 'from flask import Blueprint, current_app, flash, Markup, render_template, request, redirect, url_for\n'), ((6265, 6295), 'flask.flash', 'flash', (['error'], {'category': '"""error"""'}), "(error, category='error')\n", (6270, 6295), False, 'from flask import Blueprint, current_app, flash, Markup, render_template, request, redirect, url_for\n'), ((6382, 6405), 'gerritviewer.common.get_gerrit_url', 'common.get_gerrit_url', ([], 
{}), '()\n', (6403, 6405), False, 'from gerritviewer import common\n'), ((6449, 6469), 'gerritviewer.common.get_version', 'common.get_version', ([], {}), '()\n', (6467, 6469), False, 'from gerritviewer import common\n'), ((7478, 7505), 'flask.flash', 'flash', (['msg'], {'category': '"""note"""'}), "(msg, category='note')\n", (7483, 7505), False, 'from flask import Blueprint, current_app, flash, Markup, render_template, request, redirect, url_for\n'), ((7900, 7923), 'gerritviewer.common.get_gerrit_url', 'common.get_gerrit_url', ([], {}), '()\n', (7921, 7923), False, 'from gerritviewer import common\n'), ((7967, 7987), 'gerritviewer.common.get_version', 'common.get_version', ([], {}), '()\n', (7985, 7987), False, 'from gerritviewer import common\n'), ((8280, 8303), 'gerritviewer.common.get_connection', 'common.get_connection', ([], {}), '()\n', (8301, 8303), False, 'from gerritviewer import common\n'), ((2405, 2439), 'flask.request.args.get', 'request.args.get', (['"""details"""', '(False)'], {}), "('details', False)\n", (2421, 2439), False, 'from flask import Blueprint, current_app, flash, Markup, render_template, request, redirect, url_for\n'), ((3054, 3109), 'flask.url_for', 'url_for', (['"""accounts.fetch_single"""'], {'account_id': 'account_id'}), "('accounts.fetch_single', account_id=account_id)\n", (3061, 3109), False, 'from flask import Blueprint, current_app, flash, Markup, render_template, request, redirect, url_for\n'), ((5154, 5209), 'flask.url_for', 'url_for', (['"""accounts.fetch_single"""'], {'account_id': 'account_id'}), "('accounts.fetch_single', account_id=account_id)\n", (5161, 5209), False, 'from flask import Blueprint, current_app, flash, Markup, render_template, request, redirect, url_for\n'), ((6935, 6958), 'gerritviewer.common.get_connection', 'common.get_connection', ([], {}), '()\n', (6956, 6958), False, 'from gerritviewer import common\n'), ((7534, 7602), 'flask.url_for', 'url_for', (['"""accounts.fetch_single"""'], {'account_id': 
"response['_account_id']"}), "('accounts.fetch_single', account_id=response['_account_id'])\n", (7541, 7602), False, 'from flask import Blueprint, current_app, flash, Markup, render_template, request, redirect, url_for\n'), ((7732, 7763), 'flask.current_app.logger.error', 'current_app.logger.error', (['error'], {}), '(error)\n', (7756, 7763), False, 'from flask import Blueprint, current_app, flash, Markup, render_template, request, redirect, url_for\n'), ((7780, 7810), 'flask.flash', 'flash', (['error'], {'category': '"""error"""'}), "(error, category='error')\n", (7785, 7810), False, 'from flask import Blueprint, current_app, flash, Markup, render_template, request, redirect, url_for\n')] |
# Copyright (c) 2020, NVIDIA CORPORATION.
from __future__ import annotations
from typing import TYPE_CHECKING, Optional, Union, overload
from typing_extensions import Literal
import cudf
if TYPE_CHECKING:
from cudf.core.column import ColumnBase
class ColumnMethodsMixin:
    """Mixin for column accessor classes that either return a result of the
    owner's type or mutate the owner in place.

    # _column: the column the accessor operates on.
    # _parent: the owning Series/Index, or None when wrapping a bare column.
    """

    _column: ColumnBase
    _parent: Optional[Union["cudf.Series", "cudf.Index"]]

    def __init__(
        self,
        column: ColumnBase,
        parent: Union["cudf.Series", "cudf.Index"] = None,
    ):
        self._column = column
        self._parent = parent

    @overload
    def _return_or_inplace(
        self, new_col, inplace: Literal[False], expand=False, retain_index=True
    ) -> Union["cudf.Series", "cudf.Index"]:
        ...

    @overload
    def _return_or_inplace(
        self, new_col, expand: bool = False, retain_index: bool = True
    ) -> Union["cudf.Series", "cudf.Index"]:
        ...

    @overload
    def _return_or_inplace(
        self, new_col, inplace: Literal[True], expand=False, retain_index=True
    ) -> None:
        ...

    @overload
    def _return_or_inplace(
        self,
        new_col,
        inplace: bool = False,
        expand: bool = False,
        retain_index: bool = True,
    ) -> Optional[Union["cudf.Series", "cudf.Index"]]:
        ...

    def _return_or_inplace(
        self, new_col, inplace=False, expand=False, retain_index=True
    ):
        """
        Returns an object of the type of the column owner or updates the column
        of the owner (Series or Index) to mimic an inplace operation
        """
        if inplace:
            if self._parent is not None:
                # Replace the parent's data in place via a single-column table.
                self._parent._mimic_inplace(
                    self._parent.__class__._from_table(
                        cudf._lib.table.Table({self._parent.name: new_col})
                    ),
                    inplace=True,
                )
                return None
            else:
                # No parent: mutate the wrapped column directly.
                self._column._mimic_inplace(new_col, inplace=True)
                return None
        else:
            if self._parent is None:
                return new_col
            if expand or isinstance(
                self._parent, (cudf.DataFrame, cudf.MultiIndex)
            ):
                # This branch indicates the passed as new_col
                # is a Table
                table = new_col

                if isinstance(self._parent, cudf.BaseIndex):
                    # Expanding an Index produces a MultiIndex-like object.
                    idx = self._parent._constructor_expanddim._from_table(
                        table=table
                    )
                    idx.names = None
                    return idx
                else:
                    # Expanding a Series produces a DataFrame with the same
                    # index.
                    return self._parent._constructor_expanddim(
                        data=table._data, index=self._parent.index
                    )
            elif isinstance(self._parent, cudf.Series):
                if retain_index:
                    return cudf.Series(
                        new_col,
                        name=self._parent.name,
                        index=self._parent.index,
                    )
                else:
                    return cudf.Series(new_col, name=self._parent.name)
            elif isinstance(self._parent, cudf.BaseIndex):
                return cudf.core.index.as_index(
                    new_col, name=self._parent.name
                )
            else:
                # Fallback: hand the new column back to the parent.
                return self._parent._mimic_inplace(new_col, inplace=False)
| [
"cudf.core.index.as_index",
"cudf._lib.table.Table",
"cudf.Series"
] | [((1753, 1804), 'cudf._lib.table.Table', 'cudf._lib.table.Table', (['{self._parent.name: new_col}'], {}), '({self._parent.name: new_col})\n', (1774, 1804), False, 'import cudf\n'), ((2896, 2966), 'cudf.Series', 'cudf.Series', (['new_col'], {'name': 'self._parent.name', 'index': 'self._parent.index'}), '(new_col, name=self._parent.name, index=self._parent.index)\n', (2907, 2966), False, 'import cudf\n'), ((3111, 3155), 'cudf.Series', 'cudf.Series', (['new_col'], {'name': 'self._parent.name'}), '(new_col, name=self._parent.name)\n', (3122, 3155), False, 'import cudf\n'), ((3238, 3295), 'cudf.core.index.as_index', 'cudf.core.index.as_index', (['new_col'], {'name': 'self._parent.name'}), '(new_col, name=self._parent.name)\n', (3262, 3295), False, 'import cudf\n')] |
# Example of Naive Bayes implemented from Scratch in Python
import csv
import random
import math
import xgboost as xgb
import matplotlib.pyplot as plt
import numpy as np
def loadCsv(filename):
    """Load a CSV file of numeric rows.

    Args:
        filename (str): Path to the CSV file.

    Returns:
        list: One list of floats per CSV row.
    """
    # Use a context manager so the file handle is closed (the original
    # left it open), and convert every field to float.
    with open(filename, "r") as handle:
        return [[float(value) for value in row] for row in csv.reader(handle)]
def _loadDataset(filename, num_columns, rows):
    """Read *filename* as CSV and append every row to *rows*, converting the
    first *num_columns* fields of each row to float in place."""
    # The context manager fixes the file-handle leak of the original loaders.
    with open(filename, "r") as handle:
        for row in csv.reader(handle):
            for index in range(num_columns):
                row[index] = float(row[index])
            rows.append(row)


def loadDataset_ckd(filename, trainingSet=None):
    """Load CKD training rows (15 numeric columns) into *trainingSet*.

    `trainingSet=None` replaces the original mutable default ([]), which
    silently accumulated rows across calls.
    """
    _loadDataset(filename, 15, [] if trainingSet is None else trainingSet)


def loadDataset_ckd1(filename, testSet=None):
    """Load CKD test rows (15 numeric columns) into *testSet*."""
    _loadDataset(filename, 15, [] if testSet is None else testSet)


def loadDataset_ml(filename, trainingSet=None):
    """Load diabetes training rows (9 numeric columns) into *trainingSet*."""
    _loadDataset(filename, 9, [] if trainingSet is None else trainingSet)


def loadDataset_hd(filename, trainingSet=None):
    """Load training rows with 12 numeric columns (presumably heart disease —
    confirm against the dataset files) into *trainingSet*."""
    _loadDataset(filename, 12, [] if trainingSet is None else trainingSet)


def loadDataset_ml1(filename, testSet=None):
    """Load diabetes test rows (9 numeric columns) into *testSet*."""
    _loadDataset(filename, 9, [] if testSet is None else testSet)


def loadDataset_hd1(filename, testSet=None):
    """Load test rows with 12 numeric columns into *testSet*."""
    _loadDataset(filename, 12, [] if testSet is None else testSet)
def splitDataset(dataset, splitRatio):
    """Randomly partition *dataset* into a training split and a remainder.

    Args:
        dataset (list): Rows to split.
        splitRatio (float): Fraction of rows placed in the training split.

    Returns:
        list: ``[trainSet, remainder]``.
    """
    remainder = list(dataset)
    target_size = int(len(dataset) * splitRatio)
    trainSet = []
    for _ in range(target_size):
        # Move one randomly chosen row from the remainder to the train set.
        trainSet.append(remainder.pop(random.randrange(len(remainder))))
    return [trainSet, remainder]
def separateByClass(dataset):
    """Group rows of *dataset* by their class label (the last element)."""
    separated = {}
    for vector in dataset:
        # The class label is stored in the final column of each row.
        separated.setdefault(vector[-1], []).append(vector)
    return separated
def mean(numbers):
    """Return the arithmetic mean of *numbers*."""
    total = sum(numbers)
    return total / float(len(numbers))


def stdev(numbers):
    """Return the sample standard deviation (n-1 denominator) of *numbers*."""
    avg = mean(numbers)
    squared_deviations = sum((value - avg) ** 2 for value in numbers)
    return math.sqrt(squared_deviations / float(len(numbers) - 1))


def summarize(dataset):
    """Return (mean, stdev) per attribute column of *dataset*, excluding the
    final (class-label) column."""
    stats = [(mean(column), stdev(column)) for column in zip(*dataset)]
    del stats[-1]  # drop the class column
    return stats
def summarizeByClass(dataset):
    """Compute per-class attribute summaries (mean, stdev pairs) for
    *dataset*."""
    return {label: summarize(rows)
            for label, rows in separateByClass(dataset).items()}
def calculateProbability(x, mean, stdev):
#print(x,mean,stdev)
if(x==0 and mean==0 and stdev==0):
x = 1
mean = 1
stdev = 1
#print(x,mean,stdev)
part2 = (2*math.pow(stdev,2))
if(part2==0) :
part2 = 0.1
#print(part2)
exponent = math.exp(-(math.pow(x-mean,2)/part2))
part3 = (math.sqrt(2*math.pi) * stdev)
if(part3==0) :
part3 = 0.1
fin = (1 / part3) * exponent
return fin
def calculateClassProbabilities(summaries, inputVector):
probabilities = {}
for classValue, classSummaries in summaries.items():
probabilities[classValue] = 1
for i in range(len(classSummaries)):
mean, stdev = classSummaries[i]
x = inputVector[i]
probabilities[classValue] *= calculateProbability(x, mean, stdev)
return probabilities
def predict(summaries, inputVector):
probabilities = calculateClassProbabilities(summaries, inputVector)
bestLabel, bestProb = None, -1
for classValue, probability in probabilities.items():
if bestLabel is None or probability > bestProb:
bestProb = probability
bestLabel = classValue
return bestLabel
def getPredictions(summaries, testSet):
    """Predicted class label for every row of `testSet`."""
    return [predict(summaries, row) for row in testSet]
def getAccuracy(testSet, predictions):
    """Percentage of rows whose true label (last column) matches the
    corresponding prediction."""
    hits = 0
    for i, row in enumerate(testSet):
        if row[-1] == predictions[i]:
            hits += 1
    return (hits / float(len(testSet))) * 100.0
def main():
    """Run the naive-Bayes pipeline over three disease datasets (CKD,
    diabetes, heart disease) and print the predicted presence for each.

    NOTE(review): the loadDataset_* helpers are presumably defined
    elsewhere in this module -- confirm; `int(repr(len(x)))` is just a
    roundabout `len(x)`; the computed `accuracy` values are assigned
    but never printed or returned.
    """
    print ('\n~~~~~~~~~~~');
    #checking of presence of ckd disease
    # prepare data
    matched_count = 0 ;
    total_datas = 0
    trainingSet=[]
    testSet=[]
    loadDataset_ckd('dataset_ckd_train.csv', trainingSet)
    total_datas = total_datas+int(repr(len(trainingSet)))
    loadDataset_ckd1('dataset_ckd_test.csv', testSet)
    print ('Train set of ckd: ',repr(len(trainingSet)))
    #print ('Train set: ', trainingSet)
    #print ('Test set: ', repr(len(testSet)))
    print ('Input for CKD disease related parameters :\n ',testSet)
    # Fit per-class Gaussian summaries on the training data.
    summaries = summarizeByClass(trainingSet)
    matched_count = matched_count+int(repr(len(summaries)))
    print('matches: ',repr(len(summaries)))
    # test model
    predictions = getPredictions(summaries, testSet)
    #print('> predicted=' , predictions)
    print('> disease presence =' , predictions )
    accuracy = getAccuracy(testSet, predictions)
    #print('Accuracy: {0}%').format(accuracy)
    #print('Accuracy: ',accuracy)
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    print ('\n~~~~~~~~~~~');
    #checking of presence of diabetes disease
    trainingSet=[]
    testSet=[]
    loadDataset_ml('dataset_diabetes_train.csv', trainingSet)
    total_datas = total_datas+int(repr(len(trainingSet)))
    loadDataset_ml1('dataset_diabetes_test.csv', testSet)
    print ('Train set of diabetes: ',repr(len(trainingSet)))
    print ('Input for Diabetes disease related parameters :\n ',testSet)
    #print(trainingSet)
    #print(testSet)
    # prepare model
    summaries = summarizeByClass(trainingSet)
    #print(summaries)
    matched_count = matched_count+int(repr(len(summaries)))
    print('matches: ',repr(len(summaries)))
    # test model
    predictions = getPredictions(summaries, testSet)
    #print('> predicted=' , predictions)
    print('> disease presence =' , predictions )
    accuracy = getAccuracy(testSet, predictions)
    #print('Accuracy: {0}%').format(accuracy)
    #print('Accuracy: ',accuracy)
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    print ('\n~~~~~~~~~~~');
    #checking of presence of heart disease
    trainingSet=[]
    testSet=[]
    loadDataset_hd('dataset_heartdisease_train.csv', trainingSet)
    total_datas = total_datas+int(repr(len(trainingSet)))
    loadDataset_hd1('dataset_heartdisease_test.csv', testSet)
    print ('Train set of heart disease: ',repr(len(trainingSet)))
    print ('Input for heart disease related parameters :\n ',testSet)
    summaries = summarizeByClass(trainingSet)
    #print(summaries)
    matched_count = matched_count+int(repr(len(summaries)))
    print('matches: ',repr(len(summaries)))
    # test model
    predictions = getPredictions(summaries, testSet)
    #print('> predicted=' , predictions)
    print('> disease presence =' , predictions )
    accuracy = getAccuracy(testSet, predictions)
    #print('Accuracy: {0}%').format(accuracy)
    #print('Accuracy: ',accuracy)
    # Summary line: counts of loaded rows and class summaries across all three runs.
    print('Total Datas',total_datas,'Matched Accuracy: ',matched_count)
main() | [
"math.pow",
"math.sqrt"
] | [((2999, 3018), 'math.sqrt', 'math.sqrt', (['variance'], {}), '(variance)\n', (3008, 3018), False, 'import math\n'), ((3595, 3613), 'math.pow', 'math.pow', (['stdev', '(2)'], {}), '(stdev, 2)\n', (3603, 3613), False, 'import math\n'), ((3726, 3748), 'math.sqrt', 'math.sqrt', (['(2 * math.pi)'], {}), '(2 * math.pi)\n', (3735, 3748), False, 'import math\n'), ((3688, 3709), 'math.pow', 'math.pow', (['(x - mean)', '(2)'], {}), '(x - mean, 2)\n', (3696, 3709), False, 'import math\n')] |
# Jokenpô (rock-paper-scissors) against a computer opponent.
from random import choice
from time import sleep
jokenpo = ['Pedra', 'Papel', 'Tesoura']
jokenposter_stainger = choice(jokenpo)
jogador = int(input('Qual a sua jogada?'
                    '\n1. Pedra'
                    '\n2. Papel'
                    '\n3. Tesoura'
                    '\nEscolha: '))
# Countdown before revealing the plays.
print('\nJO...')
sleep(1)
print('KEN...')
sleep(1)
print('PO!!!'
      '\n ')
if jogador == 1:
    print('Você: Pedra')
elif jogador == 2:
    print('Você: Papel')
elif jogador == 3:
    print('Você: Tesoura')
else:
    print('Escolha uma opção válida.')
print('Jokenposter Stainger: {}'.format(jokenposter_stainger))
sleep(2)
print('--' * 20)
# BUG FIX: `w` was only assigned inside the outcome branches below, so an
# invalid choice (e.g. 4) crashed with NameError at the final `if w == 0`.
# Initialise it to a sentinel meaning "no result".
w = -1  # 0 = draw, 1 = player lost, 2 = player won
if jokenposter_stainger == 'Pedra' and jogador == 1:
    print('Empate!')
    w = 0
elif jokenposter_stainger == 'Pedra' and jogador == 2:
    print('Você ganhou!')
    w = 2
elif jokenposter_stainger == 'Pedra' and jogador == 3:
    print('Você perdeu!')
    w = 1
elif jokenposter_stainger == 'Papel' and jogador == 1:
    print('Você perdeu!')
    w = 1
elif jokenposter_stainger == 'Papel' and jogador == 2:
    print('Empate!')
    w = 0
elif jokenposter_stainger == 'Papel' and jogador == 3:
    print('Você ganhou!')
    w = 2
elif jokenposter_stainger == 'Tesoura' and jogador == 1:
    print('Você ganhou!')
    w = 2
elif jokenposter_stainger == 'Tesoura' and jogador == 2:
    print('Você perdeu!')
    w = 1
elif jokenposter_stainger == 'Tesoura' and jogador == 3:
    print('Empate!')
    w = 0
# Taunt line depending on the outcome (sentinel falls through to the blank line).
if w == 0:
    print('<NAME>: Vamo de novo! Ta com medinho?')
elif w == 1:
    print('<NAME>: OTÁRIO ')
elif w == 2:
    print('<NAME>: TAAAVA DEMORAANDO! Revanche!')
else:
    print(' ')
print('--' * 20)
| [
"random.choice",
"time.sleep"
] | [((113, 128), 'random.choice', 'choice', (['jokenpo'], {}), '(jokenpo)\n', (119, 128), False, 'from random import choice\n'), ((325, 333), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (330, 333), False, 'from time import sleep\n'), ((350, 358), 'time.sleep', 'sleep', (['(1)'], {}), '(1)\n', (355, 358), False, 'from time import sleep\n'), ((629, 637), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (634, 637), False, 'from time import sleep\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import os
import stat
# Cached absolute path of the bundled protoc binary (resolved lazily).
protoc_exec = None
def find_protoc():
    """Locate the platform-specific protoc binary shipped next to this
    script, mark it executable, and memoize the resolved path."""
    global protoc_exec
    if protoc_exec is None:
        here = os.path.dirname(os.path.realpath(__file__))
        if sys.platform[0:5].lower() == "linux":
            subdir, binary = 'linux_x86_64', 'protoc'
        elif sys.platform[0:6].lower() == "darwin":
            subdir, binary = 'macos_x86_64', 'protoc'
        else:
            subdir, binary = 'windows_x86_64', 'protoc.exe'
        protoc_exec = os.path.join(here, subdir, binary)
        # rwx for user/group/other so the bundled binary can be executed.
        os.chmod(protoc_exec, stat.S_IRWXU + stat.S_IRWXG + stat.S_IRWXO)
    return protoc_exec
""" run as a executable """
if __name__ == "__main__":
print(find_protoc())
| [
"os.path.realpath",
"os.path.join",
"os.chmod"
] | [((590, 655), 'os.chmod', 'os.chmod', (['protoc_exec', '(stat.S_IRWXU + stat.S_IRWXG + stat.S_IRWXO)'], {}), '(protoc_exec, stat.S_IRWXU + stat.S_IRWXG + stat.S_IRWXO)\n', (598, 655), False, 'import os\n'), ((230, 256), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (246, 256), False, 'import os\n'), ((325, 375), 'os.path.join', 'os.path.join', (['script_dir', '"""linux_x86_64"""', '"""protoc"""'], {}), "(script_dir, 'linux_x86_64', 'protoc')\n", (337, 375), False, 'import os\n'), ((446, 496), 'os.path.join', 'os.path.join', (['script_dir', '"""macos_x86_64"""', '"""protoc"""'], {}), "(script_dir, 'macos_x86_64', 'protoc')\n", (458, 496), False, 'import os\n'), ((529, 585), 'os.path.join', 'os.path.join', (['script_dir', '"""windows_x86_64"""', '"""protoc.exe"""'], {}), "(script_dir, 'windows_x86_64', 'protoc.exe')\n", (541, 585), False, 'import os\n')] |
# -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import pytest
from django.test import RequestFactory
from django.urls import reverse
from doubles import allow, expect
from rest_framework import status
from apps.accounts.models.choices import ActionCategory
from apps.accounts.services.auth import AuthService
from apps.accounts.tests.factories.pending_action import PendingActionFactory
from apps.accounts.views.confirm_email import ConfirmEmailView
@pytest.mark.django_db
class ConfirmEmailTests:
    """Endpoint tests for the accounts confirm-email view.

    NOTE(review): the class name does not match pytest's default
    ``Test*`` collection pattern -- presumably ``python_classes`` is
    customised in the project's pytest config; confirm.
    """
    @classmethod
    def make_confirm_email_url(cls, token):
        # Build the URL for the confirm-email route carrying the token.
        return reverse(
            'accounts:confirm-email',
            kwargs={'token': token}
        )
    def test_get_with_valid_token(self, api_client):
        """GET with a known pending-action token: 200, service called once."""
        pending_action = PendingActionFactory(category=ActionCategory.CONFIRM_EMAIL.value)
        # Stub out the service; only the view's HTTP behaviour is under test.
        allow(AuthService).confirm_email.and_return(True)
        expect(AuthService).confirm_email.once()
        response = api_client.get(self.make_confirm_email_url(pending_action.token))
        assert response.status_code == status.HTTP_200_OK
    def test_get_without_invalid_token(self, api_client):
        """GET with an unknown token: still 200, but the service is never hit."""
        allow(AuthService).confirm_email.and_return(True)
        expect(AuthService).confirm_email.never()
        response = api_client.get(self.make_confirm_email_url('invalid_token'))
        assert response.status_code == status.HTTP_200_OK
| [
"apps.accounts.tests.factories.pending_action.PendingActionFactory",
"doubles.allow",
"doubles.expect",
"django.urls.reverse"
] | [((579, 637), 'django.urls.reverse', 'reverse', (['"""accounts:confirm-email"""'], {'kwargs': "{'token': token}"}), "('accounts:confirm-email', kwargs={'token': token})\n", (586, 637), False, 'from django.urls import reverse\n'), ((751, 816), 'apps.accounts.tests.factories.pending_action.PendingActionFactory', 'PendingActionFactory', ([], {'category': 'ActionCategory.CONFIRM_EMAIL.value'}), '(category=ActionCategory.CONFIRM_EMAIL.value)\n', (771, 816), False, 'from apps.accounts.tests.factories.pending_action import PendingActionFactory\n'), ((826, 844), 'doubles.allow', 'allow', (['AuthService'], {}), '(AuthService)\n', (831, 844), False, 'from doubles import allow, expect\n'), ((884, 903), 'doubles.expect', 'expect', (['AuthService'], {}), '(AuthService)\n', (890, 903), False, 'from doubles import allow, expect\n'), ((1136, 1154), 'doubles.allow', 'allow', (['AuthService'], {}), '(AuthService)\n', (1141, 1154), False, 'from doubles import allow, expect\n'), ((1194, 1213), 'doubles.expect', 'expect', (['AuthService'], {}), '(AuthService)\n', (1200, 1213), False, 'from doubles import allow, expect\n')] |
"""OperatorWebSite class unit test."""
from __init__ import json, os, time, unittest, \
webdriver, WebDriverException, OperatorWebSite
class TestOperatorWebSite(unittest.TestCase):
    """Unit test class for OperatorWebSite.

    Drives a real Chrome browser via Selenium against the operator site
    described in data/sites/operators.json, so these tests need both a
    Chrome driver and network access.
    """
    def load_data(self):
        """ Load the data file. """
        self.data = None
        file_name = os.path.abspath("data/sites/operators.json")
        file_data = None
        # Check whether the file exists
        self.assertTrue(os.path.isfile(file_name), 'Invalid data file')
        # Open the file and load its data
        try:
            file_data = open(file_name, 'r')
        except (IOError, OSError) as err:
            raise err
        else:
            try:
                # Load the data file into a JSON object
                # (only the first "operators" entry is used by these tests).
                self.data = json.loads(file_data.read())["operators"][0]
                self.assertIsNotNone(self.data)
            except ValueError as err:
                raise err
            finally:
                file_data.close()
    def setUp(self):
        """Setup."""
        try:
            # Chrome driver
            self.driver = webdriver.Chrome()
            self.load_data()
            # Create the operator web site object
            self.operator_obj = OperatorWebSite(self.driver, self.data)
            self.assertIsNotNone(
                self.operator_obj, 'Could not creat OperatorWebSite object')
            self.driver.get(self.data["url"])
        except WebDriverException:
            # Make sure the browser process does not linger if setup fails.
            self.driver.quit()
            raise
    def tearDown(self):
        """Tear down."""
        # Close and quit the browser
        # NOTE(review): only close() is called here; quit() would also end
        # the driver process -- confirm whether that is intentional.
        self.driver.close()
    def run_action(self, action_name, action_args=None):
        """Run a specific OperatorWebSite action."""
        # Execute the requested web driver action
        method = self.operator_obj.get_attr(
            self.operator_obj, action_name)
        self.assertIsNotNone(
            method, 'Failed to get method \'%s\'' % action_name)
        res = method(action_args)
        # NOTE(review): the message below is a tuple, not a %-formatted
        # string, so a failure prints the raw tuple -- likely unintended.
        self.assertIsNotNone(res, ('Action \'%s\' failed', action_name))
        # Give the page time to react before the next step.
        time.sleep(3)
        return res
    def test_type_zone(self):
        # Type the first configured zone into the site's search field.
        args = {
            'path': self.data['actions'][0]['type_zone'],
            'zone': self.data['zones'][0]}
        action = 'type_zone'
        res = self.run_action(action, args)
        self.assertTrue(res, ('Action \'%s\' failed', action))
    def test_click(self):
        # Depends on test_type_zone having filled the zone field first.
        self.test_type_zone()
        action = 'click'
        args = {'path': self.data['actions'][1][action]}
        res = self.run_action(action, args)
        self.assertTrue(res, ('Action \'%s\' failed', action))
    def test_get_cost(self):
        # Full flow: type the zone, click, then read the cost back.
        self.test_type_zone()
        self.test_click()
        action = 'get_cost'
        args = {'path': self.data['actions'][2][action]}
        res = self.run_action(action, args)
        self.assertTrue(res, ('Action \'%s\' failed', action))
    def test_not_action(self):
        # Requesting an unknown action must trip the assertIsNotNone check.
        self.assertRaises(AssertionError, self.run_action, "not_action")
# Run the unittest test runner when executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"__init__.os.path.isfile",
"__init__.unittest.main",
"__init__.webdriver.Chrome",
"__init__.os.path.abspath",
"__init__.time.sleep",
"__init__.OperatorWebSite"
] | [((3089, 3104), '__init__.unittest.main', 'unittest.main', ([], {}), '()\n', (3102, 3104), False, 'from __init__ import json, os, time, unittest, webdriver, WebDriverException, OperatorWebSite\n'), ((344, 388), '__init__.os.path.abspath', 'os.path.abspath', (['"""data/sites/operators.json"""'], {}), "('data/sites/operators.json')\n", (359, 388), False, 'from __init__ import json, os, time, unittest, webdriver, WebDriverException, OperatorWebSite\n'), ((2108, 2121), '__init__.time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (2118, 2121), False, 'from __init__ import json, os, time, unittest, webdriver, WebDriverException, OperatorWebSite\n'), ((479, 504), '__init__.os.path.isfile', 'os.path.isfile', (['file_name'], {}), '(file_name)\n', (493, 504), False, 'from __init__ import json, os, time, unittest, webdriver, WebDriverException, OperatorWebSite\n'), ((1121, 1139), '__init__.webdriver.Chrome', 'webdriver.Chrome', ([], {}), '()\n', (1137, 1139), False, 'from __init__ import json, os, time, unittest, webdriver, WebDriverException, OperatorWebSite\n'), ((1251, 1290), '__init__.OperatorWebSite', 'OperatorWebSite', (['self.driver', 'self.data'], {}), '(self.driver, self.data)\n', (1266, 1290), False, 'from __init__ import json, os, time, unittest, webdriver, WebDriverException, OperatorWebSite\n')] |
from totality import Totality, Node, NodeId
def test_basic():
    """Smoke test: build a collection and a node, then dump the node doc."""
    t = Totality()
    coll = t.create_collection(username="system")
    # No explicit id is given -- presumably NodeId generates one; confirm.
    node_id = NodeId(node_type="facility")
    # 34 / -120 look like latitude/longitude positional args -- TODO confirm.
    node = Node(node_id, 34, -120, collection=coll)
    print(node.to_doc())
assert t is not None | [
"totality.Node",
"totality.Totality",
"totality.NodeId"
] | [((71, 81), 'totality.Totality', 'Totality', ([], {}), '()\n', (79, 81), False, 'from totality import Totality, Node, NodeId\n'), ((146, 174), 'totality.NodeId', 'NodeId', ([], {'node_type': '"""facility"""'}), "(node_type='facility')\n", (152, 174), False, 'from totality import Totality, Node, NodeId\n'), ((186, 226), 'totality.Node', 'Node', (['node_id', '(34)', '(-120)'], {'collection': 'coll'}), '(node_id, 34, -120, collection=coll)\n', (190, 226), False, 'from totality import Totality, Node, NodeId\n')] |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
from fvcore.transforms import HFlipTransform
from detectron2.modeling.postprocessing import detector_postprocess
from detectron2.modeling.test_time_augmentation import GeneralizedRCNNWithTTA
class DensePoseGeneralizedRCNNWithTTA(GeneralizedRCNNWithTTA):
    """Test-time-augmentation wrapper that additionally reduces DensePose
    predictions (with horizontal-flip symmetry handling) on top of the
    standard detectron2 GeneralizedRCNNWithTTA behaviour."""
    def __init__(self, cfg, model, transform_data, tta_mapper=None, batch_size=1):
        """
        Args:
            cfg (CfgNode):
            model (GeneralizedRCNN): a GeneralizedRCNN to apply TTA on.
            transform_data (DensePoseTransformData): contains symmetry label
                transforms used for horizontal flip
            tta_mapper (callable): takes a dataset dict and returns a list of
                augmented versions of the dataset dict. Defaults to
                `DatasetMapperTTA(cfg)`.
            batch_size (int): batch the augmented images into this batch size for inference.
        """
        # Stored before super().__init__ so it is available to the reduce step.
        self._transform_data = transform_data
        super().__init__(cfg=cfg, model=model, tta_mapper=tta_mapper, batch_size=batch_size)
    # the implementation follows closely the one from detectron2/modeling
    def _inference_one_image(self, input):
        """
        Args:
            input (dict): one dataset dict with "image" field being a CHW tensor
        Returns:
            dict: one output dict
        """
        orig_shape = (input["height"], input["width"])
        # For some reason, resize with uint8 slightly increases box AP but decreases densepose AP
        input["image"] = input["image"].to(torch.uint8)
        augmented_inputs, tfms = self._get_augmented_inputs(input)
        # Detect boxes from all augmented versions
        with self._turn_off_roi_heads(["mask_on", "keypoint_on", "densepose_on"]):
            # temporarily disable roi heads
            all_boxes, all_scores, all_classes = self._get_augmented_boxes(augmented_inputs, tfms)
        merged_instances = self._merge_detections(all_boxes, all_scores, all_classes, orig_shape)
        if self.cfg.MODEL.MASK_ON or self.cfg.MODEL.DENSEPOSE_ON:
            # Use the detected boxes to obtain new fields
            augmented_instances = self._rescale_detected_boxes(
                augmented_inputs, merged_instances, tfms
            )
            # run forward on the detected boxes
            outputs = self._batch_inference(augmented_inputs, augmented_instances)
            # Delete now useless variables to avoid being out of memory
            del augmented_inputs, augmented_instances
            # average the predictions
            if self.cfg.MODEL.MASK_ON:
                merged_instances.pred_masks = self._reduce_pred_masks(outputs, tfms)
            if self.cfg.MODEL.DENSEPOSE_ON:
                merged_instances.pred_densepose = self._reduce_pred_densepose(outputs, tfms)
            # postprocess
            merged_instances = detector_postprocess(merged_instances, *orig_shape)
            return {"instances": merged_instances}
        else:
            # No mask/densepose heads enabled: return the merged boxes as-is.
            return {"instances": merged_instances}
    def _reduce_pred_densepose(self, outputs, tfms):
        """Average per-augmentation DensePose outputs into the first one.

        Flipped augmentations are un-flipped first (using the symmetry
        data) so all outputs are in the same orientation; the result is
        written in place into outputs[0].pred_densepose and returned.
        """
        for output, tfm in zip(outputs, tfms):
            if any(isinstance(t, HFlipTransform) for t in tfm.transforms):
                output.pred_densepose.hflip(self._transform_data)
        # Less memory-intensive averaging
        for attr in "SIUV":
            setattr(
                outputs[0].pred_densepose,
                attr,
                sum(getattr(o.pred_densepose, attr) for o in outputs) / len(outputs),
            )
        return outputs[0].pred_densepose
| [
"detectron2.modeling.postprocessing.detector_postprocess"
] | [((2922, 2973), 'detectron2.modeling.postprocessing.detector_postprocess', 'detector_postprocess', (['merged_instances', '*orig_shape'], {}), '(merged_instances, *orig_shape)\n', (2942, 2973), False, 'from detectron2.modeling.postprocessing import detector_postprocess\n')] |
# -*- coding: utf-8 -*-
"""Generate charts for the ComPath GitHub Pages site."""
import click
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from more_click import verbose_option
from compath_resources import get_df
from compath_resources.constants import DATA_DIRECTORY, IMG_DIRECTORY
# Explicit public API: only the `charts` command is exported.
__all__ = [
    'charts',
]
@click.command()
@verbose_option
def charts():
    """Generate the summary for ComPath."""
    sns.set_theme(style="darkgrid")
    # Full mapping table, also written out as TSV alongside the figure.
    mappings = get_df(include_reactome_hierarchy=False, include_decopath=True, include_special=True)
    mappings.to_csv(DATA_DIRECTORY.joinpath('compath.tsv'), sep='\t', index=False)
    # Stack source and target prefixes into one column so they can be counted together.
    prefixes = pd.concat([mappings['source prefix'], mappings['target prefix']]).to_frame()
    prefixes.columns = ['Prefix']
    fig, (prefix_ax, relation_ax) = plt.subplots(1, 2, figsize=(12, 4), sharey=True)
    sns.countplot(data=prefixes, x='Prefix', ax=prefix_ax)
    sns.countplot(data=mappings, x='relation', ax=relation_ax)
    prefix_ax.set_title('By Prefix')
    prefix_ax.set_xlabel('')
    relation_ax.set_title('By Type')
    relation_ax.set_xlabel('')
    relation_ax.set_ylabel('')
    plt.suptitle(f'Summary of {len(mappings.index)} ComPath Mappings')
    plt.tight_layout()
    # Save both vector and raster versions for the GitHub Pages site.
    plt.savefig(IMG_DIRECTORY / 'prefixes.svg')
    plt.savefig(IMG_DIRECTORY / 'prefixes.png', dpi=300)
    plt.close(fig)
# Allow running this module directly as a script.
if __name__ == '__main__':
    charts()
| [
"matplotlib.pyplot.savefig",
"seaborn.set_theme",
"compath_resources.constants.DATA_DIRECTORY.joinpath",
"matplotlib.pyplot.close",
"compath_resources.get_df",
"matplotlib.pyplot.tight_layout",
"seaborn.countplot",
"click.command",
"pandas.concat",
"matplotlib.pyplot.subplots"
] | [((348, 363), 'click.command', 'click.command', ([], {}), '()\n', (361, 363), False, 'import click\n'), ((442, 473), 'seaborn.set_theme', 'sns.set_theme', ([], {'style': '"""darkgrid"""'}), "(style='darkgrid')\n", (455, 473), True, 'import seaborn as sns\n'), ((483, 572), 'compath_resources.get_df', 'get_df', ([], {'include_reactome_hierarchy': '(False)', 'include_decopath': '(True)', 'include_special': '(True)'}), '(include_reactome_hierarchy=False, include_decopath=True,\n include_special=True)\n', (489, 572), False, 'from compath_resources import get_df\n'), ((780, 828), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(12, 4)', 'sharey': '(True)'}), '(1, 2, figsize=(12, 4), sharey=True)\n', (792, 828), True, 'import matplotlib.pyplot as plt\n'), ((833, 886), 'seaborn.countplot', 'sns.countplot', ([], {'data': 'prefix_df', 'x': '"""Prefix"""', 'ax': 'axes[0]'}), "(data=prefix_df, x='Prefix', ax=axes[0])\n", (846, 886), True, 'import seaborn as sns\n'), ((891, 939), 'seaborn.countplot', 'sns.countplot', ([], {'data': 'df', 'x': '"""relation"""', 'ax': 'axes[1]'}), "(data=df, x='relation', ax=axes[1])\n", (904, 939), True, 'import seaborn as sns\n'), ((1158, 1176), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1174, 1176), True, 'import matplotlib.pyplot as plt\n'), ((1181, 1224), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(IMG_DIRECTORY / 'prefixes.svg')"], {}), "(IMG_DIRECTORY / 'prefixes.svg')\n", (1192, 1224), True, 'import matplotlib.pyplot as plt\n'), ((1229, 1281), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(IMG_DIRECTORY / 'prefixes.png')"], {'dpi': '(300)'}), "(IMG_DIRECTORY / 'prefixes.png', dpi=300)\n", (1240, 1281), True, 'import matplotlib.pyplot as plt\n'), ((1286, 1300), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (1295, 1300), True, 'import matplotlib.pyplot as plt\n'), ((583, 621), 'compath_resources.constants.DATA_DIRECTORY.joinpath', 
'DATA_DIRECTORY.joinpath', (['"""compath.tsv"""'], {}), "('compath.tsv')\n", (606, 621), False, 'from compath_resources.constants import DATA_DIRECTORY, IMG_DIRECTORY\n'), ((663, 716), 'pandas.concat', 'pd.concat', (["[df['source prefix'], df['target prefix']]"], {}), "([df['source prefix'], df['target prefix']])\n", (672, 716), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 28 14:24:27 2019
@author: adarzi
"""
#Loading the libraries
import pandas as pd
import os
from os import sys
import pickle
# Set the working directory to the script's own location so the relative
# data/model paths below resolve. (NOTE: `sys` arrives via the unusual
# `from os import sys` import above -- works, but `import sys` is clearer.)
os.chdir(sys.path[0])
# Load the long-distance trip records.
data = pd.read_csv('../../Inputs/Trip_Data/AirSage_Data/trips_long_distance.csv')
# Initialise the mode column (0 = not yet classified).
data['mode'] = 0
# Rule-based air-trip detection: very long trips (trip_dist >= 50000,
# presumably metres -- confirm units) with a high 75th-percentile speed
# are labelled mode 4 (air) directly, without the classifier.
data.loc[(data['trip_dist'] >= 50000) & (data['speed_Q75'] >= 100), 'mode'] = 4
# Split air trips from the trips still to be classified. Using .copy()
# makes df an independent frame so the column assignment below cannot
# hit a chained-indexing view.
airtrips = data.loc[data['mode'] == 4]
df = data.loc[data['mode'] == 0].copy()
# Load the fitted feature scaler and scale the model features (columns 2..33),
# closing the file handles instead of leaking them.
with open('data_scaler.sav', 'rb') as fh:
    datascaler = pickle.load(fh)
test_data = df[df.columns[2:34]]
test_data_scaled = datascaler.transform(test_data)
# Load the trained Random Forest classifier and predict the remaining modes.
with open('Random_Forest.sav', 'rb') as fh:
    RandomForest = pickle.load(fh)
prediction = RandomForest.predict(test_data_scaled)
# BUG FIX: the original `df.mode = prediction` only shadowed the
# DataFrame.mode *method* with an instance attribute and left the 'mode'
# COLUMN untouched, so every classifier prediction was silently dropped
# and non-air trips were written out with mode 0. Column assignment must
# use bracket indexing.
df['mode'] = prediction
# Recombine air and classified trips, restore the original row order,
# and save. pd.concat replaces the deprecated DataFrame.append.
alltrips = pd.concat([df, airtrips]).sort_index()
alltrips.to_csv('../../Inputs/Trip_Data/AirSage_Data/trips_long_distance_with_mode.csv')
| [
"os.chdir",
"pandas.read_csv",
"pandas.DataFrame.sort_index"
] | [((208, 229), 'os.chdir', 'os.chdir', (['sys.path[0]'], {}), '(sys.path[0])\n', (216, 229), False, 'import os\n'), ((262, 336), 'pandas.read_csv', 'pd.read_csv', (['"""../../Inputs/Trip_Data/AirSage_Data/trips_long_distance.csv"""'], {}), "('../../Inputs/Trip_Data/AirSage_Data/trips_long_distance.csv')\n", (273, 336), True, 'import pandas as pd\n'), ((1120, 1153), 'pandas.DataFrame.sort_index', 'pd.DataFrame.sort_index', (['alltrips'], {}), '(alltrips)\n', (1143, 1153), True, 'import pandas as pd\n')] |
import math
# Read target health H and the number of attack types N from stdin.
H, N = [int(n) for n in input().split()]
# A[i]: damage per use of attack i; B[i]: its associated second value
# (read but not used by the output below).
A = []
B = []
for _ in range(N):
    a, b = [int(n) for n in input().split()]
    A.append(a)
    B.append(b)
# Number of uses of each attack alone needed to deal at least H damage.
# (Removed dead code: the original also built an A[i]/B[i] ratio list and
# iterated it with a no-op `pass` loop -- computed but never used.)
maisu = [math.ceil(H / a) for a in A]
for m in maisu:
print(m) | [
"math.ceil"
] | [((292, 311), 'math.ceil', 'math.ceil', (['(H / A[i])'], {}), '(H / A[i])\n', (301, 311), False, 'import math\n')] |
""" config.py
Microsimulation config for mulit-LAD MPI simulation
"""
import numpy as np
import glob
import neworder
# define some global variables describing where the starting population and the parameters of the dynamics come from
# (all paths are relative to the repository root; the rate files come from the NewETHPOP project)
initial_populations = glob.glob("examples/people_multi/data/ssm_*_MSOA11_ppp_2011.csv")
# fertility rates
asfr = "examples/shared/NewETHPOP_fertility.csv"
# mortality rates
asmr = "examples/shared/NewETHPOP_mortality.csv"
# internal in-migration
asir = "examples/shared/NewETHPOP_inmig.csv"
# internal out-migration
asor = "examples/shared/NewETHPOP_outmig.csv"
# immigration
ascr = "examples/shared/NewETHPOP_immig.csv"
# emigration
asxr = "examples/shared/NewETHPOP_emig.csv"
# MPI split initial population files over threads
def partition(arr, count):
    """Deal `arr` round-robin into `count` slices using stride slicing.

    Element i lands in slice i % count, so the slices differ in length
    by at most one.
    """
    slices = []
    for start in range(count):
        slices.append(arr[start::count])
    return slices
# Each MPI rank gets its own subset of the initial population files.
initial_populations = partition(initial_populations, neworder.mpi.size())
# running/debug options
neworder.log_level = 1
# initialisation: each rank constructs a Population from its own file subset
# plus the shared fertility/mortality/migration rate tables.
neworder.initialisations = {
  "people": { "module": "population", "class_": "Population", "args": (initial_populations[neworder.mpi.rank()], asfr, asmr, asir, asor, ascr, asxr) }
}
# define the evolution: simulate 2011.25 -> 2050.25 with checkpoint at step 39.
neworder.timeline = neworder.Timeline(2011.25, 2050.25, [39])
# timestep must be defined in neworder
# (the strings below are evaluated by neworder each step, in this order)
neworder.dataframe.transitions = {
  "fertility": "people.births(timestep)",
  "mortality": "people.deaths(timestep)",
  "migration": "people.migrations(timestep)",
  "age": "people.age(timestep)"
}
# checks to perform after each timestep. Assumed to return a boolean
neworder.do_checks = True # Faith
# assumed to be methods of class_ returning True if checks pass
neworder.checks = {
  "check": "people.check()"
}
# Generate output at each checkpoint
neworder.checkpoints = {
  #"check_data" : "people.check()",
  "write_table" : "people.write_table()"
}
| [
"neworder.mpi.rank",
"neworder.Timeline",
"neworder.mpi.size",
"glob.glob"
] | [((258, 323), 'glob.glob', 'glob.glob', (['"""examples/people_multi/data/ssm_*_MSOA11_ppp_2011.csv"""'], {}), "('examples/people_multi/data/ssm_*_MSOA11_ppp_2011.csv')\n", (267, 323), False, 'import glob\n'), ((1171, 1212), 'neworder.Timeline', 'neworder.Timeline', (['(2011.25)', '(2050.25)', '[39]'], {}), '(2011.25, 2050.25, [39])\n', (1188, 1212), False, 'import neworder\n'), ((857, 876), 'neworder.mpi.size', 'neworder.mpi.size', ([], {}), '()\n', (874, 876), False, 'import neworder\n'), ((1065, 1084), 'neworder.mpi.rank', 'neworder.mpi.rank', ([], {}), '()\n', (1082, 1084), False, 'import neworder\n')] |
# present and accept dispute processing
from flask import Blueprint, request, make_response, render_template, flash, redirect
from openarticlegauge.core import app
import openarticlegauge.util as util
import openarticlegauge.models as models
blueprint = Blueprint('issue', __name__)
@blueprint.route("/", methods=['GET','POST'])
@blueprint.route(".json", methods=['GET','POST'])
@blueprint.route("/<path:path>", methods=['GET','POST','DELETE'])
@util.jsonp
def issue(path=''):
givejson = util.request_wants_json()
path = path.replace('.json','')
i = False
if path:
i = models.Issue.pull(path)
if request.method == 'GET':
if givejson:
resp = make_response( i.data )
resp.mimetype = "application/json"
return resp
else:
return render_template('issue.html', issue=i)
elif request.method == 'POST':
if not i:
i = models.Issue()
if request.json:
i.data = request.json
elif request.values:
i.data['about'] = request.values['about']
i.data['issue'] = request.values['issue']
i.data['email'] = request.values['email']
else:
abort(404)
# only save an issue about an ID we actually have a record for
if len(i.data['about']) < 9:
cid = 'pmid:'
else:
cid = 'doi:'
check = models.Record.pull(cid + i.data['about'].replace('/','_'))
if check is not None:
i.save()
elif givejson:
abort(404)
else:
flash("Sorry, your issue is about an identifier for which we do not hold a record.", 'error')
return render_template('issue.html', issue=i)
if app.config['CONTACT_EMAIL'] and not app.config['DEBUG']:
text = 'Hey, an issue has been raised for ' + i.data['about'] + '\n\nView it at http://oag.cottagelabs.com/issue/' + i.id
util.send_mail([app.config['CONTACT_EMAIL']], app.config['CONTACT_EMAIL'], "issue raised", text)
if givejson:
resp = make_response( i.data )
resp.mimetype = "application/json"
return resp
else:
flash("Thanks, your issue has been raised", 'success')
return redirect('/issue/' + i.id)
elif request.method == 'DELETE' and i:
i.delete()
return ""
else:
abort(404)
| [
"flask.render_template",
"openarticlegauge.util.request_wants_json",
"flask.flash",
"openarticlegauge.models.Issue.pull",
"flask.redirect",
"openarticlegauge.models.Issue",
"flask.make_response",
"flask.Blueprint",
"openarticlegauge.util.send_mail"
] | [((257, 285), 'flask.Blueprint', 'Blueprint', (['"""issue"""', '__name__'], {}), "('issue', __name__)\n", (266, 285), False, 'from flask import Blueprint, request, make_response, render_template, flash, redirect\n'), ((497, 522), 'openarticlegauge.util.request_wants_json', 'util.request_wants_json', ([], {}), '()\n', (520, 522), True, 'import openarticlegauge.util as util\n'), ((604, 627), 'openarticlegauge.models.Issue.pull', 'models.Issue.pull', (['path'], {}), '(path)\n', (621, 627), True, 'import openarticlegauge.models as models\n'), ((701, 722), 'flask.make_response', 'make_response', (['i.data'], {}), '(i.data)\n', (714, 722), False, 'from flask import Blueprint, request, make_response, render_template, flash, redirect\n'), ((829, 867), 'flask.render_template', 'render_template', (['"""issue.html"""'], {'issue': 'i'}), "('issue.html', issue=i)\n", (844, 867), False, 'from flask import Blueprint, request, make_response, render_template, flash, redirect\n'), ((938, 952), 'openarticlegauge.models.Issue', 'models.Issue', ([], {}), '()\n', (950, 952), True, 'import openarticlegauge.models as models\n'), ((1980, 2080), 'openarticlegauge.util.send_mail', 'util.send_mail', (["[app.config['CONTACT_EMAIL']]", "app.config['CONTACT_EMAIL']", '"""issue raised"""', 'text'], {}), "([app.config['CONTACT_EMAIL']], app.config['CONTACT_EMAIL'],\n 'issue raised', text)\n", (1994, 2080), True, 'import openarticlegauge.util as util\n'), ((2118, 2139), 'flask.make_response', 'make_response', (['i.data'], {}), '(i.data)\n', (2131, 2139), False, 'from flask import Blueprint, request, make_response, render_template, flash, redirect\n'), ((2239, 2293), 'flask.flash', 'flash', (['"""Thanks, your issue has been raised"""', '"""success"""'], {}), "('Thanks, your issue has been raised', 'success')\n", (2244, 2293), False, 'from flask import Blueprint, request, make_response, render_template, flash, redirect\n'), ((2313, 2339), 'flask.redirect', 'redirect', (["('/issue/' + i.id)"], 
{}), "('/issue/' + i.id)\n", (2321, 2339), False, 'from flask import Blueprint, request, make_response, render_template, flash, redirect\n'), ((1613, 1716), 'flask.flash', 'flash', (['"""Sorry, your issue is about an identifier for which we do not hold a record."""', '"""error"""'], {}), "(\n 'Sorry, your issue is about an identifier for which we do not hold a record.'\n , 'error')\n", (1618, 1716), False, 'from flask import Blueprint, request, make_response, render_template, flash, redirect\n'), ((1726, 1764), 'flask.render_template', 'render_template', (['"""issue.html"""'], {'issue': 'i'}), "('issue.html', issue=i)\n", (1741, 1764), False, 'from flask import Blueprint, request, make_response, render_template, flash, redirect\n')] |
"""Tokenize a document.
*** NLTK tokenization and this module have been deprecated in favor of a
sklearn-based solution. However, NLTK may offer more options for tokenization,
stemming, etc., this module is retained for future reference.
"""
import re
import nltk
import toolz as tz
# Matches any character that is not an ASCII letter.
re_not_alpha = re.compile('[^a-zA-Z]')
# English stopword list from NLTK (requires the 'stopwords' corpus download).
STOPWORDS = set(nltk.corpus.stopwords.words('english'))
def is_alpha(tt):
    """Return True when a POS-tagged token (<token>, <pos>) has a purely
    alphabetic token.

    Any character outside [a-zA-Z] -- digits, punctuation, whitespace --
    makes this False.
    """
    token = tt[0]
    return re_not_alpha.search(token) is None
def not_proper(tt):
    """Given a POS tagged token (<token>, <pos>), return True when the token
    is tagged as anything other than a proper noun ('NNP').
    """
    pos = tt[1]
    return pos != 'NNP'
def not_stopword(tt):
    """Given a POS tagged token (<token>, <pos>), return True when the token
    is absent from the module-level STOPWORDS set.
    """
    token = tt[0]
    return token not in STOPWORDS
def lower(tt):
    """Lower-case the token of a POS tagged pair, keeping its tag:
    (<token>, <pos>) -> (<token>.lower(), <pos>).
    """
    token, pos = tt[0], tt[1]
    return (token.lower(), pos)
def stem(tt):
    """Given a POS tagged token (<token>, <pos>), return
    (<stemmed token>, <pos>).

    The LancasterStemmer is created once on first use and cached on the
    function object: the original code built a fresh stemmer on every call,
    which is wasteful when stemming every token of a document.
    """
    stemmer = getattr(stem, '_stemmer', None)
    if stemmer is None:
        stemmer = stem._stemmer = nltk.stem.lancaster.LancasterStemmer()
    return (stemmer.stem(tt[0]), tt[1])
def remove_pos(tt):
    """Strip the POS tag from (<token>, <pos>), returning just the token."""
    token = tt[0]
    return token
def tokenize(doc, with_stem=False):
    """Given a document string, return a list of tokens.

    The document is word-tokenized and POS tagged with NLTK, then threaded
    through a filter/map pipeline (via toolz.thread_last) that drops
    non-alphabetic tokens, proper nouns and stopwords, lower-cases the rest,
    optionally stems (when with_stem is True), and finally strips the POS
    tags so plain token strings are returned.
    """
    # Each stage is a (callable, fn) pair; thread_last invokes each as
    # callable(fn, <previous result>), i.e. filter/map over the stream.
    pipeline = [
        (filter, is_alpha),
        (filter, not_proper),
        (map, lower),
        (filter, not_stopword)]
    if with_stem:
        pipeline += [(map, stem)]
    pipeline += [(map, remove_pos)]
    return list(tz.thread_last(
        nltk.tag.pos_tag(nltk.tokenize.word_tokenize(doc)),
        *pipeline))
| [
"nltk.tokenize.word_tokenize",
"nltk.stem.lancaster.LancasterStemmer",
"nltk.corpus.stopwords.words",
"re.compile"
] | [((302, 325), 're.compile', 're.compile', (['"""[^a-zA-Z]"""'], {}), "('[^a-zA-Z]')\n", (312, 325), False, 'import re\n'), ((342, 380), 'nltk.corpus.stopwords.words', 'nltk.corpus.stopwords.words', (['"""english"""'], {}), "('english')\n", (369, 380), False, 'import nltk\n'), ((1215, 1253), 'nltk.stem.lancaster.LancasterStemmer', 'nltk.stem.lancaster.LancasterStemmer', ([], {}), '()\n', (1251, 1253), False, 'import nltk\n'), ((1771, 1803), 'nltk.tokenize.word_tokenize', 'nltk.tokenize.word_tokenize', (['doc'], {}), '(doc)\n', (1798, 1803), False, 'import nltk\n')] |
from unittest.mock import ANY, Mock, patch
import pytest
from streamlit_server_state.server_state_item import ServerStateItem
@pytest.fixture
def patch_is_rerunnable():
    """Patch is_rerunnable() so every session is treated as rerunnable."""
    target = "streamlit_server_state.server_state_item.is_rerunnable"
    with patch(target) as mock_is_rerunnable:
        mock_is_rerunnable.return_value = True
        yield
def test_bound_sessions_are_requested_to_rerun_when_value_is_set_or_update(
    patch_is_rerunnable,
):
    """Each set_value() on the item must request a rerun of the bound session."""
    mock_session = Mock()
    item = ServerStateItem()
    item.bind_session(mock_session)
    mock_session.request_rerun.assert_not_called()
    item.set_value(42)
    mock_session.request_rerun.assert_has_calls([ANY])
    item.set_value(100)
    mock_session.request_rerun.assert_has_calls([ANY, ANY])
def test_all_bound_sessions_are_requested_to_rerun(patch_is_rerunnable):
    """Every session bound to the item must be asked to rerun on each update."""
    first_session = Mock()
    second_session = Mock()
    item = ServerStateItem()
    item.bind_session(first_session)
    item.bind_session(second_session)
    first_session.request_rerun.assert_not_called()
    second_session.request_rerun.assert_not_called()
    item.set_value(42)
    first_session.request_rerun.assert_has_calls([ANY])
    second_session.request_rerun.assert_has_calls([ANY])
    item.set_value(100)
    first_session.request_rerun.assert_has_calls([ANY, ANY])
    second_session.request_rerun.assert_has_calls([ANY, ANY])
def test_bound_sessions_are_not_duplicate(patch_is_rerunnable):
    """Binding the same session twice must not produce duplicate rerun calls."""
    mock_session = Mock()
    item = ServerStateItem()
    item.bind_session(mock_session)
    item.bind_session(mock_session)  # Bind the session a second time
    mock_session.request_rerun.assert_not_called()
    item.set_value(42)
    mock_session.request_rerun.assert_called_once()
def test_bound_sessions_are_not_requested_to_rerun_when_the_set_value_is_not_changed(
    patch_is_rerunnable,
):
    """Re-setting an unchanged value must not trigger an extra rerun."""
    mock_session = Mock()
    item = ServerStateItem()
    item.bind_session(mock_session)
    mock_session.request_rerun.assert_not_called()
    item.set_value(42)
    mock_session.request_rerun.assert_called_once()
    item.set_value(42)
    mock_session.request_rerun.assert_called_once()  # No new calls
def test_bound_sessions_are_requested_to_rerun_when_a_same_but_mutated_object_is_set(
    patch_is_rerunnable,
):
    """Re-setting the same object after mutating it must still trigger a rerun."""
    mock_session = Mock()
    item = ServerStateItem()
    item.bind_session(mock_session)
    mock_session.request_rerun.assert_not_called()
    item.set_value({})
    mock_session.request_rerun.assert_has_calls([ANY])
    mutated = item.get_value()
    mutated["foo"] = 42
    item.set_value(mutated)
    mock_session.request_rerun.assert_has_calls([ANY, ANY])
| [
"unittest.mock.patch",
"unittest.mock.Mock",
"streamlit_server_state.server_state_item.ServerStateItem"
] | [((464, 470), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (468, 470), False, 'from unittest.mock import ANY, Mock, patch\n'), ((483, 500), 'streamlit_server_state.server_state_item.ServerStateItem', 'ServerStateItem', ([], {}), '()\n', (498, 500), False, 'from streamlit_server_state.server_state_item import ServerStateItem\n'), ((823, 829), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (827, 829), False, 'from unittest.mock import ANY, Mock, patch\n'), ((845, 851), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (849, 851), False, 'from unittest.mock import ANY, Mock, patch\n'), ((864, 881), 'streamlit_server_state.server_state_item.ServerStateItem', 'ServerStateItem', ([], {}), '()\n', (879, 881), False, 'from streamlit_server_state.server_state_item import ServerStateItem\n'), ((1384, 1390), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1388, 1390), False, 'from unittest.mock import ANY, Mock, patch\n'), ((1403, 1420), 'streamlit_server_state.server_state_item.ServerStateItem', 'ServerStateItem', ([], {}), '()\n', (1418, 1420), False, 'from streamlit_server_state.server_state_item import ServerStateItem\n'), ((1757, 1763), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1761, 1763), False, 'from unittest.mock import ANY, Mock, patch\n'), ((1776, 1793), 'streamlit_server_state.server_state_item.ServerStateItem', 'ServerStateItem', ([], {}), '()\n', (1791, 1793), False, 'from streamlit_server_state.server_state_item import ServerStateItem\n'), ((2160, 2166), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (2164, 2166), False, 'from unittest.mock import ANY, Mock, patch\n'), ((2179, 2196), 'streamlit_server_state.server_state_item.ServerStateItem', 'ServerStateItem', ([], {}), '()\n', (2194, 2196), False, 'from streamlit_server_state.server_state_item import ServerStateItem\n'), ((182, 245), 'unittest.mock.patch', 'patch', (['"""streamlit_server_state.server_state_item.is_rerunnable"""'], {}), 
"('streamlit_server_state.server_state_item.is_rerunnable')\n", (187, 245), False, 'from unittest.mock import ANY, Mock, patch\n')] |
#!/usr/bin/env python
# Copyright (c) 2016, 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import commands
import operator
import os
import re
import struct
import sys
import zlib
# Default ELF section names / offsets; each is overridable on the command
# line (see main()).
DEFAULT_FILES_SEC = '.files'
DEFAULT_FS_TABLE_SEC = '.fs_table'
DEFAULT_PSTORE_SEC = '.uld_pdata'
DEFAULT_PSTORE_OFF = 0
# Geometry of a .plt entry; the GOTOFFFUNCDESC value lives at offset 0x10
# within each 0x14-byte entry (see patch_plt_gotofffuncdesc()).
PLT_ENTRY_SIZE = 0x14
PLT_ENTRY_GOTOFFFUNCDESC_OFFSET = 0x10
# On-disk layout of an FSEntry header: 5 little-endian uint32 fields
# (file_base, next_e, size, crc, flags) followed by a variable-length name.
FS_ENTRY_FMT = '<IIIII'
FS_ENTRY_SIZE = struct.calcsize(FS_ENTRY_FMT)
# Offset of the crc field within an FSEntry header.
FS_ENTRY_CRC_OFFSET = 0xc
# Offset of the fs_table crc within the pstore area.
PSTORE_FS_TABLE_CRC_OFFSET = 0x18
# Sections whose fixups are resolved at runtime (loaded into memory);
# apply_rofixups() skips fixup values pointing into these.
ROFIXUP_MEM_SEC_LIST = [
    '.got',
    '.got.plt',
    '.data',
    '.bss'
]
# Module-wide debug flag; set to 1 by --verbose (see dprint()).
_debug = 0
def dprint(s):
    """Print *s* only when the module-level _debug flag is positive."""
    if _debug > 0:
        print(s)
class CmdError(Exception):
    """Raised by qc() when an external command exits with a non-zero status."""
    pass
def qc(cmd):
    """Run a shell command and return its captured output.

    Uses the Python 2-only `commands` module.  On a non-zero exit status the
    output is printed and CmdError is raised.
    """
    s, o = commands.getstatusoutput(cmd)
    if (s != 0):
        print(o)
        raise CmdError('cmd: \'{}\' exited with code: {}'.format(cmd, s))
    return o
def wsclean(s):
    """Collapse every run of whitespace in *s* to a single space and strip
    leading/trailing whitespace.
    """
    return ' '.join(s.split())
class ElfSection(object):
    """Plain record describing one ELF section as reported by objdump -h.

    file_size and type start out as None and are filled in later by
    get_elf_sections() (type from readelf -S, file_size derived from the
    neighbouring sections' file offsets).
    """

    def __init__(self, idx, name, size, vma, lma, file_off, algn, flags):
        self.idx = idx
        self.name = name
        self.size = size
        self.vma = vma
        self.lma = lma
        self.file_off = file_off
        self.algn = algn
        self.flags = flags
        self.file_size = None
        self.type = None

    def __str__(self):
        # file_size renders as hex once known, as plain 'None' before that.
        if self.file_size is None:
            fs_part = 'file_size={}, '
        else:
            fs_part = 'file_size=0x{:08x}, '
        fmt = ('ElfSection(idx={}, name={}, size=0x{:08x}, vma=0x{:08x}, '
               'lma=0x{:08x}, file_off=0x{:08x}, algn={}, flags={}, ' +
               fs_part + 'type={})')
        return fmt.format(self.idx, self.name, self.size, self.vma, self.lma,
                          self.file_off, self.algn, self.flags,
                          self.file_size, self.type)
def get_elf_sections(path):
    """Return a list of ElfSection records for all ALLOC sections of *path*.

    Section geometry comes from 'objdump -h'; section types (e.g. NOBITS)
    come from 'readelf -S'.  file_size is derived afterwards: NOBITS
    sections get 0, other sections get the distance to the next section's
    file offset, capped at the section's own in-memory size.
    """
    ret = []
    objdump = os.environ.get('OBJDUMP', 'objdump')
    readelf = os.environ.get('READELF', 'readelf')
    # Get section data from objdump/readelf and trim headers and footers off.
    oc_out = qc('{} -h {}'.format(objdump, path)).split('\n')[5:]
    re_out = qc('{} -S {}'.format(readelf, path)).split('\n')[5:-4]
    # Data extraction was tested using binutils 2.22 and 2.26.1.
    oc_iter = iter(oc_out)
    for line in oc_iter:
        # objdump prints each section on two lines; the second line carries
        # the flags.  (Python 2 iterator API: .next())
        flags = oc_iter.next().strip()
        if 'ALLOC' not in flags:
            continue
        line = wsclean(line).split(' ')
        idx, name, size, vma, lma, file_off, algn = line
        elf_sec = ElfSection(idx, name, int(size, 16), int(vma, 16),
                             int(lma, 16), int(file_off, 16), algn, flags)
        ret.append(elf_sec)
    # Map section name -> type ('NOBITS', 'PROGBITS', ...) from readelf.
    re_dict = {}
    for line in re_out:
        line = line[line.find('.'):]
        line = wsclean(line).split(' ')
        # Skip entries that do not have the ALLOC flag.
        if 'A' not in line[-4]:
            continue
        name = line[0]
        sec_type = line[1]
        re_dict[name] = sec_type
    for elf_sec in ret:
        elf_sec.type = re_dict.get(elf_sec.name, None)
    # NOBITS sections (.bss etc.) occupy no space in the file.
    sec_list = ret[:]
    sec_list = [x for x in sec_list if x.type == 'NOBITS']
    for elf_sec in sec_list:
        elf_sec.file_size = 0
    sec_list = ret[:]
    sec_list = [x for x in sec_list if x.type != 'NOBITS']
    sec_list.sort(key=operator.attrgetter('file_off'))
    # elf_sec.size (i.e. size during execution) may not always be the same
    # size as section size in the file.
    for index in range(len(sec_list)):
        elf_sec = sec_list[index]
        if index == len(sec_list) - 1:
            # Best guess.
            elf_sec.file_size = elf_sec.size
            break
        next_elf_sec = sec_list[index + 1]
        file_size = next_elf_sec.file_off - elf_sec.file_off
        # Cover case where there may be orphaned data in between sections in
        # the file.
        if file_size > elf_sec.size:
            file_size = elf_sec.size
        elf_sec.file_size = file_size
    return ret
def sec_list_to_dict(sec_list):
    """Build a name -> section mapping from a list of ElfSection records."""
    return dict((sec.name, sec) for sec in sec_list)
def sec_name_in_sec_list(sec_list, name):
    """Return True if a section called *name* exists in *sec_list*.

    Rewritten to avoid dict.has_key() (removed in Python 3) and to
    short-circuit instead of building a full dict just for one membership
    test; works on both Python 2 and 3.
    """
    return any(sec.name == name for sec in sec_list)
def name_to_sec(sec_list, name):
    """Return the section called *name* from *sec_list*, or None if absent."""
    return sec_list_to_dict(sec_list).get(name)
def file_off_to_sec(sec_list, file_off):
    """Return the section whose file span contains *file_off*.

    Raises ValueError when no section covers the offset.
    """
    for sec in sec_list:
        start = sec.file_off
        if start <= file_off < start + sec.file_size:
            return sec
    raise ValueError('Could not find section for file_off: 0x{:08x}'.format(
        file_off))
def lma_to_sec(sec_list, lma):
    """Return the section whose load-address span contains *lma*.

    Raises ValueError when no section covers the address.
    """
    for sec in sec_list:
        if sec.lma <= lma < sec.lma + sec.file_size:
            return sec
    raise ValueError('Could not find section for lma: 0x{:08x}'.format(
        lma))
def vma_to_sec(sec_list, vma):
    """Return the section whose virtual-address span contains *vma*.

    Unlike lma_to_sec() this uses the in-memory size (size, not file_size)
    so NOBITS sections are matched too.  Raises ValueError on no match.
    """
    for sec in sec_list:
        if sec.vma <= vma < sec.vma + sec.size:
            return sec
    raise ValueError('Could not find section for vma: 0x{:08x}'.format(
        vma))
def lma_to_file_off(sec_list, lma):
    """Translate a load address into its byte offset within the ELF file."""
    sec = lma_to_sec(sec_list, lma)
    return lma - sec.lma + sec.file_off
def extract_sec(fd, sec_list, name):
    """Read and return the raw bytes of section *name* from *fd*.

    The current file position of *fd* is saved and restored around the read.
    """
    saved_pos = fd.tell()
    sec = sec_list_to_dict(sec_list)[name]
    fd.seek(sec.file_off)
    buf = fd.read(sec.file_size)
    fd.seek(saved_pos)
    return buf
class FSEntry(object):
    """One entry of the embedded file-system table.

    Unpacked from FS_ENTRY_FMT ('<IIIII': file_base, next_e, size, crc,
    flags) followed by name_len bytes of file name.
    """

    def __init__(self, e_file_off, buf, name_len):
        # e_file_off: file offset of this entry's header within the uld ELF
        # (used later to patch the crc field in place).
        self.e_file_off = e_file_off
        self.file_base, self.next_e, self.size, self.crc, self.flags, \
            self.name = struct.unpack(
                FS_ENTRY_FMT + '{}s'.format(name_len), buf)

    def __str__(self):
        ret = 'FSEntry(file_base=0x{:08x}, next_e=0x{:08x}, size=0x{:08x}, ' \
            'crc=0x{:08x}, flags=0x{:08x}, name={})'
        ret = ret.format(self.file_base, self.next_e, self.size, self.crc,
                         self.flags, self.name)
        return ret
def parse_fs_table(buf, buf_file_off, sec_list):
    """Parse the linked list of FSEntry records contained in *buf*.

    *buf* is the raw contents of the fs_table section and *buf_file_off* its
    offset within the uld ELF file.  Entries are chained via their next_e
    load address; a next_e of 0 terminates the list.  Returns the entries in
    chain order.  Raises ValueError when a next_e pointer translates to an
    offset outside *buf*.
    """
    ret = []
    e_base = 0
    while True:
        # This will put offset at the first char of fs_entry.name
        offset = e_base + FS_ENTRY_SIZE
        # Find the variable length of the NUL-terminated fs_entry.name.
        while buf[offset] != '\0':
            offset += 1
        fse = FSEntry(e_base + buf_file_off, buf[e_base:offset],
                      offset - FS_ENTRY_SIZE - e_base)
        ret.append(fse)
        # Bug fix: use '==' rather than 'is' — identity comparison against
        # an int literal only works by accident (CPython small-int caching).
        if fse.next_e == 0:
            break
        e_base = lma_to_file_off(sec_list, fse.next_e)
        e_base -= buf_file_off
        if e_base < 0 or e_base > len(buf):
            err = 'Next FS entry at lma: 0x{:08x} file_off: 0x{:08x} not ' \
                'in range of buf'
            raise ValueError(err.format(fse.next_e, lma_to_file_off(sec_list,
                fse.next_e)))
    return ret
def apply_rofixups(uld_sec_list, uld_fd, elf_sec_list, elf_fd, elf_file_lma):
    """Apply the .rofixup relocations of an embedded ELF inside the uld image.

    Each 32-bit entry of the embedded file's .rofixup section names an
    address whose stored value must be rebased from an lma inside the
    embedded ELF to an offset inside the uld image (elf_file_lma + file
    offset).  Fixups whose values point at runtime-loaded sections
    (ROFIXUP_MEM_SEC_LIST) or at NOBITS data are skipped — those are
    resolved at runtime.  File positions of both descriptors are restored
    on return.
    """
    if not sec_name_in_sec_list(elf_sec_list, '.rofixup'):
        return
    uld_opos = uld_fd.tell()
    elf_opos = elf_fd.tell()
    elf_file_off = lma_to_file_off(uld_sec_list, elf_file_lma)
    fixups = extract_sec(elf_fd, elf_sec_list, '.rofixup')
    fixups = [struct.unpack('<I', fixups[x:x + 4])[0] for x in
              range(0, len(fixups), 4)]
    global _debug
    # Bug fix: 'skip' was previously unbound on the first loop iteration, so
    # a ValueError raised by lma_to_sec(value) there produced a NameError
    # instead of the intended re-raise.  Initialize it before the loop; the
    # except clause below deliberately consults the previous iteration's
    # value (see comment there).
    skip = False
    for addr in fixups:
        sec = lma_to_sec(elf_sec_list, addr)
        file_off = lma_to_file_off(elf_sec_list, addr)
        elf_fd.seek(file_off)
        value = elf_fd.read(4)
        value = struct.unpack('<I', value)[0]
        # If a fixup value does not have a valid lma it is most likely pointing
        # to a NOBITS sections (i.e. .bss) and should already be set as skip.
        try:
            value_sec = lma_to_sec(elf_sec_list, value)
        except ValueError:
            if skip is False:
                raise
            value_sec = None
        # Sections that will be loaded into memory will have fixups applied
        # at runtime.
        if value_sec is None or value_sec.name in ROFIXUP_MEM_SEC_LIST:
            skip = True
        else:
            skip = False
        if _debug > 0:
            if value_sec is None:
                try:
                    value_sec = vma_to_sec(elf_sec_list, value)
                    value_sec_name = value_sec.name + ' (vma)'
                except:
                    value_sec_name = 'UNKNOWN'
            else:
                value_sec_name = value_sec.name
            if skip is True:
                value_sec_name += ' (skip)'
            fmt = '    addr: 0x{:08x} sec: {:<12s} file_off: 0x{:08x} ' \
                'value: 0x{:08x} value_sec: {}'
            dprint(fmt.format(addr, sec.name, file_off, value, value_sec_name))
        if skip is True:
            continue
        # Sanity check that everything is in the right place.
        uld_fd.seek(elf_file_off + file_off)
        uld_value = uld_fd.read(4)
        uld_value = struct.unpack('<I', uld_value)[0]
        if value != uld_value:
            fmt = 'Incorrect value 0x{:08x} at 0x{:08x} in uld_fd ' \
                'expected 0x{:08x}'
            raise ValueError(fmt.format(uld_value, elf_file_off + file_off,
                value))
        fixup_value = lma_to_file_off(elf_sec_list, value) + elf_file_lma
        msg = '    Applying fixup for 0x{:08x} value 0x{:08x}->0x{:08x}'
        dprint(msg.format(addr, value, fixup_value))
        fixup_value = struct.pack('<I', fixup_value)
        uld_fd.seek(-4, 1)
        uld_fd.write(fixup_value)
    uld_fd.seek(uld_opos)
    elf_fd.seek(elf_opos)
def patch_plt_gotofffuncdesc(uld_sec_list, uld_fd, elf_sec_list, elf_fd,
                             elf_file_lma):
    """Rebase the GOTOFFFUNCDESC value of every .plt entry in an embedded ELF.

    When the embedded ELF has both a .got and a .got.plt, the value stored
    at offset PLT_ENTRY_GOTOFFFUNCDESC_OFFSET of each PLT_ENTRY_SIZE-byte
    .plt entry is shifted by (.got.plt lma - .got lma); see the inline
    comment for the rationale.  The uld descriptor position is restored on
    return.  (NOTE(review): elf_fd is accepted but never used here.)
    """
    plt_sec = name_to_sec(elf_sec_list, '.plt')
    count = 0
    picreg_offset = 0
    uld_opos = uld_fd.tell()
    if plt_sec is not None:
        # A .plt section should not exist without a .got.plt
        got_plt_sec = name_to_sec(elf_sec_list, '.got.plt')
        if got_plt_sec is None:
            raise ValueError('.got.plt section not found when .plt section '
                'is present')
        # ld is generating the GOTOFFFUNCDESC values as an offset from the
        # base of .got.plt.  During runtime the values in .got.plt are
        # referenced using the pic base register which will point to the
        # base of .got if present (if not it will be the base of .got.plt).
        # Update the GOTOFFFUNCDESC values accordingly.
        got_sec = name_to_sec(elf_sec_list, '.got')
        if got_sec is not None:
            picreg_offset = got_plt_sec.lma - got_sec.lma
        if picreg_offset != 0:
            if plt_sec.size % 20 != 0:
                raise ValueError('.plt size {} is not multiple of 20'.format(
                    plt_sec.size))
            elf_file_off = lma_to_file_off(uld_sec_list, elf_file_lma)
            plt_file_off = lma_to_file_off(elf_sec_list, plt_sec.lma)
            for val_off in range(PLT_ENTRY_GOTOFFFUNCDESC_OFFSET, plt_sec.size,
                                 PLT_ENTRY_SIZE):
                uld_fd.seek(elf_file_off + plt_file_off + val_off)
                val = uld_fd.read(4)
                val = struct.unpack('<I', val)[0]
                #dprint('.plt + {:08x} GOTOFFFUNCDESC {:08x}->{:08x}'.format(
                #    val_off, val, val + picreg_offset))
                val = struct.pack('<I', val + picreg_offset)
                uld_fd.seek(-4, 1)
                uld_fd.write(val)
                count += 1
    dprint('Updated GOTOFFFUNCDESC values for {} plt entries'.format(count))
    uld_fd.seek(uld_opos)
def find_elf_file(elf_search_path, elf_filename):
    """Return the first existing path to *elf_filename* within the search
    directories; raise ValueError when it is found nowhere.
    """
    for directory in elf_search_path:
        candidate = os.path.join(directory, elf_filename)
        if os.path.exists(candidate):
            return candidate
    raise ValueError('{} not found.'.format(elf_filename))
def write_elf_file_crc(uld_sec_list, uld_fd, fse):
    """Recompute the crc32 of the embedded file described by *fse* and patch
    it into the FSEntry header inside the uld image.

    The uld descriptor's file position is restored on return.
    """
    uld_opos = uld_fd.tell()
    elf_file_off = lma_to_file_off(uld_sec_list, fse.file_base)
    uld_fd.seek(elf_file_off)
    data = uld_fd.read(fse.size)
    crc = zlib.crc32(data, 0)
    # zlib.crc32 can return a signed value (Python 2); force 32-bit unsigned.
    crc &= 0xffffffff
    dprint('Patching new crc 0x{:08x} for file {}'.format(crc, fse.name))
    crc = struct.pack('<I', crc)
    uld_fd.seek(fse.e_file_off + FS_ENTRY_CRC_OFFSET)
    uld_fd.write(crc)
    uld_fd.seek(uld_opos)
def write_fs_table_crc(uld_fd, fs_table_off, fs_table_size, pstore_off):
    """Recompute the crc32 of the whole fs_table and patch it into the
    pstore area (at pstore_off + PSTORE_FS_TABLE_CRC_OFFSET).

    The uld descriptor's file position is restored on return.
    """
    uld_opos = uld_fd.tell()
    uld_fd.seek(fs_table_off)
    data = uld_fd.read(fs_table_size)
    crc = zlib.crc32(data, 0)
    # zlib.crc32 can return a signed value (Python 2); force 32-bit unsigned.
    crc &= 0xffffffff
    dprint('Patching new crc 0x{:08x} for fs_table'.format(crc))
    crc = struct.pack('<I', crc)
    uld_fd.seek(pstore_off + PSTORE_FS_TABLE_CRC_OFFSET)
    uld_fd.write(crc)
    uld_fd.seek(uld_opos)
def patch_uld_elf(args):
    """Top-level patching pass over the uld firmware image.

    Reads the fs_table from the uld ELF, then for every embedded file:
    locates its source ELF on disk, applies rofixups, patches the plt
    GOTOFFFUNCDESC values, and rewrites its crc.  Finally the fs_table crc
    in the pstore area is recomputed.  The image is modified in place
    ('r+').
    """
    global _debug
    dprint('args: {}'.format(args))
    uld_sec_list = get_elf_sections(args.uld_path)
    uld_fd = open(args.uld_path, 'r+')
    uld_sec_dict = sec_list_to_dict(uld_sec_list)
    fs_table_sec = uld_sec_dict[args.fs_table_section]
    uld_fd.seek(fs_table_sec.file_off)
    # Detect dev case where no files are embedded.
    # (dict.has_key: Python 2 only.)
    if uld_sec_dict.has_key(args.file_section):
        fs_table = uld_fd.read(fs_table_sec.file_size)
        fs_table = parse_fs_table(fs_table, fs_table_sec.file_off,
                                  uld_sec_list)
    else:
        fs_table = []
    if _debug > 0:
        dprint('Read {} FSEntries from {}'.format(len(fs_table),
                                                  args.uld_path))
        for x in fs_table:
            dprint('  {}'.format(x))
    # Search order: user-supplied paths first, then the uld image's own
    # directory.
    uld_dir = os.path.split(args.uld_path)[0]
    if args.elf_search_path is not None:
        elf_search_path = args.elf_search_path + [uld_dir,]
    else:
        elf_search_path = [uld_dir,]
    pstore_off = uld_sec_dict[args.pstore_section].file_off
    pstore_off += args.pstore_offset
    for fse in fs_table:
        elf_path = find_elf_file(elf_search_path, fse.name)
        dprint('Processing file {}'.format(elf_path))
        elf_fd = open(elf_path, 'r')
        elf_sec_list = get_elf_sections(elf_path)
        apply_rofixups(uld_sec_list, uld_fd, elf_sec_list, elf_fd,
                       fse.file_base)
        patch_plt_gotofffuncdesc(uld_sec_list, uld_fd, elf_sec_list, elf_fd,
                                 fse.file_base)
        write_elf_file_crc(uld_sec_list, uld_fd, fse)
        elf_fd.close()
    write_fs_table_crc(uld_fd, fs_table_sec.file_off, fs_table_sec.size,
                       pstore_off)
    uld_fd.close()
def main(argv=None):
    """Command-line entry point: parse options and run patch_uld_elf().

    argv is only used for the program name (argv[0]); parse_args() is called
    without arguments and therefore always reads sys.argv — NOTE(review):
    passing a custom argv thus does not affect option parsing.
    """
    if argv is not None:
        prog = os.path.basename(argv[0])
    else:
        prog = 'patch-uld-elf.py'
    epilog='\nIf OBJCOPY/READELF is not present in environment ' \
        '\'objcopy\' and \'readelf\' will be used.\n' \
        'Usage for --elf-search-path:\n' \
        '  {} /path/to/uld-elf --elf-search-path /path/to/search-1 ' \
        '/path/to/search-2'
    epilog = epilog.format(prog)
    parser = argparse.ArgumentParser(prog=prog,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description='Apply rofixups for files contained within uld '
        'firmware and recalculate crc32 checksums for files and fs_table',
        epilog=epilog)
    parser.add_argument('--file-section', type=str, default=DEFAULT_FILES_SEC,
        help='Section prefix to for files (default: {})'.format(
            DEFAULT_FILES_SEC))
    parser.add_argument('--fs-table-section', type=str,
        default=DEFAULT_FS_TABLE_SEC,
        help='Section prefix to for fs_table (default: {})'.format(
            DEFAULT_FS_TABLE_SEC))
    parser.add_argument('--pstore-section', type=str,
        default=DEFAULT_PSTORE_SEC,
        help='Section prefix to for pstore (default: {})'.format(
            DEFAULT_PSTORE_SEC))
    parser.add_argument('--pstore-offset', type=int,
        default=DEFAULT_PSTORE_OFF,
        help='Offset from section base for pstore (default: {})'.format(
            DEFAULT_PSTORE_OFF))
    parser.add_argument('--elf-search-path', type=str, nargs='+',
        help='Search path for elf files, each path is processed in order '
        'from the command line followed by the base directory of '
        'uld-path')
    parser.add_argument('--verbose', action='store_true')
    parser.add_argument('uld_path', type=str, metavar='uld-path',
        help='Path to input file path or name=path to change default '
        'name (derived from path and file-path-strip)')
    args = parser.parse_args()
    global _debug
    if args.verbose is True:
        _debug = 1
    patch_uld_elf(args)
if __name__ == '__main__':
main()
| [
"operator.attrgetter",
"struct.calcsize",
"os.path.exists",
"argparse.ArgumentParser",
"os.environ.get",
"os.path.join",
"struct.pack",
"os.path.split",
"commands.getstatusoutput",
"struct.unpack",
"os.path.basename",
"zlib.crc32"
] | [((1444, 1473), 'struct.calcsize', 'struct.calcsize', (['FS_ENTRY_FMT'], {}), '(FS_ENTRY_FMT)\n', (1459, 1473), False, 'import struct\n'), ((1761, 1790), 'commands.getstatusoutput', 'commands.getstatusoutput', (['cmd'], {}), '(cmd)\n', (1785, 1790), False, 'import commands\n'), ((2905, 2941), 'os.environ.get', 'os.environ.get', (['"""OBJDUMP"""', '"""objdump"""'], {}), "('OBJDUMP', 'objdump')\n", (2919, 2941), False, 'import os\n'), ((2956, 2992), 'os.environ.get', 'os.environ.get', (['"""READELF"""', '"""readelf"""'], {}), "('READELF', 'readelf')\n", (2970, 2992), False, 'import os\n'), ((13095, 13114), 'zlib.crc32', 'zlib.crc32', (['data', '(0)'], {}), '(data, 0)\n', (13105, 13114), False, 'import zlib\n'), ((13223, 13245), 'struct.pack', 'struct.pack', (['"""<I"""', 'crc'], {}), "('<I', crc)\n", (13234, 13245), False, 'import struct\n'), ((13532, 13551), 'zlib.crc32', 'zlib.crc32', (['data', '(0)'], {}), '(data, 0)\n', (13542, 13551), False, 'import zlib\n'), ((13650, 13672), 'struct.pack', 'struct.pack', (['"""<I"""', 'crc'], {}), "('<I', crc)\n", (13661, 13672), False, 'import struct\n'), ((15916, 16159), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': 'prog', 'formatter_class': 'argparse.RawDescriptionHelpFormatter', 'description': '"""Apply rofixups for files contained within uld firmware and recalculate crc32 checksums for files and fs_table"""', 'epilog': 'epilog'}), "(prog=prog, formatter_class=argparse.\n RawDescriptionHelpFormatter, description=\n 'Apply rofixups for files contained within uld firmware and recalculate crc32 checksums for files and fs_table'\n , epilog=epilog)\n", (15939, 16159), False, 'import argparse\n'), ((10534, 10564), 'struct.pack', 'struct.pack', (['"""<I"""', 'fixup_value'], {}), "('<I', fixup_value)\n", (10545, 10564), False, 'import struct\n'), ((12722, 12758), 'os.path.join', 'os.path.join', (['dir_path', 'elf_filename'], {}), '(dir_path, elf_filename)\n', (12734, 12758), False, 'import os\n'), 
((12770, 12790), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (12784, 12790), False, 'import os\n'), ((14576, 14604), 'os.path.split', 'os.path.split', (['args.uld_path'], {}), '(args.uld_path)\n', (14589, 14604), False, 'import os\n'), ((15549, 15574), 'os.path.basename', 'os.path.basename', (['argv[0]'], {}), '(argv[0])\n', (15565, 15574), False, 'import os\n'), ((4310, 4341), 'operator.attrgetter', 'operator.attrgetter', (['"""file_off"""'], {}), "('file_off')\n", (4329, 4341), False, 'import operator\n'), ((8287, 8323), 'struct.unpack', 'struct.unpack', (['"""<I"""', 'fixups[x:x + 4]'], {}), "('<I', fixups[x:x + 4])\n", (8300, 8323), False, 'import struct\n'), ((8595, 8621), 'struct.unpack', 'struct.unpack', (['"""<I"""', 'value'], {}), "('<I', value)\n", (8608, 8621), False, 'import struct\n'), ((10030, 10060), 'struct.unpack', 'struct.unpack', (['"""<I"""', 'uld_value'], {}), "('<I', uld_value)\n", (10043, 10060), False, 'import struct\n'), ((12390, 12428), 'struct.pack', 'struct.pack', (['"""<I"""', '(val + picreg_offset)'], {}), "('<I', val + picreg_offset)\n", (12401, 12428), False, 'import struct\n'), ((12213, 12237), 'struct.unpack', 'struct.unpack', (['"""<I"""', 'val'], {}), "('<I', val)\n", (12226, 12237), False, 'import struct\n')] |
from functools import partial
from typing import List, Optional, Sequence, Tuple
import torch
import torch.nn.functional as F
from torch import nn
from mmderain.models.common import sizeof
from mmderain.models.registry import BACKBONES
from mmderain.models.layers import SELayer_Modified
class ResidualBlock(nn.Module):
    """3x3 Conv + GroupNorm(8 groups) block with an additive skip connection.

    The skip sum is passed through LeakyReLU(0.2) — i.e. the activation sits
    after the addition.  `planes` must be divisible by 8 for the GroupNorm.
    """

    def __init__(self, planes: int) -> None:
        super().__init__()
        layers = [
            nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1),
            nn.GroupNorm(num_groups=8, num_channels=planes),
        ]
        self.model = nn.Sequential(*layers)
        self.act = nn.LeakyReLU(0.2, inplace=True)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        residual = x
        out = self.model(x)
        return self.act(out + residual)
class FFRB(nn.Module):
    """Feature fusion residual block.

    model0 expands the input to mid_planes*4 channels with a 3x3 conv, then
    projects down to mid_planes with a conv of the given kernel_size (each
    followed by GroupNorm + LeakyReLU).  model1 fuses the original input
    with that result (channel concat), projects to out_planes, and applies a
    modified squeeze-and-excitation layer.
    """

    def __init__(
        self,
        in_planes: int,
        mid_planes: int,
        out_planes: int,
        kernel_size: int
    ) -> None:
        super().__init__()
        inter_planes = mid_planes * 4
        planes_per_group = 4
        self.model0 = nn.Sequential(
            nn.Conv2d(in_planes, inter_planes, kernel_size=3, stride=1, padding=1),
            nn.GroupNorm(num_groups=inter_planes // planes_per_group, num_channels=inter_planes),
            nn.LeakyReLU(0.2, inplace=True),
            # 'same' padding for the odd kernel_size.
            nn.Conv2d(inter_planes, mid_planes,
                      kernel_size=kernel_size, stride=1, padding=(kernel_size - 1) // 2),
            nn.GroupNorm(num_groups=mid_planes//planes_per_group, num_channels=mid_planes),
            nn.LeakyReLU(0.2, inplace=True)
        )
        self.model1 = nn.Sequential(
            nn.Conv2d(in_planes+mid_planes, out_planes, kernel_size=3, stride=1, padding=1),
            nn.GroupNorm(num_groups=4, num_channels=out_planes),
            nn.LeakyReLU(0.2, inplace=True),
            SELayer_Modified(out_planes, reduction=out_planes//6,
                             bias=True, act=nn.LeakyReLU(0.2, inplace=True))
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Fuse the input with model0's output along the channel dimension.
        out = self.model0(x)
        out = torch.cat([x, out], dim=1)
        out = self.model1(out)
        return out
class Encoder(nn.Module):
    """Applies a stack of layers, recording each stage's output as a skip
    feature and downsampling (2x2 stride-2 average pool) between stages.

    Returns (final downsampled tensor, list of pre-downsample stage outputs).
    The return annotation previously was Tuple[List[torch.Tensor]], which did
    not match the (tensor, list) value actually returned.
    """

    def __init__(self, layers: List[nn.Module]) -> None:
        super().__init__()
        self.models = nn.ModuleList(layers)
        self.downsample = nn.AvgPool2d(kernel_size=2, stride=2)

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]:
        features = []
        out = x
        for model in self.models:
            out = model(out)
            features.append(out)  # skip connection for the decoder
            out = self.downsample(out)
        return out, features
class Decoder(nn.Module):
    """Applies a stack of layers, 2x nearest-upsampling each stage's output
    and concatenating it (dim=1) with the matching skip tensor popped from
    *bridges* — so *bridges* is consumed back-to-front and mutated in place.

    Returns (final concatenated tensor, list of per-stage concatenated
    features).  The return annotation previously said torch.Tensor, which
    did not match the tuple actually returned.
    """

    def __init__(self, layers: List[nn.Module]) -> None:
        super().__init__()
        self.models = nn.ModuleList(layers)
        self.upsample = partial(F.interpolate, mode='nearest', align_corners=None)

    def forward(
        self, x: torch.Tensor, bridges: Sequence[torch.Tensor]
    ) -> Tuple[torch.Tensor, List[torch.Tensor]]:
        features = []
        out = x
        for model in self.models:
            out = model(out)
            out = self.upsample(out, scale_factor=2)
            out = torch.cat([out, bridges.pop()], dim=1)
            features.append(out)
        return out, features
class UFFRB(nn.Module):
    """U-Net structure constructed with FFRBs.

    depth//2 encoder stages (FFRB + stride-2 avg-pool) followed by depth//2
    decoder stages (FFRB + nearest upsample + skip concat).  Decoder stages
    after the first take planes*2 input channels because of the concat, and
    the returned tensor also has planes*2 channels.
    """

    def __init__(self, planes: int, depth: int) -> None:
        super().__init__()
        ffrb_builder = partial(FFRB, mid_planes=planes, out_planes=planes, kernel_size=3)
        self.encoder = Encoder([ffrb_builder(in_planes=planes) for _ in range(depth // 2)])
        self.decoder = Decoder([ffrb_builder(in_planes=planes if i == 0 else planes*2)
                                for i in range(depth//2)])

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        out = x
        out, encoder_features = self.encoder(out)
        # The encoder's per-stage features serve as the decoder's bridges.
        out, _ = self.decoder(out, encoder_features)
        return out
class Foundation(nn.Module):
    """Structure for feature compensator and error detector.

    Pipeline: 3x3 conv stem (in_planes -> mid_planes) with GroupNorm +
    LeakyReLU, an optional UFFRB (enabled when uffrb_depth > 0), n_ffrb FFRB
    stages, and a final 3x3 conv to out_planes with an optional output
    activation ('leakyrelu', 'sigmoid', or None for no activation).
    """

    def __init__(
        self,
        in_planes: int,
        mid_planes: int,
        out_planes: int,
        uffrb_depth: int = -1,
        n_ffrb: int = 3,
        act: Optional[str] = None
    ) -> None:
        super().__init__()
        models = []
        planes_per_group = 4
        models.extend([
            nn.Conv2d(in_planes, mid_planes, kernel_size=3, stride=1, padding=1),
            nn.GroupNorm(num_groups=mid_planes//planes_per_group, num_channels=mid_planes),
            nn.LeakyReLU(0.2, inplace=True)
        ])
        # uffrb_depth <= 0 disables the UFFRB stage.
        use_uffrb = uffrb_depth > 0
        if use_uffrb:
            models.append(UFFRB(mid_planes, uffrb_depth))
        for i in range(n_ffrb):
            if use_uffrb and i == 0:
                # The UFFRB outputs mid_planes*2 channels (skip concat), so
                # the first FFRB after it takes the doubled channel count.
                models.append(FFRB(mid_planes*2, mid_planes, mid_planes, kernel_size=3))
            else:
                models.append(FFRB(mid_planes, mid_planes, mid_planes, kernel_size=3))
        models.append(nn.Conv2d(mid_planes, out_planes, kernel_size=3, stride=1, padding=1))
        if act == 'leakyrelu':
            models.append(nn.LeakyReLU(0.2, inplace=True))
        elif act == 'sigmoid':
            models.append(nn.Sigmoid())
        self.model = nn.Sequential(*models)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.model(x)
class FeatureCompensator(Foundation):
    """Feature compensator: a Foundation with an optional depth-6 UFFRB and a
    'leakyrelu' output activation by default."""

    def __init__(
        self,
        in_planes: int,
        mid_planes: int,
        out_planes: int,
        use_uffrb: bool = True,
        n_ffrb: int = 3,
        act: str = 'leakyrelu'
    ) -> None:
        # depth 6 -> 3 encoder + 3 decoder stages inside the UFFRB;
        # -1 disables the UFFRB entirely.
        uffrb_depth = 6 if use_uffrb else -1
        super().__init__(in_planes, mid_planes, out_planes, uffrb_depth, n_ffrb, act)
class ErrorDetector(Foundation):
    """Error detector: a Foundation whose UFFRB usage, FFRB count and output
    activation must be given explicitly (no defaults)."""

    def __init__(
        self,
        in_planes: int,
        mid_planes: int,
        out_planes: int,
        use_uffrb: bool,
        n_ffrb: int,
        act: Optional[str] = None
    ) -> None:
        # depth 6 -> 3 encoder + 3 decoder stages inside the UFFRB;
        # -1 disables the UFFRB entirely.
        uffrb_depth = 6 if use_uffrb else -1
        super().__init__(in_planes, mid_planes, out_planes, uffrb_depth, n_ffrb, act)
class Refinement(nn.Module):
    """Refinement Module.

    Builds a multi-scale context (n_scale branches, each avg-pooled by
    2**(i+2), passed through a 1x1 conv, and upsampled back to the input's
    spatial size), concatenates it with the input, runs a stack of
    n_residual ResidualBlocks, gathers the outputs of blocks 0, 3 and 6, and
    fuses them through a small conv head.
    """

    def __init__(
        self,
        in_planes: int,
        mid_planes: int,
        out_planes: int,
        n_scale: int,
        n_residual: int
    ) -> None:
        super().__init__()
        self.multi_scale = nn.ModuleList([
            nn.Sequential(
                nn.Conv2d(in_planes, 1, kernel_size=1, stride=1, padding=0),
                nn.LeakyReLU(0.2, inplace=True)
            )
            for _ in range(n_scale)
        ])
        # NOTE(review): 'in_planes+4' assumes n_scale == 4 (one channel per
        # scale branch); other n_scale values would break the channel count
        # of the concat in forward() — confirm with callers.
        self.conv0 = nn.Sequential(
            nn.Conv2d(in_planes+4, mid_planes, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(0.2, inplace=True)
        )
        self.stacked_rb = nn.ModuleList([
            ResidualBlock(mid_planes)
            for _ in range(n_residual)
        ])
        # Residual-block outputs fed into the fusion head; assumes
        # n_residual > 6 so that index 6 exists.
        self.use_feature_idxs = [0, 3, 6]
        self.last = nn.Sequential(
            nn.Conv2d(mid_planes * len(self.use_feature_idxs), mid_planes,
                      kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(mid_planes, mid_planes // 2, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(mid_planes//2, out_planes, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(0.2, inplace=True)
        )
        self.upsample = partial(F.interpolate, mode='nearest', align_corners=None)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # multi-scale processing
        out_shape = sizeof(x)
        outputs = []
        for i, model in enumerate(self.multi_scale):
            # Branch i pools by 2**(i+2) before its 1x1 conv, then upsamples
            # back to the input's spatial size.
            tmp = F.avg_pool2d(x, kernel_size=2**(i+2))
            tmp = model(tmp)
            tmp = self.upsample(tmp, size=out_shape)
            outputs.append(tmp)
        multi_scale_out = torch.cat(outputs, dim=1)
        # pass through stacked residual blocks
        out = torch.cat([multi_scale_out, x], dim=1)
        out = self.conv0(out)
        features = []
        for i, model in enumerate(self.stacked_rb):
            out = model(out)
            if i in self.use_feature_idxs:
                features.append(out)
        out = torch.cat(features, dim=1)
        out = self.last(out)
        return out
@BACKBONES.register_module()
class RLNet(nn.Module):
    """DerainRLNet Network Structure
    Paper: Robust Representation Learning with Feedback for Single Image Deraining
    Official Code: https://github.com/LI-Hao-SJTU/DerainRLNet
    Args:
        in_channels (int): Channel number of inputs.
        out_channels (int): Channel number of outputs.
        mid_channels (list[int]): Channel number of intermediate features. Default: [24, 32, 18]
        theta (list[float]): Values of theta1 and theta2. Default: [0.15, 0.05]
        n_scale (int): Number of scales in refinement module. Default: 4
        n_residual (int): Number of residual blocks in refinement module. Default: 7
    """
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        mid_channels: List[int] = [24, 32, 18],
        theta: List[float] = [0.15, 0.05],
        n_scale: int = 4,
        n_residual: int = 7
    ) -> None:
        super().__init__()
        theta1, theta2 = theta
        self.theta1 = theta1
        self.theta2 = theta2
        mid0, mid1, mid2 = mid_channels
        # 1x1 stem that lifts the input image into the mid0-channel feature space.
        self.conv0 = nn.Sequential(
            nn.Conv2d(in_channels, mid0, kernel_size=1, stride=1, padding=0),
            nn.LeakyReLU(0.2, inplace=True)
        )
        # separate branches
        # Three parallel encoder branches that differ only in kernel size (3/5/7).
        encoder_builder = partial(FFRB, mid0, mid0, mid0)
        self.encoder1 = Encoder([encoder_builder(kernel_size=3) for _ in range(3)])
        self.encoder2 = Encoder([encoder_builder(kernel_size=5) for _ in range(3)])
        self.encoder3 = Encoder([encoder_builder(kernel_size=7) for _ in range(3)])
        # The 2nd/3rd decoder stages take extra channels: the skip features are
        # concatenated with three copies of the compensation maps in forward().
        decoder_builders = [
            partial(FFRB, mid0, mid0, mid0),
            partial(FFRB, mid0*2+3*out_channels, mid0, mid0),
            partial(FFRB, mid0*2+3*out_channels, mid0, mid0)
        ]
        self.decoder1 = Decoder([f(kernel_size=3) for f in decoder_builders])
        self.decoder2 = Decoder([f(kernel_size=5) for f in decoder_builders])
        self.decoder3 = Decoder([f(kernel_size=7) for f in decoder_builders])
        # feature compensators
        # Internal compensators fuse features across the three branches; the
        # "externel" pair is only applied to the ground truth during training
        # (see the `y is not None` branch of forward()).
        self.fc1_internal = FeatureCompensator(3*mid0, mid1, out_channels)
        self.fc2_internal = FeatureCompensator(3*mid0, mid1, out_channels)
        self.fc1_externel = FeatureCompensator(in_channels, mid1, out_channels,
                                               use_uffrb=False, n_ffrb=1, act='sigmoid')
        self.fc2_externel = FeatureCompensator(in_channels, mid1, out_channels,
                                               use_uffrb=False, n_ffrb=1, act='sigmoid')
        # error detectors
        self.ed1 = ErrorDetector(3*(mid0*2+3*out_channels), mid1, out_channels,
                                 use_uffrb=False, n_ffrb=5, act='leakyrelu')
        self.ed2 = ErrorDetector(in_channels+out_channels, mid1, out_channels,
                                 use_uffrb=True, n_ffrb=4, act='sigmoid')
        # post processor
        self.fusion = nn.Sequential(
            nn.Conv2d(6*mid0, mid2, kernel_size=1, stride=1, padding=0),
            nn.LeakyReLU(0.2, inplace=True)
        )
        self.refinement = Refinement(mid2 + 6*out_channels, mid1, out_channels, n_scale, n_residual)
        self.pool = nn.AvgPool2d(kernel_size=2, stride=2)
        self.upsample = partial(F.interpolate, mode='nearest', align_corners=None)
    def forward(self, x: torch.Tensor, y: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor]:
        # Encode the stem output through the three parallel branches; each
        # encoder returns (output, list of intermediate features).
        out = self.conv0(x)
        out1, features1 = self.encoder1(out)
        out2, features2 = self.encoder2(out)
        out3, features3 = self.encoder3(out)
        # feature compensation
        # FA*/FB* are the 2nd and 3rd intermediate features of each branch.
        FA1, FA2, FA3 = features1[1], features2[1], features3[1]
        FB1, FB2, FB3 = features1[2], features2[2], features3[2]
        F1 = self.fc1_internal(torch.cat([FA1, FA2, FA3], dim=1))
        F2 = self.fc2_internal(torch.cat([FB1, FB2, FB3], dim=1))
        # Each branch's skip features are augmented with three repeated copies
        # of the cross-branch compensation maps before decoding.
        refined1 = [features1[0]] + \
            [torch.cat([FA1, F1, F1, F1], dim=1), torch.cat([FB1, F2, F2, F2], dim=1)]
        refined2 = [features2[0]] + \
            [torch.cat([FA2, F1, F1, F1], dim=1), torch.cat([FB2, F2, F2, F2], dim=1)]
        refined3 = [features3[0]] + \
            [torch.cat([FA3, F1, F1, F1], dim=1), torch.cat([FB3, F2, F2, F2], dim=1)]
        out1, dec_feat1 = self.decoder1(out1, refined1)
        out2, dec_feat2 = self.decoder2(out2, refined2)
        out3, dec_feat3 = self.decoder3(out3, refined3)
        # error detection
        FE1, FE2, FE3 = dec_feat1[1], dec_feat2[1], dec_feat3[1]
        phi1 = self.ed1(torch.cat([FE1, FE2, FE3], dim=1))
        phi = self.ed2(torch.cat([self.pool(x), phi1], dim=1))
        err = torch.div(self.theta1, phi) - self.theta1
        phi1_prime = F.relu(phi1-err*(1-2*phi1), inplace=True)
        phi1_prime = self.upsample(phi1_prime, scale_factor=2)
        # post processing
        out = torch.cat([out1, out2, out3], dim=1)
        out = self.fusion(out)
        # inject error information
        out = torch.cat([out, phi1_prime, phi1_prime, phi1_prime,
                         phi1_prime, phi1_prime, phi1_prime], dim=1)
        # refine
        out = self.refinement(out)
        if y is None:
            return out, F1, F2, phi1, phi
        else:
            # Training path: compensate downsampled ground truths for the loss.
            y2 = F.avg_pool2d(y, kernel_size=2, stride=2)
            y4 = F.avg_pool2d(y, kernel_size=4, stride=4)
            k2 = self.fc1_externel(y2)
            k4 = self.fc2_externel(y4)
            # NOTE(review): theta2 appears squared here (theta2 * theta2) —
            # verify against the official implementation.
            y2 = y2 + self.theta2 * self.theta2 * k2 * y2
            y4 = y4 + self.theta2 * self.theta2 * k4 * y4
            return out, F1, F2, phi1, phi, y2, y4, k2, k4
| [
"torch.nn.GroupNorm",
"torch.nn.Sigmoid",
"torch.nn.LeakyReLU",
"mmderain.models.common.sizeof",
"torch.nn.ModuleList",
"torch.nn.Sequential",
"torch.nn.functional.avg_pool2d",
"torch.nn.Conv2d",
"mmderain.models.registry.BACKBONES.register_module",
"functools.partial",
"torch.nn.functional.relu... | [((8377, 8404), 'mmderain.models.registry.BACKBONES.register_module', 'BACKBONES.register_module', ([], {}), '()\n', (8402, 8404), False, 'from mmderain.models.registry import BACKBONES\n'), ((598, 629), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (610, 629), False, 'from torch import nn\n'), ((2115, 2141), 'torch.cat', 'torch.cat', (['[x, out]'], {'dim': '(1)'}), '([x, out], dim=1)\n', (2124, 2141), False, 'import torch\n'), ((2328, 2349), 'torch.nn.ModuleList', 'nn.ModuleList', (['layers'], {}), '(layers)\n', (2341, 2349), False, 'from torch import nn\n'), ((2376, 2413), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (2388, 2413), False, 'from torch import nn\n'), ((2823, 2844), 'torch.nn.ModuleList', 'nn.ModuleList', (['layers'], {}), '(layers)\n', (2836, 2844), False, 'from torch import nn\n'), ((2869, 2927), 'functools.partial', 'partial', (['F.interpolate'], {'mode': '"""nearest"""', 'align_corners': 'None'}), "(F.interpolate, mode='nearest', align_corners=None)\n", (2876, 2927), False, 'from functools import partial\n'), ((3475, 3541), 'functools.partial', 'partial', (['FFRB'], {'mid_planes': 'planes', 'out_planes': 'planes', 'kernel_size': '(3)'}), '(FFRB, mid_planes=planes, out_planes=planes, kernel_size=3)\n', (3482, 3541), False, 'from functools import partial\n'), ((5270, 5292), 'torch.nn.Sequential', 'nn.Sequential', (['*models'], {}), '(*models)\n', (5283, 5292), False, 'from torch import nn\n'), ((7496, 7554), 'functools.partial', 'partial', (['F.interpolate'], {'mode': '"""nearest"""', 'align_corners': 'None'}), "(F.interpolate, mode='nearest', align_corners=None)\n", (7503, 7554), False, 'from functools import partial\n'), ((7665, 7674), 'mmderain.models.common.sizeof', 'sizeof', (['x'], {}), '(x)\n', (7671, 7674), False, 'from mmderain.models.common import sizeof\n'), ((7945, 7970), 'torch.cat', 
'torch.cat', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (7954, 7970), False, 'import torch\n'), ((8033, 8071), 'torch.cat', 'torch.cat', (['[multi_scale_out, x]'], {'dim': '(1)'}), '([multi_scale_out, x], dim=1)\n', (8042, 8071), False, 'import torch\n'), ((8299, 8325), 'torch.cat', 'torch.cat', (['features'], {'dim': '(1)'}), '(features, dim=1)\n', (8308, 8325), False, 'import torch\n'), ((9702, 9733), 'functools.partial', 'partial', (['FFRB', 'mid0', 'mid0', 'mid0'], {}), '(FFRB, mid0, mid0, mid0)\n', (9709, 9733), False, 'from functools import partial\n'), ((11600, 11637), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (11612, 11637), False, 'from torch import nn\n'), ((11662, 11720), 'functools.partial', 'partial', (['F.interpolate'], {'mode': '"""nearest"""', 'align_corners': 'None'}), "(F.interpolate, mode='nearest', align_corners=None)\n", (11669, 11720), False, 'from functools import partial\n'), ((13115, 13164), 'torch.nn.functional.relu', 'F.relu', (['(phi1 - err * (1 - 2 * phi1))'], {'inplace': '(True)'}), '(phi1 - err * (1 - 2 * phi1), inplace=True)\n', (13121, 13164), True, 'import torch.nn.functional as F\n'), ((13261, 13297), 'torch.cat', 'torch.cat', (['[out1, out2, out3]'], {'dim': '(1)'}), '([out1, out2, out3], dim=1)\n', (13270, 13297), False, 'import torch\n'), ((13379, 13478), 'torch.cat', 'torch.cat', (['[out, phi1_prime, phi1_prime, phi1_prime, phi1_prime, phi1_prime, phi1_prime]'], {'dim': '(1)'}), '([out, phi1_prime, phi1_prime, phi1_prime, phi1_prime, phi1_prime,\n phi1_prime], dim=1)\n', (13388, 13478), False, 'import torch\n'), ((446, 507), 'torch.nn.Conv2d', 'nn.Conv2d', (['planes', 'planes'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(planes, planes, kernel_size=3, stride=1, padding=1)\n', (455, 507), False, 'from torch import nn\n'), ((521, 568), 'torch.nn.GroupNorm', 'nn.GroupNorm', ([], {'num_groups': '(8)', 'num_channels': 'planes'}), 
'(num_groups=8, num_channels=planes)\n', (533, 568), False, 'from torch import nn\n'), ((1123, 1193), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', 'inter_planes'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_planes, inter_planes, kernel_size=3, stride=1, padding=1)\n', (1132, 1193), False, 'from torch import nn\n'), ((1207, 1296), 'torch.nn.GroupNorm', 'nn.GroupNorm', ([], {'num_groups': '(inter_planes // planes_per_group)', 'num_channels': 'inter_planes'}), '(num_groups=inter_planes // planes_per_group, num_channels=\n inter_planes)\n', (1219, 1296), False, 'from torch import nn\n'), ((1305, 1336), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (1317, 1336), False, 'from torch import nn\n'), ((1350, 1456), 'torch.nn.Conv2d', 'nn.Conv2d', (['inter_planes', 'mid_planes'], {'kernel_size': 'kernel_size', 'stride': '(1)', 'padding': '((kernel_size - 1) // 2)'}), '(inter_planes, mid_planes, kernel_size=kernel_size, stride=1,\n padding=(kernel_size - 1) // 2)\n', (1359, 1456), False, 'from torch import nn\n'), ((1488, 1573), 'torch.nn.GroupNorm', 'nn.GroupNorm', ([], {'num_groups': '(mid_planes // planes_per_group)', 'num_channels': 'mid_planes'}), '(num_groups=mid_planes // planes_per_group, num_channels=mid_planes\n )\n', (1500, 1573), False, 'from torch import nn\n'), ((1580, 1611), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (1592, 1611), False, 'from torch import nn\n'), ((1671, 1756), 'torch.nn.Conv2d', 'nn.Conv2d', (['(in_planes + mid_planes)', 'out_planes'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_planes + mid_planes, out_planes, kernel_size=3, stride=1,\n padding=1)\n', (1680, 1756), False, 'from torch import nn\n'), ((1764, 1815), 'torch.nn.GroupNorm', 'nn.GroupNorm', ([], {'num_groups': '(4)', 'num_channels': 'out_planes'}), '(num_groups=4, num_channels=out_planes)\n', (1776, 1815), False, 'from torch import 
nn\n'), ((1829, 1860), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (1841, 1860), False, 'from torch import nn\n'), ((5016, 5085), 'torch.nn.Conv2d', 'nn.Conv2d', (['mid_planes', 'out_planes'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(mid_planes, out_planes, kernel_size=3, stride=1, padding=1)\n', (5025, 5085), False, 'from torch import nn\n'), ((6687, 6759), 'torch.nn.Conv2d', 'nn.Conv2d', (['(in_planes + 4)', 'mid_planes'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_planes + 4, mid_planes, kernel_size=3, stride=1, padding=1)\n', (6696, 6759), False, 'from torch import nn\n'), ((6771, 6802), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (6783, 6802), False, 'from torch import nn\n'), ((7166, 7197), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (7178, 7197), False, 'from torch import nn\n'), ((7211, 7285), 'torch.nn.Conv2d', 'nn.Conv2d', (['mid_planes', '(mid_planes // 2)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(mid_planes, mid_planes // 2, kernel_size=3, stride=1, padding=1)\n', (7220, 7285), False, 'from torch import nn\n'), ((7299, 7330), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (7311, 7330), False, 'from torch import nn\n'), ((7344, 7418), 'torch.nn.Conv2d', 'nn.Conv2d', (['(mid_planes // 2)', 'out_planes'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(mid_planes // 2, out_planes, kernel_size=3, stride=1, padding=1)\n', (7353, 7418), False, 'from torch import nn\n'), ((7430, 7461), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (7442, 7461), False, 'from torch import nn\n'), ((7767, 7808), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['x'], {'kernel_size': '(2 ** (i + 2))'}), '(x, kernel_size=2 ** (i + 
2))\n', (7779, 7808), True, 'import torch.nn.functional as F\n'), ((9527, 9591), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'mid0'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(in_channels, mid0, kernel_size=1, stride=1, padding=0)\n', (9536, 9591), False, 'from torch import nn\n'), ((9605, 9636), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (9617, 9636), False, 'from torch import nn\n'), ((10029, 10060), 'functools.partial', 'partial', (['FFRB', 'mid0', 'mid0', 'mid0'], {}), '(FFRB, mid0, mid0, mid0)\n', (10036, 10060), False, 'from functools import partial\n'), ((10074, 10128), 'functools.partial', 'partial', (['FFRB', '(mid0 * 2 + 3 * out_channels)', 'mid0', 'mid0'], {}), '(FFRB, mid0 * 2 + 3 * out_channels, mid0, mid0)\n', (10081, 10128), False, 'from functools import partial\n'), ((10136, 10190), 'functools.partial', 'partial', (['FFRB', '(mid0 * 2 + 3 * out_channels)', 'mid0', 'mid0'], {}), '(FFRB, mid0 * 2 + 3 * out_channels, mid0, mid0)\n', (10143, 10190), False, 'from functools import partial\n'), ((11363, 11424), 'torch.nn.Conv2d', 'nn.Conv2d', (['(6 * mid0)', 'mid2'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(6 * mid0, mid2, kernel_size=1, stride=1, padding=0)\n', (11372, 11424), False, 'from torch import nn\n'), ((11436, 11467), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (11448, 11467), False, 'from torch import nn\n'), ((12177, 12210), 'torch.cat', 'torch.cat', (['[FA1, FA2, FA3]'], {'dim': '(1)'}), '([FA1, FA2, FA3], dim=1)\n', (12186, 12210), False, 'import torch\n'), ((12243, 12276), 'torch.cat', 'torch.cat', (['[FB1, FB2, FB3]'], {'dim': '(1)'}), '([FB1, FB2, FB3], dim=1)\n', (12252, 12276), False, 'import torch\n'), ((12940, 12973), 'torch.cat', 'torch.cat', (['[FE1, FE2, FE3]'], {'dim': '(1)'}), '([FE1, FE2, FE3], dim=1)\n', (12949, 12973), False, 'import torch\n'), ((13052, 13079), 'torch.div', 
'torch.div', (['self.theta1', 'phi'], {}), '(self.theta1, phi)\n', (13061, 13079), False, 'import torch\n'), ((13648, 13688), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['y'], {'kernel_size': '(2)', 'stride': '(2)'}), '(y, kernel_size=2, stride=2)\n', (13660, 13688), True, 'import torch.nn.functional as F\n'), ((13706, 13746), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['y'], {'kernel_size': '(4)', 'stride': '(4)'}), '(y, kernel_size=4, stride=4)\n', (13718, 13746), True, 'import torch.nn.functional as F\n'), ((4396, 4464), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', 'mid_planes'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(in_planes, mid_planes, kernel_size=3, stride=1, padding=1)\n', (4405, 4464), False, 'from torch import nn\n'), ((4478, 4563), 'torch.nn.GroupNorm', 'nn.GroupNorm', ([], {'num_groups': '(mid_planes // planes_per_group)', 'num_channels': 'mid_planes'}), '(num_groups=mid_planes // planes_per_group, num_channels=mid_planes\n )\n', (4490, 4563), False, 'from torch import nn\n'), ((4570, 4601), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (4582, 4601), False, 'from torch import nn\n'), ((5144, 5175), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (5156, 5175), False, 'from torch import nn\n'), ((12330, 12365), 'torch.cat', 'torch.cat', (['[FA1, F1, F1, F1]'], {'dim': '(1)'}), '([FA1, F1, F1, F1], dim=1)\n', (12339, 12365), False, 'import torch\n'), ((12367, 12402), 'torch.cat', 'torch.cat', (['[FB1, F2, F2, F2]'], {'dim': '(1)'}), '([FB1, F2, F2, F2], dim=1)\n', (12376, 12402), False, 'import torch\n'), ((12455, 12490), 'torch.cat', 'torch.cat', (['[FA2, F1, F1, F1]'], {'dim': '(1)'}), '([FA2, F1, F1, F1], dim=1)\n', (12464, 12490), False, 'import torch\n'), ((12492, 12527), 'torch.cat', 'torch.cat', (['[FB2, F2, F2, F2]'], {'dim': '(1)'}), '([FB2, F2, F2, F2], dim=1)\n', (12501, 12527), False, 'import 
torch\n'), ((12580, 12615), 'torch.cat', 'torch.cat', (['[FA3, F1, F1, F1]'], {'dim': '(1)'}), '([FA3, F1, F1, F1], dim=1)\n', (12589, 12615), False, 'import torch\n'), ((12617, 12652), 'torch.cat', 'torch.cat', (['[FB3, F2, F2, F2]'], {'dim': '(1)'}), '([FB3, F2, F2, F2], dim=1)\n', (12626, 12652), False, 'import torch\n'), ((1972, 2003), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (1984, 2003), False, 'from torch import nn\n'), ((5234, 5246), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (5244, 5246), False, 'from torch import nn\n'), ((6469, 6528), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_planes', '(1)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(in_planes, 1, kernel_size=1, stride=1, padding=0)\n', (6478, 6528), False, 'from torch import nn\n'), ((6546, 6577), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (6558, 6577), False, 'from torch import nn\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-02-11 00:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 1.10.3).

    Alters five ``courierbatch`` fields: attaches verbose names, makes
    credit/rate/system/uuid nullable, and adds db indexes on state,
    system, and uuid.
    """
    dependencies = [
        ('issue_order', '0004_auto_20170210_2358'),
    ]
    operations = [
        migrations.AlterField(
            model_name='courierbatch',
            name='credit',
            field=models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True, verbose_name='Credit'),
        ),
        migrations.AlterField(
            model_name='courierbatch',
            name='rate',
            field=models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True, verbose_name='Rate per Package'),
        ),
        migrations.AlterField(
            model_name='courierbatch',
            name='state',
            field=models.IntegerField(db_index=True, default=2, verbose_name='State'),
        ),
        migrations.AlterField(
            model_name='courierbatch',
            name='system',
            # Choice labels are escaped CJK text ("yunda line" / "postal line").
            field=models.CharField(blank=True, choices=[('yunda', '\u97f5\u8fbe\u7ebf'), ('postal', '\u90ae\u653f\u7ebf')], db_index=True, max_length=32, null=True, verbose_name='System Name'),
        ),
        migrations.AlterField(
            model_name='courierbatch',
            name='uuid',
            field=models.CharField(blank=True, db_index=True, max_length=64, null=True, unique=True, verbose_name='UUID'),
        ),
    ]
| [
"django.db.models.DecimalField",
"django.db.models.CharField",
"django.db.models.IntegerField"
] | [((411, 513), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'blank': '(True)', 'decimal_places': '(2)', 'max_digits': '(10)', 'null': '(True)', 'verbose_name': '"""Credit"""'}), "(blank=True, decimal_places=2, max_digits=10, null=True,\n verbose_name='Credit')\n", (430, 513), False, 'from django.db import migrations, models\n'), ((635, 747), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'blank': '(True)', 'decimal_places': '(2)', 'max_digits': '(10)', 'null': '(True)', 'verbose_name': '"""Rate per Package"""'}), "(blank=True, decimal_places=2, max_digits=10, null=True,\n verbose_name='Rate per Package')\n", (654, 747), False, 'from django.db import migrations, models\n'), ((870, 937), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'db_index': '(True)', 'default': '(2)', 'verbose_name': '"""State"""'}), "(db_index=True, default=2, verbose_name='State')\n", (889, 937), False, 'from django.db import migrations, models\n'), ((1065, 1213), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'choices': "[('yunda', '韵达线'), ('postal', '邮政线')]", 'db_index': '(True)', 'max_length': '(32)', 'null': '(True)', 'verbose_name': '"""System Name"""'}), "(blank=True, choices=[('yunda', '韵达线'), ('postal', '邮政线')],\n db_index=True, max_length=32, null=True, verbose_name='System Name')\n", (1081, 1213), False, 'from django.db import migrations, models\n'), ((1365, 1472), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'db_index': '(True)', 'max_length': '(64)', 'null': '(True)', 'unique': '(True)', 'verbose_name': '"""UUID"""'}), "(blank=True, db_index=True, max_length=64, null=True,\n unique=True, verbose_name='UUID')\n", (1381, 1472), False, 'from django.db import migrations, models\n')] |
import dns
import dns.resolver
import dns.rdatatype
def dns_resolve(domain: str) -> list:
    """Resolve *domain* to its IPv4 and IPv6 addresses.

    Queries A and AAAA records through Google Public DNS (bypassing the
    system resolver configuration) and returns a possibly-empty list of
    address strings. Lookup failures for a record type (no answer, domain
    does not exist, no reachable nameservers) are treated as "no
    addresses" rather than raised; timeouts still propagate to the caller.
    """
    addrs = []
    resolver = dns.resolver.Resolver(configure=False)
    # Default to Google DNS
    resolver.nameservers = ['8.8.8.8', '8.8.4.4']
    for record_type in ('A', 'AAAA'):
        try:
            answer = resolver.resolve(domain, record_type)
        except (dns.resolver.NoAnswer,
                dns.resolver.NXDOMAIN,
                dns.resolver.NoNameservers):
            # Best-effort: a missing record type or nonexistent domain
            # simply contributes no addresses.
            continue
        # Iterating the Answer yields only rdatas of the queried type,
        # so no explicit rdtype filtering is needed.
        addrs.extend(item.address for item in answer)
    return addrs
| [
"dns.resolver.Resolver"
] | [((123, 161), 'dns.resolver.Resolver', 'dns.resolver.Resolver', ([], {'configure': '(False)'}), '(configure=False)\n', (144, 161), False, 'import dns\n')] |
""""
File : kombu_messaging.py
Author : ian
Created : 09-28-2016
Last Modified By : ian
Last Modified On : 09-28-2016
***********************************************************************
The MIT License (MIT)
Copyright © 2016 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the “Software”), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
**********************************************************************i*
"""
from kombu.message import Message as Message
from core.messaging import BrightsideMessage, BrightsideMessageHeader, BrightsideMessageBody, BrightsideMessageType
from core.exceptions import MessagingException
from uuid import UUID, uuid4
from typing import Dict
import re
import codecs
# Keys used for Brightside metadata carried in Kombu/AMQP message headers.
message_type_header = "MessageType"
message_id_header = "MessageId"
message_correlation_id_header = "CorrelationId"
message_topic_name_header = "Topic"
message_handled_count_header = "HandledCount"
# NOTE(review): the next two constants both map to the same "x-delay" key
# (the header used by RabbitMQ's delayed-message exchange) — presumably one
# is the requested delay and the other the applied delay; confirm at usage.
message_delay_milliseconds_header = "x-delay"
message_delayed_milliseconds_header = "x-delay"
message_original_message_id_header = "x-original-message-id"
message_delivery_tag_header = "DeliveryTag"
# Matches every backslash escape form a Python string literal may contain.
# Pattern taken from:
# http://stackoverflow.com/questions/4020539/process-escape-sequences-in-a-string-in-python
ESCAPE_SEQUENCE_RE = re.compile(r'''
    ( \\U........      # 8-digit hex escapes
    | \\u....          # 4-digit hex escapes
    | \\x..            # 2-digit hex escapes
    | \\[0-7]{1,3}     # Octal escapes
    | \\N\{[^}]+\}     # Unicode characters by name
    | \\[\\'"abfnrtv]  # Single-character escapes
    )''', re.UNICODE | re.VERBOSE)


def decode_escapes(s):
    """Return *s* with every literal escape sequence replaced by its character."""
    return ESCAPE_SEQUENCE_RE.sub(
        lambda m: codecs.decode(m.group(0), 'unicode-escape'), s)
class ReadError:
    """A human-readable description of a failure while reading a wire message.

    Instances are returned alongside values from the ``_read_*`` helpers so
    callers can distinguish "no error" (``None``) from a failure message.
    """
    def __init__(self, error_message: str) -> None:
        self.error_message = error_message

    def __str__(self) -> str:
        return self.error_message

    def __repr__(self) -> str:
        # Explicit repr so failures are readable in logs and debuggers.
        return "{}({!r})".format(type(self).__name__, self.error_message)
class BrightsideMessageFactory:
    """
    The message factory turn an 'on-the-wire' message into our internal representation. We try to be as
    tolerant as possible (following Postel's Law: https://en.wikipedia.org/wiki/Robustness_principle) Be conservative
    in what you do, be liberal in what you accept
    """
    def __init__(self):
        # Set to True by any nested reader in create_message that fails to
        # parse its field; reset at the start of every create_message call.
        self._has_read_errors = False

    def create_message(self, message: Message) -> BrightsideMessage:
        """Translate a Kombu ``Message`` into a ``BrightsideMessage``.

        Each field is read through a small closure that, on failure, records
        the fact on ``self._has_read_errors`` and substitutes a default
        rather than raising.
        """
        self._has_read_errors = False

        def _get_correlation_id() -> UUID:
            header, err = self._read_header(message_correlation_id_header, message)
            if err is None:
                return UUID(header)
            else:
                self._has_read_errors = True
                # NOTE(review): returns "" (a str), not a UUID, unlike the
                # uuid4() fallback in _get_message_id — confirm an empty
                # correlation id is intended downstream.
                return ""

        def _get_message_id() -> UUID:
            header, err = self._read_header(message_id_header, message)
            if err is None:
                return UUID(header)
            else:
                self._has_read_errors = True
                return uuid4()

        def _get_message_type() -> BrightsideMessageType:
            header, err = self._read_header(message_type_header, message)
            if err is None:
                return BrightsideMessageType(header)
            else:
                self._has_read_errors = True
                return BrightsideMessageType.unacceptable

        def _get_payload() -> str:
            body, err = self._read_payload(message)
            if err is None:
                return body
            else:
                self._has_read_errors = True
                return ""

        def _get_payload_type() -> str:
            payload_type, err = self._read_payload_type(message)
            if err is None:
                return payload_type
            else:
                self._has_read_errors = True
                return ""

        def _get_topic() -> str:
            header, err = self._read_header(message_topic_name_header, message)
            if err is None:
                return header
            else:
                self._has_read_errors = True
                return ""

        message_id = _get_message_id()
        topic = _get_topic()
        # NOTE(review): `not message.errors or self._has_read_errors` still
        # calls _get_message_type when earlier reads failed on an errored
        # message; `and not self._has_read_errors` may have been intended —
        # confirm against the upstream Brighter implementation.
        message_type = _get_message_type() if not message.errors or self._has_read_errors else BrightsideMessageType.unacceptable
        correlation_id = _get_correlation_id()
        payload = _get_payload()
        payload_type = _get_payload_type()
        message_header = BrightsideMessageHeader(identity=message_id, topic=topic, message_type=message_type,
                                                  correlation_id=correlation_id, content_type="json")
        message_body = BrightsideMessageBody(body=payload, body_type=payload_type)
        return BrightsideMessage(message_header, message_body)

    # All of these methods are warned as static, implies they should be helper classes that take state in constructor
    def _read_header(self, header_key: str, message: Message) -> (str, ReadError):
        # Returns (value, None) on success or ("", ReadError) when absent.
        if header_key not in message.headers.keys():
            return "", ReadError("Could not read header with key: {}".format(header_key))
        else:
            return message.headers.get(header_key), None

    def _read_payload(self, message: Message) -> (str, ReadError):
        # Returns the body with escape sequences decoded; the [1:-1] slice
        # presumably strips the surrounding quotes of a JSON-serialized
        # string — confirm against the producer's serialization.
        if not message.errors:
            body_text = decode_escapes(message.body)
            return body_text[1:-1], None
        else:
            errors = ", ".join(message.errors)
            return "", ReadError("Could not parse message. Errors: {}".format(errors))

    def _read_payload_type(self, message: Message) -> (str, ReadError):
        # Returns the transport content type, or ("", ReadError) on errors.
        if not message.errors:
            return message.content_type, None
        else:
            errors = ", ".join(message.errors)
            return "", ReadError("Could not read payload type. Errors: {}".format(errors))
class KombuMessageFactory:
    """Builds the wire-header dictionary for an outgoing BrightsideMessage."""

    def __init__(self, message: BrightsideMessage) -> None:
        self._message = message

    def create_message_header(self) -> Dict:
        """Map the Brightside message header onto the Kombu header keys.

        Raises MessagingException if the message id or type is missing.
        """
        source = self._message.header
        header = {}
        identity = str(source.id)
        if identity is None:
            raise MessagingException("Missing id on message, this is a required field")
        header[message_id_header] = identity
        message_type = source.message_type.value
        if message_type is None:
            raise MessagingException("Missing type on message, this is a required field")
        header[message_type_header] = message_type
        correlation_id = str(source.correlation_id)
        # Correlation id is optional; only added when present.
        if correlation_id is not None:
            header[message_correlation_id_header] = correlation_id
        return header
| [
"uuid.UUID",
"re.compile",
"core.messaging.BrightsideMessageBody",
"uuid.uuid4",
"core.messaging.BrightsideMessageHeader",
"core.messaging.BrightsideMessage",
"core.messaging.BrightsideMessageType",
"core.exceptions.MessagingException"
] | [((2174, 2525), 're.compile', 're.compile', (['"""\n ( \\\\\\\\U........ # 8-digit hex escapes\n | \\\\\\\\u.... # 4-digit hex escapes\n | \\\\\\\\x.. # 2-digit hex escapes\n | \\\\\\\\[0-7]{1,3} # Octal escapes\n | \\\\\\\\N\\\\{[^}]+\\\\} # Unicode characters by name\n | \\\\\\\\[\\\\\\\\\'"abfnrtv] # Single-character escapes\n )"""', '(re.UNICODE | re.VERBOSE)'], {}), '(\n """\n ( \\\\\\\\U........ # 8-digit hex escapes\n | \\\\\\\\u.... # 4-digit hex escapes\n | \\\\\\\\x.. # 2-digit hex escapes\n | \\\\\\\\[0-7]{1,3} # Octal escapes\n | \\\\\\\\N\\\\{[^}]+\\\\} # Unicode characters by name\n | \\\\\\\\[\\\\\\\\\'"abfnrtv] # Single-character escapes\n )"""\n , re.UNICODE | re.VERBOSE)\n', (2184, 2525), False, 'import re\n'), ((5330, 5471), 'core.messaging.BrightsideMessageHeader', 'BrightsideMessageHeader', ([], {'identity': 'message_id', 'topic': 'topic', 'message_type': 'message_type', 'correlation_id': 'correlation_id', 'content_type': '"""json"""'}), "(identity=message_id, topic=topic, message_type=\n message_type, correlation_id=correlation_id, content_type='json')\n", (5353, 5471), False, 'from core.messaging import BrightsideMessage, BrightsideMessageHeader, BrightsideMessageBody, BrightsideMessageType\n'), ((5540, 5599), 'core.messaging.BrightsideMessageBody', 'BrightsideMessageBody', ([], {'body': 'payload', 'body_type': 'payload_type'}), '(body=payload, body_type=payload_type)\n', (5561, 5599), False, 'from core.messaging import BrightsideMessage, BrightsideMessageHeader, BrightsideMessageBody, BrightsideMessageType\n'), ((5616, 5663), 'core.messaging.BrightsideMessage', 'BrightsideMessage', (['message_header', 'message_body'], {}), '(message_header, message_body)\n', (5633, 5663), False, 'from core.messaging import BrightsideMessage, BrightsideMessageHeader, BrightsideMessageBody, BrightsideMessageType\n'), ((3522, 3534), 'uuid.UUID', 'UUID', (['header'], {}), '(header)\n', (3526, 3534), False, 'from uuid import UUID, uuid4\n'), ((3787, 3799), 
'uuid.UUID', 'UUID', (['header'], {}), '(header)\n', (3791, 3799), False, 'from uuid import UUID, uuid4\n'), ((3886, 3893), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (3891, 3893), False, 'from uuid import UUID, uuid4\n'), ((4078, 4107), 'core.messaging.BrightsideMessageType', 'BrightsideMessageType', (['header'], {}), '(header)\n', (4099, 4107), False, 'from core.messaging import BrightsideMessage, BrightsideMessageHeader, BrightsideMessageBody, BrightsideMessageType\n'), ((7267, 7336), 'core.exceptions.MessagingException', 'MessagingException', (['"""Missing id on message, this is a required field"""'], {}), "('Missing id on message, this is a required field')\n", (7285, 7336), False, 'from core.exceptions import MessagingException\n'), ((7596, 7667), 'core.exceptions.MessagingException', 'MessagingException', (['"""Missing type on message, this is a required field"""'], {}), "('Missing type on message, this is a required field')\n", (7614, 7667), False, 'from core.exceptions import MessagingException\n')] |
import numpy as np
def pline(x1, y1, x2, y2, x, y):
    """Squared distance from point (x, y) to the infinite line through (x1, y1)-(x2, y2)."""
    vx, vy = x2 - x1, y2 - y1
    norm_sq = vx * vx + vy * vy
    # Projection parameter of the point onto the line; the epsilon guards
    # against a degenerate (zero-length) direction vector.
    t = ((x - x1) * vx + (y - y1) * vy) / max(1e-9, float(norm_sq))
    ex = x1 + t * vx - x
    ey = y1 + t * vy - y
    return ex * ex + ey * ey
def psegment(x1, y1, x2, y2, x, y):
    """Squared distance from point (x, y) to the *segment* (x1, y1)-(x2, y2).

    Unlike ``pline``, the projection parameter is clamped to [0, 1] so the
    distance is measured to the nearest point on the segment itself.
    """
    px = x2 - x1
    py = y2 - y1
    dd = px * px + py * py
    # Epsilon guard (matching pline) avoids ZeroDivisionError when the
    # segment degenerates to a single point.
    u = max(min(((x - x1) * px + (y - y1) * py) / max(1e-9, float(dd)), 1), 0)
    dx = x1 + u * px - x
    dy = y1 + u * py - y
    return dx * dx + dy * dy
def plambda(x1, y1, x2, y2, x, y):
    """Projection parameter of point (x, y) onto the line (x1, y1)-(x2, y2).

    0 maps to the first endpoint, 1 to the second; values outside [0, 1]
    fall beyond the endpoints.
    """
    dir_x, dir_y = x2 - x1, y2 - y1
    length_sq = dir_x * dir_x + dir_y * dir_y
    dot = (x - x1) * dir_x + (y - y1) * dir_y
    # Epsilon denominator guards a zero-length direction vector.
    return dot / max(1e-9, float(length_sq))
def postprocess(lines, scores, threshold=0.01, tol=1e9, do_clip=False):
    """Filter near-duplicate line segments, keeping earlier ones.

    Each candidate segment ``(p, q)`` is compared against every
    already-selected segment ``(a, b)``; lines appearing earlier in the
    input win overlaps — presumably the caller passes them sorted by
    descending score (TODO confirm against caller).

    Args:
        lines: iterable of segments, each a pair of endpoints ``(p, q)``
            supporting elementwise arithmetic (e.g. numpy arrays).
        scores: per-segment scores, iterated in lockstep with ``lines``.
        threshold: distance tolerance (compared against squared distances
            via ``threshold ** 2``) deciding whether two segments are
            near-collinear.
        tol: slack widening the projection interval of a selected segment
            before span comparison.
        do_clip: currently unused — the "case 1" branch skips regardless
            of this flag (NOTE(review): the inline comment suggests
            clipping was intended when True; verify intent).

    Returns:
        Tuple ``(nlines, nscores)`` of numpy arrays holding the surviving
        segments and their scores.
    """
    nlines, nscores = [], []
    for (p, q), score in zip(lines, scores):
        # (start, end): surviving sub-interval of the candidate, in its own
        # projection parameter (0 at p, 1 at q).
        start, end = 0, 1
        for a, b in nlines:  # nlines: previously selected (kept) lines.
            # Not near-collinear with this selected line: no conflict.
            if (
                min(
                    max(pline(*p, *q, *a), pline(*p, *q, *b)),
                    max(pline(*a, *b, *p), pline(*a, *b, *q)),
                )
                > threshold ** 2
            ):
                continue
            # Project the selected endpoints onto the candidate's axis and
            # order them, then widen by tol on both sides.
            lambda_a = plambda(*p, *q, *a)
            lambda_b = plambda(*p, *q, *b)
            if lambda_a > lambda_b:
                lambda_a, lambda_b = lambda_b, lambda_a
            lambda_a -= tol
            lambda_b += tol
            # case 1: skip (if not do_clip)
            if start < lambda_a and lambda_b < end:
                continue
            # not intersect
            if lambda_b < start or lambda_a > end:
                continue
            # cover
            if lambda_a <= start and end <= lambda_b:
                start = 10  # sentinel: forces start >= end, dropping the candidate
                break
            # case 2 & 3:
            if lambda_a <= start and start <= lambda_b:
                start = lambda_b
            if lambda_a <= end and end <= lambda_b:
                end = lambda_a
            if start >= end:
                break
        if start >= end:
            continue
        # Materialize the surviving sub-segment of the candidate.
        nlines.append(np.array([p + (q - p) * start, p + (q - p) * end]))
        nscores.append(score)
    return np.array(nlines), np.array(nscores)
| [
"numpy.array"
] | [((2149, 2165), 'numpy.array', 'np.array', (['nlines'], {}), '(nlines)\n', (2157, 2165), True, 'import numpy as np\n'), ((2167, 2184), 'numpy.array', 'np.array', (['nscores'], {}), '(nscores)\n', (2175, 2184), True, 'import numpy as np\n'), ((2056, 2106), 'numpy.array', 'np.array', (['[p + (q - p) * start, p + (q - p) * end]'], {}), '([p + (q - p) * start, p + (q - p) * end])\n', (2064, 2106), True, 'import numpy as np\n')] |
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import platform
from _thread import interrupt_main
from contextlib import contextmanager
from glob import glob
from os import path
from threading import Timer
from typing import Optional
import torch
from monai.utils.module import get_torch_version_tuple, optional_import
dir_path = path.dirname(path.realpath(__file__))
@contextmanager
def timeout(time, message):
    """Context manager raising ``TimeoutError(message)`` if the managed
    block runs longer than ``time`` seconds.

    A daemon ``Timer`` calls ``interrupt_main`` on expiry, which raises
    ``KeyboardInterrupt`` in the main thread; that interrupt is translated
    into ``TimeoutError``. A genuine user interrupt (arriving while the
    timer is still alive, i.e. before it fired) is re-raised unchanged.

    Args:
        time: timeout in seconds.
        message: text of the raised ``TimeoutError``.
    """
    timer = None
    try:
        timer = Timer(time, interrupt_main)
        timer.daemon = True  # don't block interpreter shutdown
        yield timer.start()  # Timer.start() returns None; yield just enters the block
    except KeyboardInterrupt as e:
        if timer is not None and timer.is_alive():
            # Timer has not fired yet, so this is a real user interrupt.
            raise e
        raise TimeoutError(message) from e
    finally:
        if timer is not None:
            # Removed a pointless `try: ... finally: pass` wrapper; cancel()
            # is safe to call whether or not the timer already fired.
            timer.cancel()
def load_module(
    module_name: str, defines: Optional[dict] = None, verbose_build: bool = False, build_timeout: int = 300
):
    """
    Build (or fetch from the JIT cache) and load a c++ extension module.

    Args:
        module_name: Name of the module to load.
            Must match the name of the relevant source directory in the `_extensions` directory.
        defines: Dictionary containing names and values of compilation defines.
        verbose_build: Set to true to enable build logging.
        build_timeout: Time in seconds before the build will throw an exception to prevent hanging.

    Raises:
        ValueError: if no source directory named ``module_name`` exists.
        TimeoutError: if the build exceeds ``build_timeout`` seconds.
    """
    # The named module must have a source directory under `_extensions`.
    module_dir = path.join(dir_path, module_name)
    if not path.exists(module_dir):
        raise ValueError(f"No extension module named {module_name}")

    # OS / interpreter / torch tag so artifacts from different environments
    # don't collide in the build cache.
    torch_tag = "".join(f"{v}" for v in get_torch_version_tuple()[:2])
    platform_str = f"_{platform.system()}_{platform.python_version()}_" + torch_tag

    # Fold define values into the module name so each configuration gets
    # its own cached build artifact.
    if defines is not None:
        module_name = "_".join([module_name] + [f"{v}" for v in defines.values()])

    # Gather C++ sources; add CUDA sources and a CUDA-version tag when available.
    source = glob(path.join(module_dir, "**", "*.cpp"), recursive=True)
    if torch.cuda.is_available():
        source += glob(path.join(module_dir, "**", "*.cu"), recursive=True)
        platform_str += f"_{torch.version.cuda}"

    define_args = [] if not defines else [f"-D {key}={defines[key]}" for key in defines]

    # Ninja may be blocked by something out of our control; fail with a clear
    # error rather than hanging forever.
    with timeout(build_timeout, "Build appears to be blocked. Is there a stopped process building the same extension?"):
        load, _ = optional_import("torch.utils.cpp_extension", name="load")  # main trigger some JIT config in pytorch
        # This will either run the build or return the existing .so object.
        return load(
            name=module_name + platform_str.replace(".", "_"),
            sources=source,
            extra_cflags=define_args,
            extra_cuda_cflags=define_args,
            verbose=verbose_build,
        )
| [
"os.path.exists",
"monai.utils.module.optional_import",
"threading.Timer",
"os.path.join",
"os.path.realpath",
"platform.system",
"torch.cuda.is_available",
"monai.utils.module.get_torch_version_tuple",
"platform.python_version"
] | [((881, 904), 'os.path.realpath', 'path.realpath', (['__file__'], {}), '(__file__)\n', (894, 904), False, 'from os import path\n'), ((2056, 2088), 'os.path.join', 'path.join', (['dir_path', 'module_name'], {}), '(dir_path, module_name)\n', (2065, 2088), False, 'from os import path\n'), ((2606, 2631), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2629, 2631), False, 'import torch\n'), ((994, 1021), 'threading.Timer', 'Timer', (['time', 'interrupt_main'], {}), '(time, interrupt_main)\n', (999, 1021), False, 'from threading import Timer\n'), ((2100, 2123), 'os.path.exists', 'path.exists', (['module_dir'], {}), '(module_dir)\n', (2111, 2123), False, 'from os import path\n'), ((2545, 2581), 'os.path.join', 'path.join', (['module_dir', '"""**"""', '"""*.cpp"""'], {}), "(module_dir, '**', '*.cpp')\n", (2554, 2581), False, 'from os import path\n'), ((3157, 3214), 'monai.utils.module.optional_import', 'optional_import', (['"""torch.utils.cpp_extension"""'], {'name': '"""load"""'}), "('torch.utils.cpp_extension', name='load')\n", (3172, 3214), False, 'from monai.utils.module import get_torch_version_tuple, optional_import\n'), ((2218, 2235), 'platform.system', 'platform.system', ([], {}), '()\n', (2233, 2235), False, 'import platform\n'), ((2238, 2263), 'platform.python_version', 'platform.python_version', ([], {}), '()\n', (2261, 2263), False, 'import platform\n'), ((2656, 2691), 'os.path.join', 'path.join', (['module_dir', '"""**"""', '"""*.cu"""'], {}), "(module_dir, '**', '*.cu')\n", (2665, 2691), False, 'from os import path\n'), ((2311, 2336), 'monai.utils.module.get_torch_version_tuple', 'get_torch_version_tuple', ([], {}), '()\n', (2334, 2336), False, 'from monai.utils.module import get_torch_version_tuple, optional_import\n')] |