| id | content |
|---|---|
11581134
|
class WosToolsError(Exception):
"""
Any exception known by wostools.
"""
class InvalidReference(WosToolsError, ValueError):
"""
Raised when we try to create an article out of an invalid reference.
"""
def __init__(self, reference: str):
super().__init__(f"{reference} does not look like an ISI citation")
class InvalidScopusFile(WosToolsError, ValueError):
    """
    Raised when the input does not look like a valid Scopus bib file.
    """
    def __init__(self):
        super().__init__("The file does not look like a valid bib file")
class InvalidIsiLine(WosToolsError, ValueError):
"""
Raised when we encounter an invalid line when processing an ISI file.
"""
def __init__(self, line: str):
super().__init__(f"'{line}' is not a valid ISI file line")
class MissingLabelFields(WosToolsError, ValueError):
"""
Raised when we don't have any of the required fields for an ISI reference.
"""
def __init__(self, article, message: str = None):
self.article = article
super().__init__(message or "Missing required fields for label")
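

# Hypothetical usage sketch (not part of the original module): every exception
# above derives from WosToolsError, so callers can catch the shared base class
# to handle any wostools parsing failure uniformly.
if __name__ == "__main__":
    try:
        raise InvalidIsiLine("this is not an ISI line")
    except WosToolsError as error:
        print(error)  # 'this is not an ISI line' is not a valid ISI file line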
|
11581141
|
from builtins import range
import logging
import numpy as np
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as mcolors
logger = logging.getLogger(__name__)
def _cmap_discretize(cmap, N):
"""Return a discrete colormap from the continuous colormap cmap.
cmap: colormap instance, eg. cm.jet.
N: number of colors.
Example
x = resize(arange(100), (5,100))
djet = cmap_discretize(cm.jet, 5)
imshow(x, cmap=djet)
"""
    if isinstance(cmap, str):
cmap = plt.get_cmap(cmap)
colors_i = np.concatenate((np.linspace(0, 1., N), (0.,0.,0.,0.)))
colors_rgba = cmap(colors_i)
indices = np.linspace(0, 1., N+1)
cdict = {}
for ki, key in enumerate(('red','green','blue')):
cdict[key] = [(indices[i], colors_rgba[i-1,ki], colors_rgba[i,ki])
for i in range(N+1)]
# Return colormap object.
return mcolors.LinearSegmentedColormap(cmap.name + "_%d"%N, cdict, 1024)
def _colorbar_index(ncolors, cmap):
cmap = _cmap_discretize(cmap, ncolors)
mappable = cm.ScalarMappable(cmap=cmap)
mappable.set_array([])
mappable.set_clim(-0.5, ncolors+0.5)
colorbar = plt.colorbar(mappable)
colorbar.set_ticks(np.linspace(0, ncolors, ncolors))
colorbar.set_ticklabels(range(ncolors))
def plot_latent_2d(latent_vars, target=None, fig_dirpath=None):
"""
Plot in 2D samples from the latent space.
Args:
latent_vars: ndarray, the latent samples with shape (N, 2)
target: ndarray, the numeric labels used for coloring of the latent samples, shape is (N, 1)
        fig_dirpath: str, optional path to the folder where the figure will be saved instead of shown
Returns:
"""
logger.info("Plotting 2D latent space.")
plt.figure(figsize=(6, 6))
cmap = plt.get_cmap('viridis')
if target is not None:
plt.scatter(latent_vars[:, 0], latent_vars[:, 1], c=target, s=1, cmap=cmap)
else:
raise NotImplementedError
# n_distinct = np.unique(target).size
# _colorbar_index(ncolors=n_distinct, cmap=cmap)
# plt.colorbar()
if fig_dirpath is not None:
if not os.path.exists(fig_dirpath):
os.makedirs(fig_dirpath)
plt.savefig(os.path.join(fig_dirpath, 'latent_samples.png'))
else:
plt.show()
def plot_sampled_data(data, fig_dirpath=None):
"""
Plot the generated samples in a large square plot of the concatenated generated images.
Args:
data: ndarray, the generated samples with shape (N, data_dim)
        fig_dirpath: str, optional path to the folder where the figure will be saved instead of shown
Returns:
"""
logger.info("Plotting sampled data.")
data_dim = data.shape[1]
sample_side_size = int(np.sqrt(data_dim))
data = data.reshape(-1, sample_side_size, sample_side_size)
data_size = data.shape[0]
samples_per_fig_side = int(np.sqrt(data_size))
data = data[:samples_per_fig_side**2].reshape(samples_per_fig_side, samples_per_fig_side,
sample_side_size, sample_side_size)
data = np.concatenate(np.concatenate(data, axis=1), axis=1)
plt.figure(figsize=(10, 10))
plt.imshow(data, cmap='Greys_r')
if fig_dirpath is not None:
if not os.path.exists(fig_dirpath):
os.makedirs(fig_dirpath)
plt.savefig(os.path.join(fig_dirpath, 'generated_samples.png'))
else:
plt.show()
def plot_reconstructed_data(data, reconstructed_data, fig_dirpath=None):
"""
Plot pairwise data and reconstructed data images in a large plot.
Args:
data: ndarray, original data samples of shape (N, data_dim)
reconstructed_data: ndarray, reconstructed data samples of the same shape as data
        fig_dirpath: str, optional path to the folder where the figure will be saved instead of shown
Returns:
"""
logger.info("Plotting reconstructed data.")
data_dim = data.shape[1]
sample_side_size = int(np.sqrt(data_dim))
reconstructed_data = reconstructed_data.reshape(-1, sample_side_size, sample_side_size)
data = data.reshape(-1, sample_side_size, sample_side_size)
data_size = data.shape[0]
combined_data_reconstructions = np.concatenate([data, reconstructed_data], axis=-1)
# add a separating blank line between the reshaped column of image pairs for better visualisation
combined_data_reconstructions = np.concatenate([combined_data_reconstructions,
np.ones((data_size, sample_side_size, 1))], axis=-1)
samples_per_fig_side = int(np.sqrt(data_size))
combined_data_reconstructions = combined_data_reconstructions[:samples_per_fig_side ** 2].reshape(
samples_per_fig_side, samples_per_fig_side, sample_side_size, sample_side_size * 2 + 1)
combined_data_reconstructions = np.concatenate(np.concatenate(combined_data_reconstructions, axis=1), axis=1)
plt.figure(figsize=(10, 10))
plt.imshow(combined_data_reconstructions, cmap='Greys_r')
if fig_dirpath is not None:
if not os.path.exists(fig_dirpath):
os.makedirs(fig_dirpath)
plt.savefig(os.path.join(fig_dirpath, 'reconstructed_samples.png'))
else:
plt.show()
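

# Hypothetical usage sketch (not part of the original module): plot a grid of
# 16 random 28x28 "images" flattened to shape (N, data_dim), which is what
# plot_sampled_data expects. The output directory name is an arbitrary choice.
if __name__ == "__main__":
    fake_samples = np.random.rand(16, 28 * 28)
    plot_sampled_data(fake_samples, fig_dirpath="example_figures")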
|
11581164
|
import decimal
import hashlib
import json
import subprocess
import zipfile
from io import BytesIO
from django.conf import settings
class Alignment:
LEFT = 'PKTextAlignmentLeft'
CENTER = 'PKTextAlignmentCenter'
RIGHT = 'PKTextAlignmentRight'
JUSTIFIED = 'PKTextAlignmentJustified'
NATURAL = 'PKTextAlignmentNatural'
class BarcodeFormat:
PDF417 = 'PKBarcodeFormatPDF417'
QR = 'PKBarcodeFormatQR'
AZTEC = 'PKBarcodeFormatAztec'
CODE128 = 'PKBarcodeFormatCode128'
class TransitType:
AIR = 'PKTransitTypeAir'
TRAIN = 'PKTransitTypeTrain'
BUS = 'PKTransitTypeBus'
BOAT = 'PKTransitTypeBoat'
GENERIC = 'PKTransitTypeGeneric'
class DateStyle:
NONE = 'PKDateStyleNone'
SHORT = 'PKDateStyleShort'
MEDIUM = 'PKDateStyleMedium'
LONG = 'PKDateStyleLong'
FULL = 'PKDateStyleFull'
class NumberStyle:
DECIMAL = 'PKNumberStyleDecimal'
PERCENT = 'PKNumberStylePercent'
SCIENTIFIC = 'PKNumberStyleScientific'
SPELLOUT = 'PKNumberStyleSpellOut'
class Field:
def __init__(self, key, value, label=''):
        self.key = key  # Required. The key must be unique within the scope of the entire pass
self.value = value # Required. Value of the field. For example, 42
self.label = label # Optional. Label text for the field.
# Optional. Format string for the alert text that is displayed when
# the pass is updated
self.change_message = ''
self.text_alignment = Alignment.LEFT
def json_dict(self):
return self.__dict__
class DateField(Field):
def __init__(self, key, value, label=''):
super().__init__(key, value, label)
self.date_style = DateStyle.SHORT # Style of date to display
self.time_style = DateStyle.SHORT # Style of time to display
        # If true, the label's value is displayed as a relative date
self.is_relative = False
def json_dict(self):
return self.__dict__
class NumberField(Field):
def __init__(self, key, value, label=''):
super().__init__(key, value, label)
        self.number_style = NumberStyle.DECIMAL  # Style of number to display
def json_dict(self):
return self.__dict__
class CurrencyField(Field):
def __init__(self, key, value, label='', currency_code=''):
super().__init__(key, value, label)
self.currency_code = currency_code # ISO 4217 currency code
def json_dict(self):
return self.__dict__
class Barcode:
def __init__(
self,
message,
format_=BarcodeFormat.PDF417,
alt_text=''
):
self.format = format_
# Required. Message or payload to be displayed as a barcode
self.message = message
# Required. Text encoding that is used to convert the message
self.message_encoding = 'iso-8859-1'
self.altText = alt_text # Optional. Text displayed near the barcode
def json_dict(self):
return self.__dict__
class Location:
def __init__(self, latitude, longitude, altitude=0.0):
# Required. Latitude, in degrees, of the location.
try:
self.latitude = float(latitude)
except (ValueError, TypeError):
self.latitude = 0.0
# Required. Longitude, in degrees, of the location.
try:
self.longitude = float(longitude)
except (ValueError, TypeError):
self.longitude = 0.0
# Optional. Altitude, in meters, of the location.
try:
self.altitude = float(altitude)
except (ValueError, TypeError):
self.altitude = 0.0
# Optional. Notification distance
self.distance = None
# Optional. Text displayed on the lock screen when
# the pass is currently near the location
self.relevant_text = ''
def json_dict(self):
return self.__dict__
class IBeacon(object):
def __init__(self, proximity_uuid, major, minor):
# IBeacon data
self.proximity_uuid = proximity_uuid
self.major = major
self.minor = minor
        # Optional. Text displayed on the lock screen when the pass is near the iBeacon
self.relevant_text = ''
def json_dict(self):
return self.__dict__
class PassInformation:
def __init__(self):
self.header_fields = []
self.primary_fields = []
self.secondary_fields = []
self.back_fields = []
self.auxiliary_fields = []
def add_header_field(self, key, value, label):
self.header_fields.append(Field(key, value, label))
def add_primary_field(self, key, value, label):
self.primary_fields.append(Field(key, value, label))
def add_secondary_field(self, key, value, label):
self.secondary_fields.append(Field(key, value, label))
def add_back_field(self, key, value, label):
self.back_fields.append(Field(key, value, label))
def add_auxiliary_field(self, key, value, label):
self.auxiliary_fields.append(Field(key, value, label))
def json_dict(self):
d = {}
if self.header_fields:
d.update({
'header_fields': [f.json_dict() for f in self.header_fields]
})
if self.primary_fields:
d.update({
'primary_fields': [f.json_dict() for f in self.primary_fields]
})
if self.secondary_fields:
d.update({
'secondary_fields': [
f.json_dict() for f in self.secondary_fields
]
})
if self.back_fields:
d.update({
'back_fields': [f.json_dict() for f in self.back_fields]
})
if self.auxiliary_fields:
d.update({
'auxiliary_fields': [
f.json_dict() for f in self.auxiliary_fields
]
})
return d
class BoardingPass(PassInformation):
def __init__(self, transit_type=TransitType.AIR):
super().__init__()
self.transit_type = transit_type
self.json_name = 'boardingPass'
def json_dict(self):
d = super().json_dict()
d.update({'transitType': self.transit_type})
return d
class Coupon(PassInformation):
def __init__(self):
super().__init__()
self.json_name = 'coupon'
class EventTicket(PassInformation):
def __init__(self):
super().__init__()
self.json_name = 'eventTicket'
class Generic(PassInformation):
def __init__(self):
super().__init__()
self.json_name = 'generic'
class StoreCard(PassInformation):
def __init__(self):
super().__init__()
self.json_name = 'storeCard'
class Pass:
def __init__(
self,
pass_information,
pass_type_identifier='',
organization_name='',
team_identifier='',
foreground_color=None,
background_color=None,
label_color=None,
logo_text=None,
web_service_url='',
authentication_token='',
serial_number='',
description='',
format_version=1,
barcode=None,
suppress_strip_shine=False,
locations=None,
ibeacons=None,
relevant_date=None,
associated_store_identifiers=None,
app_launch_url=None,
user_info=None,
expiration_date=None,
voided=None
):
self._files = {} # Holds the files to include in the .pkpass
self._hashes = {} # Holds the SHAs of the files array
# Standard Keys
# Required. Team identifier of the organization that originated and
# signed the pass, as issued by Apple.
self.team_identifier = team_identifier
# Required. Pass type identifier, as issued by Apple. The value must
# correspond with your signing certificate. Used for grouping.
self.pass_type_identifier = pass_type_identifier
# Required. Display name of the organization that originated and
# signed the pass.
self.organization_name = organization_name
# Required. Serial number that uniquely identifies the pass.
self.serial_number = serial_number
# Required. Brief description of the pass, used by the iOS
# accessibility technologies.
self.description = description
# Required. Version of the file format. The value must be 1.
self.format_version = format_version
# Visual Appearance Keys
# Optional. Background color of the pass
self.background_color = background_color
# Optional. Foreground color of the pass
self.foreground_color = foreground_color
self.label_color = label_color # Optional. Color of the label text
self.logo_text = logo_text # Optional. Text displayed next to the logo
self.barcode = barcode # Optional. Information specific to barcodes.
        # Optional. If true, the strip image is displayed without a shine effect
self.suppress_strip_shine = suppress_strip_shine
# Web Service Keys
# Optional. If present, authenticationToken must be supplied
self.web_service_url = web_service_url
# The authentication token to use with the web service
self.authentication_token = authentication_token
# Relevance Keys
# Optional. Locations where the pass is relevant.
# For example, the location of your store.
self.locations = locations
# Optional. IBeacons data
self.ibeacons = ibeacons
# Optional. Date and time when the pass becomes relevant
self.relevant_date = relevant_date
# Optional. A list of iTunes Store item identifiers for
# the associated apps.
self.associated_store_identifiers = associated_store_identifiers
self.app_launch_url = app_launch_url
# Optional. Additional hidden data in json for the passbook
self.user_info = user_info
self.expiration_date = expiration_date
self.voided = voided
self.pass_information = pass_information
# Adds file to the file array
def add_file(self, name, fd):
self._files[name] = fd.read()
# Creates the actual .pkpass file
    def create(
        self,
        zip_file=None
    ):
        # Use the configured wallet pass path only when no explicit target
        # (e.g. a BytesIO) was supplied, instead of always overwriting it.
        if zip_file is None:
            zip_file = settings.WALLET_PASS_PATH.format(self.serial_number)
pass_json = self._create_pass_json()
manifest = self._create_manifest(pass_json)
signature = self._create_signature(
manifest,
settings.WALLET_CERTIFICATE_PATH,
settings.WALLET_KEY_PATH,
settings.WALLET_WWDR_PATH,
settings.WALLET_PASSWORD,
)
if not zip_file:
zip_file = BytesIO()
self._create_zip(
pass_json,
manifest,
signature,
zip_file=zip_file
)
return zip_file
def _create_pass_json(self):
return json.dumps(self, default=pass_handler).encode('utf-8')
# creates the hashes for the files and adds them into a json string.
def _create_manifest(self, pass_json):
# Creates SHA hashes for all files in package
self._hashes['pass.json'] = hashlib.sha1(pass_json).hexdigest()
for filename, filedata in self._files.items():
self._hashes[filename] = hashlib.sha1(filedata).hexdigest()
return json.dumps(self._hashes).encode('utf-8')
# Creates a signature and saves it
@staticmethod
def _create_signature(
manifest,
certificate,
key,
wwdr_certificate,
password
):
openssl_cmd = [
'openssl',
'smime',
'-binary',
'-sign',
'-certfile',
wwdr_certificate,
'-signer',
certificate,
'-inkey',
key,
'-outform',
'DER',
'-passin',
            'pass:{}'.format(password),
]
process = subprocess.Popen(
openssl_cmd,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
)
process.stdin.write(manifest)
der, error = process.communicate()
if process.returncode != 0:
raise Exception(error)
return der
# Creates .pkpass (zip archive)
def _create_zip(self, pass_json, manifest, signature, zip_file=None):
zf = zipfile.ZipFile(zip_file or 'pass.pkpass', 'w')
zf.writestr('signature', signature)
zf.writestr('manifest.json', manifest)
zf.writestr('pass.json', pass_json)
for filename, filedata in self._files.items():
zf.writestr(filename, filedata)
zf.close()
def json_dict(self):
d = {
'description': self.description,
'formatVersion': self.format_version,
'organizationName': self.organization_name,
'passTypeIdentifier': self.pass_type_identifier,
'serialNumber': self.serial_number,
'teamIdentifier': self.team_identifier,
'suppressStripShine': self.suppress_strip_shine,
self.pass_information.json_name: self.pass_information.json_dict()
}
if self.barcode:
d.update({'barcode': self.barcode.json_dict()})
if self.relevant_date:
d.update({'relevantDate': self.relevant_date})
if self.background_color:
d.update({'backgroundColor': self.background_color})
if self.foreground_color:
d.update({'foregroundColor': self.foreground_color})
if self.label_color:
d.update({'labelColor': self.label_color})
if self.logo_text:
d.update({'logoText': self.logo_text})
if self.locations:
d.update({'locations': self.locations})
if self.ibeacons:
d.update({'beacons': self.ibeacons})
if self.user_info:
d.update({'userInfo': self.user_info})
if self.associated_store_identifiers:
d.update({
'associatedStoreIdentifiers': self.associated_store_identifiers
})
if self.app_launch_url:
d.update({'appLaunchURL': self.app_launch_url})
if self.expiration_date:
d.update({'expirationDate': self.expiration_date})
if self.voided:
d.update({'voided': True})
if self.web_service_url:
d.update({'webServiceURL': self.web_service_url,
'authenticationToken': self.authentication_token})
return d
def pass_handler(obj):
if hasattr(obj, 'json_dict'):
return obj.json_dict()
else:
        # For Decimal latitude and longitude etc.
if isinstance(obj, decimal.Decimal):
return str(obj)
else:
return obj
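

# Hypothetical usage sketch (not part of the original module). It assumes the
# Django settings referenced above (WALLET_PASS_PATH, WALLET_CERTIFICATE_PATH,
# WALLET_KEY_PATH, WALLET_WWDR_PATH, WALLET_PASSWORD) are configured, the
# certificate files exist, the `openssl` binary is on PATH, and an icon.png
# is available in the working directory.
def build_example_pass():
    ticket = EventTicket()
    ticket.add_primary_field('event', 'Example Concert', 'EVENT')
    ticket.add_secondary_field('loc', 'Example Hall', 'LOCATION')

    passfile = Pass(
        ticket,
        pass_type_identifier='pass.com.example.eventticket',
        organization_name='Example Org',
        team_identifier='ABCDE12345',
        serial_number='1234567',
        description='Example event ticket',
    )
    passfile.barcode = Barcode(message='ticket-1234567', format_=BarcodeFormat.QR)
    # add_file expects an open binary file object, e.g. the pass icon.
    with open('icon.png', 'rb') as icon:
        passfile.add_file('icon.png', icon)
    # create() signs the manifest and writes the .pkpass to WALLET_PASS_PATH by default.
    return passfile.create()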
|
11581179
|
from model import magic
from flask import Flask, request, Response
import sqlite3
import json
import threading
from constants import CURRENT_YEAR, DATABASE_PATH
import datetime
app = Flask(__name__)
database_path = DATABASE_PATH
@app.route('/')
def index():
    response = Response(json.dumps({}))
response.headers['Access-Control-Allow-Origin'] = '*'
return response
@app.route('/refresh')
def start_new_prediction():
t = threading.Thread(target=magic)
t.daemon = True
t.start()
response = Response(json.dumps("Process started."))
response.headers['Access-Control-Allow-Origin'] = '*'
return response
@app.route('/rankings')
def rankings():
conn = sqlite3.connect(database_path)
cur = conn.cursor()
cur.execute('SELECT * FROM prediction_rankings')
rankings_raw = cur.fetchall()
columns = [x[0] for x in cur.description]
rankings = []
for ranking in rankings_raw:
ranking_on_date = {}
for column, data in zip(columns[1:], ranking[1:]):
ranking_on_date[column] = data
rankings.append(ranking_on_date)
response = Response(json.dumps(rankings))
response.headers['Access-Control-Allow-Origin'] = '*'
return response
@app.route('/summary')
def summary():
conn = sqlite3.connect(database_path)
cur = conn.cursor()
cur.execute('SELECT * FROM summary')
summary = cur.fetchall()[0]
columns = [x[0] for x in cur.description]
summary_dict = {}
for column, data in zip(columns, summary):
summary_dict[column] = data
response = Response(json.dumps(summary_dict))
response.headers['Access-Control-Allow-Origin'] = '*'
return response
@app.route('/predictions')
def predictions():
conn = sqlite3.connect(database_path)
cur = conn.cursor()
query = 'SELECT * FROM prediction_results'
req_params_raw = request.data
if req_params_raw:
req_params = json.loads(req_params_raw)
query_type = 'AND' if 'against' in req_params else 'OR'
teams = ["'" + team + "'" for team in req_params['teams']]
teams = ",".join(teams)
query += ' WHERE HomeTeam IN ({}) {} AwayTeam IN ({})'.format(teams, query_type, teams)
cur.execute(query)
predictions_raw = cur.fetchall()
columns = [x[0] for x in cur.description]
predictions = []
for prediction in predictions_raw:
prediction_match = {}
for column, data in zip(columns[1:], prediction[1:]):
prediction_match[column] = data
predictions.append(prediction_match)
response = Response(json.dumps(predictions))
response.headers['Access-Control-Allow-Origin'] = '*'
return response
@app.route('/previous_results')
def previous_results():
conn = sqlite3.connect(database_path)
cur = conn.cursor()
season_start = datetime.datetime(CURRENT_YEAR, 7, 1).date().strftime('%Y-%m-%d')
query = 'SELECT * FROM previous_results WHERE Date > "{}"'.format(season_start)
req_params_raw = request.data
if req_params_raw:
req_params = json.loads(req_params_raw)
query_type = 'AND' if 'against' in req_params else 'OR'
teams = ["'" + team + "'" for team in req_params['teams']]
teams = ",".join(teams)
query += ' AND (HomeTeam IN ({}) {} AwayTeam IN ({}))'.format(teams, query_type, teams)
cur.execute(query)
previous_results_raw = cur.fetchall()
columns = [x[0] for x in cur.description]
previous_results = []
for result in previous_results_raw:
match_result = {}
for column, data in zip(columns[1:], result[1:]):
match_result[column] = data
previous_results.append(match_result)
response = Response(json.dumps(previous_results))
response.headers['Access-Control-Allow-Origin'] = '*'
return response
if __name__ == '__main__':
app.run()
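
# Hypothetical usage sketch (not part of the original app): the /predictions and
# /previous_results routes read an optional JSON body from request.data, e.g.
#
#   curl http://localhost:5000/predictions \
#        --data '{"teams": ["Arsenal", "Chelsea"], "against": true}'
#
# With "against" present the team filters are combined with AND (head-to-head
# fixtures only); without it, OR returns every fixture involving a listed team.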
|
11581188
|
import logging
from lib.plugins import Inspector
from lib.util import findTyped
import json
"""
Description:
    Executes a command remotely and returns the entire result of the command as the metric
Metrics:
The string of the result
"""
class Exec(Inspector):
def __init__(self, driver, command, json = False, environment = {}, extract = None):
self._driver = driver
self._cmd = command
self._parseJson = json
self._environment = environment
self._extract = extract
def getName(self):
return "Exec: %s" % self._cmd
def getMetrics(self):
# Serialize environment
        envs = ' '.join('{}="{}"'.format(k, v) for k, v in self._environment.items())
# Log; Intentionally don't log formed command, could have secrets
logging.debug("Executing command: %s", self._cmd)
# Execute
        cmd = "{} {}".format(envs, self._cmd)
ret = self._driver.sh(cmd)
if ret['status'] != 0:
raise Exception('Process returned non-zero exit code')
# Parse
if self._parseJson:
parsed = json.loads(ret['stdout'].strip())
if self._extract:
extracted = {}
for k,v in self._extract.items():
extracted[k] = findTyped(parsed, v)
return extracted
return parsed
return ret
def create(driver, args):
return Exec(driver, **args)
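

# Hypothetical usage sketch (not part of the original plugin). The driver is
# whatever lib.plugins supplies; here a stub honouring the expected sh()
# contract (a dict with 'status' and 'stdout') stands in for a real one.
if __name__ == "__main__":
    class _StubDriver:
        def sh(self, cmd):
            return {'status': 0, 'stdout': '{"used_pct": 42}'}

    inspector = create(_StubDriver(), {
        'command': 'df --output=pcent / | tail -1',
        'json': True,
    })
    print(inspector.getName())
    print(inspector.getMetrics())  # {'used_pct': 42}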
|
11581194
|
from apple.util.condition_tools import ConditionOpcode
def make_create_coin_condition(puzzle_hash, amount):
return [ConditionOpcode.CREATE_COIN, puzzle_hash, amount]
def make_assert_aggsig_condition(pubkey):
return [ConditionOpcode.AGG_SIG_UNSAFE, pubkey]
def make_assert_my_coin_id_condition(coin_name):
return [ConditionOpcode.ASSERT_MY_COIN_ID, coin_name]
def make_assert_absolute_height_exceeds_condition(block_index):
return [ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE, block_index]
def make_assert_relative_height_exceeds_condition(block_index):
return [ConditionOpcode.ASSERT_HEIGHT_RELATIVE, block_index]
def make_assert_absolute_seconds_exceeds_condition(time):
return [ConditionOpcode.ASSERT_SECONDS_ABSOLUTE, time]
def make_assert_relative_seconds_exceeds_condition(time):
return [ConditionOpcode.ASSERT_SECONDS_RELATIVE, time]
def make_reserve_fee_condition(fee):
return [ConditionOpcode.RESERVE_FEE, fee]
def make_assert_coin_announcement(announcement_hash):
return [ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT, announcement_hash]
def make_assert_puzzle_announcement(announcement_hash):
return [ConditionOpcode.ASSERT_PUZZLE_ANNOUNCEMENT, announcement_hash]
def make_create_coin_announcement(message):
return [ConditionOpcode.CREATE_COIN_ANNOUNCEMENT, message]
def make_create_puzzle_announcement(message):
return [ConditionOpcode.CREATE_PUZZLE_ANNOUNCEMENT, message]
def make_assert_my_parent_id(parent_id):
return [ConditionOpcode.ASSERT_MY_PARENT_ID, parent_id]
def make_assert_my_puzzlehash(puzzlehash):
return [ConditionOpcode.ASSERT_MY_PUZZLEHASH, puzzlehash]
def make_assert_my_amount(amount):
return [ConditionOpcode.ASSERT_MY_AMOUNT, amount]
|
11581224
|
import tensorflow as tf
import numpy as np
class Model(object):
def __init__(self, args):
self.args = args
self.global_step = tf.train.get_or_create_global_step()
with tf.name_scope('init_variables'):
self.utterances = tf.placeholder(
tf.int64, [args.batch_size, args.max_cont_len, args.max_utte_len], name="utterances")
self.responses = tf.placeholder(
tf.int64, [args.batch_size, args.max_utte_len], name="responses")
self.labels = tf.placeholder(
tf.int64, [args.batch_size], name="labels")
self.dropout = tf.placeholder(tf.float32, name="dropout")
with tf.name_scope('init_layers'):
self.emb = tf.keras.layers.Embedding(
args.dict_size, args.emb_dim)
self.first_gru = tf.nn.rnn_cell.GRUCell(args.first_rnn_hsz)
self.transform_A = tf.keras.layers.Dense(
args.first_rnn_hsz, use_bias=False)
self.cnn = tf.keras.layers.Conv2D(
args.fillters,
args.kernel_size,
activation=tf.nn.relu)
self.max_pool = tf.keras.layers.MaxPool2D(
pool_size=args.kernel_size,
strides=args.kernel_size,
)
self.match_vec = tf.keras.layers.Dense(
args.match_vec_dim, activation=tf.nn.relu)
self.second_gru = tf.nn.rnn_cell.GRUCell(args.second_rnn_hsz)
            self.pred = tf.keras.layers.Dense(2)  # outputs logits; softmax is applied inside the loss
self.init_graph()
def init_graph(self):
args = self.args
with tf.name_scope("utterance_response_matching"):
resps_emb = self.emb(self.responses)
resps_gru, _ = tf.nn.dynamic_rnn(
cell=self.first_gru,
inputs=resps_emb,
dtype=tf.float32,
scope="first_gru")
if self.dropout != 1.:
resps_gru = tf.nn.dropout(resps_gru, self.dropout)
resps_emb_t = tf.transpose(resps_emb, perm=[0, 2, 1])
resps_gru_t = tf.transpose(resps_gru, perm=[0, 2, 1])
uttes = tf.unstack(self.utterances, axis=1)
match_vecs = []
for utte in uttes:
utte_emb = self.emb(utte)
mat_1 = tf.matmul(utte_emb, resps_emb_t)
utte_rnn, _ = tf.nn.dynamic_rnn(
cell=self.first_gru,
inputs=utte_emb,
dtype=tf.float32,
scope="first_gru")
if self.dropout != 1.:
utte_rnn = tf.nn.dropout(utte_rnn, self.dropout)
mat_2 = tf.matmul(self.transform_A(utte_rnn), resps_gru_t)
M = tf.stack([mat_1, mat_2], axis=3)
conv_layer = self.cnn(M)
pool_layer = self.max_pool(conv_layer)
match_vec = self.match_vec(
tf.contrib.layers.flatten(pool_layer))
match_vecs.append(match_vec)
with tf.name_scope("matching_accumulation"):
match_vecs = tf.stack(match_vecs, axis=1)
if self.dropout != 1.:
match_vecs = tf.nn.dropout(match_vecs, self.dropout)
_, hidden = tf.nn.dynamic_rnn(
cell=self.second_gru,
inputs=match_vecs,
dtype=tf.float32,
scope="second_gru")
if self.dropout != 1.:
hidden = tf.nn.dropout(hidden, self.dropout)
with tf.name_scope("matching_prediction"):
props = self.pred(hidden)
with tf.name_scope("loss"):
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=props, labels=self.labels)
loss = tf.reduce_mean(loss)
optimizer = tf.train.AdamOptimizer(args.lr)
self.train_op = optimizer.minimize(
loss, global_step=self.global_step)
with tf.name_scope("predictions"):
predictions = tf.argmax(props, 1)
corrects = tf.equal(predictions, self.labels)
self.corrects = tf.reduce_mean(
tf.cast(corrects, "float"), name="corrects")
with tf.name_scope("summary"):
tf.summary.scalar('loss', loss)
tf.summary.scalar('corrects', self.corrects)
self.merged = tf.summary.merge_all()
def train_step(self, batch, sess, dropout):
feed_dict = {
self.utterances: batch.utterances,
self.responses: batch.responses,
self.labels: batch.labels,
self.dropout: dropout
}
_, step, merged = sess.run(
[self.train_op, self.global_step, self.merged], feed_dict)
return step, merged
def eval_step(self, batch, sess):
feed_dict = {
self.utterances: batch.utterances,
self.responses: batch.responses,
self.labels: batch.labels,
self.dropout: 1.
}
_, corrects = sess.run(
[self.global_step, self.corrects], feed_dict)
return corrects
|
11581226
|
from typing import List


class Solution:
    def minEatingSpeed(self, piles: List[int], H: int) -> int:
        # Check whether eating speed k finishes all piles within H hours.
        def help(k: int) -> bool:
            cnt = 0
            for pile in piles:
                cnt += (pile - 1) // k + 1
            return cnt <= H

        # Binary search for the minimum feasible speed.
        l, h = 1, max(piles)
        while l < h:
            mid = l + (h - l) // 2
            if help(mid):
                h = mid
            else:
                l = mid + 1
        return l
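

# Hypothetical usage sketch (not part of the original snippet): with piles
# [3, 6, 7, 11] and H = 8 hours the minimum feasible eating speed is 4.
if __name__ == "__main__":
    print(Solution().minEatingSpeed([3, 6, 7, 11], 8))  # 4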
|
11581234
|
import nose.tools as nt
import numpy as np
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
from treeano.sandbox.nodes import gradient_normalization as gn
fX = theano.config.floatX
def test_gradient_batch_normalization_op():
epsilon = 1e-8
op = gn.GradientBatchNormalizationOp(subtract_mean=True,
keep_mean=False,
epsilon=epsilon)
X = np.random.randn(3, 4).astype(fX)
W = np.random.randn(2, 3).astype(fX)
x = T.matrix("x")
w = T.matrix("w")
orig_grad = T.grad(w.dot(x).sum(), x).eval({x: X, w: W})
new_grad = T.grad(w.dot(op(x)).sum(), x).eval({x: X, w: W})
mu = orig_grad.mean(axis=0, keepdims=True)
sigma = orig_grad.std(axis=0, keepdims=True) + epsilon
ans = (orig_grad - mu) / sigma
np.testing.assert_allclose(ans,
new_grad,
rtol=1e-5)
np.testing.assert_allclose(np.zeros(4),
new_grad.mean(axis=0),
atol=1e-5)
np.testing.assert_allclose(np.ones(4),
new_grad.std(axis=0),
rtol=1e-5)
|
11581298
|
from ._showscale import ShowscaleValidator
from ._reversescale import ReversescaleValidator
from ._colorsrc import ColorsrcValidator
from ._colorscale import ColorscaleValidator
from ._colorbar import ColorBarValidator
from ._color import ColorValidator
from ._cmin import CminValidator
from ._cmax import CmaxValidator
from ._cauto import CautoValidator
from ._autocolorscale import AutocolorscaleValidator
|
11581335
|
from direct.distributed.DistributedObject import DistributedObject
from GameStatManagerBase import GameStatManagerBase
class DistributedGameStatManager(DistributedObject, GameStatManagerBase):
from direct.directnotify import DirectNotifyGlobal
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedGameStatManager')
def __init__(self, cr):
DistributedObject.__init__(self, cr)
GameStatManagerBase.__init__(self)
self.aggroModelIndex = None
base.gsm = self
return
def generate(self):
self.cr.gameStatManager = self
DistributedObject.generate(self)
def announceGenerate(self):
DistributedObject.announceGenerate(self)
def disable(self):
GameStatManagerBase.disable(self)
DistributedObject.disable(self)
self.ignoreAll()
if self.cr.gameStatManager == self:
self.cr.gameStatManager = None
return
def delete(self):
GameStatManagerBase.delete(self)
DistributedObject.delete(self)
base.gsm = None
return
def setAggroModelIndex(self, modelIndex):
self.aggroModelIndex = modelIndex
messenger.send('SwitchAgrroModel')
def getAggroModelIndex(self):
return self.aggroModelIndex
def loadSoundList(self):
        try:
            soundFile = open('SoundList.txt', 'r')
        except IOError:
            return
lineList = soundFile.readlines()
newDict = {}
for soundLine in lineList:
tokens = soundLine.split()
name = str(tokens[0])
value = int(tokens[1])
newDict[name] = value
loader.addSoundListDict(newDict)
def saveSoundList(self):
soundFile = open('SoundList.txt', 'w')
soundDict = loader.getSoundListDict()
        soundNameList = sorted(soundDict.keys())
for soundName in soundNameList:
outString = '%s %s\n' % (soundName, soundDict[soundName])
soundFile.write(outString)
soundFile.close()
|
11581370
|
from brancher.standard_variables import NormalVariable, DeterministicVariable
import brancher.functions as BF
from numpy import sin
##
a = DeterministicVariable(1.5, 'a')
b = DeterministicVariable(0.3, 'b')
c = DeterministicVariable(0.3, 'c')
d = BF.sin(a + b**2)/(3*c)
##
print(d)
|
11581385
|
import requests
import json
from typing import List, Dict, Any, Union
import logging
log = logging.getLogger()
def bls_api_query_v1(
start: str = "2011",
end: str = "2020",
series_id: List[str] = ["CUUR0000SA0", "SUUR0000SA0"],
) -> Dict[str, Any]:
"""
Read a series from the BLS API, V1. Series IDs are not always readily available.
See https://api.bls.gov/publicAPI/v1/timeseries/data/ for some examples. The
BLS text download pages can sometimes contain information, too.
:param start: Start year, str
:param end: End year, str
:param series_id: Series ID
:return: JSON response
"""
log.info("Querying BLS API (v1) for data on {}".format(series_id))
headers = {"Content-type": "application/json"}
data = json.dumps({"seriesid": series_id, "startyear": start, "endyear": end})
response = requests.post(
"https://api.bls.gov/publicAPI/v1/timeseries/data/", data=data, headers=headers
)
json_response = json.loads(response.text)
return json_response
def parse_bls_api_query_v1(
json_response: Dict[str, Union[str, Any]]
) -> List[Dict[str, Any]]:
"""
Parse the BLS API (V1) response.
:param json_response: JSON response from API query.
{'status': 'REQUEST_SUCCEEDED',
'responseTime': 138,
'message': [],
'Results': {'series': [{'seriesID': 'CUUR0000SA0',
'data': [{'year': '2020',
'period': 'M07',
'periodName': 'July',
'latest': 'true',
'value': '259.101',
'footnotes': [{}]}, ...]}
:return: Parsed response
[{'year': '2020',
'period': 'M07',
'periodName': 'July',
'latest': 'true',
'value': '259.101',
'footnotes': [{}],
'series_name': 'CUUR0000SA0'}, ...}]
"""
log.info("Parsing API response")
    series: List[Dict[str, Any]] = json_response.get("Results").get("series")
    data: List[Dict[str, Any]] = []
    for series_entry in series:
        series_name = series_entry.get("seriesID")
        series_data = [record for record in series_entry.get("data")]
        for record in series_data:
            record.update({"series_name": series_name})
        data.extend(series_data)
    return data
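

# Hypothetical usage sketch (not part of the original module): chain the query
# and the parser. This performs a live request against the public BLS v1
# endpoint, so it needs network access and is subject to the API's
# unauthenticated rate limits.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    raw = bls_api_query_v1(start="2019", end="2020", series_id=["CUUR0000SA0"])
    records = parse_bls_api_query_v1(raw)
    print(records[:2])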
|
11581394
|
import copy
from typing import Union, Callable, List
from ..event import SimaiNote, NoteType
from ..maisxt import (
MaiSxt,
HoldNote as SDTHoldNote,
SlideStartNote as SDTSlideStartNote,
)
from ..simai import (
SimaiChart,
pattern_to_int,
TapNote,
HoldNote,
SlideNote,
TouchHoldNote,
TouchTapNote,
)
def _default_touch_converter(
sxt: MaiSxt, touch_note: Union[TouchTapNote, TouchHoldNote]
) -> None:
if isinstance(touch_note, TouchTapNote) and touch_note.region == "C":
sxt.add_tap(measure=touch_note.measure, position=0)
elif isinstance(touch_note, TouchTapNote):
sxt.add_tap(
measure=touch_note.measure,
position=touch_note.position,
)
elif isinstance(touch_note, TouchHoldNote) and touch_note.region == "C":
sxt.add_hold(
measure=touch_note.measure,
position=0,
duration=touch_note.duration,
)
def simai_to_sdt(
simai: SimaiChart,
touch_converter: Callable[
[MaiSxt, Union[TouchHoldNote, TouchTapNote]], None
] = _default_touch_converter,
convert_touch: bool = False,
) -> MaiSxt:
initial_bpm = simai.get_bpm(1.0)
sdt = MaiSxt(initial_bpm)
convert_notes(sdt, simai.notes, touch_converter, convert_touch)
sdt.notes.sort()
equivalent_notes = []
for note in sdt.notes:
current_measure = note.measure
current_time = simai.measure_to_second(current_measure)
scale = sdt.bpm / simai.get_bpm(current_measure)
note = copy.deepcopy(note)
note.measure = sdt.second_to_measure(current_time)
if isinstance(note, SDTHoldNote):
note.duration = note.duration * scale
elif isinstance(note, SDTSlideStartNote):
note.duration = note.duration * scale
note.delay = note.delay * scale
equivalent_notes.append(note)
sdt.notes = equivalent_notes
return sdt
def convert_notes(
sxt: MaiSxt,
simai_notes: List[SimaiNote],
touch_converter: Callable[[MaiSxt, Union[TouchHoldNote, TouchTapNote]], None],
convert_touch: bool,
) -> None:
skipped_notes = 0
for simai_note in simai_notes:
note_type = simai_note.note_type
if isinstance(simai_note, TapNote):
is_break = note_type in [NoteType.break_tap, NoteType.break_star]
is_star = note_type in [NoteType.star, NoteType.break_star]
sxt.add_tap(
measure=simai_note.measure,
position=simai_note.position,
is_break=is_break,
is_star=is_star,
)
elif isinstance(simai_note, HoldNote):
sxt.add_hold(
measure=simai_note.measure,
position=simai_note.position,
duration=simai_note.duration,
)
elif isinstance(simai_note, SlideNote):
# SDT slide duration include the delay
# unlike in simai
pattern = pattern_to_int(simai_note)
sxt.add_slide(
measure=simai_note.measure,
start_position=simai_note.position,
end_position=simai_note.end_position,
duration=simai_note.duration + simai_note.delay,
pattern=pattern,
delay=simai_note.delay,
)
elif isinstance(simai_note, (TouchTapNote, TouchHoldNote)):
# Touch tap and touch hold
if convert_touch:
touch_converter(sxt, simai_note)
else:
skipped_notes += 1
else:
print(f"Warning: Unknown note type {note_type}")
if skipped_notes > 0:
print(f"Skipped {skipped_notes} touch note(s)")
|
11581417
|
import dataclasses
from koala.typing import *
from koala.utils import to_dict
JsonVar = TypeVar("JsonVar", bound='JsonMessage')
__json_mapper: Dict[str, Any] = dict()
def register_model(cls):
global __json_mapper
__json_mapper[cls.__qualname__] = cls
def find_model(name: str) -> Optional[Type['JsonMessage']]:
if name in __json_mapper:
return __json_mapper[name]
return None
class JsonMeta(type):
    def __new__(cls, class_name, class_parents, class_attr):
        new_cls = type.__new__(cls, class_name, class_parents, class_attr)
        register_model(new_cls)
        return new_cls
@dataclasses.dataclass
class JsonMessage(metaclass=JsonMeta):
@classmethod
def from_dict(cls, kwargs: dict):
return cls(**kwargs)
def to_dict(self) -> dict:
return cast(dict, to_dict(self))
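

# Hypothetical usage sketch (not part of the original module): any dataclass
# that subclasses JsonMessage is auto-registered under its qualified name by
# JsonMeta, so it can be looked up again with find_model().
@dataclasses.dataclass
class Ping(JsonMessage):
    seq: int = 0


assert find_model("Ping") is Ping
ping = Ping.from_dict({"seq": 1})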
|
11581451
|
import pytest
pytestmark = [pytest.mark.django_db]
def test_no_invitation_when_no_room_url_is_defined(shipment, invite_to_clickmeeting, invite_to_zoomus):
shipment = shipment()
shipment()
invite_to_clickmeeting.assert_not_called()
invite_to_zoomus.assert_not_called()
def test_invite_to_clickmeeting(shipment, invite_to_clickmeeting, user):
shipment = shipment()
shipment.course.setattr_and_save('clickmeeting_room_url', 'https://room.url')
shipment()
invite_to_clickmeeting.assert_called_once_with(room_url='https://room.url', email=user.email)
def test_invite_to_zoomus(shipment, invite_to_zoomus, user):
shipment = shipment()
shipment.course.setattr_and_save('zoomus_webinar_id', '100500')
shipment()
invite_to_zoomus.assert_called_once_with(webinar_id='100500', user_id=user.id)
|
11581479
|
from abc import ABC, abstractmethod
from typing import Optional
from collections import defaultdict
from easydict import EasyDict
from utils import import_module
class BaseCommander(ABC):
@abstractmethod
def get_collector_task(self) -> dict:
raise NotImplementedError
class NaiveCommander(BaseCommander):
def __init__(self, cfg: dict) -> None:
self._cfg = cfg
self.collector_task_space = cfg.collector_task_space
self.learner_task_space = cfg.learner_task_space
self.collector_task_count = 0
self.learner_task_count = 0
self._learner_info = defaultdict(list)
self._learner_task_finish_count = 0
self._collector_task_finish_count = 0
    def get_collector_task(self) -> Optional[dict]:
if self.collector_task_count < self.collector_task_space:
self.collector_task_count += 1
collector_cfg = self._cfg.collector_cfg
collector_cfg.collect_setting = {'eps': 0.9}
collector_cfg.eval_flag = False
return {
'task_id': 'collector_task_id{}'.format(self.collector_task_count),
'buffer_id': 'test',
'collector_cfg': collector_cfg,
'policy': self._cfg.policy,
}
else:
return None
    def get_learner_task(self) -> Optional[dict]:
if self.learner_task_count < self.learner_task_space:
self.learner_task_count += 1
learner_cfg = self._cfg.learner_cfg
learner_cfg.max_iterations = self._cfg.max_iterations
return {
'task_id': 'learner_task_id{}'.format(self.learner_task_count),
'policy_id': 'test.pth',
'buffer_id': 'test',
'learner_cfg': learner_cfg,
'replay_buffer_cfg': self._cfg.replay_buffer_cfg,
'policy': self._cfg.policy
}
else:
return None
def finish_collector_task(self, task_id: str, finished_task: dict) -> None:
self._collector_task_finish_count += 1
    def finish_learner_task(self, task_id: str, finished_task: dict) -> str:
self._learner_task_finish_count += 1
return finished_task['buffer_id']
def notify_fail_collector_task(self, task: dict) -> None:
pass
def notify_fail_learner_task(self, task: dict) -> None:
pass
def get_learner_info(self, task_id: str, info: dict) -> None:
self._learner_info[task_id].append(info)
commander_map = {'naive': NaiveCommander}
def register_parallel_commander(name: str, commander: type) -> None:
assert isinstance(name, str)
assert issubclass(commander, BaseCommander)
commander_map[name] = commander
def create_parallel_commander(cfg: dict) -> BaseCommander:
cfg = EasyDict(cfg)
import_module(cfg.import_names)
commander_type = cfg.parallel_commander_type
if commander_type not in commander_map:
raise KeyError("not support parallel commander type: {}".format(commander_type))
else:
return commander_map[commander_type](cfg)
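

# Hypothetical usage sketch (not part of the original module): drive the
# NaiveCommander directly with a minimal EasyDict config; the field values
# here are arbitrary placeholders.
if __name__ == "__main__":
    example_cfg = EasyDict({
        'collector_task_space': 2,
        'learner_task_space': 1,
        'collector_cfg': {},
        'learner_cfg': {},
        'max_iterations': 1000,
        'replay_buffer_cfg': {},
        'policy': {},
    })
    commander = NaiveCommander(example_cfg)
    print(commander.get_collector_task()['task_id'])  # collector_task_id1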
|
11581494
|
from __future__ import print_function, absolute_import
import time
import torch
import torch.nn as nn
from torch.nn import functional as F
import torch.distributed as dist
from .utils.meters import AverageMeter
class Trainer(object):
#############################
# Training module for
# 1. "NetVLAD: CNN architecture for weakly supervised place recognition" (CVPR'16), loss_type='triplet'
# 2. "Stochastic Attraction-Repulsion Embedding for Large Scale Localization" (ICCV'19), loss_type='sare_ind' or 'sare_joint'
#############################
def __init__(self, model, margin=0.3, gpu=None, temp=0.07):
super(Trainer, self).__init__()
self.model = model
self.gpu = gpu
self.margin = margin
self.temp = temp
def train(self, epoch, sub_id, data_loader, optimizer, train_iters,
print_freq=1, vlad=True, loss_type='triplet'):
self.model.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
end = time.time()
data_loader.new_epoch()
for i in range(train_iters):
inputs = self._parse_data(data_loader.next())
data_time.update(time.time() - end)
loss = self._forward(inputs, vlad, loss_type)
losses.update(loss.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update(time.time() - end)
end = time.time()
try:
rank = dist.get_rank()
except:
rank = 0
if ((i + 1) % print_freq == 0 and rank==0):
print('Epoch: [{}-{}][{}/{}]\t'
'Time {:.3f} ({:.3f})\t'
'Data {:.3f} ({:.3f})\t'
'Loss {:.3f} ({:.3f})'
.format(epoch, sub_id, i + 1, train_iters,
batch_time.val, batch_time.avg,
data_time.val, data_time.avg,
losses.val, losses.avg))
def _parse_data(self, inputs):
imgs = [input[0] for input in inputs]
imgs = torch.stack(imgs).permute(1,0,2,3,4)
# imgs_size: batch_size*triplet_size*C*H*W
return imgs.cuda(self.gpu)
def _forward(self, inputs, vlad, loss_type):
B, N, C, H, W = inputs.size()
inputs = inputs.view(-1, C, H, W)
outputs_pool, outputs_vlad = self.model(inputs)
        if (not vlad):
            # adopt max pooling for feature aggregation
            return self._get_loss(outputs_pool, loss_type, B, N)
        else:
            # adopt VLAD layer for feature aggregation
            return self._get_loss(outputs_vlad, loss_type, B, N)
def _get_loss(self, outputs, loss_type, B, N):
outputs = outputs.view(B, N, -1)
L = outputs.size(-1)
output_negatives = outputs[:, 2:]
output_anchors = outputs[:, 0]
output_positives = outputs[:, 1]
if (loss_type=='triplet'):
output_anchors = output_anchors.unsqueeze(1).expand_as(output_negatives).contiguous().view(-1, L)
output_positives = output_positives.unsqueeze(1).expand_as(output_negatives).contiguous().view(-1, L)
output_negatives = output_negatives.contiguous().view(-1, L)
loss = F.triplet_margin_loss(output_anchors, output_positives, output_negatives,
margin=self.margin, p=2, reduction='mean')
elif (loss_type=='sare_joint'):
### original version: euclidean distance
dist_pos = ((output_anchors - output_positives)**2).sum(1)
dist_pos = dist_pos.view(B, 1)
output_anchors = output_anchors.unsqueeze(1).expand_as(output_negatives).contiguous().view(-1, L)
output_negatives = output_negatives.contiguous().view(-1, L)
dist_neg = ((output_anchors - output_negatives)**2).sum(1)
dist_neg = dist_neg.view(B, -1)
dist = - torch.cat((dist_pos, dist_neg), 1)
dist = F.log_softmax(dist, 1)
loss = (- dist[:, 0]).mean()
### new version: dot product
# dist_pos = torch.mm(output_anchors, output_positives.transpose(0,1)) # B*B
# dist_pos = dist_pos.diagonal(0)
# dist_pos = dist_pos.view(B, 1)
#
# output_anchors = output_anchors.unsqueeze(1).expand_as(output_negatives).contiguous().view(-1, L)
# output_negatives = output_negatives.contiguous().view(-1, L)
# dist_neg = torch.mm(output_anchors, output_negatives.transpose(0,1)) # B*B
# dist_neg = dist_neg.diagonal(0)
# dist_neg = dist_neg.view(B, -1)
#
# dist = torch.cat((dist_pos, dist_neg), 1)/self.temp
# dist = F.log_softmax(dist, 1)
# loss = (- dist[:, 0]).mean()
elif (loss_type=='sare_ind'):
### original version: euclidean distance
dist_pos = ((output_anchors - output_positives)**2).sum(1)
dist_pos = dist_pos.view(B, 1)
output_anchors = output_anchors.unsqueeze(1).expand_as(output_negatives).contiguous().view(-1, L)
output_negatives = output_negatives.contiguous().view(-1, L)
dist_neg = ((output_anchors - output_negatives)**2).sum(1)
dist_neg = dist_neg.view(B, -1)
dist_neg = dist_neg.unsqueeze(2)
dist_pos = dist_pos.view(B, 1, 1).expand_as(dist_neg)
dist = - torch.cat((dist_pos, dist_neg), 2).view(-1, 2)
dist = F.log_softmax(dist, 1)
loss = (- dist[:, 0]).mean()
### new version: dot product
# dist_pos = torch.mm(output_anchors, output_positives.transpose(0,1)) # B*B
# dist_pos = dist_pos.diagonal(0)
# dist_pos = dist_pos.view(B, 1)
#
# output_anchors = output_anchors.unsqueeze(1).expand_as(output_negatives).contiguous().view(-1, L)
# output_negatives = output_negatives.contiguous().view(-1, L)
# dist_neg = torch.mm(output_anchors, output_negatives.transpose(0,1)) # B*B
# dist_neg = dist_neg.diagonal(0)
# dist_neg = dist_neg.view(B, -1)
#
# dist_neg = dist_neg.unsqueeze(2)
# dist_pos = dist_pos.view(B, 1, 1).expand_as(dist_neg)
# dist = torch.cat((dist_pos, dist_neg), 2).view(-1, 2)/self.temp
# dist = F.log_softmax(dist, 1)
# loss = (- dist[:, 0]).mean()
else:
            raise ValueError("Unknown loss function: {}".format(loss_type))
return loss
class SFRSTrainer(object):
#############################
# Training module for
# "Self-supervising Fine-grained Region Similarities for Large-scale Image Localization"
#############################
def __init__(self, model, model_cache, margin=0.3,
neg_num=10, gpu=None, temp=[0.07,]):
super(SFRSTrainer, self).__init__()
self.model = model
self.model_cache = model_cache
self.margin = margin
self.gpu = gpu
self.neg_num = neg_num
self.temp = temp
def train(self, gen, epoch, sub_id, data_loader, optimizer, train_iters,
print_freq=1, lambda_soft=0.5, loss_type='sare_ind'):
self.model.train()
self.model_cache.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses_hard = AverageMeter()
losses_soft = AverageMeter()
end = time.time()
data_loader.new_epoch()
for i in range(train_iters):
inputs_easy, inputs_diff = self._parse_data(data_loader.next())
data_time.update(time.time() - end)
loss_hard, loss_soft = self._forward(inputs_easy, inputs_diff, loss_type, gen)
loss = loss_hard + loss_soft*lambda_soft
optimizer.zero_grad()
loss.backward()
optimizer.step()
losses_hard.update(loss_hard.item())
losses_soft.update(loss_soft.item())
batch_time.update(time.time() - end)
end = time.time()
try:
rank = dist.get_rank()
except:
rank = 0
if ((i + 1) % print_freq == 0 and rank==0):
print('Epoch: [{}-{}][{}/{}]\t'
'Time {:.3f} ({:.3f})\t'
'Data {:.3f} ({:.3f})\t'
'Loss_hard {:.3f} ({:.3f})\t'
'Loss_soft {:.3f} ({:.3f})'
.format(epoch, sub_id, i + 1, train_iters,
batch_time.val, batch_time.avg,
data_time.val, data_time.avg,
losses_hard.val, losses_hard.avg,
losses_soft.val, losses_soft.avg))
def _parse_data(self, inputs):
imgs = [input[0] for input in inputs]
imgs = torch.stack(imgs).permute(1,0,2,3,4)
imgs_easy = imgs[:,:self.neg_num+2]
imgs_diff = torch.cat((imgs[:,0].unsqueeze(1).contiguous(), imgs[:,self.neg_num+2:]), dim=1)
return imgs_easy.cuda(self.gpu), imgs_diff.cuda(self.gpu)
def _forward(self, inputs_easy, inputs_diff, loss_type, gen):
B, _, C, H, W = inputs_easy.size()
inputs_easy = inputs_easy.view(-1, C, H, W)
inputs_diff = inputs_diff.view(-1, C, H, W)
sim_easy, vlad_anchors, vlad_pairs = self.model(inputs_easy)
# vlad_anchors: B*1*9*L
# vlad_pairs: B*(1+neg_num)*9*L
with torch.no_grad():
sim_diff_label, _, _ = self.model_cache(inputs_diff) # B*diff_pos_num*9*9
sim_diff, _, _ = self.model(inputs_diff)
if (gen==0):
loss_hard = self._get_loss(vlad_anchors[:,0,0], vlad_pairs[:,0,0], vlad_pairs[:,1:,0], B, loss_type)
else:
loss_hard = 0
for tri_idx in range(B):
loss_hard += self._get_hard_loss(vlad_anchors[tri_idx,0,0].contiguous(), vlad_pairs[tri_idx,0,0].contiguous(), \
vlad_pairs[tri_idx,1:], sim_easy[tri_idx,1:,0].contiguous().detach(), loss_type)
loss_hard /= B
log_sim_diff = F.log_softmax(sim_diff[:,:,0].contiguous().view(B,-1)/self.temp[0], dim=1)
loss_soft = (- F.softmax(sim_diff_label[:,:,0].contiguous().view(B,-1)/self.temp[gen], dim=1).detach() * log_sim_diff).mean(0).sum()
return loss_hard, loss_soft
def _get_hard_loss(self, anchors, positives, negatives, score_neg, loss_type):
# select the most difficult regions for negatives
score_arg = score_neg.view(self.neg_num,-1).argmax(1)
score_arg = score_arg.unsqueeze(-1).unsqueeze(-1).expand_as(negatives).contiguous()
select_negatives = torch.gather(negatives,1,score_arg)
select_negatives = select_negatives[:,0]
return self._get_loss(anchors.unsqueeze(0).contiguous(), \
positives.unsqueeze(0).contiguous(), \
select_negatives.unsqueeze(0).contiguous(), 1, loss_type)
def _get_loss(self, output_anchors, output_positives, output_negatives, B, loss_type):
L = output_anchors.size(-1)
if (loss_type=='triplet'):
output_anchors = output_anchors.unsqueeze(1).expand_as(output_negatives).contiguous().view(-1, L)
output_positives = output_positives.unsqueeze(1).expand_as(output_negatives).contiguous().view(-1, L)
output_negatives = output_negatives.contiguous().view(-1, L)
loss = F.triplet_margin_loss(output_anchors, output_positives, output_negatives,
margin=self.margin, p=2, reduction='mean')
elif (loss_type=='sare_joint'):
dist_pos = torch.mm(output_anchors, output_positives.transpose(0,1)) # B*B
dist_pos = dist_pos.diagonal(0)
dist_pos = dist_pos.view(B, 1)
output_anchors = output_anchors.unsqueeze(1).expand_as(output_negatives).contiguous().view(-1, L)
output_negatives = output_negatives.contiguous().view(-1, L)
dist_neg = torch.mm(output_anchors, output_negatives.transpose(0,1)) # B*B
dist_neg = dist_neg.diagonal(0)
dist_neg = dist_neg.view(B, -1)
# joint optimize
dist = torch.cat((dist_pos, dist_neg), 1)/self.temp[0]
dist = F.log_softmax(dist, 1)
loss = (- dist[:, 0]).mean()
elif (loss_type=='sare_ind'):
dist_pos = torch.mm(output_anchors, output_positives.transpose(0,1)) # B*B
dist_pos = dist_pos.diagonal(0)
dist_pos = dist_pos.view(B, 1)
output_anchors = output_anchors.unsqueeze(1).expand_as(output_negatives).contiguous().view(-1, L)
output_negatives = output_negatives.contiguous().view(-1, L)
dist_neg = torch.mm(output_anchors, output_negatives.transpose(0,1)) # B*B
dist_neg = dist_neg.diagonal(0)
dist_neg = dist_neg.view(B, -1)
            # optimize each anchor-negative pair individually
dist_neg = dist_neg.unsqueeze(2)
dist_pos = dist_pos.view(B, 1, 1).expand_as(dist_neg)
dist = torch.cat((dist_pos, dist_neg), 2).view(-1, 2)/self.temp[0]
dist = F.log_softmax(dist, 1)
loss = (- dist[:, 0]).mean()
else:
            raise ValueError("Unknown loss function: {}".format(loss_type))
return loss
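

# Hypothetical usage sketch (not part of the original module): exercise the
# plain Trainer triplet loss on random features. Shapes follow _get_loss:
# B triplets, each with 1 anchor, 1 positive and N-2 negatives of dimension L.
if __name__ == "__main__":
    B, N, L = 2, 4, 8
    trainer = Trainer(model=None, margin=0.3)
    fake_outputs = torch.randn(B * N, L)
    print(trainer._get_loss(fake_outputs, 'triplet', B, N))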
|
11581529
|
import os.path
from os.path import abspath
import re
import sys
import types
import pickle
from test import support
import test.test_importlib.util
import unittest
import unittest.mock
import unittest.test
class TestableTestProgram(unittest.TestProgram):
module = None
exit = True
defaultTest = failfast = catchbreak = buffer = None
verbosity = 1
progName = ''
testRunner = testLoader = None
def __init__(self):
pass
class TestDiscovery(unittest.TestCase):
# Heavily mocked tests so I can avoid hitting the filesystem
def test_get_name_from_path(self):
loader = unittest.TestLoader()
loader._top_level_dir = '/foo'
name = loader._get_name_from_path('/foo/bar/baz.py')
self.assertEqual(name, 'bar.baz')
if not __debug__:
# asserts are off
return
with self.assertRaises(AssertionError):
loader._get_name_from_path('/bar/baz.py')
def test_find_tests(self):
loader = unittest.TestLoader()
original_listdir = os.listdir
def restore_listdir():
os.listdir = original_listdir
original_isfile = os.path.isfile
def restore_isfile():
os.path.isfile = original_isfile
original_isdir = os.path.isdir
def restore_isdir():
os.path.isdir = original_isdir
path_lists = [['test2.py', 'test1.py', 'not_a_test.py', 'test_dir',
'test.foo', 'test-not-a-module.py', 'another_dir'],
['test4.py', 'test3.py', ]]
os.listdir = lambda path: path_lists.pop(0)
self.addCleanup(restore_listdir)
def isdir(path):
return path.endswith('dir')
os.path.isdir = isdir
self.addCleanup(restore_isdir)
def isfile(path):
# another_dir is not a package and so shouldn't be recursed into
return not path.endswith('dir') and not 'another_dir' in path
os.path.isfile = isfile
self.addCleanup(restore_isfile)
loader._get_module_from_name = lambda path: path + ' module'
orig_load_tests = loader.loadTestsFromModule
def loadTestsFromModule(module, pattern=None):
# This is where load_tests is called.
base = orig_load_tests(module, pattern=pattern)
return base + [module + ' tests']
loader.loadTestsFromModule = loadTestsFromModule
loader.suiteClass = lambda thing: thing
top_level = os.path.abspath('/foo')
loader._top_level_dir = top_level
suite = list(loader._find_tests(top_level, 'test*.py'))
# The test suites found should be sorted alphabetically for reliable
# execution order.
expected = [[name + ' module tests'] for name in
('test1', 'test2', 'test_dir')]
expected.extend([[('test_dir.%s' % name) + ' module tests'] for name in
('test3', 'test4')])
self.assertEqual(suite, expected)
def test_find_tests_socket(self):
# A socket is neither a directory nor a regular file.
# https://bugs.python.org/issue25320
loader = unittest.TestLoader()
original_listdir = os.listdir
def restore_listdir():
os.listdir = original_listdir
original_isfile = os.path.isfile
def restore_isfile():
os.path.isfile = original_isfile
original_isdir = os.path.isdir
def restore_isdir():
os.path.isdir = original_isdir
path_lists = [['socket']]
os.listdir = lambda path: path_lists.pop(0)
self.addCleanup(restore_listdir)
os.path.isdir = lambda path: False
self.addCleanup(restore_isdir)
os.path.isfile = lambda path: False
self.addCleanup(restore_isfile)
loader._get_module_from_name = lambda path: path + ' module'
orig_load_tests = loader.loadTestsFromModule
def loadTestsFromModule(module, pattern=None):
# This is where load_tests is called.
base = orig_load_tests(module, pattern=pattern)
return base + [module + ' tests']
loader.loadTestsFromModule = loadTestsFromModule
loader.suiteClass = lambda thing: thing
top_level = os.path.abspath('/foo')
loader._top_level_dir = top_level
suite = list(loader._find_tests(top_level, 'test*.py'))
self.assertEqual(suite, [])
def test_find_tests_with_package(self):
loader = unittest.TestLoader()
original_listdir = os.listdir
def restore_listdir():
os.listdir = original_listdir
original_isfile = os.path.isfile
def restore_isfile():
os.path.isfile = original_isfile
original_isdir = os.path.isdir
def restore_isdir():
os.path.isdir = original_isdir
directories = ['a_directory', 'test_directory', 'test_directory2']
path_lists = [directories, [], [], []]
os.listdir = lambda path: path_lists.pop(0)
self.addCleanup(restore_listdir)
os.path.isdir = lambda path: True
self.addCleanup(restore_isdir)
os.path.isfile = lambda path: os.path.basename(path) not in directories
self.addCleanup(restore_isfile)
class Module(object):
paths = []
load_tests_args = []
def __init__(self, path):
self.path = path
self.paths.append(path)
if os.path.basename(path) == 'test_directory':
def load_tests(loader, tests, pattern):
self.load_tests_args.append((loader, tests, pattern))
return [self.path + ' load_tests']
self.load_tests = load_tests
def __eq__(self, other):
return self.path == other.path
loader._get_module_from_name = lambda name: Module(name)
orig_load_tests = loader.loadTestsFromModule
def loadTestsFromModule(module, pattern=None):
# This is where load_tests is called.
base = orig_load_tests(module, pattern=pattern)
return base + [module.path + ' module tests']
loader.loadTestsFromModule = loadTestsFromModule
loader.suiteClass = lambda thing: thing
loader._top_level_dir = '/foo'
# this time no '.py' on the pattern so that it can match
# a test package
suite = list(loader._find_tests('/foo', 'test*'))
# We should have loaded tests from the a_directory and test_directory2
# directly and via load_tests for the test_directory package, which
# still calls the baseline module loader.
self.assertEqual(suite,
[['a_directory module tests'],
['test_directory load_tests',
'test_directory module tests'],
['test_directory2 module tests']])
# The test module paths should be sorted for reliable execution order
self.assertEqual(Module.paths,
['a_directory', 'test_directory', 'test_directory2'])
# load_tests should have been called once with loader, tests and pattern
# (but there are no tests in our stub module itself, so that is [] at
# the time of call).
self.assertEqual(Module.load_tests_args,
[(loader, [], 'test*')])
def test_find_tests_default_calls_package_load_tests(self):
loader = unittest.TestLoader()
original_listdir = os.listdir
def restore_listdir():
os.listdir = original_listdir
original_isfile = os.path.isfile
def restore_isfile():
os.path.isfile = original_isfile
original_isdir = os.path.isdir
def restore_isdir():
os.path.isdir = original_isdir
directories = ['a_directory', 'test_directory', 'test_directory2']
path_lists = [directories, [], [], []]
os.listdir = lambda path: path_lists.pop(0)
self.addCleanup(restore_listdir)
os.path.isdir = lambda path: True
self.addCleanup(restore_isdir)
os.path.isfile = lambda path: os.path.basename(path) not in directories
self.addCleanup(restore_isfile)
class Module(object):
paths = []
load_tests_args = []
def __init__(self, path):
self.path = path
self.paths.append(path)
if os.path.basename(path) == 'test_directory':
def load_tests(loader, tests, pattern):
self.load_tests_args.append((loader, tests, pattern))
return [self.path + ' load_tests']
self.load_tests = load_tests
def __eq__(self, other):
return self.path == other.path
loader._get_module_from_name = lambda name: Module(name)
orig_load_tests = loader.loadTestsFromModule
def loadTestsFromModule(module, pattern=None):
# This is where load_tests is called.
base = orig_load_tests(module, pattern=pattern)
return base + [module.path + ' module tests']
loader.loadTestsFromModule = loadTestsFromModule
loader.suiteClass = lambda thing: thing
loader._top_level_dir = '/foo'
        # the default 'test*.py' pattern is used here; the package should
        # still be discovered and its load_tests hook called
suite = list(loader._find_tests('/foo', 'test*.py'))
# We should have loaded tests from the a_directory and test_directory2
# directly and via load_tests for the test_directory package, which
# still calls the baseline module loader.
self.assertEqual(suite,
[['a_directory module tests'],
['test_directory load_tests',
'test_directory module tests'],
['test_directory2 module tests']])
# The test module paths should be sorted for reliable execution order
self.assertEqual(Module.paths,
['a_directory', 'test_directory', 'test_directory2'])
# load_tests should have been called once with loader, tests and pattern
self.assertEqual(Module.load_tests_args,
[(loader, [], 'test*.py')])
def test_find_tests_customize_via_package_pattern(self):
# This test uses the example 'do-nothing' load_tests from
# https://docs.python.org/3/library/unittest.html#load-tests-protocol
# to make sure that that actually works.
# Housekeeping
original_listdir = os.listdir
def restore_listdir():
os.listdir = original_listdir
self.addCleanup(restore_listdir)
original_isfile = os.path.isfile
def restore_isfile():
os.path.isfile = original_isfile
self.addCleanup(restore_isfile)
original_isdir = os.path.isdir
def restore_isdir():
os.path.isdir = original_isdir
self.addCleanup(restore_isdir)
self.addCleanup(sys.path.remove, abspath('/foo'))
# Test data: we expect the following:
# a listdir to find our package, and isfile and isdir checks on it.
# a module-from-name call to turn that into a module
# followed by load_tests.
# then our load_tests will call discover() which is messy
# but that finally chains into find_tests again for the child dir -
# which is why we don't have an infinite loop.
# We expect to see:
# the module load tests for both package and plain module called,
# and the plain module result nested by the package module load_tests
# indicating that it was processed and could have been mutated.
vfs = {abspath('/foo'): ['my_package'],
abspath('/foo/my_package'): ['__init__.py', 'test_module.py']}
def list_dir(path):
return list(vfs[path])
os.listdir = list_dir
os.path.isdir = lambda path: not path.endswith('.py')
os.path.isfile = lambda path: path.endswith('.py')
class Module(object):
paths = []
load_tests_args = []
def __init__(self, path):
self.path = path
self.paths.append(path)
if path.endswith('test_module'):
def load_tests(loader, tests, pattern):
self.load_tests_args.append((loader, tests, pattern))
return [self.path + ' load_tests']
else:
def load_tests(loader, tests, pattern):
self.load_tests_args.append((loader, tests, pattern))
# top level directory cached on loader instance
__file__ = '/foo/my_package/__init__.py'
this_dir = os.path.dirname(__file__)
pkg_tests = loader.discover(
start_dir=this_dir, pattern=pattern)
return [self.path + ' load_tests', tests
] + pkg_tests
self.load_tests = load_tests
def __eq__(self, other):
return self.path == other.path
loader = unittest.TestLoader()
loader._get_module_from_name = lambda name: Module(name)
loader.suiteClass = lambda thing: thing
loader._top_level_dir = abspath('/foo')
        # the default 'test*.py' pattern is used; the package is still
        # discovered and its load_tests hook drives the nested discover() below
suite = list(loader._find_tests(abspath('/foo'), 'test*.py'))
# We should have loaded tests from both my_package and
# my_package.test_module, and also run the load_tests hook in both.
# (normally this would be nested TestSuites.)
self.assertEqual(suite,
[['my_package load_tests', [],
['my_package.test_module load_tests']]])
# Parents before children.
self.assertEqual(Module.paths,
['my_package', 'my_package.test_module'])
# load_tests should have been called twice with loader, tests and pattern
self.assertEqual(Module.load_tests_args,
[(loader, [], 'test*.py'),
(loader, [], 'test*.py')])
def test_discover(self):
loader = unittest.TestLoader()
original_isfile = os.path.isfile
original_isdir = os.path.isdir
def restore_isfile():
os.path.isfile = original_isfile
os.path.isfile = lambda path: False
self.addCleanup(restore_isfile)
orig_sys_path = sys.path[:]
def restore_path():
sys.path[:] = orig_sys_path
self.addCleanup(restore_path)
full_path = os.path.abspath(os.path.normpath('/foo'))
with self.assertRaises(ImportError):
loader.discover('/foo/bar', top_level_dir='/foo')
self.assertEqual(loader._top_level_dir, full_path)
self.assertIn(full_path, sys.path)
os.path.isfile = lambda path: True
os.path.isdir = lambda path: True
def restore_isdir():
os.path.isdir = original_isdir
self.addCleanup(restore_isdir)
_find_tests_args = []
def _find_tests(start_dir, pattern, namespace=None):
_find_tests_args.append((start_dir, pattern))
return ['tests']
loader._find_tests = _find_tests
loader.suiteClass = str
suite = loader.discover('/foo/bar/baz', 'pattern', '/foo/bar')
top_level_dir = os.path.abspath('/foo/bar')
start_dir = os.path.abspath('/foo/bar/baz')
self.assertEqual(suite, "['tests']")
self.assertEqual(loader._top_level_dir, top_level_dir)
self.assertEqual(_find_tests_args, [(start_dir, 'pattern')])
self.assertIn(top_level_dir, sys.path)
def test_discover_start_dir_is_package_calls_package_load_tests(self):
# This test verifies that the package load_tests in a package is indeed
# invoked when the start_dir is a package (and not the top level).
# http://bugs.python.org/issue22457
# Test data: we expect the following:
# an isfile to verify the package, then importing and scanning
# as per _find_tests' normal behaviour.
# We expect to see our load_tests hook called once.
vfs = {abspath('/toplevel'): ['startdir'],
abspath('/toplevel/startdir'): ['__init__.py']}
def list_dir(path):
return list(vfs[path])
self.addCleanup(setattr, os, 'listdir', os.listdir)
os.listdir = list_dir
self.addCleanup(setattr, os.path, 'isfile', os.path.isfile)
os.path.isfile = lambda path: path.endswith('.py')
self.addCleanup(setattr, os.path, 'isdir', os.path.isdir)
os.path.isdir = lambda path: not path.endswith('.py')
self.addCleanup(sys.path.remove, abspath('/toplevel'))
class Module(object):
paths = []
load_tests_args = []
def __init__(self, path):
self.path = path
def load_tests(self, loader, tests, pattern):
return ['load_tests called ' + self.path]
def __eq__(self, other):
return self.path == other.path
loader = unittest.TestLoader()
loader._get_module_from_name = lambda name: Module(name)
loader.suiteClass = lambda thing: thing
suite = loader.discover('/toplevel/startdir', top_level_dir='/toplevel')
# We should have loaded tests from the package __init__.
# (normally this would be nested TestSuites.)
self.assertEqual(suite,
[['load_tests called startdir']])
def setup_import_issue_tests(self, fakefile):
listdir = os.listdir
os.listdir = lambda _: [fakefile]
isfile = os.path.isfile
os.path.isfile = lambda _: True
orig_sys_path = sys.path[:]
def restore():
os.path.isfile = isfile
os.listdir = listdir
sys.path[:] = orig_sys_path
self.addCleanup(restore)
def setup_import_issue_package_tests(self, vfs):
self.addCleanup(setattr, os, 'listdir', os.listdir)
self.addCleanup(setattr, os.path, 'isfile', os.path.isfile)
self.addCleanup(setattr, os.path, 'isdir', os.path.isdir)
self.addCleanup(sys.path.__setitem__, slice(None), list(sys.path))
def list_dir(path):
return list(vfs[path])
os.listdir = list_dir
os.path.isdir = lambda path: not path.endswith('.py')
os.path.isfile = lambda path: path.endswith('.py')
def test_discover_with_modules_that_fail_to_import(self):
loader = unittest.TestLoader()
self.setup_import_issue_tests('test_this_does_not_exist.py')
suite = loader.discover('.')
self.assertIn(os.getcwd(), sys.path)
self.assertEqual(suite.countTestCases(), 1)
# Errors loading the suite are also captured for introspection.
self.assertNotEqual([], loader.errors)
self.assertEqual(1, len(loader.errors))
error = loader.errors[0]
self.assertTrue(
'Failed to import test module: test_this_does_not_exist' in error,
'missing error string in %r' % error)
test = list(list(suite)[0])[0] # extract test from suite
with self.assertRaises(ImportError):
test.test_this_does_not_exist()
def test_discover_with_init_modules_that_fail_to_import(self):
vfs = {abspath('/foo'): ['my_package'],
abspath('/foo/my_package'): ['__init__.py', 'test_module.py']}
self.setup_import_issue_package_tests(vfs)
import_calls = []
def _get_module_from_name(name):
import_calls.append(name)
raise ImportError("Cannot import Name")
loader = unittest.TestLoader()
loader._get_module_from_name = _get_module_from_name
suite = loader.discover(abspath('/foo'))
self.assertIn(abspath('/foo'), sys.path)
self.assertEqual(suite.countTestCases(), 1)
# Errors loading the suite are also captured for introspection.
self.assertNotEqual([], loader.errors)
self.assertEqual(1, len(loader.errors))
error = loader.errors[0]
self.assertTrue(
'Failed to import test module: my_package' in error,
'missing error string in %r' % error)
test = list(list(suite)[0])[0] # extract test from suite
with self.assertRaises(ImportError):
test.my_package()
self.assertEqual(import_calls, ['my_package'])
# Check picklability
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
pickle.loads(pickle.dumps(test, proto))
def test_discover_with_module_that_raises_SkipTest_on_import(self):
if not unittest.BaseTestSuite._cleanup:
raise unittest.SkipTest("Suite cleanup is disabled")
loader = unittest.TestLoader()
def _get_module_from_name(name):
raise unittest.SkipTest('skipperoo')
loader._get_module_from_name = _get_module_from_name
self.setup_import_issue_tests('test_skip_dummy.py')
suite = loader.discover('.')
self.assertEqual(suite.countTestCases(), 1)
result = unittest.TestResult()
suite.run(result)
self.assertEqual(len(result.skipped), 1)
# Check picklability
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
pickle.loads(pickle.dumps(suite, proto))
def test_discover_with_init_module_that_raises_SkipTest_on_import(self):
if not unittest.BaseTestSuite._cleanup:
raise unittest.SkipTest("Suite cleanup is disabled")
vfs = {abspath('/foo'): ['my_package'],
abspath('/foo/my_package'): ['__init__.py', 'test_module.py']}
self.setup_import_issue_package_tests(vfs)
import_calls = []
def _get_module_from_name(name):
import_calls.append(name)
raise unittest.SkipTest('skipperoo')
loader = unittest.TestLoader()
loader._get_module_from_name = _get_module_from_name
suite = loader.discover(abspath('/foo'))
self.assertIn(abspath('/foo'), sys.path)
self.assertEqual(suite.countTestCases(), 1)
result = unittest.TestResult()
suite.run(result)
self.assertEqual(len(result.skipped), 1)
self.assertEqual(result.testsRun, 1)
self.assertEqual(import_calls, ['my_package'])
# Check picklability
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
pickle.loads(pickle.dumps(suite, proto))
def test_command_line_handling_parseArgs(self):
program = TestableTestProgram()
args = []
program._do_discovery = args.append
program.parseArgs(['something', 'discover'])
self.assertEqual(args, [[]])
args[:] = []
program.parseArgs(['something', 'discover', 'foo', 'bar'])
self.assertEqual(args, [['foo', 'bar']])
def test_command_line_handling_discover_by_default(self):
program = TestableTestProgram()
args = []
program._do_discovery = args.append
program.parseArgs(['something'])
self.assertEqual(args, [[]])
self.assertEqual(program.verbosity, 1)
self.assertIs(program.buffer, False)
self.assertIs(program.catchbreak, False)
self.assertIs(program.failfast, False)
def test_command_line_handling_discover_by_default_with_options(self):
program = TestableTestProgram()
args = []
program._do_discovery = args.append
program.parseArgs(['something', '-v', '-b', '-v', '-c', '-f'])
self.assertEqual(args, [[]])
self.assertEqual(program.verbosity, 2)
self.assertIs(program.buffer, True)
self.assertIs(program.catchbreak, True)
self.assertIs(program.failfast, True)
def test_command_line_handling_do_discovery_too_many_arguments(self):
program = TestableTestProgram()
program.testLoader = None
with support.captured_stderr() as stderr, \
self.assertRaises(SystemExit) as cm:
# too many args
program._do_discovery(['one', 'two', 'three', 'four'])
self.assertEqual(cm.exception.args, (2,))
self.assertIn('usage:', stderr.getvalue())
def test_command_line_handling_do_discovery_uses_default_loader(self):
program = object.__new__(unittest.TestProgram)
program._initArgParsers()
class Loader(object):
args = []
def discover(self, start_dir, pattern, top_level_dir):
self.args.append((start_dir, pattern, top_level_dir))
return 'tests'
program.testLoader = Loader()
program._do_discovery(['-v'])
self.assertEqual(Loader.args, [('.', 'test*.py', None)])
def test_command_line_handling_do_discovery_calls_loader(self):
program = TestableTestProgram()
class Loader(object):
args = []
def discover(self, start_dir, pattern, top_level_dir):
self.args.append((start_dir, pattern, top_level_dir))
return 'tests'
program._do_discovery(['-v'], Loader=Loader)
self.assertEqual(program.verbosity, 2)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('.', 'test*.py', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['--verbose'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('.', 'test*.py', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery([], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('.', 'test*.py', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['fish'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('fish', 'test*.py', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['fish', 'eggs'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('fish', 'eggs', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['fish', 'eggs', 'ham'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('fish', 'eggs', 'ham')])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['-s', 'fish'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('fish', 'test*.py', None)])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['-t', 'fish'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('.', 'test*.py', 'fish')])
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['-p', 'fish'], Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('.', 'fish', None)])
self.assertFalse(program.failfast)
self.assertFalse(program.catchbreak)
Loader.args = []
program = TestableTestProgram()
program._do_discovery(['-p', 'eggs', '-s', 'fish', '-v', '-f', '-c'],
Loader=Loader)
self.assertEqual(program.test, 'tests')
self.assertEqual(Loader.args, [('fish', 'eggs', None)])
self.assertEqual(program.verbosity, 2)
self.assertTrue(program.failfast)
self.assertTrue(program.catchbreak)
def setup_module_clash(self):
class Module(object):
__file__ = 'bar/foo.py'
sys.modules['foo'] = Module
full_path = os.path.abspath('foo')
original_listdir = os.listdir
original_isfile = os.path.isfile
original_isdir = os.path.isdir
def cleanup():
os.listdir = original_listdir
os.path.isfile = original_isfile
os.path.isdir = original_isdir
del sys.modules['foo']
if full_path in sys.path:
sys.path.remove(full_path)
self.addCleanup(cleanup)
def listdir(_):
return ['foo.py']
def isfile(_):
return True
def isdir(_):
return True
os.listdir = listdir
os.path.isfile = isfile
os.path.isdir = isdir
return full_path
def test_detect_module_clash(self):
full_path = self.setup_module_clash()
loader = unittest.TestLoader()
mod_dir = os.path.abspath('bar')
expected_dir = os.path.abspath('foo')
msg = re.escape(r"'foo' module incorrectly imported from %r. Expected %r. "
"Is this module globally installed?" % (mod_dir, expected_dir))
self.assertRaisesRegex(
ImportError, '^%s$' % msg, loader.discover,
start_dir='foo', pattern='foo.py'
)
self.assertEqual(sys.path[0], full_path)
def test_module_symlink_ok(self):
full_path = self.setup_module_clash()
original_realpath = os.path.realpath
mod_dir = os.path.abspath('bar')
expected_dir = os.path.abspath('foo')
def cleanup():
os.path.realpath = original_realpath
self.addCleanup(cleanup)
def realpath(path):
if path == os.path.join(mod_dir, 'foo.py'):
return os.path.join(expected_dir, 'foo.py')
return path
os.path.realpath = realpath
loader = unittest.TestLoader()
loader.discover(start_dir='foo', pattern='foo.py')
def test_discovery_from_dotted_path(self):
loader = unittest.TestLoader()
tests = [self]
expectedPath = os.path.abspath(os.path.dirname(unittest.test.__file__))
self.wasRun = False
def _find_tests(start_dir, pattern, namespace=None):
self.wasRun = True
self.assertEqual(start_dir, expectedPath)
return tests
loader._find_tests = _find_tests
suite = loader.discover('unittest.test')
self.assertTrue(self.wasRun)
self.assertEqual(suite._tests, tests)
def test_discovery_from_dotted_path_builtin_modules(self):
loader = unittest.TestLoader()
listdir = os.listdir
os.listdir = lambda _: ['test_this_does_not_exist.py']
isfile = os.path.isfile
isdir = os.path.isdir
os.path.isdir = lambda _: False
orig_sys_path = sys.path[:]
def restore():
os.path.isfile = isfile
os.path.isdir = isdir
os.listdir = listdir
sys.path[:] = orig_sys_path
self.addCleanup(restore)
with self.assertRaises(TypeError) as cm:
loader.discover('sys')
self.assertEqual(str(cm.exception),
'Can not use builtin modules '
'as dotted module names')
def test_discovery_from_dotted_namespace_packages(self):
loader = unittest.TestLoader()
package = types.ModuleType('package')
package.__path__ = ['/a', '/b']
package.__spec__ = types.SimpleNamespace(
loader=None,
submodule_search_locations=['/a', '/b']
)
def _import(packagename, *args, **kwargs):
sys.modules[packagename] = package
return package
_find_tests_args = []
def _find_tests(start_dir, pattern, namespace=None):
_find_tests_args.append((start_dir, pattern))
return ['%s/tests' % start_dir]
loader._find_tests = _find_tests
loader.suiteClass = list
with unittest.mock.patch('builtins.__import__', _import):
# Since loader.discover() can modify sys.path, restore it when done.
with support.DirsOnSysPath():
# Make sure to remove 'package' from sys.modules when done.
with test.test_importlib.util.uncache('package'):
suite = loader.discover('package')
self.assertEqual(suite, ['/a/tests', '/b/tests'])
def test_discovery_failed_discovery(self):
loader = unittest.TestLoader()
package = types.ModuleType('package')
def _import(packagename, *args, **kwargs):
sys.modules[packagename] = package
return package
with unittest.mock.patch('builtins.__import__', _import):
# Since loader.discover() can modify sys.path, restore it when done.
with support.DirsOnSysPath():
# Make sure to remove 'package' from sys.modules when done.
with test.test_importlib.util.uncache('package'):
with self.assertRaises(TypeError) as cm:
loader.discover('package')
self.assertEqual(str(cm.exception),
'don\'t know how to discover from {!r}'
.format(package))
if __name__ == '__main__':
unittest.main()
|
11581551
|
import os
import os.path
import sys
xdg = os.getenv('XDG_CONFIG_HOME') or os.path.join(os.getenv('HOME'), '.config')
conffile = os.path.join(xdg, 'pytyle3', 'config.py')
if not os.access(conffile, os.R_OK):
conffile = os.path.join('/', 'etc', 'xdg', 'pytyle3', 'config.py')
if not os.access(conffile, os.R_OK):
print >> sys.stderr, 'UNRECOVERABLE ERROR: ' \
'No configuration file found at %s' % conffile
sys.exit(1)
execfile(conffile)
|
11581553
|
import cv2
image = cv2.imread("../assets/img.png")
scale_rate = 0.5
height = int(image.shape[0] * scale_rate)
width = int(image.shape[1] * scale_rate)
image_resized = cv2.resize(image, (width, height))
cv2.imshow("Resized Image", image_resized)
cv2.waitKey(0)
|
11581559
|
from __future__ import print_function
import sys, os, numpy, h5py
from pyglib.mbody.coulomb_matrix import U_J_to_radial_integrals, \
radial_integrals_to_U_J
def h5save_usr_qa_setup(material, log=sys.stdout):
'''
A list of questions to be answered to initialize the CyGutz job.
'''
f = h5py.File('ginit.h5', 'a')
if '/usrqa' in f:
f.close()
return
usr_input = open("input.slog", 'w')
print('\n' + " User inputs to initialize the G-RISB job.")
# cut-off distance to determine the rotation group.
dist_cut = -1.0
if '-c' in sys.argv:
dist_cut = float(sys.argv[sys.argv.index('-c') + 1])
print(" The uniform dist_cut for extracting a centered" + \
" cluster \n for symmetry evaluation = {}\n".format(dist_cut),
file=log)
f['/usrqa/dist_cut'] = dist_cut
unit = 'rydberg'
if '-u' in sys.argv:
unit = sys.argv[sys.argv.index('-u') + 1]
f['/usrqa/unit'] = unit
# Spin symmetry breaking
if '-spl' in sys.argv:
spin_polarization = sys.argv[sys.argv.index('-spl') + 1]
else:
spin_polarization = get_usr_input(
"\n Do you want to BREAK SPIN-SYMMETRY?", ['y', 'n'])
print(spin_polarization, file=usr_input)
f['/usrqa/spin_polarization'] = spin_polarization
# Ferromagnetic or not
if 'y' == spin_polarization:
ferromagnetism = get_usr_input(
"\n Is it a ferromagnetic (FM) calculation?", ['y', 'n'])
print(ferromagnetism, file=usr_input)
f['/usrqa/ferromagnetism'] = ferromagnetism
# Orbital symmetry breaking
if '-opl' in sys.argv:
orbital_polarization = sys.argv[sys.argv.index('-opl') + 1]
else:
orbital_polarization = get_usr_input(
"\n Do you want to COMPLETELY break orbital-symmetry?",
['y', 'n'])
print(orbital_polarization, file=usr_input)
f['/usrqa/full_orbital_polarization'] = orbital_polarization
# Spin-orbit interaction.
if '-soc' in sys.argv:
spin_orbit_coup = sys.argv[sys.argv.index('-soc') + 1]
else:
spin_orbit_coup = get_usr_input(
"\n Do you want to take into account the SPIN-ORBIT" +
" interaction?", ['y', 'n'])
print(spin_orbit_coup, file=usr_input)
f['/usrqa/spin_orbit_coup'] = spin_orbit_coup
if 'y' == spin_polarization == spin_orbit_coup:
print(' Warning: magnetism with spin-orbit coupling is in ' +
'experimental stage.')
if 'y' == ferromagnetism:
ferro_magmom_direction = get_direction(log=usr_input)
# Crystal field
if 'n' in orbital_polarization:
if '-cf' in sys.argv:
crystal_field = sys.argv[sys.argv.index('-cf') + 1]
else:
crystal_field = get_usr_input(
"\n Do you want to take into account the" +
" CRYSTAL FIELD effect?", ['y', 'n'])
print(crystal_field, file=usr_input)
else:
crystal_field = 'y'
f['/usrqa/crystal_field'] = crystal_field
# Coulomb U
if '-um' in sys.argv:
lhub = sys.argv[sys.argv.index('-um') + 1]
else:
print("\n Please select the method to parametrize Coulomb U-matrix.\n"+
" LHUB = 1: Slater-Condo parametrization (U,J).\n" +
" 3: Slater-Condo parametrization (F-integral).\n" +
" 2: Kanamori parametrization (useful for models).\n" +
" 0: Manual input.")
lhub = get_usr_input(" Please select LHUB: ", ['1', '2', "3", '0'])
usr_input.write(lhub + '\n')
lhub = int(lhub)
f['/usrqa/u_matrix_type'] = lhub
# Choose double counting functional
if '-dc' in sys.argv:
ldc = sys.argv[sys.argv.index('-dc') + 1]
else:
print("\n Please select method for U-interaction double counting.\n" +
" LDC = 12: Recommended for LDA+G-RISB calculations.\n" +
" Fully-localized-limit (FLL) double counting. \n" +
" (updating Vdc at each charge iteration.) \n" +
" 2: Fix double counting potential \n" +
" (keep same Vdc/n0 at each charge iteration,\n" +
" n0 to be provided.) \n" +
" 1: FLL double counting potential \n" +
" (n0 self-consistently determined.) \n" +
" 0: No double counting (useful for models). ")
ldc = get_usr_input(" Please select LDC: ", ['12', '0', '1', '2'])
usr_input.write(ldc + '\n')
ldc = int(ldc)
f['/usrqa/ldc'] = ldc
# Equivalent atom indices
if '-eqidx' in sys.argv:
string_idx_equivalent_atoms = sys.argv[sys.argv.index('-eqidx') + 1]
idx_equivalent_atoms = [
int(s) for s in string_idx_equivalent_atoms.split()]
else:
idx_equivalent_atoms = material.get_EquivalentAtoms()
yn = get_usr_input("\n Symmetrically-equivalent atom indices: " \
+ ''.join("%2d " % (i) for i in idx_equivalent_atoms) +
"\n (note: '0 0 0 1 1' means 1-3 and 4-5 are two" +
" inequivalent atoms). \n Accept?", ['y', 'n'])
print(yn, file=usr_input)
if yn == 'n':
while True:
string_idx_equivalent_atoms = raw_input(
" Enter user-defined equivalent atom indices: ")
yn1 = get_usr_input(
"\n User-defined equivalent atom indices: " +
string_idx_equivalent_atoms + ". Accept?", ['y', 'n'])
if yn1 == 'y':
idx_equivalent_atoms = [
int(s) for s in string_idx_equivalent_atoms.split()]
print(string_idx_equivalent_atoms, file=usr_input)
print(yn1, file=usr_input)
break
f['/usrqa/idx_equivalent_atoms'] = idx_equivalent_atoms
# asking user list of correlated atoms and relative information:
unique_df_list = []
unique_corr_symbol_list = []
unique_u_list = []
unique_j_list = []
unique_f_list = []
unique_magmom_direction_list = []
unique_nf_list = []
for i, s in enumerate(material.symbols):
if s in material.symbols[:i]:
continue
print('\n ' + '-'*12 + "\n atom {} {}".format(i,s))
correlated = get_usr_input("\n Is this atom correlated?",
['y', 'n'])
print(correlated, file=usr_input)
if 'n' in correlated:
continue
unique_corr_symbol_list.append(s)
df = get_usr_input_combo(
"\n Enter correlated shells?", ['s', 'p', 'd', 'f'])
print(df, file=usr_input)
unique_df_list.append(df)
if lhub in [1, 2]:
while True:
answer = raw_input(
'\n Please provide interaction parameters U,J ' +
'\n separated by a space (eV): ')
try:
answer = answer.split()
UJ = [float(answer[i]) for i in range(2)]
break
except:
pass
print(answer, file=usr_input)
unique_u_list.append(UJ[0])
unique_j_list.append(UJ[1])
_l = "spdf".index(unique_df_list[-1])
f_list = numpy.zeros(4)
f_list[:_l+1] = U_J_to_radial_integrals(_l, UJ[0], UJ[1])
unique_f_list.append(f_list)
elif lhub == 3:
while True:
answer = raw_input(
'\n Please provide radial integrals F0,F2,F4,F6' +
'\n separated by spaces and padded by 0s (eV): ')
try:
answer = answer.split()
f_list = [float(answer[i]) for i in range(4)]
break
except:
pass
print(answer, file=usr_input)
unique_f_list.append(f_list)
_l = "spdf".index(unique_df_list[-1])
U,J = radial_integrals_to_U_J(_l, f_list)
unique_u_list.append(U)
unique_j_list.append(J)
if ldc == 2:
while True:
answer = raw_input(
'\n Please provide the fixed number of' +
'\n localized {}-electrons for Vdc: '.format(df))
try:
nf = float(answer)
break
except:
continue
print(answer, file=usr_input)
unique_nf_list.append([nf/2,nf/2])
if 'y' == spin_polarization == spin_orbit_coup:
if 'y' == ferromagnetism:
vec = ferro_magmom_direction
else:
vec = get_direction(log=usr_input)
else:
vec = [1., 0., 0.]
unique_magmom_direction_list.append(vec)
f['/usrqa/unique_corr_symbol_list'] = unique_corr_symbol_list
f['/usrqa/unique_df_list'] = unique_df_list
if len(unique_u_list) > 0:
f['/usrqa/unique_u_list_ev'] = unique_u_list
f['/usrqa/unique_j_list_ev'] = unique_j_list
if len(unique_f_list) > 0:
f["/usrqa/unique_f_list_ev"] = unique_f_list
if ldc == 2:
f['/usrqa/unique_nf_list'] = unique_nf_list
if 'y' == spin_polarization:
f['/usrqa/unique_magmom_direction_list'] = \
unique_magmom_direction_list
if '-newton' in sys.argv:
        lnewton = sys.argv[sys.argv.index('-newton')+1]
else:
print("\n Please select the method to solve G-RISB equations.\n" +
" LNEWTON = 0: Recommended.\n" +
" Modified Powell hybrid method (HYDRD1).\n" +
" -1: Broyden method.")
lnewton = get_usr_input(" Please select LNEWTON: ", ['-1', '0'])
usr_input.write(lnewton + '\n')
lnewton = int(lnewton)
f['/usrqa/lnewton'] = lnewton
if '-ed' in sys.argv:
        iembeddiag = sys.argv[sys.argv.index('-ed')+1]
else:
print("\n Please select the method to solve embedding Hamiltonian.\n"+
" LDIAG = -1: Valence truncation ED.\n" +
" -2: Valence truncation ED with Sz symmetry.\n" +
" -3: Valence truncation ED for S=0 (spin-singlet)\n"+
" -4: Valence truncation ED with Jz symmetry.\n" +
" -11: machine learning solver for soc only, exptl.\n"+
" -12: syten (dmrg) solver, exptl. \n" +
" 10: HF (Mixing one-particle DM, exptl.).")
iembeddiag = get_usr_input(" Please select LDIAG: ", \
['-12', '-11', '-4', '-3', '-1', '-2', '10'])
usr_input.write(iembeddiag + '\n')
iembeddiag = int(iembeddiag)
f['/usrqa/iembeddiag'] = iembeddiag
usr_input.close()
f.close()
os.rename("input.slog", "init_ga.input")
def get_usr_input(message, accept_list):
while True:
answer = raw_input(
message +
" \n Pick one from [" +
', '.join(item for item in accept_list) + "]...")
if answer not in accept_list:
print(" Please pick an answer in the list!" + \
" Make your choice again.")
else:
break
return answer
def get_usr_input_combo(message, accept_list):
while True:
answer = raw_input(
message +
" \n Pick one or combinations separated by blank space" +
" \n from [" + ', '.join(item for item in accept_list) + "]...")
if answer_valid(answer, accept_list):
break
return answer
def answer_valid(answer, accept_list):
answer_list = answer.split()
for ans in answer_list:
if ans not in accept_list:
return False
return True
def get_direction(log=sys.stdout):
'''get direction array(3).
'''
while True:
vec_ = raw_input(
'\n please enter direction (to be normalized) \n'+\
' of the magnetic moment by components \n'+\
' in global coordinate system: x y z\n ')
vec = vec_.split()
vec = numpy.array(map(float, vec))
if len(vec) == 3:
vec = vec/numpy.linalg.norm(vec)
break
else:
            print(' enter 3 float numbers separated by spaces only.')
print(vec_, file=log)
return vec
|
11581674
|
import imp
import os
import sys
from gpcheckcat_modules.foreign_key_check import ForeignKeyCheck
from mock import *
from gp_unittest import *
class GpCheckCatTestCase(GpTestCase):
def setUp(self):
self.logger = Mock(spec=['log', 'info', 'debug', 'error'])
self.db_connection = Mock(spec=['close', 'query'])
self.autoCast = {'regproc': '::oid',
'regprocedure': '::oid',
'regoper': '::oid',
'regoperator': '::oid',
'regclass': '::oid',
'regtype': '::oid',
'int2vector': '::int2[]'}
self.subject = ForeignKeyCheck(self.db_connection, self.logger, False, self.autoCast)
self.full_join_cat_tables = set(['pg_attribute','gp_distribution_policy','pg_appendonly','pg_constraint','pg_index'])
self.foreign_key_check= Mock(spec=['runCheck'])
self.foreign_key_check.runCheck.return_value = []
self.db_connection.query.return_value.ntuples.return_value = 2
self.db_connection.query.return_value.listfields.return_value = ['pkey1', 'pkey2']
self.db_connection.query.return_value.getresult.return_value = [('r1','r2'), ('r3','r4')]
def test_get_fk_query_left_join_returns_the_correct_query(self):
expected_query = """
SELECT pkeys-1, pkeys-2, missing_catalog, present_key, pkcatname_pkeystr,
array_agg(gp_segment_id order by gp_segment_id) as segids
FROM (
SELECT (case when cat1.fkeystr is not NULL then 'pkcatname' when cat2.pkeystr is not NULL then 'catname' end) as missing_catalog,
(case when cat1.fkeystr is not NULL then 'fkeystr' when cat2.pkeystr is not NULL then 'pkeystr' end) as present_key,
cat1.gp_segment_id, cat1pkeys-1, cat1pkeys-2, cat1.fkeystr as pkcatname_pkeystr
FROM
gp_dist_random('catname') cat1 LEFT OUTER JOIN
gp_dist_random('pkcatname') cat2
ON (cat1.gp_segment_id = cat2.gp_segment_id AND
cat1.fkeystr = cat2.pkeystr )
WHERE cat2.pkeystr is NULL
AND cat1.fkeystr != 0
UNION ALL
SELECT (case when cat1.fkeystr is not NULL then 'pkcatname' when cat2.pkeystr is not NULL then 'catname' end) as missing_catalog,
(case when cat1.fkeystr is not NULL then 'fkeystr' when cat2.pkeystr is not NULL then 'pkeystr' end) as present_key,
-1 as gp_segment_id, cat1pkeys-1, cat1pkeys-2, cat1.fkeystr as pkcatname_pkeystr
FROM
catname cat1 LEFT OUTER JOIN
pkcatname cat2
ON (cat1.gp_segment_id = cat2.gp_segment_id AND
cat1.fkeystr = cat2.pkeystr )
WHERE cat2.pkeystr is NULL
AND cat1.fkeystr != 0
ORDER BY pkeys-1, pkeys-2, gp_segment_id
) allresults
GROUP BY pkeys-1, pkeys-2, pkcatname_pkeystr, missing_catalog, present_key
"""
result_query = self.subject.get_fk_query_left_join("catname", "pkcatname", "fkeystr", "pkeystr", ["pkeys-1", "pkeys-2"], ["cat1pkeys-1", "cat1pkeys-2"])
self.assertEquals(expected_query, result_query)
def test_get_fk_query_full_join_returns_the_correct_query(self):
expected_query = """
SELECT pkeys-1, pkeys-2, missing_catalog, present_key, pkcatname_pkeystr,
array_agg(gp_segment_id order by gp_segment_id) as segids
FROM (
SELECT (case when cat1.fkeystr is not NULL then 'pkcatname' when cat2.pkeystr is not NULL then 'catname' end) as missing_catalog,
(case when cat1.fkeystr is not NULL then 'fkeystr' when cat2.pkeystr is not NULL then 'pkeystr' end) as present_key,
COALESCE(cat1.gp_segment_id,cat2.gp_segment_id) as gp_segment_id , cat1pkeys-1, cat1pkeys-2, COALESCE(cat1.fkeystr, cat2.pkeystr) as pkcatname_pkeystr
FROM
gp_dist_random('catname') cat1 FULL OUTER JOIN
gp_dist_random('pkcatname') cat2
ON (cat1.gp_segment_id = cat2.gp_segment_id AND
cat1.fkeystr = cat2.pkeystr )
WHERE (cat2.pkeystr is NULL or cat1.fkeystr is NULL)
AND filter
UNION ALL
SELECT (case when cat1.fkeystr is not NULL then 'pkcatname' when cat2.pkeystr is not NULL then 'catname' end) as missing_catalog,
(case when cat1.fkeystr is not NULL then 'fkeystr' when cat2.pkeystr is not NULL then 'pkeystr' end) as present_key,
-1, cat1pkeys-1, cat1pkeys-2, COALESCE(cat1.fkeystr, cat2.pkeystr) as pkcatname_pkeystr
FROM
catname cat1 FULL OUTER JOIN
pkcatname cat2
ON (cat1.gp_segment_id = cat2.gp_segment_id AND
cat1.fkeystr = cat2.pkeystr )
WHERE (cat2.pkeystr is NULL or cat1.fkeystr is NULL)
AND filter
ORDER BY pkeys-1, pkeys-2, gp_segment_id
) allresults
GROUP BY pkeys-1, pkeys-2, pkcatname_pkeystr, missing_catalog, present_key
"""
result_query = self.subject.get_fk_query_full_join("catname", "pkcatname", "fkeystr", "pkeystr", ["pkeys-1", "pkeys-2"], ["cat1pkeys-1", "cat1pkeys-2"], "filter")
self.assertEquals(expected_query, result_query)
@patch('gpcheckcat_modules.foreign_key_check.ForeignKeyCheck.checkTableForeignKey')
def test_runCheck(self, mock):
tables = [self._get_mock_for_catalog_table("table1"), self._get_mock_for_catalog_table("table2")]
self.subject.runCheck(tables)
self.assertEquals(len(self.subject.checkTableForeignKey.call_args_list), len(tables))
for table in tables:
self.assertIn(call(table), self.subject.checkTableForeignKey.call_args_list)
@patch('gpcheckcat_modules.foreign_key_check.ForeignKeyCheck.get_fk_query_full_join')
@patch('gpcheckcat_modules.foreign_key_check.ForeignKeyCheck.get_fk_query_left_join')
@patch('gpcheckcat_modules.foreign_key_check.log_literal')
def test_checkTableForeignKey__returns_correct_join_query(self, log_literal_mock, fk_query_left_join_mock, fk_query_full_join_mock):
#cat_tables_to_validate = set(['pg_attribute','gp_distribution_policy','pg_appendonly','pg_constraint','pg_index','pg_type','pg_window'])
cat_tables_to_validate = set(['pg_attribute', 'pg_appendonly', 'pg_index','pg_type','pg_window'])
foreign_key_mock_calls = []
foreign_key_mock_calls_left = []
for table_name in cat_tables_to_validate:
foreign_key_mock_1 = self._get_mock_for_foreign_key(pkey_tablename="pg_class", cat_table_name=table_name)
foreign_key_mock_2 = self._get_mock_for_foreign_key(pkey_tablename="arbitrary_catalog_table", cat_table_name=table_name)
catalog_table_mock = self._get_mock_for_catalog_table(table_name, [foreign_key_mock_1, foreign_key_mock_2])
col_type = self._get_col_types(table_name)
issue_list = self.subject.checkTableForeignKey(catalog_table_mock)
self.assertEquals(len(issue_list) , 2)
self.assertEquals(issue_list[0], ('pg_class', ['pkey1', 'pkey2'], [('r1', 'r2'), ('r3', 'r4')]))
self.assertEquals(issue_list[1], ('arbitrary_catalog_table', ['pkey1', 'pkey2'], [('r1', 'r2'), ('r3', 'r4')]))
self.assertEquals(self.db_connection.query.call_count, 2)
def __generate_pg_class_call(table, primary_key_cat_name, col_type, with_filter=True):
if with_filter:
return call(table_name, '%s' % primary_key_cat_name, '%s' % col_type.keys()[0], 'oid',
['%s_pkey1' % (table_name), '%s_pkey2' % (table_name)],
filter=self._get_filter(table_name),
cat1pkeys=['cat1.pkey1 as %s_pkey1' % (table_name),
'cat1.pkey2 as %s_pkey2' % (table_name)])
else:
return call(table_name, '%s' % primary_key_cat_name, '%s' % col_type.keys()[0], 'oid',
['%s_pkey1' % (table_name), '%s_pkey2' % (table_name)],
['cat1.pkey1 as %s_pkey1' % (table_name),
'cat1.pkey2 as %s_pkey2' % (table_name)])
# make sure that the correct pg_class_call is used depending on the
# foreign key
# XXX: if we know that it's the fake catalog, then we can assume
# that it will do a left join... we need to figure that out
if table_name in self.full_join_cat_tables:
pg_class_call = __generate_pg_class_call(table_name, 'pg_class', col_type, with_filter=True)
foreign_key_mock_calls.append(pg_class_call)
pg_class_call = __generate_pg_class_call(table_name, 'arbitrary_catalog_table', col_type, with_filter=False)
foreign_key_mock_calls_left.append(pg_class_call)
else:
pg_class_call = __generate_pg_class_call(table_name, 'pg_class', col_type, with_filter=False)
foreign_key_mock_calls_left.append(pg_class_call)
if table_name in self.full_join_cat_tables:
self.assertEquals(fk_query_full_join_mock.call_count, 1)
self.assertEquals(fk_query_left_join_mock.call_count, 1)
fk_query_full_join_mock.assert_has_calls(foreign_key_mock_calls, any_order=False)
else:
arbitrary_catalog_table_call = __generate_pg_class_call(table_name, 'arbitrary_catalog_table', col_type, with_filter=False)
foreign_key_mock_calls_left.append(arbitrary_catalog_table_call)
self.assertEquals(fk_query_left_join_mock.call_count, 2)
self.assertEquals(fk_query_full_join_mock.call_count, 0)
fk_query_left_join_mock.assert_has_calls(foreign_key_mock_calls_left, any_order=False)
self.db_connection.query.call_count = 0
fk_query_full_join_mock.call_count = 0
fk_query_left_join_mock.call_count = 0
####################### PRIVATE METHODS #######################
def _get_filter(self, table_name):
query_filters = {}
query_filters['pg_appendonly'] = "(relstorage='a' or relstorage='c')"
query_filters['pg_attribute'] = "true"
query_filters['pg_constraint'] = "((relchecks>0 or relhaspkey='t') and relkind = 'r')"
query_filters['gp_distribution_policy'] = """(relnamespace not in(select oid from pg_namespace where nspname ~ 'pg_')
and relnamespace not in(select oid from pg_namespace where nspname ~ 'gp_')
and relnamespace!=(select oid from pg_namespace where nspname='information_schema')
and relkind='r' and (relstorage='a' or relstorage='h' or relstorage='c'))"""
query_filters["pg_index"] = "(relkind='i')"
if table_name in query_filters:
return query_filters[table_name]
else:
return []
def _get_col_types(self, table_name):
table_col_types = {'pg_attribute': {'attlen': 'int2', 'atthasdef': 'bool', 'attndims': 'int4',
'attnum': 'int2', 'attname': 'name', 'attalign': 'char',
'attnotnull': 'bool', 'atttypid': 'oid', 'attrelid': 'oid',
'attinhcount': 'int4', 'attcacheoff': 'int4',
'attislocal': 'bool', 'attstattarget': 'int4',
'attstorage': 'char', 'attbyval': 'bool',
'atttypmod': 'int4', 'attisdropped': 'bool'},
'pg_appendonly': {'relid': 'oid'},
'pg_attribute': {'attrelid': 'oid'},
'pg_constraint': {'conrelid': 'oid'},
'pg_index': {'indexrelid': 'oid'},
'gp_distribution_policy': {}
}
if table_name not in table_col_types.keys():
table_name = 'pg_attribute'
return table_col_types[table_name]
def _get_mock_for_catalog_table(self, table_name, foreign_keys=None):
catalog_table_mock = Mock(spec=['getTableName','isShared','getForeignKeys','getPrimaryKey','getTableColtypes'])
catalog_table_mock.getTableName.return_value = table_name
catalog_table_mock.isShared.return_value = True
catalog_table_mock.getForeignKeys.return_value = foreign_keys
catalog_table_mock.getPrimaryKey.return_value = ["pkey1", "pkey2"]
catalog_table_mock.getTableColtypes.return_value = self._get_col_types(table_name)
return catalog_table_mock
def _get_mock_for_foreign_key(self, pkey_tablename, cat_table_name):
spec_list = ['getPKey', 'getPkeyTableName', 'getColumns']
filter_mapping = {'pg_appendonly': ['relid'],
'pg_attribute': ['attrelid'],
'pg_constraint': ['conrelid'],
'pg_index': ['indexrelid'],
'gp_distribution_policy': []
}
attribute_foreign_key_mock = Mock(spec=spec_list)
attribute_foreign_key_mock.getPKey.return_value = ['oid']
attribute_foreign_key_mock.getPkeyTableName.return_value = pkey_tablename
query_filter = ['attrelid']
if cat_table_name in filter_mapping:
query_filter = filter_mapping[cat_table_name]
attribute_foreign_key_mock.getColumns.return_value = query_filter
return attribute_foreign_key_mock
if __name__ == '__main__':
run_tests()
|
11581737
|
import torch
import fused_layer_norm_cuda
from apex.normalization import FusedLayerNorm
import pyprof2
pyprof2.init()
pyprof2.wrap(fused_layer_norm_cuda, 'forward')
pyprof2.wrap(fused_layer_norm_cuda, 'backward')
pyprof2.wrap(fused_layer_norm_cuda, 'forward_affine')
pyprof2.wrap(fused_layer_norm_cuda, 'backward_affine')
input = torch.randn(20, 5, 10, 10).cuda()
# With Learnable Parameters
m = FusedLayerNorm(input.size()[1:]).cuda()
output = m(input)
# Without Learnable Parameters
m = FusedLayerNorm(input.size()[1:], elementwise_affine=False).cuda()
output = m(input)
# Normalize over last two dimensions
m = FusedLayerNorm([10, 10]).cuda()
output = m(input)
# Normalize over last dimension of size 10
m = FusedLayerNorm(10).cuda()
output = m(input)
|
11581815
|
from setuptools import setup
def readme():
with open('README.rst') as f:
return f.read()
setup(name='pyeventbus',
version='0.5',
description='A Python EventBus',
long_description=readme(),
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Framework :: Flask',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Natural Language :: English',
'Operating System :: OS Independent',
],
keywords='python eventbus with threading and concurrency support',
url='https://github.com/n89nanda/pyeventbus',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
packages=['pyeventbus'],
install_requires=[
'gevent',
],
include_package_data=True,
zip_safe=False)
|
11581825
|
from __future__ import absolute_import
import sys
from unittest import TestCase
from plotly.optional_imports import get_module
class OptionalImportsTest(TestCase):
def test_get_module_exists(self):
import math
module = get_module("math")
self.assertIsNotNone(module)
self.assertEqual(math, module)
def test_get_module_exists_submodule(self):
import requests.sessions
module = get_module("requests.sessions")
self.assertIsNotNone(module)
self.assertEqual(requests.sessions, module)
def test_get_module_does_not_exist(self):
module = get_module("hoopla")
self.assertIsNone(module)
def test_get_module_import_exception(self):
# Get module that raises an exception on import
module_str = "plotly.tests.test_core." "test_optional_imports.exploding_module"
if sys.version_info >= (3, 4):
with self.assertLogs("_plotly_utils.optional_imports", level="ERROR") as cm:
module = get_module(module_str)
# No exception should be raised and None should be returned
self.assertIsNone(module)
# Check logging level and log message
expected_start = (
"ERROR:_plotly_utils.optional_imports:"
"Error importing optional module " + module_str
)
self.assertEqual(cm.output[0][: len(expected_start)], expected_start)
# Check that exception message is included after log message
expected_end = "Boom!"
self.assertEqual(cm.output[0][-len(expected_end) :], expected_end)
else:
# Don't check logging
module = get_module(module_str)
# No exception should be raised and None should be returned
self.assertIsNone(module)
|
11581869
|
from WhatsAppManifest import ADB, Automator
from WhatsAppManifest.automator.android import AndroidContacts
# Note: We need the AdbServer class (even without using SSH) so that Automator can open the internal connection.
with ADB(use_ssh=False) as AdbServer:
automator = Automator(adb_server=AdbServer, adb_host="127.0.0.1", adb_port=5037)
for device in automator.list_devices(state=None):
android_contacts = AndroidContacts(device)
print(android_contacts.get_contact("+001198765-4321"))
|
11581891
|
import os
from pytest import fixture
from colab_ssh.utils import expose_env_variable
@fixture()
def bash_rc_path():
bash_rc_path = "./.bashrc"
yield bash_rc_path
os.remove(bash_rc_path)
def test_env_var(bash_rc_path):
os.system(f"echo 'previous stuff' >> {bash_rc_path}")
os.environ["COLAB_SSH_TEST_ENV_VAR"] = "123"
expose_env_variable("COLAB_SSH_TEST_ENV_VAR", bash_rc_path)
with open("./.bashrc", "r") as f:
lines = f.readlines()
assert lines == [
"previous stuff\n",
"export COLAB_SSH_TEST_ENV_VAR=123\n"
]
|
11581957
|
import unittest
from records_mover.db.bigquery.bigquery_db_driver import BigQueryDBDriver
from records_mover.records.records_format import DelimitedRecordsFormat
from mock import MagicMock, Mock, patch
import sqlalchemy
from sqlalchemy_bigquery import BIGNUMERIC
class TestBigQueryDBDriver(unittest.TestCase):
@patch('records_mover.db.bigquery.bigquery_db_driver.BigQueryLoader')
def setUp(self, mock_BigQueryLoader):
self.mock_db_engine = MagicMock(name='db_engine')
self.mock_url_resolver = Mock(name='url_resolver')
self.mock_BigQueryLoader = mock_BigQueryLoader
self.bigquery_db_driver = BigQueryDBDriver(db=self.mock_db_engine,
url_resolver=self.mock_url_resolver)
def test_load_implemented(self):
mock_schema = Mock(name='mock_schema')
mock_table = Mock(name='mock_table')
mock_load_plan = Mock(name='mock_load_plan')
mock_load_plan.records_format = Mock(name='records_format', spec=DelimitedRecordsFormat)
mock_load_plan.records_format.hints = {}
mock_directory = Mock(name='mock_directory')
ret = self.bigquery_db_driver.loader().\
load(schema=mock_schema,
table=mock_table,
load_plan=mock_load_plan,
directory=mock_directory)
self.assertEqual(ret, self.mock_BigQueryLoader.return_value.load.return_value)
def test_can_load_this_format(self):
mock_source_records_format = Mock(name='source_records_format', spec=DelimitedRecordsFormat)
out = self.bigquery_db_driver.loader_from_fileobj().\
can_load_this_format(mock_source_records_format)
self.mock_BigQueryLoader.return_value.can_load_this_format.\
assert_called_with(mock_source_records_format)
self.assertEqual(out,
self.mock_BigQueryLoader.return_value.can_load_this_format.return_value)
def test_known_supported_records_formats_for_load(self):
out = self.bigquery_db_driver.loader().known_supported_records_formats_for_load()
self.mock_BigQueryLoader.return_value.known_supported_records_formats_for_load.\
assert_called_with()
self.assertEqual(out,
self.mock_BigQueryLoader.return_value.
known_supported_records_formats_for_load.return_value)
def test_type_for_date_plus_time_with_tz(self):
out = self.bigquery_db_driver.type_for_date_plus_time(has_tz=True)
self.assertEqual(type(out), sqlalchemy.sql.sqltypes.TIMESTAMP)
def test_type_for_date_plus_time_with_no_tz(self):
out = self.bigquery_db_driver.type_for_date_plus_time(has_tz=False)
self.assertEqual(type(out), sqlalchemy.sql.sqltypes.DATETIME)
def test_make_column_name_valid(self):
expected = {
'foo bar': 'foo_bar',
'foo-bar': 'foo_bar',
}
for colname_input, expected_colname_output in expected.items():
colname_output = self.bigquery_db_driver.make_column_name_valid(colname_input)
self.assertEqual(colname_output, expected_colname_output)
def test_integer_limits(self):
self.assertEqual(self.bigquery_db_driver.integer_limits(sqlalchemy.types.Integer()),
(-9223372036854775808, 9223372036854775807))
def test_fp_constraints(self):
self.assertEqual(self.bigquery_db_driver.fp_constraints(sqlalchemy.types.Float()),
(64, 53))
def test_fixed_point_constraints(self):
constraints = self.bigquery_db_driver.fixed_point_constraints(
sqlalchemy.types.Numeric(6, 2))
self.assertEqual(constraints, (6, 2))
def test_fixed_point_constraints_bignumeric_bare(self):
constraints = self.bigquery_db_driver.fixed_point_constraints(BIGNUMERIC())
self.assertEqual(constraints, (76, 38))
def test_fixed_point_constraints_new_type(self):
constraints = self.bigquery_db_driver.fixed_point_constraints(
Mock(precision=None, scale=None))
        self.assertIsNone(constraints)
def test_type_for_fixed_point_big(self):
type_ = self.bigquery_db_driver.type_for_fixed_point(123, 45)
self.assertEqual(type(type_), sqlalchemy.types.Float)
def test_type_for_fixed_point_small(self):
type_ = self.bigquery_db_driver.type_for_fixed_point(12, 3)
self.assertEqual(type(type_), sqlalchemy.types.Numeric)
def test_type_for_integer(self):
type_ = self.bigquery_db_driver.type_for_integer(min_value=1, max_value=4)
self.assertEqual(type(type_), sqlalchemy.types.Integer)
def test_load_from_fileobj(self):
mock_schema = Mock(name='schema')
mock_table = Mock(name='table')
mock_load_plan = Mock(name='load_plan')
mock_fileobj = Mock(name='fileobj')
mock_bigquery_loader = self.mock_BigQueryLoader.return_value
out = self.bigquery_db_driver.loader_from_fileobj().\
load_from_fileobj(mock_schema, mock_table,
mock_load_plan, mock_fileobj)
mock_bigquery_loader.load_from_fileobj.assert_called_with(mock_schema,
mock_table,
mock_load_plan,
mock_fileobj)
self.assertEqual(out, mock_bigquery_loader.load_from_fileobj.return_value)
def test_type_for_integer_small_type(self):
INT64_MIN = -9223372036854775808
min_value = INT64_MIN - 100
max_value = 200
out = self.bigquery_db_driver.type_for_integer(min_value, max_value)
self.assertEqual(type(out), sqlalchemy.types.Numeric)
|
11581987
|
import torch
import torch.nn.functional as F
import numpy as np
import copy
import pdb
from collections import OrderedDict as OD
from collections import defaultdict as DD
torch.random.manual_seed(0)
''' For MIR '''
def overwrite_grad(pp, new_grad, grad_dims):
"""
This is used to overwrite the gradients with a new gradient
vector, whenever violations occur.
pp: parameters
    new_grad: corrected gradient
grad_dims: list storing number of parameters at each layer
"""
cnt = 0
for param in pp():
param.grad=torch.zeros_like(param.data)
beg = 0 if cnt == 0 else sum(grad_dims[:cnt])
en = sum(grad_dims[:cnt + 1])
this_grad = new_grad[beg: en].contiguous().view(
param.data.size())
param.grad.data.copy_(this_grad)
cnt += 1
def get_grad_vector(args, pp, grad_dims):
"""
gather the gradients in one vector
"""
grads = torch.Tensor(sum(grad_dims))
if args.cuda: grads = grads.cuda()
grads.fill_(0.0)
cnt = 0
for param in pp():
if param.grad is not None:
beg = 0 if cnt == 0 else sum(grad_dims[:cnt])
en = sum(grad_dims[:cnt + 1])
grads[beg: en].copy_(param.grad.data.view(-1))
cnt += 1
return grads
def get_future_step_parameters(this_net, grad_vector, grad_dims, lr=1):
"""
computes \theta-\delta\theta
:param this_net:
:param grad_vector:
:return:
"""
new_net=copy.deepcopy(this_net)
overwrite_grad(new_net.parameters,grad_vector,grad_dims)
with torch.no_grad():
for param in new_net.parameters():
if param.grad is not None:
param.data=param.data - lr*param.grad.data
return new_net
def get_grad_dims(self):
self.grad_dims = []
for param in self.net.parameters():
self.grad_dims.append(param.data.numel())
''' Others '''
def onehot(t, num_classes, device='cpu'):
"""
convert index tensor into onehot tensor
:param t: index tensor
:param num_classes: number of classes
"""
return torch.zeros(t.size()[0], num_classes).to(device).scatter_(1, t.view(-1, 1), 1)
def distillation_KL_loss(y, teacher_scores, T, scale=1, reduction='batchmean'):
"""Computes the distillation loss (cross-entropy).
xentropy(y, t) = kl_div(y, t) + entropy(t)
entropy(t) does not contribute to gradient wrt y, so we skip that.
Thus, loss value is slightly different, but gradients are correct.
\delta_y{xentropy(y, t)} = \delta_y{kl_div(y, t)}.
scale is required as kl_div normalizes by nelements and not batch size.
"""
return F.kl_div(F.log_softmax(y / T, dim=1), F.softmax(teacher_scores / T, dim=1),
reduction=reduction) * scale
def naive_cross_entropy_loss(input, target, size_average=True):
"""
in PyTorch's cross entropy, targets are expected to be labels
so to predict probabilities this loss is needed
suppose q is the target and p is the input
loss(p, q) = -\sum_i q_i \log p_i
"""
assert input.size() == target.size()
input = torch.log(F.softmax(input, dim=1).clamp(1e-5, 1))
# input = input - torch.log(torch.sum(torch.exp(input), dim=1)).view(-1, 1)
loss = - torch.sum(input * target)
return loss / input.size()[0] if size_average else loss
def compute_offsets(task, nc_per_task, is_cifar):
"""
Compute offsets for cifar to determine which
outputs to select for a given task.
"""
if is_cifar:
offset1 = task * nc_per_task
offset2 = (task + 1) * nc_per_task
else:
offset1 = 0
offset2 = nc_per_task
return offset1, offset2
def out_mask(output, t, nc_per_task, n_outputs):
    # make sure we predict classes within the current task
    # (masks, in-place, the logits belonging to all other tasks)
    offset1 = int(t * nc_per_task)
    offset2 = int((t + 1) * nc_per_task)
    if offset1 > 0:
        output[:, :offset1].data.fill_(-10e10)
    if offset2 < n_outputs:
        output[:, offset2:n_outputs].data.fill_(-10e10)
    return output
class Reshape(torch.nn.Module):
def __init__(self, shape):
super(Reshape, self).__init__()
self.shape = shape
def forward(self, input):
return input.view(input.size(0), *self.shape)
''' LOG '''
def logging_per_task(wandb, log, run, mode, metric, task=0, task_t=0, value=0):
if 'final' in metric:
log[run][mode][metric] = value
else:
log[run][mode][metric][task_t, task] = value
if wandb is not None:
if 'final' in metric:
wandb.log({mode+metric:value}, step=run)
def print_(log, mode, task):
to_print = mode + ' ' + str(task) + ' '
for name, value in log.items():
# only print acc for now
if len(value) > 0:
name_ = name + ' ' * (12 - len(name))
value = sum(value) / len(value)
if 'acc' in name or 'gen' in name:
to_print += '{}\t {:.4f}\t'.format(name_, value)
# print('{}\t {}\t task {}\t {:.4f}'.format(mode, name_, task, value))
print(to_print)
def get_logger(names, n_runs=1, n_tasks=None):
log = OD()
#log = DD()
log.print_ = lambda a, b: print_(log, a, b)
for i in range(n_runs):
log[i] = {}
for mode in ['train','valid','test']:
log[i][mode] = {}
for name in names:
log[i][mode][name] = np.zeros([n_tasks,n_tasks])
log[i][mode]['final_acc'] = 0.
log[i][mode]['final_forget'] = 0.
return log
def get_temp_logger(exp, names):
log = OD()
log.print_ = lambda a, b: print_(log, a, b)
for name in names: log[name] = []
return log
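# --- Hedged usage sketch (added for illustration; not part of the original utilities) ---
# Quick sanity check that naive_cross_entropy_loss applied to one-hot targets agrees
# with PyTorch's built-in F.cross_entropy, up to the 1e-5 clamp used above.
if __name__ == '__main__':
    logits = torch.randn(4, 10)
    labels = torch.randint(0, 10, (4,))
    soft_targets = onehot(labels, num_classes=10)
    loss_naive = naive_cross_entropy_loss(logits, soft_targets)
    loss_builtin = F.cross_entropy(logits, labels)
    print('naive: {:.4f}  builtin: {:.4f}'.format(loss_naive.item(), loss_builtin.item()))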
|
11582011
|
import random
import os
import numpy as np
import cv2
import torch
from torchvision.transforms import functional as F
from utils import (
    generate_shiftscalerotate_matrix,
    # the distort_* helpers used below are assumed to live in utils as well
    distort_hsv,
    distort_noise,
    distort_smooth,
)
class Compose:
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, img, target):
for t in self.transforms:
img, target = t(img, target)
return img, target
def __repr__(self):
format_str = self.__class__.__name__ + '('
for t in self.transforms:
format_str += '\n'
format_str += f' {t}'
format_str += '\n)'
return format_str
class Resize:
def __init__(self, dst_width, dst_height, dst_K):
self.dst_width = dst_width
self.dst_height = dst_height
self.dst_K = dst_K
def __call__(self, img, target):
M = np.matmul(self.dst_K, np.linalg.inv(target.K))
#
img = cv2.warpAffine(img, M[:2], (self.dst_width, self.dst_height), flags=cv2.INTER_LINEAR, borderValue=(128, 128, 128))
target = target.transform(M, self.dst_K, self.dst_width, self.dst_height)
return img, target
class RandomShiftScaleRotate:
def __init__(self, shift_limit, scale_limit, rotate_limit, dst_width, dst_height, dst_K):
self.shift_limit = shift_limit
self.scale_limit = scale_limit
self.rotate_limit = rotate_limit
#
self.dst_width = dst_width
self.dst_height = dst_height
self.dst_K = dst_K
def __call__(self, img, target):
M = generate_shiftscalerotate_matrix(
self.shift_limit, self.scale_limit, self.rotate_limit,
self.dst_width, self.dst_height
)
img = cv2.warpAffine(img, M[:2], (self.dst_width, self.dst_height), flags=cv2.INTER_LINEAR, borderValue=(128, 128, 128))
target = target.transform(M, self.dst_K, self.dst_width, self.dst_height)
return img, target
class RandomHSV:
def __init__(self, h_ratio, s_ratio, v_ratio):
self.h_ratio = h_ratio
self.s_ratio = s_ratio
self.v_ratio = v_ratio
def __call__(self, img, target):
img = distort_hsv(img, self.h_ratio, self.s_ratio, self.v_ratio)
return img, target
class RandomNoise:
def __init__(self, noise_ratio):
self.noise_ratio = noise_ratio
def __call__(self, img, target):
img = distort_noise(img, self.noise_ratio)
return img, target
class RandomSmooth:
def __init__(self, smooth_ratio):
self.smooth_ratio = smooth_ratio
def __call__(self, img, target):
img = distort_smooth(img, self.smooth_ratio)
return img, target
class ToTensor:
def __call__(self, img, target):
img = img.transpose(2, 0, 1)
img = torch.from_numpy(img).float()
target = target.to_tensor()
return img, target
class Normalize:
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, img, target):
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255
img = img - np.array(self.mean).reshape(1,1,3)
img = img / np.array(self.std).reshape(1,1,3)
return img, target
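# Hedged usage sketch (not part of the original module): a typical augmentation
# pipeline built from the transforms above. The intrinsics matrix, normalization
# constants, and the `target` object (expected to expose .K, .transform(...) and
# .to_tensor() as used above) are illustrative assumptions.
#
# transform = Compose([
#     RandomShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=15,
#                            dst_width=256, dst_height=256, dst_K=np.eye(3)),
#     RandomHSV(h_ratio=0.2, s_ratio=0.5, v_ratio=0.5),
#     Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
#     ToTensor(),
# ])
# img, target = transform(img, target)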
|
11582100
|
import io
import json
import textwrap
import time
from docker.errors import NotFound
DEFAULT_TEST_IMAGE_NAME = 'locationlabs/zzzdockertestimage'
BASE_IMAGE = "alpine:3.4"
def _normalize_image_id(image_id):
"""
The image IDs we get back from parsing "docker build" output are abbreviated to 12 hex digits.
In order to compare them to the ids we get from "docker.Client.images()" calls, we need to
normalize them
"""
if image_id is None:
return None
if image_id.startswith("sha256:"):
image_id = image_id[len("sha256:"):]
return image_id[0:12]
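# Illustrative example: both forms of an image ID normalize to the same
# 12-character prefix that "docker build" prints, e.g.
#   _normalize_image_id("sha256:4e38e38c8ce0...") -> "4e38e38c8ce0"
#   _normalize_image_id("4e38e38c8ce0")           -> "4e38e38c8ce0"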
def assert_images(docker_client, *image_ids):
"""
    Verify that, except for the base image used by ImageFactory, only the specified images exist.
"""
existing_image_ids = [_normalize_image_id(image["Id"]) for image in docker_client.images()
if BASE_IMAGE not in image["RepoTags"]]
assert set(existing_image_ids) == set(_normalize_image_id(image_id) for image_id in image_ids)
assert len(existing_image_ids) == len(image_ids)
class ImageFactory:
def __init__(self, docker_client, name=DEFAULT_TEST_IMAGE_NAME, base_image=BASE_IMAGE):
self.docker_client = docker_client
self.counter = 1
self.name = name
self.base_image = base_image
self.image_ids = []
def add(self, tag, *other_tags):
return self.add_named(self.name, tag, *other_tags)
def add_named(self, name, tag, *other_tags):
        # The "Created" timestamp on images has 1-second granularity.
# So if we create images too quickly, the order of creation won't necessarily match
# the order we get when we sort containers by the "Created" field - and our unit tests
# won't do what we expect.
# So, add an automatic delay to the image factory to avoid this issue.
# (This is a simple implementation that is guaranteed to do the job. I tried a more complex
# solution, but I was surprised to see that waiting 1 second is sometimes not enough.)
if self.counter != 1:
time.sleep(2)
dockerfile = textwrap.dedent("""\
FROM {base_image}
MAINTAINER Unit Testing
RUN echo "Test image {name}:{counter}" > /content.txt
CMD sleep 999
""".format(base_image=self.base_image, name=name, counter=self.counter))
dockerfile_bytes = io.BytesIO(dockerfile.encode('utf-8'))
self.counter += 1
response_gen = self.docker_client.build(
fileobj=dockerfile_bytes,
tag='{}:{}'.format(name, tag),
rm=True,
forcerm=True)
response = [json.loads(line) for line in response_gen]
response_stream = [obj['stream'][0:-1] for obj in response if 'stream' in obj]
for line in response_stream:
            print(line)
assert response_stream[-1].find('Successfully built') == 0
# grab and store image id
image_id = response_stream[-1].rsplit(' ', 1)[1]
self.image_ids.append(image_id)
for other_tag in other_tags:
self.docker_client.tag(image_id, name, other_tag, force=True)
return image_id
def cleanup(self):
cleaned = 0
for image_id in self.image_ids:
try:
self.docker_client.remove_image(image_id, force=True)
cleaned += 1
except NotFound:
pass
return cleaned
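# Hedged usage sketch (not part of the original module): requires a running Docker
# daemon and a low-level client exposing build()/images()/tag()/remove_image()
# (docker.APIClient in the current SDK, docker.Client in the older docker-py this
# code appears to target).
#
# import docker
# client = docker.APIClient()
# factory = ImageFactory(client)
# image_id = factory.add('latest', 'v1')   # build a tiny test image with two tags
# assert_images(client, image_id)          # only the new image (plus base) should exist
# factory.cleanup()                        # remove everything the factory built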
|
11582124
|
from __future__ import annotations
import fnmatch
import re
import threading
from collections import OrderedDict
from typing import Any, Dict, FrozenSet
LANGUAGE = 'en'
MINIMUM_WAIT = 60
EXTRA_WAIT = 30
EXTRA_WAIT_JOIN = 0 # Add this many seconds to the waiting time for each !join
WAIT_AFTER_JOIN = 25 # Wait at least this many seconds after the last join
# token bucket for the IRC client; 1 token = 1 message sent to IRC
# Run the bot with --lagtest to receive settings recommendations for this
IRC_TB_INIT = 23 # initial number of tokens
IRC_TB_DELAY = 1.73 # wait time between adding tokens
IRC_TB_BURST = 23 # maximum number of tokens that can be accumulated
# !wait uses a token bucket
WAIT_TB_INIT = 2 # initial number of tokens
WAIT_TB_DELAY = 240 # wait time between adding tokens
WAIT_TB_BURST = 3 # maximum number of tokens that can be accumulated
STATS_RATE_LIMIT = 60
VOTES_RATE_LIMIT = 60
ADMINS_RATE_LIMIT = 300
GSTATS_RATE_LIMIT = 0
PSTATS_RATE_LIMIT = 0
RSTATS_RATE_LIMIT = 0
TIME_RATE_LIMIT = 10
START_RATE_LIMIT = 10 # (per-user)
WAIT_RATE_LIMIT = 10 # (per-user)
GOAT_RATE_LIMIT = 300 # (per-user)
MIN_PLAYERS = 6
MAX_PLAYERS = 24
NIGHT_TIME_LIMIT = 120
NIGHT_TIME_WARN = 90 # should be less than NIGHT_TIME_LIMIT
DAY_TIME_LIMIT = 720
DAY_TIME_WARN = 600 # should be less than DAY_TIME_LIMIT
JOIN_TIME_LIMIT = 3600
# May only be set if the above are also set
SHORT_DAY_PLAYERS = 6 # Number of players left to have a short day
SHORT_DAY_LIMIT = 520
SHORT_DAY_WARN = 400
# If time lord dies, the timers get set to this instead (60s day, 30s night)
TIME_LORD_DAY_LIMIT = 60
TIME_LORD_DAY_WARN = 45
TIME_LORD_NIGHT_LIMIT = 30
TIME_LORD_NIGHT_WARN = 0
KILL_IDLE_TIME = 300
WARN_IDLE_TIME = 180
PM_WARN_IDLE_TIME = 240
PART_GRACE_TIME = 30
QUIT_GRACE_TIME = 60
ACC_GRACE_TIME = 30
START_QUIT_DELAY = 10
# controls how many recipients can be grouped into a single /msg; only applies when everyone gets the same message
MAX_PRIVMSG_TARGETS = 4
# how many mode values can be specified at once; used only as fallback
MODELIMIT = 3
QUIET_DEAD_PLAYERS = False
DEVOICE_DURING_NIGHT = False
ALWAYS_PM_ROLE = False
QUIET_MODE = "q" # "q" or "b"
QUIET_PREFIX = "" # "" or "~q:"
ACCOUNT_PREFIX = "$a:" # "$a:" or "~a:"
# The bot will automatically toggle those modes of people joining
AUTO_TOGGLE_MODES = ""
DEFAULT_EXPIRY = "30d"
LEAVE_PENALTY = 1
LEAVE_EXPIRY = "30d"
IDLE_PENALTY = 1
IDLE_EXPIRY = "30d"
PART_PENALTY = 1
PART_EXPIRY = "30d"
ACC_PENALTY = 1
ACC_EXPIRY = "30d"
# Give penalties if idling night.
# All other penalties take precedence over night penalties; only one penalty will be given per game.
NIGHT_IDLE_PENALTY = 1
NIGHT_IDLE_EXPIRY = "14d"
# If True, disallows adding stasis via !fstasis (requires warnings instead)
RESTRICT_FSTASIS = True
# The formatting of this sucks, sorry. This is used to automatically apply sanctions to warning levels
# When a user crosses from below the min threshold to min or above points, the listed sanctions apply
# Sanctions also apply while moving within the same threshold bracket (such as from min to max)
# Valid sanctions are deny, stasis, scalestasis, and tempban
# Scalestasis applies stasis equal to the formula ax^2 + bx + c, where x is the number of warning points
# Tempban number can either be a duration (ending in d, h, or m) or a number meaning it expires when
# warning points fall below that threshold.
AUTO_SANCTION = (
#min max sanctions
(6, 10, {"stasis": 1}),
(11, 15, {"scalestasis": (0, 1, -8)}),
(16, 16, {"tempban": 8})
)
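# Worked example (illustrative only): a player going from 5 to 12 warning points
# lands in the (11, 15) bracket above, so scalestasis (a, b, c) = (0, 1, -8)
# applies: 0*12**2 + 1*12 - 8 = 4 games of stasis.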
# Send a message to deadchat or wolfchat when a user spectates them
SPECTATE_NOTICE = True
# Whether to include which user is doing the spectating in the message
SPECTATE_NOTICE_USER = False
# The following is a bitfield; the flags below can be combined
# Defaults to none of these, can be changed on a per-game-mode basis
RESTRICT_WOLFCHAT = 0x00
### DO NOT CHANGE THESE!
### They are for easier code interpretation/modification
RW_DISABLE_NIGHT = 0x01 # Disable during night (commands are still relayed)
RW_DISABLE_DAY = 0x02 # Disable during day (commands are still relayed)
RW_ONLY_KILL_CMD = 0x04 # Only relay kill commands when wolfchat is disabled
RW_ONLY_SAME_CMD = 0x08 # Only relay commands to other people who have access to the same command
RW_WOLVES_ONLY_CHAT = 0x10 # Non-wolves cannot participate in wolfchat (commands still relayed as applicable)
RW_NO_INTERACTION = 0x20 # Do not relay commands to/from non-wolves regardless of other settings
RW_REM_NON_WOLVES = 0x40 # Remove non-wolves from wolfchat entirely (can be killed, do not count towards wolf win condition, do not show in wolflist, etc.)
RW_TRAITOR_NON_WOLF = 0x80 # Consider traitor as a non-wolf for the purposes of the above restrictions (if unset, traitor is treated the same as wolf cub)
ENABLE_DEADCHAT = True # dead players can communicate with each other
ABSTAIN_ENABLED = True # whether village can !abstain in order to not vote anyone during day
LIMIT_ABSTAIN = True # if true, village will be limited to successfully !abstaining a vote only once
SELF_LYNCH_ALLOWED = True
HIDDEN_TRAITOR = True
HIDDEN_AMNESIAC = False # amnesiac still shows as amnesiac if killed even after turning
HIDDEN_CLONE = False
GUARDIAN_ANGEL_CAN_GUARD_SELF = True
START_WITH_DAY = False
ROLE_REVEAL = "on" # on/off/team - what role information is shown on death
STATS_TYPE = "default" # default/accurate/team/disabled - what role information is shown when doing !stats
START_VOTES_SCALE = 0.3
START_VOTES_MAX = 4
# Debug mode settings, whether or not timers and stasis should apply during debug mode
DISABLE_DEBUG_MODE_TIMERS = True
DISABLE_DEBUG_MODE_TIME_LORD = False
DISABLE_DEBUG_MODE_REAPER = True
DISABLE_DEBUG_MODE_STASIS = True
DEBUG_MODE_NOTHROW_MESSAGES = True
# number of bullets a gunner role gets when the role is assigned or swapped in
SHOTS_MULTIPLIER = {
"gunner": 0.12,
"sharpshooter": 0.06,
"wolf gunner": 0.06
}
# hit, miss, and headshot chances for each gunner role (explode = 1 - hit - miss)
GUN_CHANCES = {
"gunner": (15/20, 4/20, 4/20), # 75% hit, 20% miss, 5% explode, 20% headshot
"sharpshooter": (1, 0, 1), # 100% hit, 0% miss, 0% explode, 100% headshot
"wolf gunner": (14/20, 6/20, 12/20) # 70% hit, 30% miss, 0% explode, 60% headshot
}
# modifier applied to regular gun chances if the user is also drunk
DRUNK_GUN_CHANCES = (-5/20, 4/20, -3/20) # -25% hit, +20% miss, +5% explode, -15% headshot
DRUNK_SHOTS_MULTIPLIER = 3
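# Worked example (illustrative only): a drunk regular gunner combines the tuples
# above: (15/20 - 5/20, 4/20 + 4/20, 4/20 - 3/20) = 50% hit, 40% miss, 5% headshot,
# and explode = 1 - hit - miss = 10%.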
GUNNER_KILLS_WOLF_AT_NIGHT_CHANCE = 1/4
# at night, the wolf can steal 1 bullet from the victim and become a wolf gunner
# (will always be 1 bullet regardless of SHOTS_MULTIPLIER setting for wolf gunner above)
WOLF_STEALS_GUN = True
GUARDIAN_ANGEL_DIES_CHANCE = 0
BODYGUARD_DIES_CHANCE = 0
DETECTIVE_REVEALED_CHANCE = 2/5
FALLEN_ANGEL_KILLS_GUARDIAN_ANGEL_CHANCE = 1/2
AMNESIAC_NIGHTS = 3 # amnesiac gets to know their actual role on this night
DOCTOR_IMMUNIZATION_MULTIPLIER = 0.135 # ceil(num_players * multiplier) = number of immunizations
GAME_MODES = {}
GAME_PHASES = ("night", "day") # all phases that constitute "in game", game modes can extend this with custom phases
# IP address to bind to before connecting, or empty string to use OS default
BINDHOST = ""
# Disable CPRIVMSG/CNOTICE -- some ircds implicitly treat regular PRIVMSG and NOTICE as such, and support more
# targets per message the normal way than with the explicit command
DISABLE_CPRIVMSG = False
SSL_VERIFY = True
SSL_CERTFP = ()
# Tracking Mozilla's "intermediate" compatibility list -- https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29
SSL_CIPHERS = ( # single string split over multiple lines - lack of commas intentional
"ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-"
"SHA384:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-"
"SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-"
"AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:DHE-RSA-AES256-SHA:ECDHE-ECDSA-DES-CBC3-SHA:ECDHE-RSA-DES-CBC3-"
"SHA:EDH-RSA-DES-CBC3-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:DES-CBC3-SHA:!DSS"
)
SSL_CERTFILE = None
SSL_KEYFILE = None
NICKSERV = "NickServ"
NICKSERV_IDENTIFY_COMMAND = "IDENTIFY {account} {password}"
NICKSERV_GHOST_COMMAND = "GHOST {nick}"
NICKSERV_RELEASE_COMMAND = "RELEASE {nick}"
NICKSERV_REGAIN_COMMAND = "REGAIN {nick}"
CHANSERV = "ChanServ"
CHANSERV_OP_COMMAND = "OP {channel}"
GUEST_NICK_PATTERN = r"^Guest\d+$|^\d|away.+|.+away"
LOG_CHANNEL = "" # Log !fwarns to this channel, if set
LOG_PREFIX = "" # Message prefix for LOG_CHANNEL
DEV_CHANNEL = ""
DEV_PREFIX = ""
# Data collection settings. lykos will send details about errors that happen to the lykos developers,
# these settings control how much data is sent. Please see https://werewolf.chat/dc for more information.
# These settings additionally impact what data is written to the error log.
TRACEBACK_VERBOSITY = 2 # 0 = no locals at all, 1 = innermost frame's locals, 2 = all locals
USER_DATA_LEVEL = 0 # 0 = fully anonymize users, 1 = expose nick only, 2 = expose full hostmask, account, and channel membership
CHANNEL_DATA_LEVEL = 0 # 0 = fully anonymize channels, 1 = expose channel name
# How often to ping the server (in seconds) to detect unclean disconnection
SERVER_PING_INTERVAL = 120
# The default role can be anything, but HIDDEN_ROLE must be either "villager" or "cultist";
# hidden roles are informed they are HIDDEN_ROLE (ergo that role should not have any abilities),
# and win with that role's team. Seer sees all non-safe and non-cursed roles as HIDDEN_ROLE.
DEFAULT_ROLE = "villager"
HIDDEN_ROLE = "villager"
# Roles listed here cannot be used in !fgame roles=blah.
DISABLED_ROLES: FrozenSet[str] = frozenset()
# Game modes that cannot be randomly picked or voted for
DISABLED_GAMEMODES: FrozenSet[str] = frozenset()
# Commands listed here cannot be used by anyone (even admins/owners)
DISABLED_COMMANDS: FrozenSet[str] = frozenset()
GIF_CHANCE = 1/50
ALL_FLAGS = frozenset("AaDdFgjmNpSsw")
GRAVEYARD_LOCK = threading.RLock()
WARNING_LOCK = threading.RLock()
# vim: set sw=4 expandtab:
|
11582164
|
import unittest
from datetime import datetime, timedelta
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db import IntegrityError
from django.test import TestCase
from django.test.client import Client
from django.utils.translation import ugettext as _
from wouso.core import scoring
from wouso.core.magic.templatetags.artifacts import artifact, spell_due, artifact_full
from wouso.core.scoring.models import Coin, Formula
from wouso.core.tests import WousoTest
from wouso.core.user.models import Player
from wouso.core.magic.models import Spell
from wouso.games.challenge.models import Challenge, ChallengeUser, ChallengeGame
from wouso.games.qotd.models import QotdUser
from wouso.games.qotd.tests import _make_question_for_today
from wouso.interface.activity.models import Activity
from models import *
from manager import MagicManager
class ManagerTestCase(WousoTest):
""" Test the core.magic.manager.Manager helper.
"""
def setUp(self):
self.user = User.objects.create(username='test')
self.player = self.user.get_profile()
def test_manager_properties(self):
self.assertTrue(self.player.magic)
self.assertIsInstance(self.player.magic, MagicManager)
self.assertEqual(self.player.magic.spells.count(), 0)
self.assertEqual(self.player.magic.spells_cast.count(), 0)
self.assertEqual(self.player.magic.spells_available.count(), 0)
self.assertEqual(self.player.magic.artifact_amounts.count(), 0)
self.assertEqual(self.player.magic.spell_amounts.count(), 0)
self.assertFalse(self.player.magic.has_modifier('inexistent-modifier'))
        self.assertEqual(self.player.magic.modifier_percents('inexistent-modifier'), 100)  # defaults to 100 when no modifier is present
def test_manager_use_modifier(self):
Artifact.objects.create(name='modifier-name')
self.player.magic.give_modifier('modifier-name', 1)
self.assertTrue(self.player.magic.has_modifier('modifier-name'))
self.player.magic.use_modifier('modifier-name', 1)
self.assertFalse(self.player.magic.has_modifier('modifier-name'))
def test_cast_spell(self):
spell1 = Spell.objects.create(name='le-spell')
spell2 = Spell.objects.create(name='le-spell2', mass=True, type='o')
v = []
for i in range(0, 7):
player = self._get_player(i + 2)
player.points = 10-i
player.save()
v.append(player)
v[3].magic.add_spell(spell2)
neigh = v[3].get_neighbours_from_top(2)
neigh = v[3].magic.filter_players_by_spell(neigh, spell2)
v[3].magic.mass_cast(spell2, neigh, datetime.now()+timedelta(days=1))
for i in [1, 2, 4, 5]:
self.assertTrue(v[i].magic.is_spelled)
self.assertTrue(v[3].magic.is_spelled)
v[6].magic.cast_spell(spell1, v[0], datetime.now()+timedelta(days=1))
self.assertFalse(v[6].magic.is_spelled)
v[0].magic.add_spell(spell1)
v[6].magic.cast_spell(spell1, v[0], datetime.now()+timedelta(days=1))
self.assertTrue(v[6].magic.is_spelled)
class ModifierTest(TestCase):
def test_path_simple(self):
m = Modifier(name='cici')
self.assertTrue(m.path)
self.assertEqual(m.path, 'cici')
def test_path_image(self):
m = Modifier(name='cici')
m.image = 'test.jpg'
self.assertTrue('test.jpg' in m.path)
class ArtifactTestCase(TestCase):
def testArtifactCreateUnique(self):
""" Test if we cannot create two artifacts with the same name in a group
"""
group = ArtifactGroup.objects.create(name='gigi')
a1 = Artifact.objects.create(group=group, name='name')
self.assertRaises(IntegrityError, Artifact.objects.create, group=group, name='name')
def test_no_artifact_behavior(self):
noartifact = NoArtifactLevel(1)
self.assertTrue(artifact(noartifact))
class SpellTestCase(WousoTest):
def test_expired(self):
player = self._get_player()
spell = Spell.objects.create(name='test-spell', available=True, price=10)
obs = PlayerSpellDue.objects.create(player=player, source=player, spell=spell, due=datetime.now() + timedelta(days=1))
self.assertFalse(PlayerSpellDue.get_expired(datetime.today()))
obs.due = datetime.now() - timedelta(days=1)
obs.save()
self.assertTrue(PlayerSpellDue.get_expired(datetime.today()))
self.assertIn(obs, PlayerSpellDue.get_expired(datetime.today()))
obs.due = datetime.now() - timedelta(days=1)
obs.save()
# Run management task: should delete expired dues
Bazaar.management_task()
self.assertFalse(PlayerSpellDue.get_expired(datetime.today()))
def test_dispell(self):
"""
Test if dispell works on a player
"""
player = self._get_player()
pos_spell = Spell.objects.create(name='positive-test-spell', available=True, price=10, type='p')
neg_spell = Spell.objects.create(name='negative-test-spell', available=True, price=10, type='n')
dispell = Spell.objects.create(name='dispell', available=True, price=20, type='o')
player.magic.add_spell(dispell)
obs = PlayerSpellDue.objects.create(player=player, source=player, spell=pos_spell, due=datetime.now() + timedelta(days=1))
obs = PlayerSpellDue.objects.create(player=player, source=player, spell=neg_spell, due=datetime.now() + timedelta(days=1))
self.assertTrue(player.magic.spells) # Check if there is an active spell on player
player.magic.cast_spell(dispell, player, datetime.now())
self.assertFalse(player.magic.spells) # No spells should be active on player after dispell
def test_dispell_no_due(self):
"""
Dispell should not remain active on player after cast
"""
player = self._get_player()
dispell = Spell.objects.create(name='dispell', available=True, price=20, type='o')
player.magic.add_spell(dispell)
player.magic.cast_spell(dispell, player)
self.assertFalse(PlayerSpellDue.objects.filter(player=player))
def test_cure_negative(self):
"""
Test if cure works on a negative spell
"""
player = self._get_player()
spell = Spell.objects.create(name='test-spell', available=True, price=10, type='n')
cure = Spell.objects.create(name='cure', available=True, price=10)
obs = PlayerSpellDue.objects.create(player=player, source=player, spell=spell, due=datetime.now() + timedelta(days=1))
player.magic.add_spell(cure)
player.magic.cast_spell(cure, player, datetime.now() + timedelta(days=1))
self.assertFalse(PlayerSpellDue.objects.filter(player=player)) # There isn't any spell left
def test_cure_positive(self):
"""
Cure should not remove positive spells
"""
player = self._get_player()
spell = Spell.objects.create(name='test-spell', available=True, price=10, type='p')
cure = Spell.objects.create(name='cure', available=True, price=10)
obs = PlayerSpellDue.objects.create(player=player, source=player, spell=spell, due=datetime.now() + timedelta(days=1))
player.magic.add_spell(cure)
player.magic.cast_spell(cure, player, datetime.now() + timedelta(days=1))
self.assertTrue(PlayerSpellDue.objects.filter(player=player)) # The spell is still present
def test_disguise_simple(self):
"""
Test if top-disguise spell works
"""
player = self._get_player()
Coin.add('points')
scoring.score_simple(player, 'points', 10)
self.assertEqual(player.points, 10)
disguise = Spell.objects.create(name='top-disguise', available=True, price=10, percents=50, type='s')
player.magic.add_spell(disguise)
player.magic.cast_spell(disguise, player, datetime.now() + timedelta(days=1))
self.assertTrue(player.magic.has_modifier('top-disguise'))
self.assertEqual(player.points, 15)
def test_disguise_expire_on_dispell(self):
player = self._get_player()
Coin.add('points')
scoring.score_simple(player, 'points', 10)
disguise = Spell.objects.create(name='top-disguise', available=True, price=10, percents=50, type='s')
player.magic.add_spell(disguise)
player.magic.cast_spell(disguise, player, datetime.now() + timedelta(days=1))
self.assertEqual(player.points, 15)
dispell = Spell.objects.create(name='dispell', available=True, price=10)
player.magic.add_spell(dispell)
player.magic.cast_spell(dispell, player)
self.assertFalse(player.magic.has_modifier('top-disguise'))
player = Player.objects.get(pk=player.pk)
self.assertEqual(player.points, 10)
def test_paralyze(self):
"""
Test if Paralyze spell works
"""
Formula.add('chall-warranty')
player = self._get_player()
chall_user = player.get_extension(ChallengeUser)
# Check if player can launch before spell is cast
self.assertTrue(chall_user.can_launch())
# Create and add spell to user
paralyze = Spell.objects.create(name='challenge-cannot-challenge', available=True, price=10, percents=100, type='n')
obs = PlayerSpellDue.objects.create(player=chall_user, source=chall_user, spell=paralyze, due=datetime.now() + timedelta(days=1))
# Check if player has the modifier
self.assertTrue(chall_user.magic.has_modifier('challenge-cannot-challenge'))
# Player should not be able to launch challenge with Paralyze on
self.assertFalse(chall_user.can_launch())
@unittest.skip
def test_evade(self):
"""
Test for Evade spell
"""
player = self._get_player()
player2 = self._get_player(2)
initial_points = 10
scoring.setup_scoring()
Coin.add('points')
scoring.score_simple(player, 'points', initial_points)
self.assertEqual(player.points, initial_points)
# Create and apply evade
evade = Spell.objects.create(name='challenge-evade', available=True, price=25, percents=100, type='p')
obs = PlayerSpellDue.objects.create(player=player, source=player, spell=evade, due=datetime.now() + timedelta(days=1))
self.assertTrue(player.magic.has_modifier('challenge-evade'))
# Get 'chall-lost' expression. By default you still win 2 points when losing a challenge
formulas = ChallengeGame.get_formulas()
exp = formulas[1]['expression'] # this will be 'points=XX'
index = exp.find('=') + 1 # get position of '='
points = int(exp[index:]) # get XX (nr of points won when losing challenge)
# Create challenge and make first player lose it
chall = Challenge.create(user_from=player2, user_to=player, ignore_questions=True)
chall.set_won_by_player(player2)
# If evade spell worked losing player should have initial_points + 'chall-lost' points
# Evade has 20% chance of activation so play challenge in loop while it activates
while player.points != initial_points + points:
player.points = initial_points
chall.set_expired()
chall = Challenge.create(user_from=player2, user_to=player, ignore_questions=True)
chall.set_won_by_player(player2)
# Check if final score is ok
self.assertEqual(player.points, initial_points + points)
def test_frenzy_win(self):
"""
If user wins while affected by frenzy he should win frenzy.percents more points
"""
initial_points = 100
win_points = 10
player_frenzy = self._get_player(1).get_extension(ChallengeUser)
player_dummy = self._get_player(2).get_extension(ChallengeUser)
scoring.setup_scoring()
Coin.add('points')
scoring.score_simple(player_frenzy, 'points', initial_points)
formula = Formula.get('chall-won')
formula.expression = 'points=' + str(win_points)
formula.save()
# Apply frenzy
frenzy = Spell.objects.create(name='challenge-affect-scoring', available=True, price=25, percents=66, type='o')
obs = PlayerSpellDue.objects.create(player=player_frenzy, source=player_frenzy, spell=frenzy, due=datetime.now() + timedelta(days=1))
# Win challenge
chall = Challenge.create(user_from=player_frenzy, user_to=player_dummy, ignore_questions=True)
chall.set_won_by_player(player_frenzy)
# Player should win frenzy.percents more points with frenzy applied
target_points = initial_points + win_points + frenzy.percents / 100.0 * win_points
self.assertEqual(player_frenzy.player_ptr.points, target_points)
def test_frenzy_loss(self):
"""
If user loses while affected by frenzy he should lose frenzy.percents more points
"""
initial_points = 100
loss_points = -10
warranty_points = -3
player_frenzy = self._get_player(1).get_extension(ChallengeUser)
player_dummy = self._get_player(2).get_extension(ChallengeUser)
scoring.setup_scoring()
Coin.add('points')
scoring.score_simple(player_frenzy, 'points', initial_points)
formula = Formula.get('chall-lost')
formula.expression = 'points=' + str(loss_points)
formula.save()
formula = Formula.get('chall-warranty')
formula.expression = 'points=' + str(warranty_points)
formula.save()
# Apply frenzy
frenzy = Spell.objects.create(name='challenge-affect-scoring', available=True, price=25, percents=66, type='o')
obs = PlayerSpellDue.objects.create(player=player_frenzy, source=player_frenzy, spell=frenzy, due=datetime.now() + timedelta(days=1))
# Win challenge with dummy player to see the amount of points lost by the player affected with frenzy
chall = Challenge.create(user_from=player_frenzy, user_to=player_dummy, ignore_questions=True)
chall.set_won_by_player(player_dummy)
# Player should lose frenzy.percents more points with frenzy applied
target_points = initial_points + loss_points + frenzy.percents / 100.0 * loss_points + warranty_points
self.assertEqual(player_frenzy.player_ptr.points, target_points)
def test_weakness(self):
"""
Test for Weakness spell
"""
initial_points = 100
win_points = 10
player_weakness = self._get_player(1).get_extension(ChallengeUser)
player_dummy = self._get_player(2).get_extension(ChallengeUser)
scoring.setup_scoring()
Coin.add('points')
scoring.score_simple(player_weakness, 'points', initial_points)
formula = Formula.get('chall-won')
formula.expression = 'points=' + str(win_points)
formula.save()
# Apply weakness
weakness = Spell.objects.create(name='challenge-affect-scoring-lost', available=True, price=10, percents=-66, type='n')
obs = PlayerSpellDue.objects.create(player=player_weakness, source=player_weakness, spell=weakness, due=datetime.now() + timedelta(days=1))
# Win challenge with player_weakness
chall = Challenge.create(user_from=player_weakness, user_to=player_dummy, ignore_questions=True)
chall.set_won_by_player(player_weakness)
# Player should win weakness.percents less points with weakness applied
target_points = initial_points + win_points + weakness.percents / 100.0 * win_points
self.assertEqual(player_weakness.player_ptr.points, target_points)
def test_charge(self):
"""
Test for Charge spell
"""
initial_points = 100
win_points = 10
player_charge = self._get_player(1).get_extension(ChallengeUser)
player_dummy = self._get_player(2).get_extension(ChallengeUser)
scoring.setup_scoring()
Coin.add('points')
scoring.score_simple(player_charge, 'points', initial_points)
formula = Formula.get('chall-won')
formula.expression = 'points=' + str(win_points)
formula.save()
# Apply charge
charge = Spell.objects.create(name='challenge-affect-scoring-won', available=True, price=10, percents=33, type='p')
obs = PlayerSpellDue.objects.create(player=player_charge, source=player_charge, spell=charge, due=datetime.now() + timedelta(days=1))
chall = Challenge.create(user_from=player_charge, user_to=player_dummy, ignore_questions=True)
chall.set_won_by_player(player_charge)
# Player should win weakness.percents more points with charge applied
target_points = initial_points + win_points + charge.percents / 100.0 * win_points
self.assertEqual(player_charge.player_ptr.points, target_points)
def test_weakness_and_charge(self):
"""
If both Weakness and Charge are active, a player should win weakness.percents + charge.percents less/more points
after winning a challenge
"""
initial_points = 100
win_points = 10
player = self._get_player(1).get_extension(ChallengeUser)
player_dummy = self._get_player(2).get_extension(ChallengeUser)
scoring.setup_scoring()
Coin.add('points')
scoring.score_simple(player, 'points', initial_points)
formula = Formula.get('chall-won')
formula.expression = 'points=' + str(win_points)
formula.save()
# Apply charge
charge = Spell.objects.create(name='challenge-affect-scoring-won', available=True, price=10, percents=33, type='p')
obs = PlayerSpellDue.objects.create(player=player, source=player, spell=charge, due=datetime.now() + timedelta(days=1))
# Apply weakness
weakness = Spell.objects.create(name='challenge-affect-scoring-won', available=True, price=10, percents=-66, type='p')
obs = PlayerSpellDue.objects.create(player=player, source=player, spell=weakness, due=datetime.now() + timedelta(days=1))
chall = Challenge.create(user_from=player, user_to=player_dummy, ignore_questions=True)
chall.set_won_by_player(player)
percents = (charge.percents + weakness.percents) / 100.0
target_points = initial_points + win_points + percents * win_points
self.assertEqual(player.player_ptr.points, target_points)
def test_blind(self):
"""
Test for Blind spell
"""
# Create a question and a test user
super_user = self._get_superuser()
qotd_user = self._get_player(1)
qotd_user = qotd_user.get_extension(QotdUser)
scoring.setup_scoring()
question = _make_question_for_today(super_user, 'question1')
c = Client()
c.login(username='testuser1', password='<PASSWORD>')
# Cast blind on qotd_user
blind = Spell.objects.create(name='qotd-blind', available=True, price=10, type='n')
PlayerSpellDue.objects.create(player=qotd_user, source=qotd_user, spell=blind, due=datetime.now() + timedelta(days=1))
self.assertTrue(qotd_user.magic.has_modifier('qotd-blind'))
# Check if it blocks the user from answering the Question of the Day
response = c.get(reverse('qotd_index_view'), follow=True)
self.assertContains(response, "You have been blinded, you cannot answer to the Question of the Day")
class TemplatetagsTest(WousoTest):
def test_spell_due(self):
player = self._get_player()
spell = Spell.objects.create(name='test-spell', available=True, price=10)
obs = PlayerSpellDue.objects.create(player=player, source=player, spell=spell, due=datetime.now() + timedelta(days=1))
self.assertTrue(spell_due(obs))
def test_artifact_full(self):
self.assertFalse(artifact_full(None))
player = self._get_player()
self.assertTrue(artifact_full(player.level))
class TestMagicViews(WousoTest):
def setUp(self):
super(TestMagicViews, self).setUp()
self.p1 = self._get_player(1)
self.p2 = self._get_player(2)
self.p1.points = 500
self.p1.save()
self.spell_1 = Spell.objects.create(name='spell1', title='Spell no. 1')
self.spell_2 = Spell.objects.create(name='spell2', title='Spell no. 2')
self.c = Client()
self.c.login(username='testuser1', password='<PASSWORD>')
self.activity = Activity.objects.create(user_from=self.p1, user_to=self.p2,
action='gold-won')
scoring.setup_scoring()
def test_buy_spell(self):
Coin.add('gold')
Formula.add('buy-spell', expression="gold=-{price}")
spell = Spell.objects.create(name='test-spell', available=True, price=10)
player = User.objects.create_user('test', '<EMAIL>', password='<PASSWORD>').get_profile()
scoring.score_simple(player, 'gold', 100)
self.assertEqual(player.coins['gold'], 100)
response = self.client.get(reverse('bazaar_home'))
self.assertTrue('test-spell' in response.content)
self.client.login(username='test', password='<PASSWORD>')
response = self.client.get(reverse('bazaar_buy', kwargs={'spell': spell.id}))
self.assertFalse('error' in response.content)
player = Player.objects.get(user__username='test')
self.assertEqual(player.coins['gold'], 90)
def test_bazaar_view(self):
response = self.c.get(reverse('bazaar_home'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Bazaar')
self.assertContains(response, 'Exchange')
self.assertContains(response, 'Rate')
self.assertContains(response, 'testuser1')
self.assertContains(response, 'testuser2')
self.assertContains(response, 'Spell no. 1')
self.assertContains(response, 'Spell no. 2')
def test_bazaar_exchange_success_message(self):
data = {'points': 10}
response = self.c.post(reverse('bazaar_exchange'), data)
self.assertContains(response, _('Converted successfully'))
def test_bazaar_exchange_error_message(self):
data = {'points': 1000}
response = self.c.post(reverse('bazaar_exchange'), data)
self.assertContains(response, _('Insufficient points'))
response = self.c.get(reverse('bazaar_exchange'))
self.assertContains(response, _('Expected post'))
def test_magic_cast_error_message(self):
data = {'days': 10, 'spell': self.spell_1.id}
self.p1.magic.add_spell(self.spell_1)
response = self.c.post(reverse('magic_cast', args=[self.p2.id]), data)
self.assertContains(response, _('Invalid number of days'))
|
11582218
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='netCDF4-enhancement',
version='0.1.2',
author='<NAME>',
author_email='<EMAIL>',
description='Extends the default NetCDF4 driver by providing helpful'
' functionality like reading and writing to variable in'
' some chunks or dealing with variables regardless of'
' its dimension order. Principally directly extends netCDF4'
' Dataset class with new functionality. Covers most of the'
' functionality that is often the only reason why developers'
                ' choose to use stodgy libraries like xarray.',
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/david-salac/NetCDF4-variable-streamer",
packages=setuptools.find_packages(),
install_requires=['numpy', 'netCDF4'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
|
11582221
|
import logging
import numpy as np
from . import IRCdata, IRCfollowing, addIntcos, convcheck, hessian, history, intcosMisc
from . import optparams as op
from . import stepAlgorithms, testB
from .exceptions import AlgError, IRCendReached, OptError
from .linearAlgebra import lowest_eigenvector_symm_mat, symm_mat_inv, symm_mat_root
from .molsys import Molsys
from .printTools import print_array_string, print_geom_grad, print_mat_string
def optimize(o_molsys, computer):
"""Driver for OptKing's optimization procedure. Suggested that users use optimize_psi4 or
optimize_qcengine to perform a normal (full) optimization
Parameters
----------
o_molsys : cls
optking molecular system
computer : compute_wrappers.ComputeWrapper
Returns
-------
float, float or dict
energy and nuclear repulsion energy or MolSSI qc_schema_output as dict
"""
logger = logging.getLogger(__name__)
# Take care of some initial variable declarations
step_number = 0 # number of steps taken. Partial. IRC alg uses two step counters
irc_step_number = None
total_steps_taken = 0
H = 0 # hessian in internals
# Try to optimize one structure OR set of IRC points. OptError and all Exceptions caught below.
try:
# Prepare for multiple IRC computation
if op.Params.opt_type == "IRC":
irc_step_number = 0
IRCdata.history = IRCdata.IRCdata()
IRCdata.history.set_atom_symbols(o_molsys.atom_symbols)
# Why do we need to have IRCdata.history store its own copy?
IRCdata.history.set_step_size_and_direction(op.Params.irc_step_size, op.Params.irc_direction)
logger.debug("\tIRC data object created\n")
converged = False
# o_molsys = make_internal_coords(o_molsys)
if not o_molsys.intcos_present:
make_internal_coords(o_molsys)
logger.debug("Molecular systems after make_internal_coords:")
logger.debug(str(o_molsys))
# following loop may repeat over multiple algorithms OR over IRC points
while not converged:
try:
# if optimization coordinates are absent, choose them. Could be erased after AlgError
if not o_molsys.intcos_present:
make_internal_coords(o_molsys)
logger.debug("Molecular systems after make_internal_coords:")
logger.debug(str(o_molsys))
logger.info("\tStarting optimization algorithm.\n")
logger.info(str(o_molsys))
# Do special initial step-0 for each IRC point.
# For IRC point, we form/get the Hessian now.
if op.Params.opt_type == "IRC":
if irc_step_number == 0:
# Step along lowest eigenvector of mass-weighted Hessian.
logger.info("\tBeginning IRC from the transition state.\n")
logger.info("\tStepping along lowest Hessian eigenvector.\n")
H, gX = get_pes_info(H, computer, o_molsys, step_number, irc_step_number)
logger.debug(print_mat_string(H, title="Transformed Hessian in internals."))
# Add the transition state as the first IRC point
q_0 = o_molsys.q_array()
x_0 = o_molsys.geom
f_q = o_molsys.gradient_to_internals(gX, -1.0)
f_x = np.multiply(-1, gX)
E = computer.energies[-1]
IRCdata.history.add_irc_point(0, q_0, x_0, f_q, f_x, E)
irc_step_number += 1
# Lowest eigenvector of mass-weighted Hessian.
G = o_molsys.Gmat(massWeight=True)
G_root = symm_mat_root(G)
H_q_m = np.dot(np.dot(G_root, H), G_root.T)
vM = lowest_eigenvector_symm_mat(H_q_m)
logger.info(print_array_string(vM, title="Lowest evect of H_q_M"))
# Un mass-weight vector.
G_root_inv = symm_mat_inv(G_root, redundant=True)
v = np.dot(G_root_inv, vM)
if op.Params.irc_direction == "BACKWARD":
v *= -1
# end if IRCStepNumber == 0
else: # Step along gradient.
logger.info("\tBeginning search for next IRC point.\n")
logger.info("\tStepping along gradient.\n")
v = IRCdata.history.f_q()
irc_step_number += 1
IRCfollowing.compute_pivot_and_guess_points(o_molsys, v, op.Params.irc_step_size)
# end if 'IRC'
for step_number in range(op.Params.alg_geom_maxiter):
header = f"{'----------------------------':^74}"
header += f"\n{'Taking A Step: Step Number %d' % (step_number + 1):^90}"
header += f"\n{'----------------------------':^90}"
logger.info(header)
total_steps_taken += 1
H, gX = get_pes_info(H, computer, o_molsys, step_number, irc_step_number)
E = computer.energies[-1]
logger.info("%s", print_geom_grad(o_molsys.geom, gX))
if op.Params.print_lvl >= 4:
hessian.show(H, o_molsys)
f_q = o_molsys.gradient_to_internals(gX, -1.0)
o_molsys.apply_external_forces(f_q, H, step_number)
o_molsys.project_redundancies_and_constraints(f_q, H)
o_molsys.q_show()
if op.Params.test_B:
testB.test_b(o_molsys)
if op.Params.test_derivative_B:
testB.test_derivative_b(o_molsys)
# Check if forces indicate we are approaching minimum.
if op.Params.opt_type == "IRC" and irc_step_number > 2:
if IRCdata.history.test_for_irc_minimum(f_q):
logger.info("A minimum has been reached on the IRC. Stopping here.\n")
raise IRCendReached()
logger.info(print_array_string(f_q, title="Internal forces in au:"))
history.oHistory.append(o_molsys.geom, E, f_q) # Save initial step info.
history.oHistory.nuclear_repulsion_energy = computer.trajectory[-1]["properties"][
"nuclear_repulsion_energy"
]
# Analyze previous step performance; adjust trust radius accordingly.
# Returns true on first step (no history)
lastStepOK = history.oHistory.current_step_report()
# If step was bad, take backstep here or raise exception.
if lastStepOK:
history.oHistory.consecutiveBacksteps = 0
else:
# Don't go backwards until we've gone a few iterations.
if len(history.oHistory.steps) < 5:
logger.info("\tNear start of optimization, so ignoring bad step.\n")
elif history.History.consecutiveBacksteps < op.Params.consecutiveBackstepsAllowed:
history.History.consecutiveBacksteps += 1
logger.info(
"\tCalling for consecutive backstep number %d.\n" % history.History.consecutiveBacksteps
)
stepAlgorithms.take_step(o_molsys, E, f_q, H, stepType="BACKSTEP")
logger.info("\tStructure for next step (au):\n")
o_molsys.show_geom()
continue
elif op.Params.dynamic_level == 0: # not using dynamic level, so ignore.
logger.info("\tNo more backsteps allowed." + "Dynamic level is off.\n")
pass
else:
raise AlgError("Bad step, and no more backsteps allowed.")
if op.Params.opt_type == "IRC":
DqGuess = IRCdata.history.q_pivot() - IRCdata.history.q()
Dq = IRCfollowing.dq_irc(o_molsys, E, f_q, H, op.Params.irc_step_size, DqGuess)
else: # Displaces and adds step to history.
Dq = stepAlgorithms.take_step(o_molsys, E, f_q, H, op.Params.step_type, computer)
if op.Params.opt_type == "IRC":
converged = convcheck.conv_check(
step_number,
o_molsys,
Dq,
f_q,
computer.energies,
IRCdata.history,
)
logger.info("\tConvergence check returned %s." % converged)
if converged:
q_irc_point = o_molsys.q_array()
forces_irc_point = o_molsys.gradient_to_internals(gX, -1.0)
lineDistStep = IRCfollowing.calc_line_dist_step(o_molsys)
arcDistStep = IRCfollowing.calc_arc_dist_step(o_molsys)
IRCdata.history.add_irc_point(
irc_step_number,
q_irc_point,
o_molsys.geom,
forces_irc_point,
np.multiply(-1, gX),
computer.energies[-1],
lineDistStep,
arcDistStep,
)
IRCdata.history.progress_report()
else: # not IRC.
converged = convcheck.conv_check(step_number, o_molsys, Dq, f_q, computer.energies)
logger.info("\tConvergence check returned %s" % converged)
if converged: # changed from elif when above if statement active
logger.info("\tConverged in %d steps!" % (step_number + 1))
logger.info("\tFinal energy is %20.13f" % E)
logger.info("\tFinal structure (Angstroms): \n" + o_molsys.show_geom())
break # break out of step_number loop
logger.info("\tStructure for next step (au):\n" + o_molsys.show_geom())
# Hard quit if too many total steps taken (inc. all IRC points and algorithms).
if total_steps_taken == op.Params.geom_maxiter:
logger.error(
"\tTotal number of steps (%d) exceeds maximum allowed (%d).\n"
% (total_steps_taken, op.Params.geom_maxiter)
)
raise OptError(
"Maximum number of steps exceeded: {}.".format(op.Params.geom_maxiter),
"OptError",
)
else: # Associated with above for loop, executes if break is not reached
logger.error(
"\tNumber of steps (%d) exceeds maximum for algorithm (%d).\n"
% (step_number + 1, op.Params.alg_geom_maxiter)
)
raise AlgError("Maximum number of steps exceeded for algorithm")
# For IRC, save and queue up for the optimization of the next point.
if op.Params.opt_type == "IRC":
if irc_step_number == op.Params.irc_points:
logger.info(f"\tThe requested {op.Params.irc_points} IRC points have been obtained.")
raise IRCendReached()
else:
logger.info("\tStarting search for next IRC point.")
logger.info("\tClearing old constrained optimization history.")
history.oHistory.reset_to_most_recent() # delete old steps
converged = False
# Catch non-fatal algorithm errors and try modifying internals,
# changing run-levels, optimization parameters, etc. and start over again.
except AlgError as AF:
logger.error("\n\tCaught AlgError exception\n")
eraseIntcos = False
if AF.linearBends:
# New linear bends detected; Add them, and continue at current level.
# from . import bend # import not currently being used according to IDE
for l in AF.linearBends:
if l.bend_type == "LINEAR": # no need to repeat this code for "COMPLEMENT"
iF = addIntcos.check_fragment(l.atoms, o_molsys)
F = o_molsys.fragments[iF]
intcosMisc.remove_old_now_linear_bend(l.atoms, F.intcos)
F.add_intcos_from_connectivity()
eraseHistory = True
elif op.Params.dynamic_level == op.Params.dynamic_level_max:
logger.critical("\n\t Current algorithm/dynamic_level is %d.\n" % op.Params.dynamic_level)
logger.critical("\n\t Alternative approaches are not available or turned on.\n")
raise OptError("Maximum dynamic_level reached.")
else:
op.Params.dynamic_level += 1
logger.warning("\n\t Increasing dynamic_level algorithm to %d.\n" % op.Params.dynamic_level)
logger.warning("\n\t Erasing old history, hessian, intcos.\n")
eraseIntcos = True
eraseHistory = True
op.Params.updateDynamicLevelParameters(op.Params.dynamic_level)
if eraseIntcos:
logger.warning("\n\t Erasing coordinates.\n")
for f in o_molsys.fragments:
del f.intcos[:]
if eraseHistory:
logger.warning("\n\t Erasing history.\n")
step_number = 0
del H
H = 0
del history.oHistory[:] # delete steps in history
history.oHistory.stepsSinceLastHessian = 0
history.oHistory.consecutiveBacksteps = 0
# print summary
logger.info("\tOptimization Finished\n" + history.oHistory.summary_string())
if op.Params.opt_type == "linesearch":
logger.info("\tObtaining gradient at the final geometry for line-search optimization\n")
# Calculate gradient to show user
gX = computer.compute(o_molsys.geom, driver="gradient", return_full=False)
del gX
qc_output = prepare_opt_output(o_molsys, computer, error=None)
del H
del history.oHistory[:]
o_molsys.clear()
del op.Params
return qc_output
# Expect to hit this error. not an issue
except IRCendReached:
logger.info("\t\tFinal IRC Point\n%s", o_molsys)
logger.info("Tabulating rxnpath results.")
IRCdata.history.progress_report()
np.multiply(-1, IRCdata.history.f_x(-1))
rxnpath = IRCdata.history.rxnpath_dict()
logger.info(rxnpath)
qc_output = prepare_opt_output(o_molsys, computer, rxnpath=rxnpath, error=None)
# delete some stuff
del H
del history.oHistory[:]
o_molsys.clear()
del op.Params
return qc_output
# Fatal error. Cannot proceed.
except OptError as error:
logger.critical("\tA critical optimization-specific error has occured.\n")
logger.critical("\tResetting all optimization options for potential queued jobs.\n")
logger.exception("Error Type: " + str(type(error)))
logger.exception("Error caught:" + str(error))
# Dump histories if possible
try:
logging.debug("\tDumping history: Warning last point not converged.\n" + history.oHistory.summary_string())
if op.Params.opt_type == "IRC":
logging.info("\tDumping IRC points completed")
IRCdata.history.progress_report()
del history.oHistory[:]
except NameError:
pass
rxnpath = None
if op.Params.opt_type == "IRC":
rxnpath = IRCdata.history.rxnpath_dict()
logger.debug(rxnpath)
qc_output = prepare_opt_output(o_molsys, computer, rxnpath=rxnpath, error=error)
del history.oHistory[:]
o_molsys.clear()
del op.Params
del computer
return qc_output
except Exception as error:
logger.critical("\tA non-optimization-specific error has occurred.\n")
logger.critical("\tResetting all optimization options for potential queued jobs.\n")
logger.exception("Error Type: " + str(type(error)))
logger.exception("Error caught:" + str(error))
rxnpath = None
if len(history.oHistory.steps) >= 1:
rxnpath = None
if op.Params.opt_type == "IRC":
rxnpath = IRCdata.history.rxnpath_dict()
logger.debug(rxnpath)
qc_output = prepare_opt_output(o_molsys, computer, rxnpath=rxnpath, error=error)
del history.oHistory[:]
o_molsys.clear()
del op.Params
del computer
return qc_output
def get_pes_info(H, computer, o_molsys, step_number, irc_step_number, hist=None):
"""Calculate, update, or guess hessian as appropriate. Calculate gradient, pulling
gradient from hessian output if possible.
Parameters
----------
H: np.ndarray
current Hessian
computer: compute_wrappers.ComputeWrapper
o_molsys : molsys.Molsys
step_number: int
irc_step_number: int
hist: history.History
Returns
-------
    tuple(np.ndarray, np.ndarray)
        Updated Hessian (in internals) and cartesian gradient.
"""
if hist is None:
hist = history.oHistory
logger = logging.getLogger(__name__)
if step_number == 0:
if op.OptParams.opt_type != "IRC":
if op.Params.full_hess_every > -1: # compute hessian at least once.
H, g_X = get_hess_grad(computer, o_molsys)
else:
logger.debug(f"Guessing Hessian with {str(op.Params.intrafrag_hess)}")
H = hessian.guess(o_molsys, guessType=op.Params.intrafrag_hess)
grad = computer.compute(o_molsys.geom, driver="gradient", return_full=False)
g_X = np.asarray(grad)
else: # IRC
if irc_step_number == 0:
# OLD COMMENT: Initial H chosen in pre-optimization.
"""hessian was calculated explicitly in IRC section of optimize at the time of this
comment. Moving here"""
# TODO read in hessian so only 1 needs to be calculated for IRC forward/backward
H, g_X = get_hess_grad(computer, o_molsys)
else:
                logger.critical("It should be impossible to hit this. Ever.")
                raise OptError(
                    f"irc_step_number is {irc_step_number} but step_number is "
                    f"{step_number}. Values not allowed."
                )
else:
if op.Params.full_hess_every < 1:
logger.debug(f"Updating Hessian with {str(op.Params.hess_update)}")
hist.hessian_update(H, o_molsys)
grad = computer.compute(o_molsys.geom, driver="gradient", return_full=False)
g_X = np.asarray(grad)
elif step_number % op.Params.full_hess_every == 0:
H, g_X = get_hess_grad(computer, o_molsys)
else:
logger.debug(f"Updating Hessian with {str(op.Params.hess_update)}")
hist.hessian_update(H, o_molsys)
grad = computer.compute(o_molsys.geom, driver="gradient", return_full=False)
g_X = np.asarray(grad)
logger.debug(print_mat_string(H, title="Hessian matrix"))
return H, g_X
def get_hess_grad(computer, o_molsys):
"""Compute hessian and fetch gradient from output if possible. Perform separate gradient
calculation if needed
Parameters
----------
computer: compute_wrappers.ComputeWrapper
o_molsys: molsys.Molsys
Returns
-------
tuple(np.ndarray, np.ndarray)
Notes
-----
Hessian is in internals gradient is in cartesian
"""
# Not sure why we need a copy here
logger = logging.getLogger(__name__)
logger.debug("Computing an analytical hessian")
xyz = o_molsys.geom.copy()
    # Use return_full=True so the gradient can be pulled from the hessian output
    # instead of running a separate gradient computation
ret = computer.compute(xyz, driver="hessian", return_full=True, print_result=False)
h_cart = np.asarray(ret["return_result"]).reshape(o_molsys.geom.size, o_molsys.geom.size)
try:
logger.debug("Looking for gradient in hessian output")
g_cart = ret["extras"]["qcvars"]["CURRENT GRADIENT"]
except KeyError:
logger.error("Could not find the gradient in qcschema")
grad = computer.compute(o_molsys.geom, driver="gradient", return_full=False)
g_cart = np.asarray(grad)
# Likely not at stationary point. Include forces
# ADDENDUM currently neglects forces term for all points - including non-stationary
H = o_molsys.hessian_to_internals(h_cart)
return H, g_cart
def make_internal_coords(o_molsys, params=None):
"""
Add optimization coordinates to molecule system.
May be called if coordinates have not been added yet, or have been removed due to an
algorithm error (bend going linear, or energy increasing, etc.).
Parameters
----------
o_molsys: Molsys
current molecular system.
params: OptParams object or else use default module level
Returns
-------
o_molsys: Molsys
The molecular system updated with internal coordinates.
"""
if params is None:
params = op.Params
optimize_log = logging.getLogger(__name__)
optimize_log.debug("\t Adding internal coordinates to molecular system")
# Use covalent radii to determine bond connectivity.
connectivity = addIntcos.connectivity_from_distances(o_molsys.geom, o_molsys.Z)
optimize_log.debug("Connectivity Matrix\n" + print_mat_string(connectivity))
if params.frag_mode == "SINGLE":
# Make a single, supermolecule.
o_molsys.consolidate_fragments() # collapse into one frag (if > 1)
o_molsys.split_fragments_by_connectivity() # separate by connectivity
# increase connectivity until all atoms are connected
o_molsys.augment_connectivity_to_single_fragment(connectivity)
o_molsys.consolidate_fragments() # collapse into one frag
if params.opt_coordinates in ["REDUNDANT", "BOTH"]:
o_molsys.fragments[0].add_intcos_from_connectivity(connectivity)
if params.opt_coordinates in ["CARTESIAN", "BOTH"]:
o_molsys.fragments[0].add_cartesian_intcos()
elif params.frag_mode == "MULTI":
# if provided multiple frags, then we use these.
# if not, then split them (if not connected).
if o_molsys.nfragments == 1:
o_molsys.split_fragments_by_connectivity()
if o_molsys.nfragments > 1:
addIntcos.add_dimer_frag_intcos(o_molsys)
# remove connectivity so that we don't add redundant coordinates
# between fragments
o_molsys.purge_interfragment_connectivity(connectivity)
if params.opt_coordinates in ["REDUNDANT", "BOTH"]:
for iF, F in enumerate(o_molsys.fragments):
C = np.ndarray((F.natom, F.natom))
C[:] = connectivity[o_molsys.frag_atom_slice(iF), o_molsys.frag_atom_slice(iF)]
F.add_intcos_from_connectivity(C)
if params.opt_coordinates in ["CARTESIAN", "BOTH"]:
for F in o_molsys.fragments:
F.add_cartesian_intcos()
addIntcos.add_constrained_intcos(o_molsys) # make sure these are in the set
return
def prepare_opt_output(o_molsys, computer, rxnpath=False, error=None):
logger = logging.getLogger(__name__)
logger.debug("Preparing OptimizationResult")
    # Get molecule from most recent step. Add provenance and fill in non-required fields.
# Turn back to dict
computer.update_geometry(o_molsys.geom)
final_molecule = computer.molecule
qc_output = {
"schema_name": "qcschema_optimization_output",
"trajectory": computer.trajectory,
"energies": computer.energies,
"final_molecule": final_molecule,
"extras": {},
"success": True,
}
if error:
qc_output.update(
{
"success": False,
"error": {"error_type": error.err_type, "error_message": error.mesg},
}
)
if rxnpath:
qc_output["extras"]["irc_rxn_path"] = rxnpath
return qc_output
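# Hedged usage sketch (names below are assumptions, not optking's documented API):
# end users normally call the higher-level wrappers mentioned in optimize()'s
# docstring (optimize_psi4 / optimize_qcengine), which build the molecular system
# and compute wrapper before delegating to optimize().
#
# o_molsys = Molsys.from_schema(qc_input_molecule)   # hypothetical constructor name
# computer = make_compute_wrapper(qc_input)          # hypothetical factory
# result = optimize(o_molsys, computer)              # qcschema_optimization_output dict
# print(result["success"], result["energies"][-1])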
|
11582232
|
from __future__ import absolute_import, division, print_function
from telnyx.api_resources.abstract import (
CreateableAPIResource,
DeletableAPIResource,
ListableAPIResource,
UpdateableAPIResource,
)
class TelephonyCredential(
CreateableAPIResource,
DeletableAPIResource,
ListableAPIResource,
UpdateableAPIResource,
):
OBJECT_NAME = "telephony_credential"
API_RECORD_TYPE_NAME = "credential"
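# Hedged usage sketch (illustrative; parameter names are assumptions): the mixin
# base classes above give this resource the SDK's standard CRUD helpers.
#
# import telnyx
# telnyx.api_key = "KEY..."
# cred = telnyx.TelephonyCredential.create(connection_id="1234567890")
# telnyx.TelephonyCredential.list()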
|
11582234
|
import logging
import re
import json
import os
import tarfile
import zipfile
from abc import ABCMeta, abstractmethod
import requests
log = logging.getLogger(__name__)
CHUNK_SIZE = 32768
class BaseDownloader(metaclass=ABCMeta):
def __init__(self, target_dir):
self.target_dir = target_dir
@abstractmethod
def download(self):
pass
def download_file_from_web_server(self, url, destination):
log.info('Downloading {} into {}'.format(url, destination))
local_filename = url.split('/')[-1]
target_file = os.path.join(destination, local_filename)
if not os.path.exists(target_file):
response = requests.get(url, stream=True)
self.save_response_content(response, target_file)
log.info('Finished download')
return local_filename
def download_file_from_google_drive(self, id, destination):
log.info('Downloading from Google Drive id={} into {}'.format(id, destination))
URL = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(URL, params={'id': id}, stream=True)
token = self.get_confirm_token(response)
if token:
params = {'id': id, 'confirm': token}
response = session.get(URL, params=params, stream=True)
self.save_response_content(response, destination)
log.info('Finished download')
def download_file_from_baidu(self, id, destination):
log.info('Downloading from Baidu id={} into {}'.format(id, destination))
session = requests.Session()
HEADERS = {
"User-Agent": "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36"
}
response = session.get('https://pan.baidu.com/s/{}'.format(id), headers=HEADERS)
response.raise_for_status()
regex = r".*yunData\.setData\((?P<info>\{.*\})\)"
m = next(re.finditer(regex, response.text, re.MULTILINE))
info = json.loads(m.group('info'))
log.debug(json.dumps(info, indent=4))
qs = dict(
uk=info['uk'],
shareid=info['shareid'],
timestamp=info['timestamp'],
sign=info['sign']
)
        download_link = 'http://pan.baidu.com/share/download?channel=chunlei&clienttype=0&web=1&uk={uk}&shareid={shareid}&timestamp={timestamp}&sign={sign}'
fs_id = info['file_list']['list'][0]['fs_id']
data = dict(
fid_list='["{}"]'.format(fs_id)
)
log.debug(download_link.format(**qs))
response = session.post(download_link.format(**qs), data=data, headers=HEADERS)
response.raise_for_status()
download_info = response.json()
log.debug(download_info)
if 'dlink' in download_info:
dlink_ = download_info['dlink']
log.debug('Download link: {}'.format(dlink_))
response = session.get(dlink_, stream=True)
self.save_response_content(response, destination)
log.info('Finished download')
else:
log.warning('Could not download. Try again later.')
# TODO Add progress bar
@staticmethod
def get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
@staticmethod
def save_response_content(response, destination):
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
@staticmethod
def extract_zip_file(zip_file_name, destination):
log.info('Extracting {} into {}'.format(zip_file_name, destination))
zip_ref = zipfile.ZipFile(zip_file_name, 'r')
zip_ref.extractall(destination)
zip_ref.close()
@staticmethod
def extract_tar_file(zip_file_name, destination):
log.info('Extracting {} into {}'.format(zip_file_name, destination))
zip_ref = tarfile.TarFile.open(zip_file_name, 'r')
zip_ref.extractall(destination)
zip_ref.close()
|
11582245
|
from typing import Any, Dict, Tuple
import vapoursynth as vs
from lvsfunc.misc import source
from vardautomation import FileInfo, PresetAAC, PresetBD, VPath
from project_module import chain, encode
core = vs.core
# Sources
JP_BD = FileInfo(r'BDMV/GRANBLUE_FANTASY_SEASON2_7/BDMV/STREAM/00008.m2ts', (None, -24),
idx=lambda x: source(x, force_lsmas=True, cachedir=''),
preset=[PresetBD, PresetAAC])
JP_BD.name_file_final = VPath(fr"premux/{JP_BD.name} (Premux).mkv")
JP_BD.a_src_cut = VPath(f"{JP_BD.name}_cut.aac")
JP_BD.do_qpfile = True
zones: Dict[Tuple[int, int], Dict[str, Any]] = { # Zones for x265
}
if __name__ == '__main__':
filtered = chain.filterchain(JP_BD.clip_cut)
encode.Encoder(JP_BD, filtered).run(zones=zones)
elif __name__ == '__vapoursynth__':
filtered = chain.filterchain(JP_BD.clip_cut)
if not isinstance(filtered, vs.VideoNode):
raise RuntimeError("Multiple output nodes were set when `vspipe` only expected one")
else:
filtered.set_output(0)
else:
JP_BD.clip_cut.set_output(0)
FILTERED = chain.filterchain(JP_BD.clip_cut)
if not isinstance(FILTERED, vs.VideoNode):
for i, clip_filtered in enumerate(FILTERED, start=1): # type: ignore
clip_filtered.set_output(i)
else:
FILTERED.set_output(1)
|
11582288
|
from .losses import wind_mean_squared_error
from .util import agg_window,create_windowed_arr,save_multiple_graph,get_output,gather_auc_avg_per_tol,join_mean_std
import matplotlib.pyplot as plt
from keras.models import Sequential, Model
import numpy as np
import pandas as pd
import os
class Params(object):
"""
    Parameters class to handle parameters for the ROI-based models
"""
def __init__(self,width=64, height=64,win_length=8,channels=1,dset='Thermal_track',d_type='frame',lambda_=1.0,regularizer_list=None,R_name=None,D_name=None,batch_size=32,break_win=10):
self.width=width
self.height=height
self.win_length=win_length
self.channels=channels
self.dset=dset
self.d_type=d_type
self.batch_size = batch_size
self.lambda_=lambda_
self.regularizer_list=regularizer_list
self.R_name=R_name
self.D_name=D_name
self.gap=break_win
def create_model_name(self):
return self.get_model_type()+ '_{}'.format(str(self.lambda_))
def create_hp_name(self):
return 'lambda_{}'.format(str(self.lambda_))
def get_model_type(self):
R_name=self.R_name
D_name=self.D_name
return R_name+'_'+D_name
def get_model_dir(self):
return self.get_root_path()+'/models'
def get_R_path(self,epochs_trained):
return self.get_root_path()+'/models/GAN_R_weights_epoch-{}.h5'.format(epochs_trained)
def get_D_path(self,epochs_trained):
return self.get_root_path()+'/models/GAN_D_weights_epoch-{}.h5'.format(epochs_trained)
def get_root_path(self):
return './{}/{}/{}/{}'.format(self.dset,self.d_type,self.get_model_type(),self.create_hp_name())
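    # Illustrative example (R_name/D_name values assumed): with dset='Thermal_track',
    # d_type='frame', R_name='R3D', D_name='D3D' and lambda_=1.0, get_root_path()
    # returns './Thermal_track/frame/R3D_D3D/lambda_1.0' and weights are written
    # under '<root>/models/GAN_R_weights_epoch-<n>.h5'.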
class CAE_GAN3D(object):
'''
Class used to train and test the base adversarial model
'''
def __init__(self, train_par=None,stride=1):
self.train_par=train_par
self.stride=stride
self.SHAPE = (self.train_par.width, self.train_par.height, train_par.channels)
def initialize_model(self,Reconstructor , Discriminator ):
print("Compiling GAN model.")
self.R = Reconstructor
self.D = Discriminator
print('Discriminator')
print(self.D.summary())
print('Reconstructor')
print(self.R.summary())
self.OPTIMIZER = 'adam'
self.stacked_R_D = self.stacked_R_D()
loss_weights = {'D':1.0, 'decoded':self.train_par.lambda_}
self.stacked_R_D.compile(loss={'D': 'binary_crossentropy', 'decoded': 'mean_squared_error'},\
optimizer=self.OPTIMIZER, loss_weights = loss_weights)
def stacked_R_D(self):
'''
        Used for training the Reconstructor. The Discriminator is frozen.
'''
self.D.trainable = False
model = Model(inputs = self.R.input, outputs = [self.R.output, self.D(self.R.output)],name='stacked')
print('stacked')
print(model.summary())
return model
def create_windowed_data(self, videos_dic,stride=1,data_key='FRAME'):
'''
Create windows of frames
'''
total = []
img_width, img_height,channels,win_length=self.train_par.width,self.train_par.height,self.train_par.channels,self.train_par.win_length
if data_key=='FLOW':
win_length=win_length-1
for vid_name in videos_dic.keys():
# print('Video Name', vid_name)
vid_windowed_list=[]
sub_vid_list=videos_dic[vid_name][data_key]
for sub_vid in sub_vid_list:
vid_windowed_list.append(create_windowed_arr(sub_vid, stride, win_length))
# print("Number of sub videos: ",len(vid_windowed_list))
vid_windowed=np.concatenate(vid_windowed_list)
total.append(vid_windowed)
total=np.concatenate(total)
# print("Windowed data shape:")
# print(total.shape)
return total
def get_MSE_all_agg(self, test_data,type=['frame','window']):
"""
Anomaly scores based on MSE
"""
img_width, img_height, win_length, channels,model = self.train_par.width, self.train_par.height ,self.train_par.win_length, self.train_par.channels, self.R
if self.train_par.d_type=='opticalFLow':
win_length=win_length-1
recons_seq = model.predict([test_data]) #(samples-win_length+1, win_length, wd,ht,1)
print(recons_seq.shape)
RE=wind_mean_squared_error(test_data,recons_seq,win_length, img_height,img_width,channels)
print('RE.shape', RE.shape)
RE_dict = {}
agg_type_list=[]
if 'frame' in type:
agg_type_list.append('x_std')
agg_type_list.append('x_mean')
if 'window' in type:
agg_type_list.append('in_std')
agg_type_list.append('in_mean')
for agg_type in agg_type_list:
RE_dict[agg_type] = agg_window(RE, agg_type)
return RE_dict, recons_seq
def train(self, X_train_frame,epochs= 500,epochs_trained=0, save_interval = 10):
'''
Train the adversarial framework
X_train_frame- window of frames
'''
print('Using save root:', self.train_par.get_root_path())
self.save_root=self.train_par.get_root_path()
batch_size=self.train_par.batch_size
print('self.stacked_R_D.metrics_names', self.stacked_R_D.metrics_names)
print('self.D.metrics_names', self.D.metrics_names)
num_batches = int(X_train_frame.shape[0]/batch_size)
print("Train frame dataset shape",X_train_frame.shape)
print("Number of batches",num_batches)
#model save dir
if not os.path.isdir(self.train_par.get_model_dir()):
os.makedirs(self.train_par.get_model_dir())
d_loss_list = [] # Discriminator loss
r_loss_list_RE = [] #Reconstruction error
r_loss_list_BCE = [] #Binary cross entropy
loss_root = self.save_root + '/loss'
#Creating loss directory
if not os.path.isdir(loss_root):
print("Creating loss directory ")
os.makedirs(loss_root)
print("Loss file status................")
if os.path.isfile(loss_root + '/d_loss_epoch-{}.npy'.format(epochs_trained)):
print("D Loss file found")
d_loss_list=list(np.load(loss_root + '/d_loss_epoch-{}.npy'.format(epochs_trained)))
if os.path.isfile(loss_root + '/r_loss_RE_epoch-{}.npy'.format(epochs_trained)):
print("RE Loss file found")
r_loss_list_RE=list(np.load(loss_root + '/r_loss_RE_epoch-{}.npy'.format(epochs_trained)))
if os.path.isfile(loss_root + '/r_loss_BCE_epoch-{}.npy'.format(epochs_trained)):
print("BCE Loss file found")
r_loss_list_BCE=list(np.load(loss_root + '/r_loss_BCE_epoch-{}.npy'.format(epochs_trained)))
for epoch in range(epochs_trained+1,epochs):
## train discriminator
random_index = np.random.randint(0, len(X_train_frame) - batch_size)
permutated_indexes = np.random.permutation(X_train_frame.shape[0])
for step in range(num_batches):
batch_indeces = permutated_indexes[step*batch_size:(step+1)*batch_size]
legit_images = X_train_frame[batch_indeces]
#R Input
recons_images = self.R.predict([legit_images])
x_combined_batch_size = np.concatenate((legit_images,recons_images))
y_combined_batch_size = np.concatenate((np.ones((batch_size, 1)), np.zeros((batch_size, 1))))
# First train Discriminator
d_loss = self.D.train_on_batch(x_combined_batch_size, y_combined_batch_size)
d_loss_list.append(d_loss)
# Train Reconstructor
y_mislabled = np.ones((batch_size, 1))
r_loss = self.stacked_R_D.train_on_batch([legit_images], {'decoded':legit_images,'D':y_mislabled})
r_loss_list_RE.append(r_loss[1])
r_loss_list_BCE.append(r_loss[2])
if step % 10 == 0:
print('epoch: {}, step {}, [Discriminator :: d_loss: {}], [ Reconstructor :: RE loss, BCE loss: {}, {}]'.format(epoch, step, d_loss, r_loss[1], r_loss[2]))
if epoch % save_interval == 0 or epoch == epochs-1:
save_string = self.train_par.get_R_path(epoch)
self.R.save_weights(save_string)
save_string = self.train_par.get_D_path(epoch)
self.D.save_weights(save_string)
print('saving images')
np.random.seed(0)
test_idxs = np.random.choice(len(X_train_frame), 8, replace = False)
test_ims = X_train_frame[test_idxs]
print(test_ims.shape)
if self.train_par.d_type=='frame':
self.plot_images_3D(save2file=True, step=epoch, test_window = test_ims,d_type='thermal')
elif self.train_par.d_type=='opticalFLow':
self.plot_images_3D(save2file=True, step=epoch, test_window = test_ims,d_type='flow')
else:
print("Invalid data type in Params")
np.save(loss_root + '/d_loss_epoch-{}.npy'.format(epoch), np.array(d_loss_list))
np.save(loss_root + '/r_loss_RE_epoch-{}.npy'.format(epoch), np.array(r_loss_list_RE))
np.save(loss_root + '/r_loss_BCE_epoch-{}.npy'.format(epoch), np.array(r_loss_list_BCE))
save_multiple_graph(x_list=[r_loss_list_RE,r_loss_list_BCE,d_loss_list],labels=['R_RE','R_BCE','D_loss'],x_label='Batches',y_label='Losses',title='Loss Plot',path=loss_root+'/log_loss.png',log_plot=True)
save_multiple_graph(x_list=[d_loss_list,r_loss_list_RE,r_loss_list_BCE],labels=['D_loss','R_RE','R_BCE'],x_label='Batches',y_label='Losses',title='Loss Plot',path=loss_root+'/loss.png',log_plot=False)
def plot_images_3D(self, save2file=False, samples=16, step=0, test_window = None,d_type=None):
'''
        Visualization of the input and reconstructed sequences. Saves (or shows) the 4th frame of the input and output windows.
'''
test_ims = test_window[:,4,:,:,:]
img_root = self.save_root + "/"+d_type+"_images/"
channels=self.train_par.channels
rec_images = self.R.predict([test_window])
if not os.path.isdir(img_root):
os.makedirs(img_root)
filename = img_root + "/img_{}.png".format(step)
rec_images = rec_images[:,4,:,:,:]
if d_type=='flow' and channels==3:
rec_images=rec_images[:,:,:,2:3]
test_ims=test_ims[:,:,:,2:3]
channels=1
plt.figure(figsize=(10,10))
for i in range(rec_images.shape[0]):
plt.subplot(4, 4, i+1)
image = rec_images[i, :, :, :]
if channels==3:
image = np.reshape(image, [ self.train_par.height, self.train_par.width, channels])
else:
image = np.reshape(image, [ self.train_par.height, self.train_par.width])
plt.imshow(image, cmap='gray')
plt.axis('off')
plt.tight_layout()
for i in range(test_ims.shape[0]):
i+=8
plt.subplot(4, 4, i+1)
image = test_ims[i-8, :, :, :]
if channels==3:
image = np.reshape(image, [ self.train_par.height, self.train_par.width, channels])
else:
image = np.reshape(image, [ self.train_par.height, self.train_par.width])
plt.imshow(image, cmap='gray')
plt.axis('off')
plt.tight_layout()
if save2file:
plt.savefig(filename)
plt.close('all')
else:
plt.show()
def test(self, test_videos, score_type = 'R', epochs = None,plot=False,tolerance_limit=8):
'''
Gets AUC ROC/PR for all videos using RE.
choose score type 'R'
'''
dset, img_width, img_height, win_length = self.train_par.dset, self.train_par.width, self.train_par.height, self.train_par.win_length
self.save_root = self.train_par.get_root_path() + '/testing/epochs_{}'.format(epochs)
stride = self.stride
model_name = self.train_par.create_model_name()
model_name += '_{}'.format(score_type)
print(model_name)
aucs = []
std_total = []
mean_total = []
labels_total_l = []
i = 0 #vid index TODO rename
num_vids = len(test_videos)
print('num of test vids', num_vids)
        ROC_mat = np.zeros((num_vids,2*tolerance_limit+2)) # one row per test video; columns: X_std, X_mean, then per-tolerance std/mean scores
PR_mat = np.zeros((num_vids,2*tolerance_limit+2))
print('score_type', score_type)
for vid_name in test_videos.keys():
print("--------------------------")
print("--------------------------")
print("Processing ",vid_name)
print("--------------------------")
print("--------------------------")
# vid_total, labels_total = restore_Fall_vid(data_dict, Fall_name, NFF_name)
vid_total_list=None
frame_numbers_list=None
start,end=None,None
labels_total_list=test_videos[vid_name]['LABELS']
display_name = vid_name
test_labels = np.concatenate(labels_total_list)
print("Number of Labels",len(test_labels))
if self.train_par.d_type=='frame':
vid_total_list=test_videos[vid_name]['FRAME']
frame_numbers_list=test_videos[vid_name]['NUMBER']
frame_numbers = np.concatenate(frame_numbers_list)
print("Number of frames",len(frame_numbers))
start,end=test_videos[vid_name]['START_END']
test_data_list = [vid.reshape(len(vid), img_width, img_height, self.train_par.channels) for vid in vid_total_list]
test_data_windowed_list = [create_windowed_arr(test_data, stride, win_length) for test_data in test_data_list]#create_windowed_arr function in data_management.py
elif self.train_par.d_type=='opticalFLow':
vid_total_list=test_videos[vid_name]['FLOW']
#Create windows of length win_length-1
test_data_list = [vid.reshape(len(vid), img_width, img_height, self.train_par.channels) for vid in vid_total_list]
test_data_windowed_list = [create_windowed_arr(test_data, stride, win_length-1) for test_data in test_data_list]#create_windowed_arr function in data_management.py
else:
print("Invlaid d_type in the params")
num_sub_videos=len(test_data_windowed_list)
print("Number of sub flow videos,",num_sub_videos)
if score_type == 'R':
in_mean_RE=[]
in_std_RE=[]
x_std_RE=[]
x_mean_RE=[]
for index in range(num_sub_videos):
test_data_windowed=test_data_windowed_list[index]
RE_dict, recons_seq = self.get_MSE_all_agg(test_data_windowed) #Return dict with value for each score style
in_mean_RE.append(RE_dict['in_mean'])
in_std_RE.append(RE_dict['in_std'])
x_std_RE.append(RE_dict['x_std'])
x_mean_RE.append(RE_dict['x_mean'])
in_mean_RE=np.concatenate(in_mean_RE)
in_std_RE=np.concatenate(in_std_RE)
x_std_RE=np.concatenate(x_std_RE)
x_mean_RE=np.concatenate(x_mean_RE)
final_in_mean = in_mean_RE
final_in_std = in_std_RE
#frame based scores only for thermal frames
if self.train_par.d_type=='frame':
auc_x_std, conf_mat, g_mean, ap_x_std = get_output(labels = test_labels,\
predictions = x_std_RE, data_option = 'NA', to_plot = False)
auc_x_mean, conf_mat, g_mean, ap_x_mean = get_output(labels = test_labels,\
predictions = x_mean_RE, data_option = 'NA', to_plot = False)
ROC_mat[i,0] = auc_x_std
ROC_mat[i,1] = auc_x_mean
PR_mat[i,0] = ap_x_std
PR_mat[i,1] = ap_x_mean
#window based scores
# print('final_in_mean.shape', final_in_mean.shape, 'final_in_std.shape', final_in_std.shape)
tol_mat, tol_keys = gather_auc_avg_per_tol(final_in_mean, final_in_std, labels_list = labels_total_list, win_len = win_length,tolerance_limit=tolerance_limit)
AUROC_tol = tol_mat[0]
AUPR_tol = tol_mat[1]
num_scores_tol = tol_mat.shape[1]
for k in range(num_scores_tol):
j = k+2 #start at 2, first two were for X_std and X_mean
ROC_mat[i,j] = AUROC_tol[k]
PR_mat[i,j] = AUPR_tol[k]
i += 1
if plot == True and score_type == 'R' and self.train_par.d_type=='frame':
plt.plot(frame_numbers,x_std_RE, label='RE_std',linestyle='--', marker='.')
plt.plot(frame_numbers,x_mean_RE, label='RE_mean',linestyle='--', marker='.')
# plt.xticks([i+1 for i in range(max(frame_numbers))])
plt.xlim(1,max(frame_numbers))
# plt.ylim(0,1)
plt.legend()
plt.axvspan(start,end, alpha = 0.5)
plot_save_p = self.save_root + '/scores_plots/'
if not os.path.isdir(plot_save_p):
os.makedirs(plot_save_p)
plt.savefig(plot_save_p + '{}.jpg'.format(vid_name))
plt.close()
# break
AUROC_avg = np.mean(ROC_mat, axis = 0)
AUROC_std = np.std(ROC_mat, axis = 0)
AUROC_avg_std = join_mean_std(AUROC_avg, AUROC_std)
# print(AUROC_std)
AUPR_avg = np.mean(PR_mat, axis = 0)
AUPR_std = np.std(PR_mat, axis = 0)
AUPR_avg_std = join_mean_std(AUPR_avg, AUPR_std)
total = np.vstack((AUROC_avg_std, AUPR_avg_std))
total_no_std = np.vstack((AUROC_avg, AUPR_avg))
df = pd.DataFrame(data = total, index = ['AUROC','AUPR'], columns = ['X-STD','X-Mean'] + tol_keys)
df_no_std = pd.DataFrame(data = total_no_std, index = ['AUROC','AUPR'], columns = ['X-STD','X-Mean'] + tol_keys)
print(df)
print(df_no_std)
if not os.path.isdir(self.save_root):
os.makedirs(self.save_root)
save_path = self.save_root + '/AUC_{}.csv'.format(score_type)
save_path_no_std = self.save_root + '/AUC_{}_no_std.csv'.format(score_type)
print(save_path)
df.to_csv(save_path)
df_no_std.to_csv(save_path_no_std)
|
11582294
|
import logging
import sys
from pathlib import Path
from datetime import datetime
from Pegasus.api import *
logging.basicConfig(level=logging.DEBUG)
PEGASUS_LOCATION = "/usr/bin/pegasus-keg"
# --- Work Dir Setup -----------------------------------------------------------
RUN_ID = "black-diamond-5.0api-" + datetime.now().strftime("%s")
TOP_DIR = Path.cwd()
WORK_DIR = TOP_DIR / "work"
try:
Path.mkdir(WORK_DIR)
except FileExistsError:
pass
# --- Output Dir Setup for condorpool Site -------------------------------------
condorpool_local_storage_dir = Path("/lizard/scratch-90-days/bamboo/outputs") / RUN_ID
try:
Path.mkdir(condorpool_local_storage_dir)
except FileExistsError:
pass
# --- Configuration ------------------------------------------------------------
print("Generating pegasus.conf at: {}".format(TOP_DIR / "pegasus.properties"))
conf = Properties()
conf["pegasus.catalog.site"] = "YAML"
conf["pegasus.catalog.site.file"] = "sites.yml"
conf["pegasus.catalog.transformation"] = "YAML"
conf["pegasus.catalog.transformation.file"] = "करण-transformations.yml"
conf["pegasus.catalog.replica"] = "YAML"
conf["pegasus.catalog.replica.file"] = "replicas.yml"
conf["pegasus.data.configuration"] = "condorio"
conf["pegasus.integrity.checking"] = "none"
conf.write()
# --- Sites --------------------------------------------------------------------
LOCAL = "local"
CONDOR_POOL = "⿔condor-pool⼤"
shared_scratch_dir = str(WORK_DIR / "shared-scratch")
local_storage_dir = str(WORK_DIR / "outputs" / RUN_ID)
print("Generating site catalog at: {}".format(TOP_DIR / "sites.yml"))
SiteCatalog().add_sites(
Site(
LOCAL, arch=Arch.X86_64, os_type=OS.LINUX, os_release="rhel", os_version="7"
).add_directories(
Directory(Directory.SHARED_SCRATCH, shared_scratch_dir).add_file_servers(
FileServer("file://" + shared_scratch_dir, Operation.ALL)
),
Directory(Directory.LOCAL_STORAGE, local_storage_dir).add_file_servers(
FileServer("file://" + local_storage_dir, Operation.ALL)
),
),
Site(CONDOR_POOL, arch=Arch.X86_64, os_type=OS.LINUX)
.add_directories(
Directory(Directory.LOCAL_STORAGE, str(condorpool_local_storage_dir))
.add_file_servers(FileServer("file://" + str(condorpool_local_storage_dir), Operation.ALL))
)
.add_pegasus_profile(style="condor")
.add_pegasus_profile(auxillary_local="true")
.add_condor_profile(universe="vanilla"),
).write()
# --- Replicas -----------------------------------------------------------------
print("Generating replica catalog at: {}".format(TOP_DIR / "replicas.yml"))
# create initial input file
with open("f.å", "w") as f:
f.write("This is sample input to KEG\n")
fa = File("f.å").add_metadata({"㐦": "㒦"})
ReplicaCatalog().add_replica(LOCAL, fa, TOP_DIR / fa.lfn).write()
# --- Transformations ----------------------------------------------------------
print(
"Generating transformation catalog at: {}".format(TOP_DIR / "करण-transformations.yml")
)
preprocess = Transformation("pЯёprocess", namespace="pέgasuζ", version="4.0").add_sites(
TransformationSite(
CONDOR_POOL,
PEGASUS_LOCATION,
is_stageable=True,
arch=Arch.X86_64,
os_type=OS.LINUX,
)
)
findrange = Transformation("findrange", namespace="pέgasuζ", version="4.0").add_sites(
TransformationSite(
CONDOR_POOL,
PEGASUS_LOCATION,
is_stageable=True,
arch=Arch.X86_64,
os_type=OS.LINUX,
)
)
analyze = Transformation("analyze", namespace="pέgasuζ", version="4.0").add_sites(
TransformationSite(
CONDOR_POOL,
PEGASUS_LOCATION,
is_stageable=True,
arch=Arch.X86_64,
os_type=OS.LINUX,
)
)
TransformationCatalog().add_transformations(preprocess, findrange, analyze)\
.write("करण-transformations.yml")
# --- Workflow -----------------------------------------------------------------
print("Generating workflow")
fb1 = File("f.ƀ1")
fb2 = File("f.β2")
fc1 = File("f.Ҫ1")
fc2 = File("f.Ͻ2")
fd = File("f.Ɗ")
try:
Workflow("blÅckƊiamond㒀㑖").add_jobs(
Job(preprocess)
.add_args("-a", "preprocess", "-T", "60", "-i", fa, "-o", fb1, fb2)
.add_inputs(fa)
.add_outputs(fb1, fb2, register_replica=True),
        Job(findrange)
.add_args("-a", "findrange", "-T", "60", "-i", fb1, "-o", fc1)
.add_inputs(fb1)
.add_outputs(fc1, register_replica=True),
        Job(findrange)
.add_args("-a", "findrange", "-T", "60", "-i", fb2, "-o", fc2)
.add_inputs(fb2)
.add_outputs(fc2, register_replica=True),
Job(analyze)
.add_args("-a", "analyze", "-T", "60", "-i", fc1, fc2, "-o", fd)
.add_inputs(fc1, fc2)
.add_outputs(fd, register_replica=True),
).plan(
dir=str(WORK_DIR),
verbose=3,
relative_dir=RUN_ID,
sites=[CONDOR_POOL],
output_sites=[LOCAL, CONDOR_POOL],
force=True,
submit=True,
)
except PegasusClientError as e:
print(e.output)
|
11582313
|
from typing import Union
import hither2 as hi
def test_sorting(sorter_func, *, show_console=True, job_handler: Union[None, hi.JobHandler]=None):
import sortingview as sv
recording_name = 'paired_kampff/2014_11_25_Pair_3_0'
recording_uri = 'sha1://a205f87cef8b7f86df7a09cddbc79a1fbe5df60f/2014_11_25_Pair_3_0.json'
sorting_uri = 'sha1://c656add63d85a17840980084a1ff1cdc662a2cd5/2014_11_25_Pair_3_0.firings_true.json'
recording = sv.LabboxEphysRecordingExtractor(recording_uri, download=True)
sorting_true = sv.LabboxEphysSortingExtractor(sorting_uri)
channel_ids = recording.get_channel_ids()
samplerate = recording.get_sampling_frequency()
num_timepoints = recording.get_num_frames()
print(f'{recording_name}')
print(f'Recording has {len(channel_ids)} channels and {num_timepoints} timepoints (samplerate: {samplerate})')
unit_ids = sorting_true.get_unit_ids()
spike_train = sorting_true.get_unit_spike_train(unit_id=unit_ids[0])
print(f'Unit {unit_ids[0]} has {len(spike_train)} events')
with hi.Config(use_container=True, show_console=show_console, job_handler=job_handler):
sorting_object = hi.Job(sorter_func, {
'recording_object': recording.object()
}).wait().return_value
sorting = sv.LabboxEphysSortingExtractor(sorting_object)
unit_ids = sorting.get_unit_ids()
spike_train = sorting.get_unit_spike_train(unit_id=unit_ids[0])
print(f'Unit {unit_ids[0]} has {len(spike_train)} events')
|
11582331
|
import sys
import ast
import re
import mongoengine as me
import rmc.shared.constants as c
import rmc.models as m
# Normalize critique scores to be in [0, 1]
def normalize_score(score):
return (score['A'] * 4 + score['B'] * 3 +
score['C'] * 2 + score['D']) / 400.0
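# Illustrative example (assumed values): scores of A=50, B=30, C=15, D=5
# normalize to (50*4 + 30*3 + 15*2 + 5) / 400.0 = 0.8125.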
def clean_name(name):
return re.sub(r'\s+', ' ', name.strip())
# Stolen from processor.py
def get_prof_names(prof_name):
matches = re.findall(r'^(.+?), (.+)$', prof_name)[0]
return {
'first_name': clean_name(matches[1]),
'last_name': clean_name(matches[0]),
}
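# Illustrative example: get_prof_names('Morton, Andrew') ->
# {'first_name': 'Andrew', 'last_name': 'Morton'}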
def import_engineering_critiques(input_file):
print 'Begin importing Engineering course critiques'
number_courses_imported = 0
number_reviews_imported = 0
line = input_file.readline()
while line:
data = ast.literal_eval(line)
course_id = (data['code'] + data['num']).lower()
for critique in data['critiques']:
# arch247 and math212 are dumb.
# Has 'n/a' or '' for prof, which becomes '/a' or '' after parsing
prof_name = critique['prof']
if prof_name == '/a' or prof_name == '':
continue
# Eg. Morton, Andrew OR Morton, A
# FIXME(Sandy): Normalize prof names
prof_names = get_prof_names(prof_name)
prof = m.Professor(**prof_names)
# Note: Manually verified that .save() will not erase existing
# fields that are not set on save (ie. ratings)
prof.save()
professor_id = prof.id
season = critique['term']
year = critique['year']
term_id = m.Term.get_id_from_year_season(year, season)
# The score index correspond directly to the question numbers
# (i.e. arrays are 1-indexed)
scores = critique['scores']
def clarity_from_scores(scores):
Q1_WEIGHT = 0.2
Q2_WEIGHT = 0.2
Q3_WEIGHT = 0.4
Q4_WEIGHT = 0.2
# CLARITY
# presentation in lectures (organization and clarity)
c1 = normalize_score(scores[1]) * Q1_WEIGHT
c1r = scores[1]['num_replies'] * Q1_WEIGHT
# response to questions
c2 = normalize_score(scores[2]) * Q2_WEIGHT
c2r = scores[2]['num_replies'] * Q2_WEIGHT
# oral presentation (audibility, articulation, english)
c3 = normalize_score(scores[3]) * Q3_WEIGHT
c3r = scores[3]['num_replies'] * Q3_WEIGHT
# visual presentation
# (organization, legibility, effective use of materials)
c4 = normalize_score(scores[4]) * Q4_WEIGHT
c4r = scores[4]['num_replies'] * Q4_WEIGHT
c_count = int(round(c1r + c2r + c3r + c4r))
c_rating = (c1 + c2 + c3 + c4) / max(1, c_count)
return m.AggregateRating(rating=c_rating, count=c_count)
def passion_from_scores(scores):
# PASSION
# attitude towards teachings the course
p_count = scores[8]['num_replies']
p_rating = normalize_score(scores[8]) / max(1, p_count)
return m.AggregateRating(rating=p_rating, count=p_count)
def overall_prof_from_scores(scores):
# OVERALL
# overall appraisal of quality of teaching
op_count = scores[10]['num_replies']
op_rating = normalize_score(scores[10]) / max(1, op_count)
return m.AggregateRating(rating=op_rating, count=op_count)
def interest_from_scores(scores):
# Course directed ratings
# INTEREST
# TODO(Sandy): Revise the use of this question-metric
# how many classes attended
i_count = scores[17]['num_replies']
i_rating = normalize_score(scores[17]) / max(1, i_count)
return m.AggregateRating(rating=i_rating, count=i_count)
def easiness_from_scores(scores):
Q11_WEIGHT = 0.5
Q12_WEIGHT = 0.5
# EASINESS
# difficulty of concepts
e1 = normalize_score(scores[11]) * Q11_WEIGHT
e1r = scores[11]['num_replies'] * Q11_WEIGHT
# workload
e2 = normalize_score(scores[12]) * Q12_WEIGHT
e2r = scores[12]['num_replies'] * Q12_WEIGHT
e_count = int(round(e1r + e2r))
e_rating = (e1 + e2) / max(1, e_count)
return m.AggregateRating(rating=e_rating, count=e_count)
def overall_course_from_interest_easiness(i, e):
INTEREST_WEIGHT = 0.5
EASINESS_WEIGHT = 0.5
# OVERALL
oc_count = int(round(i.count * INTEREST_WEIGHT +
e.count * EASINESS_WEIGHT))
oc_rating = (i.rating * INTEREST_WEIGHT +
e.rating * EASINESS_WEIGHT) / max(1, oc_count)
return m.AggregateRating(rating=oc_rating, count=oc_count)
# TODO(Sandy): Try different weightings to see if we can get better data
interest = interest_from_scores(scores)
easiness = easiness_from_scores(scores)
overall_course = overall_course_from_interest_easiness(interest,
easiness)
clarity = clarity_from_scores(scores)
passion = passion_from_scores(scores)
overall_prof = overall_prof_from_scores(scores)
critique_course = {
'course_id': course_id,
'professor_id': professor_id,
'term_id': term_id,
'interest': interest,
'easiness': easiness,
'overall_course': overall_course,
'clarity': clarity,
'passion': passion,
'overall_prof': overall_prof,
}
m.CritiqueCourse(**critique_course).save()
number_reviews_imported += 1
number_courses_imported += 1
line = input_file.readline()
    print ('imported %d engineering course critique reviews' %
           number_reviews_imported)
    print 'from %d courses' % number_courses_imported
    print 'totalling %d course critiques' % m.CritiqueCourse.objects.count()
print 'Finished importing Engineering course critiques'
# TODO(Sandy): Write a script that will fetch raw data and feed it into this
if __name__ == '__main__':
if (len(sys.argv) < 2):
        print 'Please pass the Eng data filename as the first argument'
sys.exit()
me.connect(c.MONGO_DB_RMC, host=c.MONGO_HOST, port=c.MONGO_PORT)
input_file = open(sys.argv[1], 'r')
import_engineering_critiques(input_file)
|
11582345
|
import numpy as np
from numpy.typing import ArrayLike
from scipy.interpolate import PPoly, lagrange
def interp_rolling_lagrange(x: ArrayLike, y: ArrayLike, order: int) -> PPoly:
x = np.asarray(x)
y = np.asarray(y)
# make sure x is sorted
assert np.all(x[:-1] < x[1:])
assert len(x) > order
if order % 2 == 1:
# The intervals are between the points
coeffs = []
for k in range(len(x) - 1):
idx = np.arange(k - order // 2, k + order // 2 + 2)
while idx[0] < 0:
idx += 1
while idx[-1] > len(x) - 1:
idx -= 1
lag = lagrange(x[idx] - x[k], y[idx])
c = lag.coefficients
if len(c) < order + 1:
# Prepend zeros if necessary; see
# <https://github.com/scipy/scipy/issues/14681>
c = np.concatenate([np.zeros(order + 1 - len(c)), c])
coeffs.append(c)
pp = PPoly(np.array(coeffs).T, x)
else:
# The intervals are around the points
breakpoints = np.concatenate([[x[0]], (x[:-1] + x[1:]) / 2, [x[-1]]])
coeffs = []
for k in range(len(x)):
idx = np.arange(k - order // 2, k + order // 2 + 1)
while idx[0] < 0:
idx += 1
while idx[-1] > len(x) - 1:
idx -= 1
lag = lagrange(x[idx] - breakpoints[k], y[idx])
c = lag.coefficients
if len(c) < order + 1:
# Prepend zeros if necessary; see
# <https://github.com/scipy/scipy/issues/14681>
c = np.concatenate([np.zeros(order + 1 - len(c)), c])
coeffs.append(c)
pp = PPoly(np.array(coeffs).T, breakpoints)
return pp
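# Minimal usage sketch (synthetic data, assumed for illustration only): a rolling
# 3rd-order Lagrange interpolant fitted to samples of a cubic should reproduce it
# up to floating-point error.
if __name__ == "__main__":
    x_s = np.linspace(0.0, 2.0, 9)
    y_s = x_s ** 3
    pp = interp_rolling_lagrange(x_s, y_s, order=3)
    x_fine = np.linspace(0.0, 2.0, 101)
    print(np.max(np.abs(pp(x_fine) - x_fine ** 3)))  # close to machine precision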
|
11582346
|
from Instrucciones.TablaSimbolos.Instruccion import Instruccion
from Instrucciones.TablaSimbolos.Simbolo import Simbolo
from Instrucciones.TablaSimbolos.Tipo import Tipo_Dato, Tipo
from datetime import datetime
class Now(Instruccion):
def __init__(self, strGram,linea, columna):
Instruccion.__init__(self,Tipo(Tipo_Dato.TIMESTAMP),linea,columna,strGram)
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla,arbol)
todays_date = datetime.now()
current_time = todays_date.strftime("%Y-%m-%d %H:%M:%S")
return current_time
def analizar(self, tabla, arbol):
return super().analizar(tabla, arbol)
def traducir(self, ts, arbol):
super().traducir(ts, arbol)
cadena = f"now()"
return cadena
|
11582380
|
import pytest
from add_trailing_comma._main import _fix_src
@pytest.mark.parametrize(
'src',
(
'from os import path, makedirs\n',
'from os import (path, makedirs)\n',
'from os import (\n'
' path,\n'
' makedirs,\n'
')',
),
)
def test_fix_from_import_noop(src):
assert _fix_src(src, min_version=(2, 7)) == src
@pytest.mark.parametrize(
('src', 'expected'),
(
(
'from os import (\n'
' makedirs,\n'
' path\n'
')',
'from os import (\n'
' makedirs,\n'
' path,\n'
')',
),
(
'from os import \\\n'
' (\n'
' path,\n'
' makedirs\n'
' )\n',
'from os import \\\n'
' (\n'
' path,\n'
' makedirs,\n'
' )\n',
),
(
'from os import (\n'
' makedirs,\n'
' path,\n'
' )',
'from os import (\n'
' makedirs,\n'
' path,\n'
')',
),
(
'if True:\n'
' from os import (\n'
' makedirs\n'
' )',
'if True:\n'
' from os import (\n'
' makedirs,\n'
' )',
),
),
)
def test_fix_from_import(src, expected):
assert _fix_src(src, min_version=(2, 7)) == expected
|
11582391
|
import logging
from backend.utils.rlgarage_handler import RLGarageAPI
logger = logging.getLogger(__name__)
def get_car(index: int) -> str:
try:
return RLGarageAPI().get_item(index)['name']
except KeyError:
logger.warning(f"Could not find car: {index}.")
return "Unknown"
    except Exception:
logger.warning(f"Error getting car for index {index}")
return "Unknown"
|
11582424
|
from genericpath import isfile
FONTS_FOLDER = 'fonts'
FONTS_ALIASES = {'lange': 'engraversmtbold', 'lange_thin': 'engr',
'patek_date': 'steelfish.regular'}
def get_font_def(font_name):
font_path = f'{FONTS_FOLDER}/{font_name}.ttf'
if not isfile(font_path):
if font_name not in FONTS_ALIASES:
return
font_path = f'{FONTS_FOLDER}/{FONTS_ALIASES[font_name]}.ttf'
return '@font-face {' \
f' font-family: "{font_name}";' \
f' src: url({font_path}) format("truetype");' \
'}' \
'p.customfont {' \
f' font-family: "{font_name}", Arial;' \
'}'
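# Illustrative behaviour: get_font_def('lange') falls back to the 'engraversmtbold'
# alias when fonts/lange.ttf is absent and returns the combined "@font-face" /
# "p.customfont" CSS rules as one string; unknown fonts with no alias return None.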
|
11582438
|
import sys
import os
import argparse
import logging
import json
import time
import cv2
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch.autograd import Variable
from torch.nn import functional as F
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../')
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
import pandas as pd
from tcga.Classification.data.wsi_producer import GridWSIPatchDataset # noqa
from tcga.Classification.model import MODELS # noqa
from PIL import Image
parser = argparse.ArgumentParser(description='Get the probability map of tumor'
' patch predictions given a WSI')
parser.add_argument('--wsi_path', default='./data/svs/', metavar='WSI_PATH',
type=str, help='Path to the input WSI file')
parser.add_argument('--ckpt_path', default='...', metavar='CKPT_PATH', type=str,
help='Path to the saved ckpt file of a pytorch model')
parser.add_argument('--cfg_path', default='./configs/resnet18_tcga.json',
metavar='CFG_PATH', type=str,
help='Path to the config file in json format related to'
' the ckpt file')
parser.add_argument('--mask_path', default='./data/tissue_mask/',
metavar='MASK_PATH', type=str, help='Path to the tissue mask of the input WSI file')
parser.add_argument('--probs_map_path', default='./data/npy/', metavar='PROBS_MAP_PATH',
type=str, help='Path to the output probs_map numpy file')
parser.add_argument('--visualize_path', default='./data/vis/', metavar='VIS_PATH',
                    type=str, help='Path to the output visualization files')
parser.add_argument('--GPU', default='0', type=str, help='which GPU to use'
', default 0')
parser.add_argument('--num_workers', default=5, type=int, help='number of '
'workers to use to make batch, default 5')
parser.add_argument('--eight_avg', default=0, type=int, help='if using average'
' of the 8 direction predictions for each patch,'
' default 0, which means disabled')
def get_probs_map(model, dataloader):
probs_map = np.zeros((dataloader.dataset._mask.shape))
num_batch = len(dataloader)
# only use the prediction of the center patch within the grid
idx_center = dataloader.dataset._grid_size // 2
count = 0
time_now = time.time()
with torch.no_grad():
for (data, x_mask, y_mask) in dataloader:
            data = Variable(data.cuda(non_blocking=True))  # 'async' is a reserved word in Python 3.7+
output,_ = model(data)
batch_size, grid_size, _ = output.size()
output = output.sigmoid()
predict = torch.zeros_like(output)
predict[output > 0.5] = 1
predict = predict + 1
probs_map[x_mask, y_mask] = predict[:, idx_center].cpu().data.numpy().flatten()
count += 1
time_spent = time.time() - time_now
time_now = time.time()
logging.info(
'{}, flip : {}, rotate : {}, batch : {}/{}, Run Time : {:.2f}'
.format(
time.strftime("%Y-%m-%d %H:%M:%S"), dataloader.dataset._flip,
dataloader.dataset._rotate, count, num_batch, time_spent))
return probs_map
def make_dataloader(args, cfg, tif_pth, mask_pth, flip='NONE', rotate='NONE'):
batch_size = cfg['wsi_test_batch_size'] * 8
num_workers = args.num_workers
dataloader = DataLoader(
GridWSIPatchDataset(tif_pth, mask_pth,
image_size=cfg['image_size'],
patch_size=cfg['patch_size'],
crop_size=cfg['crop_size'], normalize=True,
flip=flip,
rotate=rotate,level=0),
batch_size=batch_size, num_workers=num_workers, drop_last=False)
return dataloader
def run(args, tif_pth, mask_pth, tif):
start_t = time.time()
os.environ["CUDA_VISIBLE_DEVICES"] = args.GPU
logging.basicConfig(level=logging.INFO)
with open(args.cfg_path) as f:
cfg = json.load(f)
if cfg['image_size'] % cfg['patch_size'] != 0:
raise Exception('Image size / patch size != 0 : {} / {}'.
format(cfg['image_size'], cfg['patch_size']))
patch_per_side = cfg['image_size'] // cfg['patch_size']
grid_size = patch_per_side * patch_per_side
mask = cv2.imread(mask_pth)
ckpt = torch.load(args.ckpt_path)
model = MODELS[cfg['model']](num_nodes=grid_size, use_crf=cfg['use_crf'])
model.load_state_dict(ckpt['state_dict'])
model = model.cuda()
model.eval()
if not args.eight_avg:
dataloader = make_dataloader(
args, cfg, tif_pth, mask_pth, flip='NONE', rotate='NONE')
probs_map = get_probs_map(model, dataloader)
else:
        probs_map = np.zeros(mask.shape[:2])  # use the 2-D extent; cv2.imread returns an (H, W, 3) array
        dataloader = make_dataloader(
            args, cfg, tif_pth, mask_pth, flip='NONE', rotate='NONE')
        probs_map += get_probs_map(model, dataloader)
        dataloader = make_dataloader(
            args, cfg, tif_pth, mask_pth, flip='NONE', rotate='ROTATE_90')
        probs_map += get_probs_map(model, dataloader)
        dataloader = make_dataloader(
            args, cfg, tif_pth, mask_pth, flip='NONE', rotate='ROTATE_180')
        probs_map += get_probs_map(model, dataloader)
        dataloader = make_dataloader(
            args, cfg, tif_pth, mask_pth, flip='NONE', rotate='ROTATE_270')
        probs_map += get_probs_map(model, dataloader)
        dataloader = make_dataloader(
            args, cfg, tif_pth, mask_pth, flip='FLIP_LEFT_RIGHT', rotate='NONE')
        probs_map += get_probs_map(model, dataloader)
        dataloader = make_dataloader(
            args, cfg, tif_pth, mask_pth, flip='FLIP_LEFT_RIGHT', rotate='ROTATE_90')
        probs_map += get_probs_map(model, dataloader)
        dataloader = make_dataloader(
            args, cfg, tif_pth, mask_pth, flip='FLIP_LEFT_RIGHT', rotate='ROTATE_180')
        probs_map += get_probs_map(model, dataloader)
        dataloader = make_dataloader(
            args, cfg, tif_pth, mask_pth, flip='FLIP_LEFT_RIGHT', rotate='ROTATE_270')
        probs_map += get_probs_map(model, dataloader)
probs_map /= 8
nsp = args.probs_map_path + '/' + tif
np.save(nsp, probs_map)
all_time = time.time() - start_t
print("Classification need time:", all_time)
def main():
args = parser.parse_args()
if not os.path.isdir(args.visualize_path):
os.mkdir(args.visualize_path)
if not os.path.isdir(args.probs_map_path):
os.mkdir(args.probs_map_path)
data_path = args.wsi_path
mask_path = args.mask_path
tiffs = os.listdir(data_path)
    for tif in tiffs:
        base = os.path.splitext(tif)[0]  # str.strip('.svs') would strip characters, not the extension
        if base + '.npy' in os.listdir(args.probs_map_path):
            continue
        tif_pth = os.path.join(data_path, tif)
        print(tif_pth)
        mask = base + '_tissue_mask_64.png'
        mask_pth = os.path.join(mask_path, mask)
        run(args, tif_pth, mask_pth, base)
if __name__ == '__main__':
main()
|
11582485
|
import discord
from utils import emojis
from utils.funcs import get_platform_emoji
from .context import Context
from .exceptions import NoChoice, CannotEmbedLinks
PageT = str | dict | discord.Embed
class Paginator(discord.ui.View):
def __init__(self, pages: list[discord.Embed | str], *, ctx: "Context", **kwargs):
super().__init__(timeout=120.0, **kwargs)
if not isinstance(pages, list):
pages = [pages]
self.pages = pages
self.ctx = ctx
self.current: int = 0
self.message: discord.Message = None
self.clear_items()
self.fill_items()
@property
def total(self) -> int:
return len(self.pages) - 1
async def interaction_check(self, interaction: discord.Interaction) -> bool:
if interaction.user and interaction.user.id == self.ctx.author.id:
return True
await interaction.response.send_message(
"This command was not initiated by you.", ephemeral=True
)
return False
async def on_timeout(self) -> None:
if self.message:
try:
await self.message.edit(view=None)
except Exception:
pass
def fill_items(self) -> None:
if self.total > 2:
self.add_item(self.first)
if self.total > 0:
self.add_item(self.previous)
self.add_item(self.stop_session)
self.add_item(self.next)
if self.total > 2:
self.add_item(self.last)
    def _update_labels(self, page: int) -> None:
self.first.disabled = 0 <= page <= 1
self.previous.disabled = page == 0
self.next.disabled = page == self.total
self.last.disabled = self.total - 1 <= page <= self.total
def _get_kwargs_from_page(self, page: PageT) -> dict:
if isinstance(page, dict):
return page
elif isinstance(page, discord.Embed):
return {"content": None, "embed": page}
elif isinstance(page, str):
return {"content": page, "embed": None}
else:
return {}
async def _update(self, interaction: discord.Interaction) -> None:
kwargs = self._get_kwargs_from_page(self.pages[self.current])
self._update_labels(self.current)
if kwargs:
if interaction.response.is_done():
if self.message:
await self.message.edit(**kwargs, view=self)
else:
await interaction.response.edit_message(**kwargs, view=self)
def _ensure_permissions(self):
permissions = self.ctx.channel.permissions_for(self.ctx.me)
if not permissions.send_messages:
return
if not permissions.embed_links:
raise CannotEmbedLinks
async def start(self) -> None:
self._ensure_permissions()
kwargs = self._get_kwargs_from_page(self.pages[0])
self._update_labels(0)
self.message = await self.ctx.send(**kwargs, view=self)
@discord.ui.button(label="<<", style=discord.ButtonStyle.blurple)
async def first(self, button: discord.ui.Button, interaction: discord.Interaction) -> None:
if self.current > 0:
self.current = 0
await self._update(interaction)
@discord.ui.button(label="<", style=discord.ButtonStyle.blurple)
async def previous(self, button: discord.ui.Button, interaction: discord.Interaction) -> None:
if self.current - 1 >= 0:
self.current -= 1
await self._update(interaction)
@discord.ui.button(label="Quit", style=discord.ButtonStyle.red)
async def stop_session(
self, button: discord.ui.Button, interaction: discord.Interaction
) -> None:
await interaction.response.defer()
await interaction.delete_original_message()
self.stop()
@discord.ui.button(label=">", style=discord.ButtonStyle.blurple)
async def next(self, button: discord.ui.Button, interaction: discord.Interaction) -> None:
if self.current + 1 <= self.total:
self.current += 1
await self._update(interaction)
@discord.ui.button(label=">>", style=discord.ButtonStyle.blurple)
async def last(self, button: discord.ui.Button, interaction: discord.Interaction) -> None:
if self.current < self.total:
self.current = self.total
await self._update(interaction)
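# Usage sketch (illustrative; `ctx` is the invoking command context):
#     pages = [discord.Embed(title=f"Page {n}") for n in range(1, 4)]
#     await Paginator(pages, ctx=ctx).start()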
class ProfileManagerView(Paginator):
def __init__(self, pages, **kwargs):
super().__init__(pages, **kwargs)
self.action = None
def fill_items(self) -> None:
self.add_item(self.link)
self.add_item(self.unlink)
self.add_item(self.update)
if self.total == 0:
self.stop_session.row = 1
self.add_item(self.stop_session)
super().fill_items()
async def _handle(self, interaction: discord.Interaction) -> None:
await interaction.response.defer()
await interaction.delete_original_message()
self.stop()
@discord.ui.button(label="Link", style=discord.ButtonStyle.blurple, row=1)
async def link(self, button: discord.ui.Button, interaction: discord.Interaction) -> None:
self.action = "link"
await self._handle(interaction)
@discord.ui.button(label="Unlink", style=discord.ButtonStyle.blurple, row=1)
async def unlink(self, button: discord.ui.Button, interaction: discord.Interaction) -> None:
self.action = "unlink"
await self._handle(interaction)
@discord.ui.button(label="Update", style=discord.ButtonStyle.blurple, row=1)
async def update(self, button: discord.ui.Button, interaction: discord.Interaction) -> None:
self.action = "update"
await self._handle(interaction)
class ChooseSelect(discord.ui.Select):
async def callback(self, interaction: discord.Interaction) -> None:
await self.view.handle(interaction, self.values[0])
class ChooseView(discord.ui.View):
def __init__(
self,
entries: None | list[str] = None,
*,
ctx: "Context",
timeout: float = 120.0,
) -> None:
super().__init__(timeout=timeout)
self.entries = entries
self.ctx = ctx
self.choice: None | str = None
self.message: None | discord.Message = None
async def interaction_check(self, interaction: discord.Interaction) -> bool:
if interaction.user and interaction.user.id == self.ctx.author.id:
return True
await interaction.response.send_message(
"This command was not initiated by you.", ephemeral=True
)
return False
async def on_timeout(self) -> None:
if self.message:
await self.message.delete()
async def handle(self, interaction: discord.Interaction, selected: str) -> None:
self.choice = selected
await interaction.response.defer()
await interaction.delete_original_message()
self.stop()
async def choose_profile(ctx: "Context", message: str, member: discord.Member = None) -> str:
view = ChooseView(ctx=ctx)
select = ChooseSelect(placeholder="Select a profile...")
view.add_item(select)
profiles = await ctx.bot.get_cog("Profile").get_profiles(ctx, member)
for profile in profiles:
id_, platform, username = profile
emoji = get_platform_emoji(platform)
select.add_option(label=f"{username}", value=id_, emoji=emoji)
view.message = await ctx.send(message, view=view)
await view.wait()
if (choice := view.choice) is not None:
return await ctx.bot.get_cog("Profile").get_profile(choice)
raise NoChoice()
async def choose_answer(
entries: list[str | discord.Embed],
*,
ctx: "Context",
timeout: float,
embed: discord.Embed,
) -> str:
view = ChooseView(entries, ctx=ctx, timeout=timeout)
select = ChooseSelect(placeholder="Select the correct answer...")
view.add_item(select)
embed.description = ""
for index, entry in enumerate(entries, start=1):
select.add_option(label=entry)
embed.description = f"{embed.description}{index}. {entry}\n"
view.message = await ctx.send(embed=embed, view=view)
await view.wait()
if (choice := view.choice) is not None:
return choice
raise NoChoice()
async def choose_platform(ctx: "Context") -> str:
options = [
discord.SelectOption(label="PC", value="pc", emoji=emojis.battlenet),
discord.SelectOption(label="Playstation", value="psn", emoji=emojis.psn),
discord.SelectOption(label="XBOX", value="xbl", emoji=emojis.xbl),
discord.SelectOption(label="Nintendo Switch", value="nintendo-switch", emoji=emojis.switch),
]
view = ChooseView(ctx=ctx)
select = ChooseSelect(placeholder="Select a platform...")
view.add_item(select)
for option in options:
select.append_option(option)
view.message = await ctx.send("Select a platform...", view=view)
await view.wait()
if (choice := view.choice) is not None:
return choice
raise NoChoice()
|
11582493
|
expected_output = {
"vrf": {
"default": {
"vrf_index": "0x60000000",
"interfaces": {
"TenGigE0/3/0/0": {
"interface_index": "0xa0004c0",
"enabled": {
"LDP interface": { "via": "config"}
},
},
"HundredGigE0/5/0/0": {
"interface_index": "0xe0000c0",
"disabled": {},
},
"HundredGigE0/5/0/0.100": {
"interface_index": "0xe0001c0",
"disabled": {},
},
"TenGigE0/3/0/1.100": {
"interface_index": "0xa001940",
"disabled": {},
},
},
}
}
}
|
11582539
|
from __future__ import print_function
import sys
if not sys.argv[1:]:
from subprocess import Popen, PIPE
p = Popen([sys.executable, __file__, 'subprocess'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
out, err = p.communicate(b'hello world\n')
code = p.poll()
assert p.poll() == 0, (out, err, code)
assert out.strip() == b'11 chars.', (out, err, code)
# XXX: This is seen sometimes to fail on Travis with the following value in err but a code of 0;
# it seems load related:
# 'Unhandled exception in thread started by \nsys.excepthook is missing\nlost sys.stderr\n'.
# If warnings are enabled, Python 3 has started producing this:
# '...importlib/_bootstrap.py:219: ImportWarning: can't resolve package from __spec__
# or __package__, falling back on __name__ and __path__\n return f(*args, **kwds)\n'
assert err == b'' or b'sys.excepthook' in err or b'Warning' in err, (out, err, code)
elif sys.argv[1:] == ['subprocess']: # pragma: no cover
import gevent
import gevent.monkey
gevent.monkey.patch_all(sys=True)
def printline():
try:
line = raw_input()
except NameError:
line = input()
print('%s chars.' % len(line))
gevent.spawn(printline).join()
else: # pragma: no cover
sys.exit('Invalid arguments: %r' % (sys.argv, ))
|
11582542
|
from django.conf.urls.defaults import patterns, url, include
from hyperadmin.resources.directory.resources import ResourceDirectory
#gets replaced
class SiteResource(ResourceDirectory):
resource_class = 'resourcelisting'
auth_resource = None
@property
def auth_resource(self):
return self.site.auth_resource
def get_prompt(self):
return self._site.name
def get_app_name(self):
return self._site.name
app_name = property(get_app_name)
def get_urls(self):
urlpatterns = super(SiteResource, self).get_urls()
urlpatterns += patterns('',
url(r'^-authentication/',
include(self.auth_resource.urls)),
)
return urlpatterns
|
11582549
|
import os.path as osp
import tempfile
import numpy as np
import pytest
import torch
from mmocr.models.textrecog.convertors import BaseConvertor, CTCConvertor
def _create_dummy_dict_file(dict_file):
chars = list('helowrd')
with open(dict_file, 'w') as fw:
for char in chars:
fw.write(char + '\n')
def test_ctc_label_convertor():
tmp_dir = tempfile.TemporaryDirectory()
# create dummy data
dict_file = osp.join(tmp_dir.name, 'fake_chars.txt')
_create_dummy_dict_file(dict_file)
# test invalid arguments
with pytest.raises(AssertionError):
CTCConvertor(5)
label_convertor = CTCConvertor(dict_file=dict_file, with_unknown=False)
# test init and parse_chars
assert label_convertor.num_classes() == 8
assert len(label_convertor.idx2char) == 8
assert label_convertor.idx2char[0] == '<BLK>'
assert label_convertor.char2idx['h'] == 1
assert label_convertor.unknown_idx is None
# test encode str to tensor
strings = ['hell']
expect_tensor = torch.IntTensor([1, 2, 3, 3])
targets_dict = label_convertor.str2tensor(strings)
assert torch.allclose(targets_dict['targets'][0], expect_tensor)
assert torch.allclose(targets_dict['flatten_targets'], expect_tensor)
assert torch.allclose(targets_dict['target_lengths'], torch.IntTensor([4]))
# test decode output to index
dummy_output = torch.Tensor([[[1, 100, 3, 4, 5, 6, 7, 8],
[100, 2, 3, 4, 5, 6, 7, 8],
[1, 2, 100, 4, 5, 6, 7, 8],
[1, 2, 100, 4, 5, 6, 7, 8],
[100, 2, 3, 4, 5, 6, 7, 8],
[1, 2, 3, 100, 5, 6, 7, 8],
[100, 2, 3, 4, 5, 6, 7, 8],
[1, 2, 3, 100, 5, 6, 7, 8]]])
indexes, scores = label_convertor.tensor2idx(
dummy_output, img_metas=[{
'valid_ratio': 1.0
}])
assert np.allclose(indexes, [[1, 2, 3, 3]])
# test encode_str_label_to_index
with pytest.raises(AssertionError):
label_convertor.str2idx('hell')
tmp_indexes = label_convertor.str2idx(strings)
assert np.allclose(tmp_indexes, [[1, 2, 3, 3]])
    # test decode_index_to_str_label
input_indexes = [[1, 2, 3, 3]]
with pytest.raises(AssertionError):
label_convertor.idx2str('hell')
output_strings = label_convertor.idx2str(input_indexes)
assert output_strings[0] == 'hell'
tmp_dir.cleanup()
def test_base_label_convertor():
with pytest.raises(NotImplementedError):
label_convertor = BaseConvertor()
label_convertor.str2tensor(None)
label_convertor.tensor2idx(None)
|
11582569
|
import sklearn.cluster
import sklearn.metrics.cluster
def cluster_by_kmeans(X, nb_clusters):
"""
xs : embeddings with shape [nb_samples, nb_features]
nb_clusters : in this case, must be equal to number of classes
"""
return sklearn.cluster.KMeans(nb_clusters).fit(X).labels_
def calc_normalized_mutual_information(ys, xs_clustered):
return sklearn.metrics.cluster.normalized_mutual_info_score(xs_clustered, ys)
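# Minimal usage sketch (synthetic embeddings, assumed for illustration only):
# two well-separated Gaussian blobs should give an NMI close to 1.0.
if __name__ == "__main__":
    import numpy as np
    rng = np.random.RandomState(0)
    X = np.vstack([rng.normal(0.0, 0.1, (20, 8)), rng.normal(3.0, 0.1, (20, 8))])
    ys = np.array([0] * 20 + [1] * 20)
    xs_clustered = cluster_by_kmeans(X, nb_clusters=2)
    print(calc_normalized_mutual_information(ys, xs_clustered))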
|
11582573
|
import io, os, sys
def combineSubdirs(dirname, has_hdr_line):
if not os.path.isdir(dirname): return
print(dirname)
subdirs = [dirname + '/' + x for x in os.listdir(dirname) if os.path.isdir(dirname + '/' + x)]
if len(subdirs) == 0: return
all_files = set(os.listdir(subdirs[0]))
for subdir in subdirs:
all_files = all_files.union(set(os.listdir(subdir)))
for filename in all_files:
fout = io.open(dirname + '/' + filename, 'w')
i = 0
for subdir in subdirs:
if not os.path.isfile(subdir + '/' + filename): continue
f = io.open(subdir + '/' + filename)
if has_hdr_line and i != 0:
f.readline()
for line in f: fout.write(line)
f.close()
i += 1
fout.close()
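# Illustrative layout (assumed): given results/exp1/run_a/metrics.csv and
# results/exp1/run_b/metrics.csv, combineSubdirs('results/exp1', True) writes a
# combined results/exp1/metrics.csv, keeping the header line from the first file only.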
if len(sys.argv) != 3:
print('Usage: combine_results_subdirs.py <results_dir> <has hdr line>')
else:
results_dir = sys.argv[1]
has_hdr_line = eval(sys.argv[2])
dirnames = os.listdir(results_dir)
for dirname in dirnames:
combineSubdirs(results_dir + '/' + dirname, has_hdr_line)
|
11582577
|
def isPalindrome(s):
if len(s) <= 1:
return ("Palindrome")
return s[0] == s[-1] and isPalindrome(s[1:-1])
# give input like isPalindrome([1, 2, 2, 1])
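# Example checks (consistent with the recursive definition above):
if __name__ == "__main__":
    print(isPalindrome([1, 2, 2, 1]))  # True
    print(isPalindrome("racecar"))     # True
    print(isPalindrome("hello"))       # False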
|
11582638
|
import unittest
import numpy as np
import torch
from rlil import nn
from rlil.environments import Action
from rlil.policies.deterministic import DeterministicPolicyNetwork
from rlil.memory import ExperienceReplayBuffer
from rlil.initializer import get_replay_buffer, get_n_step
from rlil.utils import Samples
class MockAgent:
def __init__(self, env):
model = nn.Sequential(
nn.Flatten(),
nn.Linear(env.state_space.shape[0],
Action.action_space().shape[0])
)
self.policy_model = DeterministicPolicyNetwork(
model, Action.action_space())
self._state = None
self._action = None
self.replay_buffer = get_replay_buffer()
def act(self, state, reward):
samples = Samples(self._state, self._action, reward, state)
self.replay_buffer.store(samples)
self._state = state
with torch.no_grad():
action = self.policy_model(
state.to(self.policy_model.device))
self._action = Action(action).to("cpu")
return self._action
def make_lazy_agent(self):
return MockLazyAgent(self.policy_model)
def train(self):
pass
class MockLazyAgent:
def __init__(self, policy_model):
self._state = None
self._action = None
self.policy_model = policy_model
self.replay_buffer = None
# for N step replay buffer
self._n_step, self._discount_factor = get_n_step()
def set_replay_buffer(self, env):
self.replay_buffer = ExperienceReplayBuffer(
1e7, env, n_step=self._n_step,
discount_factor=self._discount_factor)
def act(self, state, reward):
samples = Samples(self._state, self._action, reward, state)
self.replay_buffer.store(samples)
self._state = state
with torch.no_grad():
action = self.policy_model(
state.to(self.policy_model.device))
self._action = Action(action).to("cpu")
return self._action
def compute_priorities(self, samples):
return None
|
11582647
|
import pytest
import os
from textwrap import dedent
from ...preprocessors import LimitOutput
from .base import BaseTestPreprocessor
from .. import create_code_cell, create_text_cell
@pytest.fixture
def preprocessor():
return LimitOutput()
class TestLimitOutput(BaseTestPreprocessor):
def test_long_output(self):
nb = self._read_nb(os.path.join("files", "long-output.ipynb"))
cell, = nb.cells
output, = cell.outputs
assert len(output.text.split("\n")) > 1000
pp = LimitOutput()
nb, resources = pp.preprocess(nb, {})
cell, = nb.cells
output, = cell.outputs
assert len(output.text.split("\n")) == 1000
def test_infinite_recursion(self):
nb = self._read_nb(os.path.join("files", "infinite-recursion.ipynb"))
pp = LimitOutput()
nb, resources = pp.preprocess(nb, {})
cell, = nb.cells
output, = cell.outputs
assert len(output.traceback) == 100
|
11582656
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import models.models as models
import torch
from torch import nn
from torch.autograd import Variable
import torchvision
import torchvision.datasets as dsets
import torchvision.transforms as transforms
import utils.utils as utils
from PIL import Image
from utils.logger import Logger
""" gpu """
gpu_id = [6]
utils.cuda_devices(gpu_id)
""" param """
epochs = 6
batch_size = 3
lr = 0.0002
dataset_dir = '../Cycledata/market2duke'
use_tensorboard = 1
if use_tensorboard:
log_dir = './checkpoints/cyclegan'
utils.mkdir(log_dir)
Logger = Logger(log_dir)
""" data """
load_size_w = 144
load_size_h = 286
crop_size_w = 128
crop_size_h = 256
transform = transforms.Compose(
[transforms.RandomHorizontalFlip(),
transforms.Resize((load_size_h, load_size_w), Image.BICUBIC),
transforms.RandomCrop((crop_size_h, crop_size_w)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5] * 3, std=[0.5] * 3)])
test_transform = transforms.Compose(
[transforms.Resize((crop_size_h, crop_size_w), Image.BICUBIC),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5] * 3, std=[0.5] * 3)])
dataset_dirs = utils.reorganize(dataset_dir)
a_data = dsets.ImageFolder(dataset_dirs['trainA'], transform=transform)
b_data = dsets.ImageFolder(dataset_dirs['trainB'], transform=transform)
a_test_data = dsets.ImageFolder(dataset_dirs['testA'], transform=test_transform)
b_test_data = dsets.ImageFolder(dataset_dirs['testB'], transform=test_transform)
a_loader = torch.utils.data.DataLoader(a_data, batch_size=batch_size, shuffle=True, num_workers=0)
b_loader = torch.utils.data.DataLoader(b_data, batch_size=batch_size, shuffle=True, num_workers=0)
a_test_loader = torch.utils.data.DataLoader(a_test_data, batch_size=1, shuffle=True, num_workers=0)
b_test_loader = torch.utils.data.DataLoader(b_test_data, batch_size=1, shuffle=True, num_workers=0)
a_fake_pool = utils.ItemPool()
b_fake_pool = utils.ItemPool()
""" model """
Da = models.Discriminator()
Db = models.Discriminator()
Ga = models.Generator()
Gb = models.Generator()
MSE = nn.MSELoss()
L1 = nn.L1Loss()
utils.cuda([Da, Db, Ga, Gb])
da_optimizer = torch.optim.Adam(Da.parameters(), lr=lr, betas=(0.5, 0.999))
db_optimizer = torch.optim.Adam(Db.parameters(), lr=lr, betas=(0.5, 0.999))
ga_optimizer = torch.optim.Adam(Ga.parameters(), lr=lr, betas=(0.5, 0.999))
gb_optimizer = torch.optim.Adam(Gb.parameters(), lr=lr, betas=(0.5, 0.999))
""" load checkpoint """
ckpt_dir = './checkpoints/cyclegan'
utils.mkdir(ckpt_dir)
try:
ckpt = utils.load_checkpoint(ckpt_dir)
start_epoch = ckpt['epoch']
Da.load_state_dict(ckpt['Da'])
Db.load_state_dict(ckpt['Db'])
Ga.load_state_dict(ckpt['Ga'])
Gb.load_state_dict(ckpt['Gb'])
da_optimizer.load_state_dict(ckpt['da_optimizer'])
db_optimizer.load_state_dict(ckpt['db_optimizer'])
ga_optimizer.load_state_dict(ckpt['ga_optimizer'])
gb_optimizer.load_state_dict(ckpt['gb_optimizer'])
except Exception:
print(' [*] No checkpoint!')
start_epoch = 0
""" run """
loss = {}
for epoch in range(start_epoch, epochs):
    for i, (a_real, b_real) in enumerate(zip(a_loader, b_loader)):
# step
step = epoch * min(len(a_loader), len(b_loader)) + i + 1
# set train
Ga.train()
Gb.train()
# leaves
a_real = Variable(a_real[0])
b_real = Variable(b_real[0])
a_real, b_real = utils.cuda([a_real, b_real])
# train G
a_fake = Ga(b_real)
b_fake = Gb(a_real)
a_rec = Ga(b_fake)
b_rec = Gb(a_fake)
# gen losses
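        # LSGAN-style (least-squares GAN) objective: the generator loss is the MSE
        # between the discriminator scores on generated images and a target label of ones.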
a_f_dis = Da(a_fake)
b_f_dis = Db(b_fake)
r_label = utils.cuda(Variable(torch.ones(a_f_dis.size())))
a_gen_loss = MSE(a_f_dis, r_label)
b_gen_loss = MSE(b_f_dis, r_label)
# identity loss
b2b = Gb(b_real)
a2a = Ga(a_real)
idt_loss_b = L1(b2b, b_real)
idt_loss_a = L1(a2a, a_real)
idt_loss = idt_loss_a + idt_loss_b
# rec losses
a_rec_loss = L1(a_rec, a_real)
b_rec_loss = L1(b_rec, b_real)
rec_loss = a_rec_loss + b_rec_loss
# g loss
g_loss = a_gen_loss + b_gen_loss + rec_loss * 10.0 + 5.0 * idt_loss
loss['G/a_gen_loss'] = a_gen_loss.item()
loss['G/b_gen_loss'] = b_gen_loss.item()
loss['G/rec_loss'] = rec_loss.item()
loss['G/idt_loss'] = idt_loss.item()
loss['G/g_loss'] = g_loss.item()
# backward
Ga.zero_grad()
Gb.zero_grad()
g_loss.backward()
ga_optimizer.step()
gb_optimizer.step()
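        # The discriminators are trained on fakes drawn from a history pool
        # (utils.ItemPool), which appears to implement the standard CycleGAN image
        # buffer that mixes current and past generator outputs to stabilize training.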
# leaves
a_fake = Variable(torch.Tensor(a_fake_pool([a_fake.cpu().data.numpy()])[0]))
b_fake = Variable(torch.Tensor(b_fake_pool([b_fake.cpu().data.numpy()])[0]))
a_fake, b_fake = utils.cuda([a_fake, b_fake])
# train D
a_r_dis = Da(a_real)
a_f_dis = Da(a_fake)
b_r_dis = Db(b_real)
b_f_dis = Db(b_fake)
r_label = utils.cuda(Variable(torch.ones(a_f_dis.size())))
f_label = utils.cuda(Variable(torch.zeros(a_f_dis.size())))
# d loss
a_d_r_loss = MSE(a_r_dis, r_label)
a_d_f_loss = MSE(a_f_dis, f_label)
b_d_r_loss = MSE(b_r_dis, r_label)
b_d_f_loss = MSE(b_f_dis, f_label)
a_d_loss = (a_d_r_loss + a_d_f_loss)*0.5
b_d_loss = (b_d_r_loss + b_d_f_loss)*0.5
loss['D/a_d_f_loss'] = a_d_f_loss.item()
loss['D/b_d_f_loss'] = b_d_f_loss.item()
loss['D/a_d_r_loss'] = a_d_r_loss.item()
loss['D/b_d_r_loss'] = b_d_r_loss.item()
# backward
Da.zero_grad()
Db.zero_grad()
a_d_loss.backward()
b_d_loss.backward()
da_optimizer.step()
db_optimizer.step()
if (i + 1) % 10 == 0:
print("Epoch: (%3d) (%5d/%5d)" % (epoch, i + 1, min(len(a_loader), len(b_loader))))
print("g_loss: (%f) a_d_loss: (%f) b_d_loss: (%f)" % (g_loss, a_d_loss, b_d_loss ))
if use_tensorboard:
for tag, value in loss.items():
                    logger.scalar_summary(tag, value, i)
if (i + 1) % 50 == 0:
with torch.no_grad():
Ga.eval()
Gb.eval()
                a_real_test = Variable(next(iter(a_test_loader))[0])
                b_real_test = Variable(next(iter(b_test_loader))[0])
a_real_test, b_real_test = utils.cuda([a_real_test, b_real_test])
                # generate test samples
a_fake_test = Ga(b_real_test)
b_fake_test = Gb(a_real_test)
a_rec_test = Ga(b_fake_test)
b_rec_test = Gb(a_fake_test)
pic = (torch.cat([a_real_test, b_fake_test, a_rec_test, b_real_test, a_fake_test, b_rec_test], dim=0).data + 1) / 2.0
save_dir = './sample_images_while_training/cyclegan'
utils.mkdir(save_dir)
torchvision.utils.save_image(pic, '%s/Epoch_(%d)_(%dof%d).jpg' % (save_dir, epoch, i + 1, min(len(a_loader), len(b_loader))), nrow=3)
utils.save_checkpoint({'epoch': epoch + 1,
'Da': Da.state_dict(),
'Db': Db.state_dict(),
'Ga': Ga.state_dict(),
'Gb': Gb.state_dict(),
'da_optimizer': da_optimizer.state_dict(),
'db_optimizer': db_optimizer.state_dict(),
'ga_optimizer': ga_optimizer.state_dict(),
'gb_optimizer': gb_optimizer.state_dict()},
'%s/Epoch_(%d).ckpt' % (ckpt_dir, epoch + 1),
max_keep=4)
|
11582767
|
from dataset import *
from post_process import Post_Process_CR
from visualize import *
from forward import *
from evaluation.eval_func import *
# utils
from libs.utils import _init_fn
from libs.load_model import *
def prepare_dataloader(cfg, dict_DB):
# train dataloader
if cfg.run_mode == 'train':
dataset = Train_Dataset_RNet(cfg)
trainloader = torch.utils.data.DataLoader(dataset=dataset,
batch_size=cfg.batch_size['img'],
shuffle=True,
num_workers=cfg.num_workers,
worker_init_fn=_init_fn)
dict_DB['trainloader'] = trainloader
# test dataloader
if cfg.test_dataset == 'ICCV_2017':
dataset = ICCV_Test_Dataset(cfg)
testloader = torch.utils.data.DataLoader(dataset=dataset,
batch_size=cfg.batch_size['img'],
shuffle=False,
num_workers=cfg.num_workers,
worker_init_fn=_init_fn)
if cfg.test_dataset == 'NYU':
dataset = NYU_Test_Dataset(cfg)
testloader = torch.utils.data.DataLoader(dataset=dataset,
batch_size=cfg.batch_size['img'],
shuffle=False,
num_workers=cfg.num_workers,
worker_init_fn=_init_fn)
if cfg.test_dataset == 'SYM_Hard':
dataset = SYM_Hard_Test_Dataset(cfg)
testloader = torch.utils.data.DataLoader(dataset=dataset,
batch_size=cfg.batch_size['img'],
shuffle=False,
num_workers=cfg.num_workers,
worker_init_fn=_init_fn)
dict_DB['testloader'] = testloader
return dict_DB
def prepare_model(cfg, dict_DB):
if 'test' in cfg.run_mode:
dict_DB = load_DNet_for_test(cfg, dict_DB)
dict_DB = load_RNet_for_test(cfg, dict_DB)
if 'train' in cfg.run_mode:
dict_DB = load_DNet_for_test(cfg, dict_DB)
dict_DB = load_RNet_for_train(cfg, dict_DB)
dict_DB['forward_model'] = Forward_Model(cfg=cfg)
return dict_DB
def prepare_postprocessing(cfg, dict_DB):
dict_DB['CR_process'] = Post_Process_CR(dict_DB)
return dict_DB
def prepare_visualization(cfg, dict_DB):
dict_DB['visualize'] = Visualize_plt(cfg=cfg)
return dict_DB
def prepare_evaluation(cfg, dict_DB):
dict_DB['eval_func'] = Evaluation_Function(cfg=cfg)
return dict_DB
def prepare_training(cfg, dict_DB):
logfile = cfg.output_dir + 'train/log/logfile.txt'
mkdir(path=cfg.output_dir + 'train/log/')
    if cfg.run_mode == 'train' and cfg.resume:
rmfile(path=logfile)
val_result = {'AUC_A': 0}
dict_DB['val_result'] = val_result
dict_DB['epoch'] = 0
dict_DB['logfile'] = logfile
return dict_DB
|
11582776
|
org_all_repos = """
query ($owner: String!, $endCursor: String) {
organization(login: $owner) {
repositories(first: 100, after: $endCursor) {
pageInfo {
hasNextPage
endCursor
}
totalCount
edges {
node {
nameWithOwner
name
isPrivate
... RepoTotalCounts
... RepoDefaultBranch
}
}
}
}
}
fragment RepoTotalCounts on Repository {
issues {
totalCount
}
openIssues: issues(states: OPEN) {
totalCount
}
closedIssues: issues(states: CLOSED) {
totalCount
}
pullRequests {
totalCount
}
openPullRequests: pullRequests(states: OPEN) {
totalCount
}
mergedPullRequests: pullRequests(states: MERGED) {
totalCount
}
closedPullRequests: pullRequests(states: CLOSED) {
totalCount
}
forkCount
# GraphQL API bug: below does not give accurate count
# forks {
# totalCount
# }
stargazers {
totalCount
}
watchers {
totalCount
}
# Data on collaborators requires repository push access
# collaborators {
# totalCount
# }
# directCollaborators: collaborators(affiliation: DIRECT) {
# totalCount
# }
# outsideCollaborators: collaborators(affiliation: OUTSIDE) {
# totalCount
# }
}
fragment RepoDefaultBranch on Repository {
defaultBranchRef {
name
associatedPullRequests {
totalCount
}
target {
... on Commit {
history(first: 0) {
totalCount
}
}
}
}
}
"""
repo_wise = """
query ($owner:String!, $repo:String!) {
repository(owner: $owner, name: $repo) {
nameWithOwner
name
isPrivate
... RepoTotalCounts
... RepoDefaultBranch
}
}
fragment RepoTotalCounts on Repository {
issues {
totalCount
}
openIssues: issues(states: OPEN) {
totalCount
}
closedIssues: issues(states: CLOSED) {
totalCount
}
pullRequests {
totalCount
}
openPullRequests: pullRequests(states: OPEN) {
totalCount
}
mergedPullRequests: pullRequests(states: MERGED) {
totalCount
}
closedPullRequests: pullRequests(states: CLOSED) {
totalCount
}
forkCount
# GraphQL API bug: below does not give accurate count
# forks {
# totalCount
# }
stargazers {
totalCount
}
watchers {
totalCount
}
# Data on collaborators requires repository push access
# collaborators {
# totalCount
# }
# directCollaborators: collaborators(affiliation: DIRECT) {
# totalCount
# }
# outsideCollaborators: collaborators(affiliation: OUTSIDE) {
# totalCount
# }
}
fragment RepoDefaultBranch on Repository {
defaultBranchRef {
name
associatedPullRequests {
totalCount
}
target {
... on Commit {
history(first: 0) {
totalCount
}
}
}
}
}
"""
|
11582789
|
import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
class PusherMugEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
utils.EzPickle.__init__(self)
mujoco_env.MujocoEnv.__init__(self, 'pusher_mug.xml', 5)
def _step(self, a):
vec_1 = self.get_body_com("object")-self.get_body_com("tips_arm")
vec_2 = self.get_body_com("object")-self.get_body_com("goal")
reward_near = - np.linalg.norm(vec_1)
reward_dist = - np.linalg.norm(vec_2)
reward_ctrl = - np.square(a).sum()
#the coefficients in the following line are ad hoc
reward = reward_dist + 0.1*reward_ctrl + 0.5*reward_near
self.do_simulation(a, self.frame_skip)
ob = self._get_obs()
done = False
return ob, reward, done, dict(reward_dist=reward_dist, reward_ctrl=reward_ctrl)
def viewer_setup(self):
self.viewer.cam.trackbodyid=0
self.viewer.cam.distance = 4.0
def reset_model(self):
qpos = self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq) + self.init_qpos
while True:
self.object = np.concatenate([self.np_random.uniform(low=-0.3, high=-0.05, size=1),
self.np_random.uniform(low=0.25, high=0.65, size=1)])
#self.goal = self.np_random.uniform(low=-1, high=1, size=2)
self.goal = np.asarray([-0.05, 0.45])
# if np.linalg.norm(self.object) > 0.7 and np.linalg.norm(self.goal) > 0.7:
if np.linalg.norm(self.object-self.goal) > 0.17: break
qpos[-4:-2] = self.object
qpos[-2:] = self.goal
qvel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)
qvel[-4:] = 0
self.set_state(qpos, qvel)
#import IPython; IPython.embed()
return self._get_obs()
def _get_obs(self):
return np.concatenate([
self.model.data.qpos.flat[:-4],
self.model.data.qvel.flat[:-4],
#self.get_body_com("r_wrist_roll_link"),
self.get_body_com("tips_arm"),
self.get_body_com("object"),
self.get_body_com("goal"),
])
# theta = self.model.data.qpos.flat[:-4]
# return np.concatenate([
# np.sin(theta),
# np.cos(theta),
# self.model.data.qvel.flat[:-4],
# self.get_body_com("r_wrist_roll_link"),
# self.get_body_com("object"),
# self.get_body_com("goal"),
# ])
|
11582791
|
import argparse
import colour
import inspect
import importlib
import os
import sys
import yaml
from contextlib import contextmanager
from screeninfo import get_monitors
from manimlib.utils.config_ops import merge_dicts_recursively
from manimlib.utils.init_config import init_customization
from manimlib.logger import log
__config_file__ = "custom_config.yml"
def parse_cli():
try:
parser = argparse.ArgumentParser()
module_location = parser.add_mutually_exclusive_group()
module_location.add_argument(
"file",
nargs="?",
help="path to file holding the python code for the scene",
)
parser.add_argument(
"scene_names",
nargs="*",
help="Name of the Scene class you want to see",
)
parser.add_argument(
"-w", "--write_file",
action="store_true",
help="Render the scene as a movie file",
)
parser.add_argument(
"-s", "--skip_animations",
action="store_true",
help="Save the last frame",
)
parser.add_argument(
"-l", "--low_quality",
action="store_true",
help="Render at a low quality (for faster rendering)",
)
parser.add_argument(
"-m", "--medium_quality",
action="store_true",
help="Render at a medium quality",
)
parser.add_argument(
"--hd",
action="store_true",
help="Render at a 1080p",
)
parser.add_argument(
"--uhd",
action="store_true",
help="Render at a 4k",
)
parser.add_argument(
"-f", "--full_screen",
action="store_true",
help="Show window in full screen",
)
parser.add_argument(
"-g", "--save_pngs",
action="store_true",
help="Save each frame as a png",
)
parser.add_argument(
"-i", "--gif",
action="store_true",
help="Save the video as gif",
)
parser.add_argument(
"-t", "--transparent",
action="store_true",
help="Render to a movie file with an alpha channel",
)
parser.add_argument(
"-q", "--quiet",
action="store_true",
help="",
)
parser.add_argument(
"-a", "--write_all",
action="store_true",
help="Write all the scenes from a file",
)
parser.add_argument(
"-o", "--open",
action="store_true",
help="Automatically open the saved file once its done",
)
parser.add_argument(
"--finder",
action="store_true",
help="Show the output file in finder",
)
parser.add_argument(
"--config",
action="store_true",
help="Guide for automatic configuration",
)
parser.add_argument(
"--file_name",
help="Name for the movie or image file",
)
parser.add_argument(
"-n", "--start_at_animation_number",
help="Start rendering not from the first animation, but"
"from another, specified by its index. If you pass"
"in two comma separated values, e.g. \"3,6\", it will end"
"the rendering at the second value",
)
parser.add_argument(
"-e", "--embed", metavar="LINENO",
help="Takes a line number as an argument, and results"
"in the scene being called as if the line `self.embed()`"
"was inserted into the scene code at that line number."
)
parser.add_argument(
"-r", "--resolution",
help="Resolution, passed as \"WxH\", e.g. \"1920x1080\"",
)
parser.add_argument(
"--frame_rate",
help="Frame rate, as an integer",
)
parser.add_argument(
"-c", "--color",
help="Background color",
)
parser.add_argument(
"--leave_progress_bars",
action="store_true",
help="Leave progress bars displayed in terminal",
)
parser.add_argument(
"--video_dir",
help="Directory to write video",
)
parser.add_argument(
"--config_file",
help="Path to the custom configuration file",
)
parser.add_argument(
"-v", "--version",
action="store_true",
help="Display the version of manimgl"
)
parser.add_argument(
"--log-level",
help="Level of messages to Display, can be DEBUG / INFO / WARNING / ERROR / CRITICAL"
)
args = parser.parse_args()
return args
except argparse.ArgumentError as err:
log.error(str(err))
sys.exit(2)
def get_manim_dir():
manimlib_module = importlib.import_module("manimlib")
manimlib_dir = os.path.dirname(inspect.getabsfile(manimlib_module))
return os.path.abspath(os.path.join(manimlib_dir, ".."))
def get_module(file_name):
if file_name is None:
return None
module_name = file_name.replace(os.sep, ".").replace(".py", "")
spec = importlib.util.spec_from_file_location(module_name, file_name)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
@contextmanager
def insert_embed_line(file_name, lineno):
with open(file_name, 'r') as fp:
lines = fp.readlines()
line = lines[lineno - 1]
n_spaces = len(line) - len(line.lstrip())
lines.insert(lineno, " " * n_spaces + "self.embed()\n")
alt_file = file_name.replace(".py", "_inserted_embed.py")
with open(alt_file, 'w') as fp:
fp.writelines(lines)
try:
yield alt_file
finally:
os.remove(alt_file)
def get_custom_config():
global __config_file__
global_defaults_file = os.path.join(get_manim_dir(), "manimlib", "default_config.yml")
if os.path.exists(global_defaults_file):
with open(global_defaults_file, "r") as file:
config = yaml.safe_load(file)
if os.path.exists(__config_file__):
with open(__config_file__, "r") as file:
local_defaults = yaml.safe_load(file)
if local_defaults:
config = merge_dicts_recursively(
config,
local_defaults,
)
else:
with open(__config_file__, "r") as file:
config = yaml.safe_load(file)
return config
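# Note: the configuration consumed below is expected to provide at least the keys
# read in this module: "directories" (with "temporary_storage", "mirror_module_path"
# and "output"), "break_into_partial_movies", "window_monitor", "full_screen",
# "camera_qualities" (including a "default_quality" entry) and "style"
# (with "background_color").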
def check_temporary_storage(config):
if config["directories"]["temporary_storage"] == "" and sys.platform == "win32":
log.warning(
"You may be using Windows platform and have not specified the path of"
" `temporary_storage`, which may cause OSError. So it is recommended"
" to specify the `temporary_storage` in the config file (.yml)"
)
def get_configuration(args):
global __config_file__
# ensure __config_file__ always exists
if args.config_file is not None:
if not os.path.exists(args.config_file):
log.error(f"Can't find {args.config_file}.")
if sys.platform == 'win32':
log.info(f"Copying default configuration file to {args.config_file}...")
os.system(f"copy default_config.yml {args.config_file}")
elif sys.platform in ["linux2", "darwin"]:
log.info(f"Copying default configuration file to {args.config_file}...")
os.system(f"cp default_config.yml {args.config_file}")
else:
log.info("Please create the configuration file manually.")
log.info("Read configuration from default_config.yml.")
else:
__config_file__ = args.config_file
global_defaults_file = os.path.join(get_manim_dir(), "manimlib", "default_config.yml")
if not (os.path.exists(global_defaults_file) or os.path.exists(__config_file__)):
log.info("There is no configuration file detected. Switch to the config file initializer:")
init_customization()
elif not os.path.exists(__config_file__):
log.info(f"Using the default configuration file, which you can modify in `{global_defaults_file}`")
log.info(
"If you want to create a local configuration file, you can create a file named"
f" `{__config_file__}`, or run `manimgl --config`"
)
custom_config = get_custom_config()
check_temporary_storage(custom_config)
write_file = any([args.write_file, args.open, args.finder])
if args.transparent:
file_ext = ".mov"
elif args.gif:
file_ext = ".gif"
else:
file_ext = ".mp4"
file_writer_config = {
"write_to_movie": not args.skip_animations and write_file,
"break_into_partial_movies": custom_config["break_into_partial_movies"],
"save_last_frame": args.skip_animations and write_file,
"save_pngs": args.save_pngs,
# If -t is passed in (for transparent), this will be RGBA
"png_mode": "RGBA" if args.transparent else "RGB",
"movie_file_extension": file_ext,
"mirror_module_path": custom_config["directories"]["mirror_module_path"],
"output_directory": args.video_dir or custom_config["directories"]["output"],
"file_name": args.file_name,
"input_file_path": args.file or "",
"open_file_upon_completion": args.open,
"show_file_location_upon_completion": args.finder,
"quiet": args.quiet,
}
if args.embed is None:
module = get_module(args.file)
else:
with insert_embed_line(args.file, int(args.embed)) as alt_file:
module = get_module(alt_file)
config = {
"module": module,
"scene_names": args.scene_names,
"file_writer_config": file_writer_config,
"quiet": args.quiet or args.write_all,
"write_all": args.write_all,
"skip_animations": args.skip_animations,
"start_at_animation_number": args.start_at_animation_number,
"end_at_animation_number": None,
"preview": not write_file,
"leave_progress_bars": args.leave_progress_bars,
}
# Camera configuration
config["camera_config"] = get_camera_configuration(args, custom_config)
# Default to making window half the screen size
# but make it full screen if -f is passed in
monitors = get_monitors()
mon_index = custom_config["window_monitor"]
monitor = monitors[min(mon_index, len(monitors) - 1)]
window_width = monitor.width
if not (args.full_screen or custom_config["full_screen"]):
window_width //= 2
window_height = window_width * 9 // 16
config["window_config"] = {
"size": (window_width, window_height),
}
# Arguments related to skipping
stan = config["start_at_animation_number"]
if stan is not None:
if "," in stan:
start, end = stan.split(",")
config["start_at_animation_number"] = int(start)
config["end_at_animation_number"] = int(end)
else:
config["start_at_animation_number"] = int(stan)
return config
def get_camera_configuration(args, custom_config):
camera_config = {}
camera_qualities = get_custom_config()["camera_qualities"]
if args.low_quality:
quality = camera_qualities["low"]
elif args.medium_quality:
quality = camera_qualities["medium"]
elif args.hd:
quality = camera_qualities["high"]
elif args.uhd:
quality = camera_qualities["ultra_high"]
else:
quality = camera_qualities[camera_qualities["default_quality"]]
if args.resolution:
quality["resolution"] = args.resolution
if args.frame_rate:
quality["frame_rate"] = int(args.frame_rate)
width_str, height_str = quality["resolution"].split("x")
width = int(width_str)
height = int(height_str)
camera_config.update({
"pixel_width": width,
"pixel_height": height,
"frame_rate": quality["frame_rate"],
})
try:
bg_color = args.color or custom_config["style"]["background_color"]
camera_config["background_color"] = colour.Color(bg_color)
except ValueError as err:
log.error("Please use a valid color")
log.error(err)
sys.exit(2)
# If rendering a transparent image/move, make sure the
# scene has a background opacity of 0
if args.transparent:
camera_config["background_opacity"] = 0
return camera_config
|
11582804
|
import enum
class CurrentMediaStateValues(enum.IntEnum):
"""States that a TV can be."""
PLAYING = 0
PAUSED = 1
STOPPED = 2
class TargetMediaStateValues(enum.IntEnum):
"""States that a TV can be set to."""
PLAY = 0
PAUSE = 1
STOP = 2
class RemoteKeyValues(enum.IntEnum):
"""Keys that can be send using the Remote Key characteristic."""
REWIND = 0
FAST_FORWARD = 1
NEXT_TRACK = 2
PREVIOUS_TRACK = 3
ARROW_UP = 4
ARROW_DOWN = 5
ARROW_LEFT = 6
ARROW_RIGHT = 7
SELECT = 8
BACK = 9
EXIT = 10
PLAY_PAUSE = 11
INFORMATION = 15
class InputEventValues(enum.IntEnum):
"""Types of button press for CharacteristicsTypes.INPUT_EVENT."""
SINGLE_PRESS = 0
DOUBLE_PRESS = 1
LONG_PRESS = 2
class HeatingCoolingCurrentValues(enum.IntEnum):
"""What is a thermostat currently doing."""
IDLE = 0
HEATING = 1
COOLING = 2
class HeatingCoolingTargetValues(enum.IntEnum):
"""What is the current 'goal' for the thermostat."""
OFF = 0
HEAT = 1
COOL = 2
AUTO = 3
class InUseValues(enum.IntEnum):
"""Whether or not something is in use."""
NOT_IN_USE = 0
IN_USE = 1
class IsConfiguredValues(enum.IntEnum):
"""Whether or not something is configured."""
NOT_CONFIGURED = 0
CONFIGURED = 1
class ProgramModeValues(enum.IntEnum):
"""Whether or not a program is set."""
NO_PROGRAM_SCHEDULED = 0
PROGRAM_SCHEDULED = 1
PROGRAM_SCHEDULED_MANUAL_MODE = 2
class ValveTypeValues(enum.IntEnum):
"""The type of valve."""
GENERIC_VALVE = 0
IRRIGATION = 1
SHOWER_HEAD = 2
WATER_FAUCET = 3
class ActivationStateValues(enum.IntEnum):
"""Possible values for the current status of an accessory.
https://developer.apple.com/documentation/homekit/hmcharacteristicvalueactivationstate"""
INACTIVE = 0
ACTIVE = 1
class SwingModeValues(enum.IntEnum):
"""Possible values for fan movement.
https://developer.apple.com/documentation/homekit/hmcharacteristicvalueswingmode"""
DISABLED = 0
ENABLED = 1
class CurrentHeaterCoolerStateValues(enum.IntEnum):
"""Possible values for the current state of a device that heats or cools.
https://developer.apple.com/documentation/homekit/hmcharacteristicvaluecurrentheatercoolerstate"""
INACTIVE = 0
IDLE = 1
HEATING = 2
COOLING = 3
class TargetHeaterCoolerStateValues(enum.IntEnum):
"""Possible values for the target state of a device that heats or cools.
https://developer.apple.com/documentation/homekit/hmcharacteristicvaluetargetheatercoolerstate"""
AUTOMATIC = 0
HEAT = 1
COOL = 2
class StreamingStatusValues(enum.IntEnum):
"""The current streaming state of a camera."""
AVAILABLE = 0
IN_USE = 1
UNAVAILABLE = 2
class SessionControlCommandValues(enum.IntEnum):
"""Session control commands."""
END_SESSION = 0
START_SESSION = 1
SUSPEND_SESSION = 2
RESUME_SESSION = 3
RECONFIGURE_SESSION = 4
class VideoCodecTypeValues(enum.IntEnum):
H264 = 0
class ProfileIDValues(enum.IntEnum):
"""
The type of H.264 profile used.
3-255 are vendor specific.
"""
CONTRAINED_BASELINE_PROFILE = 0
MAIN_PROFILE = 1
HIGH_PROFILE = 2
class ProfileSupportLevelValues(enum.IntEnum):
"""
3-255 are reserved by Apple.
"""
THREE_ONE = 0
THREE_TWO = 1
FOUR = 2
class PacketizationModeValues(enum.IntEnum):
"""
1 - 255 are reserved by Apple.
"""
NON_INTERLEAVED_MODE = 0
class CVOEnabledValues(enum.IntEnum):
NOT_SUPPORTED = 0
SUPPORTED = 1
class AudioCodecValues(enum.IntEnum):
"""
7-255 reserved for Apple.
"""
AAC_ELD = 2
OPUS = 3
AMR = 5
AMR_WB = 6
class BitRateValues(enum.IntEnum):
VARIABLE = 0
CONSTANT = 1
class SampleRateValues(enum.IntEnum):
EIGHT_KHZ = 0
SIXTEEN_KHZ = 1
TWENTY_FOUR_KHZ = 2
class SRTPCryptoSuiteValues(enum.IntEnum):
AES_CM_128_HMAC_SHA1_80 = 0
AES_256_CM_HMAC_SHA1_80 = 1
DISABLED = 2
|
11582816
|
import abc
from typing import (
Dict,
Any,
TypeVar,
Sequence,
NamedTuple,
Optional,
List,
Union,
Generic,
)
import torch
EnvType = TypeVar("EnvType")
DistributionType = TypeVar("DistributionType")
class RLStepResult(NamedTuple):
observation: Optional[Any]
reward: Optional[Union[float, List[float]]]
done: Optional[bool]
info: Optional[Dict[str, Any]]
def clone(self, new_info: Dict[str, Any]):
return RLStepResult(
observation=self.observation
if "observation" not in new_info
else new_info["observation"],
reward=self.reward if "reward" not in new_info else new_info["reward"],
done=self.done if "done" not in new_info else new_info["done"],
info=self.info if "info" not in new_info else new_info["info"],
)
def merge(self, other: "RLStepResult"):
return RLStepResult(
observation=self.observation
if other.observation is None
else other.observation,
reward=self.reward if other.reward is None else other.reward,
done=self.done if other.done is None else other.done,
info={
**(self.info if self.info is not None else {}),
                **(other.info if other.info is not None else {}),
},
)
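# Illustrative note: `merge` prefers fields from `other` whenever they are set, so
# RLStepResult(obs, 1.0, False, None).merge(RLStepResult(None, None, True, None))
# keeps the original observation and reward but takes done=True from `other`.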
class ActorCriticOutput(tuple, Generic[DistributionType]):
distributions: DistributionType
values: torch.FloatTensor
extras: Dict[str, Any]
# noinspection PyTypeChecker
def __new__(
cls,
distributions: DistributionType,
values: torch.FloatTensor,
extras: Dict[str, Any],
):
self = tuple.__new__(cls, (distributions, values, extras))
self.distributions = distributions
self.values = values
self.extras = extras
return self
def __repr__(self) -> str:
return (
f"Group(distributions={self.distributions},"
f" values={self.values},"
f" extras={self.extras})"
)
class Loss(abc.ABC):
def __init__(self, *args, **kwargs):
pass
@abc.abstractmethod
def loss(self, *args, **kwargs):
raise NotImplementedError()
class Memory(Dict):
def __init__(self, *args, **kwargs):
super().__init__()
if len(args) > 0:
assert len(args) == 1, (
"Only one of Sequence[Tuple[str, Tuple[torch.Tensor, int]]]"
"or Dict[str, Tuple[torch.Tensor, int]] accepted as unnamed args"
)
if isinstance(args[0], Sequence):
for key, tensor_dim in args[0]:
assert (
len(tensor_dim) == 2
), "Only Tuple[torch.Tensor, int]] accepted as second item in Tuples"
tensor, dim = tensor_dim
self.check_append(key, tensor, dim)
elif isinstance(args[0], Dict):
for key in args[0]:
assert (
len(args[0][key]) == 2
), "Only Tuple[torch.Tensor, int]] accepted as values in Dict"
tensor, dim = args[0][key]
self.check_append(key, tensor, dim)
elif len(kwargs) > 0:
for key in kwargs:
assert (
len(kwargs[key]) == 2
), "Only Tuple[torch.Tensor, int]] accepted as keyword arg"
tensor, dim = kwargs[key]
self.check_append(key, tensor, dim)
def check_append(
self, key: str, tensor: torch.Tensor, sampler_dim: int
) -> "Memory":
"""Appends a new memory type given its identifier, its memory tensor
and its sampler dim.
# Parameters
key: string identifier of the memory type
tensor: memory tensor
sampler_dim: sampler dimension
# Returns
Updated Memory
"""
assert isinstance(key, str), "key {} must be str".format(key)
assert isinstance(
tensor, torch.Tensor
), "tensor {} must be torch.Tensor".format(tensor)
assert isinstance(sampler_dim, int), "sampler_dim {} must be int".format(
sampler_dim
)
assert key not in self, "Reused key {}".format(key)
assert (
0 <= sampler_dim < len(tensor.shape)
), "Got sampler_dim {} for tensor with shape {}".format(
sampler_dim, tensor.shape
)
self[key] = (tensor, sampler_dim)
return self
def tensor(self, key: str) -> torch.Tensor:
"""Returns the memory tensor for a given memory type.
# Parameters
key: string identifier of the memory type
# Returns
Memory tensor for type `key`
"""
assert key in self, "Missing key {}".format(key)
return self[key][0]
def sampler_dim(self, key: str) -> int:
"""Returns the sampler dimension for the given memory type.
# Parameters
key: string identifier of the memory type
# Returns
The sampler dim
"""
assert key in self, "Missing key {}".format(key)
return self[key][1]
def sampler_select(self, keep: Sequence[int]) -> "Memory":
"""Equivalent to PyTorch index_select along the `sampler_dim` of each
memory type.
# Parameters
keep: a list of sampler indices to keep
# Returns
Selected memory
"""
res = Memory()
valid = False
for name in self:
sampler_dim = self.sampler_dim(name)
tensor = self.tensor(name)
assert len(keep) == 0 or (
0 <= min(keep) and max(keep) < tensor.shape[sampler_dim]
), "Got min(keep)={} max(keep)={} for memory type {} with shape {}, dim {}".format(
min(keep), max(keep), name, tensor.shape, sampler_dim
)
if tensor.shape[sampler_dim] > len(keep):
tensor = tensor.index_select(
dim=sampler_dim,
index=torch.as_tensor(
list(keep), dtype=torch.int64, device=tensor.device
),
)
res.check_append(name, tensor, sampler_dim)
valid = True
if valid:
return res
return self
def set_tensor(self, key: str, tensor: torch.Tensor) -> "Memory":
"""Replaces tensor for given key with an updated version.
# Parameters
key: memory type identifier to update
tensor: updated tensor
# Returns
Updated memory
"""
assert key in self, "Missing key {}".format(key)
assert (
tensor.shape == self[key][0].shape
), "setting tensor with shape {} for former {}".format(
tensor.shape, self[key][0].shape
)
self[key] = (tensor, self[key][1])
return self
def step_select(self, step: int) -> "Memory":
"""Equivalent to slicing with length 1 for the `step` (i.e first)
dimension in rollouts storage.
# Parameters
step: step to keep
# Returns
Sliced memory with a single step
"""
res = Memory()
for key in self:
tensor = self.tensor(key)
assert (
tensor.shape[0] > step
), "attempting to access step {} for memory type {} of shape {}".format(
step, key, tensor.shape
)
if step != -1:
res.check_append(
key, self.tensor(key)[step : step + 1, ...], self.sampler_dim(key)
)
else:
res.check_append(
key, self.tensor(key)[step:, ...], self.sampler_dim(key)
)
return res
def step_squeeze(self, step: int) -> "Memory":
"""Equivalent to simple indexing for the `step` (i.e first) dimension
in rollouts storage.
# Parameters
step: step to keep
# Returns
Sliced memory with a single step (and squeezed step dimension)
"""
res = Memory()
for key in self:
tensor = self.tensor(key)
assert (
tensor.shape[0] > step
), "attempting to access step {} for memory type {} of shape {}".format(
step, key, tensor.shape
)
res.check_append(
key, self.tensor(key)[step, ...], self.sampler_dim(key) - 1
)
return res
def slice(
self,
dim: int,
start: Optional[int] = None,
stop: Optional[int] = None,
step: int = 1,
) -> "Memory":
"""Slicing for dimensions that have same extents in all memory types.
It also accepts negative indices.
# Parameters
dim: the dimension to slice
start: the index of the first item to keep if given (default 0 if None)
stop: the index of the first item to discard if given (default tensor size along `dim` if None)
step: the increment between consecutive indices (default 1)
# Returns
Sliced memory
"""
checked = False
total: Optional[int] = None
res = Memory()
for key in self:
tensor = self.tensor(key)
assert (
len(tensor.shape) > dim
), "attempting to access dim {} for memory type {} of shape {}".format(
dim, key, tensor.shape
)
if not checked:
total = tensor.shape[dim]
checked = True
assert (
total == tensor.shape[dim]
), "attempting to slice along non-uniform dimension {}".format(dim)
if start is not None or stop is not None or step != 1:
slice_tuple = (
(slice(None),) * dim
+ (slice(start, stop, step),)
+ (slice(None),) * (len(tensor.shape) - (1 + dim))
)
sliced_tensor = tensor[slice_tuple]
res.check_append(
key=key, tensor=sliced_tensor, sampler_dim=self.sampler_dim(key),
)
else:
res.check_append(
key, tensor, self.sampler_dim(key),
)
return res
def to(self, device: torch.device) -> "Memory":
for key in self:
tensor = self.tensor(key)
if tensor.device != device:
self.set_tensor(key, tensor.to(device))
return self
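# Hedged usage sketch (illustrative only, not part of the original module): build a
# Memory holding one recurrent hidden state, then keep a subset of samplers. The key
# name and shapes below are assumptions for illustration.
if __name__ == "__main__":
    memory = Memory()
    memory.check_append("rnn_hidden", torch.zeros(1, 4, 128), sampler_dim=1)
    print(memory.tensor("rnn_hidden").shape)   # torch.Size([1, 4, 128])
    kept = memory.sampler_select([0, 2])       # keep samplers 0 and 2
    print(kept.tensor("rnn_hidden").shape)     # torch.Size([1, 2, 128])
    print(kept.sampler_dim("rnn_hidden"))      # 1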
|
11582841
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from easydict import EasyDict as edict
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from torchvision.models.resnet import model_urls
from torchvision.models.resnet import BasicBlock, Bottleneck
resnet_spec = {
18: (BasicBlock, [2, 2, 2, 2], [64, 64, 128, 256, 512], 'resnet18'),
34: (BasicBlock, [3, 4, 6, 3], [64, 64, 128, 256, 512], 'resnet34'),
50: (Bottleneck, [3, 4, 6, 3], [64, 256, 512, 1024, 2048], 'resnet50'),
101: (Bottleneck, [3, 4, 23, 3], [64, 256, 512, 1024, 2048], 'resnet101'),
152: (Bottleneck, [3, 8, 36, 3], [64, 256, 512, 1024, 2048], 'resnet152')
}
class ResNetBackbone(nn.Module):
def __init__(self, block, layers, in_channel=3):
self.inplanes = 64
super(ResNetBackbone, self).__init__()
self.conv1 = nn.Conv2d(in_channel, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, mean=0, std=0.001)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
class DeconvHead(nn.Module):
def __init__(self, in_channels, num_layers, num_filters, kernel_size, conv_kernel_size, num_joints, depth_dim,
with_bias_end=True):
super(DeconvHead, self).__init__()
conv_num_filters = num_joints * depth_dim
        assert kernel_size == 2 or kernel_size == 3 or kernel_size == 4, 'Only support kernel 2, 3 and 4'
padding = 1
output_padding = 0
if kernel_size == 3:
output_padding = 1
elif kernel_size == 2:
padding = 0
        assert conv_kernel_size == 1 or conv_kernel_size == 3, 'Only support kernel 1 and 3'
if conv_kernel_size == 1:
pad = 0
elif conv_kernel_size == 3:
pad = 1
self.features = nn.ModuleList()
for i in range(num_layers):
if i == 0:
_in_channels = in_channels
self.features.append(
nn.Conv2d(_in_channels, num_filters, kernel_size=1, stride=1, bias=False))
self.features.append(nn.BatchNorm2d(num_filters))
self.features.append(nn.ReLU(inplace=True))
else:
_in_channels = num_filters
self.features.append(
nn.ConvTranspose2d(_in_channels, num_filters, kernel_size=kernel_size, stride=2, padding=padding,
output_padding=output_padding, bias=False))
self.features.append(nn.BatchNorm2d(num_filters))
self.features.append(nn.ReLU(inplace=True))
if with_bias_end:
self.features.append(
nn.Conv2d(num_filters, conv_num_filters, kernel_size=conv_kernel_size, padding=pad, bias=True))
else:
self.features.append(
nn.Conv2d(num_filters, conv_num_filters, kernel_size=conv_kernel_size, padding=pad, bias=False))
self.features.append(nn.BatchNorm2d(conv_num_filters))
self.features.append(nn.ReLU(inplace=True))
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, mean=0, std=0.001)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.ConvTranspose2d):
nn.init.normal_(m.weight, mean=0, std=0.001)
def forward(self, x):
features = []
for i, l in enumerate(self.features):
x = l(x)
if (i+1) % 3 == 0:
# collect multi-scale feature maps from intermediate layers, will be used for both Discriminator and RB
features.append(x)
return x, features
class Bottleneck_refinenet(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1):
super(Bottleneck_refinenet, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 2, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 2)
self.relu = nn.ReLU(inplace=True)
self.downsample = nn.Sequential(
nn.Conv2d(inplanes, planes * 2,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * 2),
)
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
# structure for the refinement block, code are adapted from https://github.com/chenyilun95/tf-cpn
class refineNet(nn.Module):
def __init__(self, lateral_channel, out_shape, num_class, dual_branch=False):
super(refineNet, self).__init__()
cascade = []
num_cascade = 4
for i in range(num_cascade):
cascade.append(self._make_layer(lateral_channel, num_cascade-i-1, out_shape))
self.cascade = nn.ModuleList(cascade)
self.final_predict = self._predict(4 * lateral_channel, num_class)
self.dual_branch = dual_branch
if dual_branch:
self.final_predict_2 = self._predict(4 * lateral_channel, num_class)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, mean=0, std=0.001)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.ConvTranspose2d):
nn.init.normal_(m.weight, mean=0, std=0.001)
def _make_layer(self, input_channel, num, output_shape):
layers = []
for i in range(num):
layers.append(Bottleneck_refinenet(input_channel, 128))
layers.append(nn.Upsample(size=output_shape, mode='bilinear', align_corners=True))
return nn.Sequential(*layers)
def _predict(self, input_channel, num_class, with_bias_end=True):
layers = []
layers.append(Bottleneck_refinenet(input_channel, 128))
layers.append(nn.Conv2d(256, num_class, kernel_size=3, stride=1, padding=1, bias=with_bias_end))
return nn.Sequential(*layers)
def forward(self, x):
refine_fms = []
for i in range(4):
refine_fms.append(self.cascade[i](x[i]))
out = torch.cat(refine_fms, dim=1)
out1 = self.final_predict(out)
if self.dual_branch:
out2 = self.final_predict_2(out)
return out1, out2
else:
return out1
class ResPoseNet_refine(nn.Module):
def __init__(self, backbone, head, refinenet):
super(ResPoseNet_refine, self).__init__()
self.backbone = backbone
self.head = head
self.refinenet = refinenet
def forward(self, x):
x = self.backbone(x)
x, features = self.head(x)
x_refine = self.refinenet(features)
return x, x_refine
def get_default_network_config():
config = edict()
config.from_model_zoo = True
config.pretrained = ''
config.num_layers = 101
config.num_deconv_layers = 4
config.num_deconv_filters = 256
config.num_deconv_kernel = 4
config.final_conv_kernel = 1
config.depth_dim = 1
config.input_channel = 3
return config
def init_pose_net(pose_net, name):
org_resnet = model_zoo.load_url(model_urls[name])
    # drop the original resnet fc layer; passing None as the default means pop()
    # does not raise an error if the key is missing
org_resnet.pop('fc.weight', None)
org_resnet.pop('fc.bias', None)
pose_net.backbone.load_state_dict(org_resnet)
print("Init Network from model zoo")
def pose_resnet_refine(**kwargs):
cfg = get_default_network_config()
block_type, layers, channels, name = resnet_spec[kwargs['resnet_layers']]
backbone_net = ResNetBackbone(block_type, layers)
head_net = DeconvHead(
channels[-1], cfg.num_deconv_layers,
cfg.num_deconv_filters, cfg.num_deconv_kernel,
cfg.final_conv_kernel, kwargs['num_classes'], cfg.depth_dim
)
refinenet = refineNet(256, (64, 64), kwargs['num_classes'])
pose_net = ResPoseNet_refine(backbone_net, head_net, refinenet)
init_pose_net(pose_net, name)
return pose_net
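# Hedged construction sketch (illustrative; the argument values are assumptions, and
# loading ImageNet weights via init_pose_net requires network access):
if __name__ == "__main__":
    net = pose_resnet_refine(resnet_layers=50, num_classes=17)
    dummy = torch.randn(1, 3, 256, 256)
    heatmaps, refined = net(dummy)
    print(heatmaps.shape, refined.shape)  # expected: both (1, 17, 64, 64) for a 256x256 input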
|
11582877
|
import aiohttp
import pytest
@pytest.fixture(scope="function", name="http_session")
@pytest.mark.asyncio
def create_session():
session = aiohttp.ClientSession()
yield session
session.close()
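# Hedged usage sketch (the URL is a placeholder; assumes pytest-asyncio is installed):
#
#   @pytest.mark.asyncio
#   async def test_fetch(http_session):
#       async with http_session.get("https://example.com") as resp:
#           assert resp.status == 200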
|
11582885
|
from distutils.core import setup
setup(name='pybilt',
version='0.3.0',
description='Lipid bilayer analysis toolkit.',
author='<NAME>',
author_email='<EMAIL>',
url='http://pybilt.readthedocs.io/en/latest/index.html',
packages=['pybilt', 'pybilt.bilayer_analyzer', 'pybilt.common',
'pybilt.com_trajectory', 'pybilt.diffusion',
'pybilt.lipid_grid', 'pybilt.mda_tools',
'pybilt.plot_generation'],
license='MIT',
keywords=['lipid bilayer', 'molecular dynamics', 'analysis']
)
|
11582915
|
import boto3
import decimal
import json
import os
ALARM_NAME_PREFIX = 'InstanceAlarm:'
ALARM_TEMPLATES_BUCKET = os.environ['ALARM_TEMPLATES_BUCKET']
ALARM_TEMPLATES_CACHE = {}
# Maximum number of alarms to delete per API call.
DELETE_ALARMS_MAX_NAMES = 100
autoscaling = boto3.client('autoscaling')
cloudwatch = boto3.client('cloudwatch')
s3 = boto3.client('s3')
def create_instance_alarms(asg_name, instance_id):
"""
Creates alarms for the specified EC2 instance.
"""
asgs = describe_auto_scaling_groups(
AutoScalingGroupNames=[asg_name],
)
for asg in asgs:
alarms_to_create = get_alarms_to_create(asg, instance_id)
for alarm in alarms_to_create:
print('Creating alarm: {}'.format(alarm['AlarmName']))
put_metric_alarm(**alarm)
def delete_alarms(alarm_names):
"""
Deletes the specified alarms.
"""
    # The API accepts at most DELETE_ALARMS_MAX_NAMES alarms per call,
    # so work through the list in chunks.
alarm_names = list(alarm_names)
while alarm_names:
# Delete a chunk of alarms.
response = cloudwatch.delete_alarms(
AlarmNames=alarm_names[:DELETE_ALARMS_MAX_NAMES],
)
if response['ResponseMetadata']['HTTPStatusCode'] != 200:
raise Exception('ERROR: {}'.format(response))
# Move to the next chunk.
alarm_names = alarm_names[DELETE_ALARMS_MAX_NAMES:]
def delete_instance_alarms(instance_id):
"""
Delete all alarms that exist for the specified EC2 instance.
"""
    # This Lambda function always creates alarms for instances using a standard
    # prefix followed by the instance id. Find and delete any alarms that follow
    # this naming convention for this instance id.
alarms = describe_alarms(
AlarmNamePrefix=ALARM_NAME_PREFIX + instance_id,
)
alarm_names = [alarm['AlarmName'] for alarm in alarms]
print('Deleting alarms: {}'.format(alarm_names))
delete_alarms(alarm_names)
def describe_alarms(**kwargs):
"""
Returns CloudWatch Metric Alarms.
"""
paginator = cloudwatch.get_paginator('describe_alarms')
pages = paginator.paginate(**kwargs)
for page in pages:
if page['ResponseMetadata']['HTTPStatusCode'] != 200:
raise Exception('ERROR: {}'.format(page))
for alarm in page['MetricAlarms']:
yield alarm
def describe_auto_scaling_groups(**kwargs):
"""
Returns Auto Scaling Groups.
"""
paginator = autoscaling.get_paginator('describe_auto_scaling_groups')
pages = paginator.paginate(**kwargs)
for page in pages:
if page['ResponseMetadata']['HTTPStatusCode'] != 200:
raise Exception('ERROR: {}'.format(page))
for asg in page['AutoScalingGroups']:
yield asg
def full_sweep():
"""
Creates any instance alarms that should exist but don't, and deletes
any instance alarms that shouldn't exist but do.
"""
# Get a list of all instance alarms in the AWS account.
found_alarm_names = set()
alarms = describe_alarms(
AlarmNamePrefix=ALARM_NAME_PREFIX,
)
for alarm in alarms:
alarm_name = alarm['AlarmName']
found_alarm_names.add(alarm_name)
    # Go through all ASGs and their EC2 instances and create any alarms that
    # should exist but don't. Build a set of the alarm names that should exist.
expected_alarm_names = set()
for asg in describe_auto_scaling_groups():
for instance in asg['Instances']:
if instance['LifecycleState'] != 'InService':
continue
alarms = get_alarms_to_create(asg, instance['InstanceId'])
for alarm in alarms:
alarm_name = alarm['AlarmName']
expected_alarm_names.add(alarm_name)
if alarm_name not in found_alarm_names:
print('Creating missing alarm: {}'.format(alarm_name))
put_metric_alarm(**alarm)
# Delete any instance alarms that shouldn't exist.
orphan_alarm_names = found_alarm_names - expected_alarm_names
if orphan_alarm_names:
print('Deleting orphan alarms: {}'.format(orphan_alarm_names))
delete_alarms(orphan_alarm_names)
def get_alarm_keys(asg):
"""
Returns alarm keys as defined by the ASG's tags.
"""
for tag in asg['Tags']:
tag_key = tag['Key']
if tag_key.startswith(ALARM_NAME_PREFIX):
alarm_key = tag_key[len(ALARM_NAME_PREFIX):]
yield alarm_key
def get_alarms_to_create(asg, instance_id):
"""
Returns alarm dictionaries that should be created for an EC2 instance.
"""
for alarm_key in get_alarm_keys(asg):
# Read alarm templates from S3 and cache them in memory.
if alarm_key not in ALARM_TEMPLATES_CACHE:
ALARM_TEMPLATES_CACHE[alarm_key] = get_s3_object_body(
Bucket=ALARM_TEMPLATES_BUCKET,
Key=alarm_key,
)
template_string = ALARM_TEMPLATES_CACHE[alarm_key]
# Render the template using variables from the ASG and instance.
template_variables = {
'asg.AutoScalingGroupName': asg['AutoScalingGroupName'],
'instance.InstanceId': instance_id,
}
for tag in asg['Tags']:
var_name = 'asg.Tags.' + tag['Key']
template_variables[var_name] = tag['Value']
for var_name, value in template_variables.items():
template_string = template_string.replace(
'{{' + var_name + '}}',
value,
)
# It should be valid JSON now.
alarm = json.loads(template_string)
        # Set the alarm name programmatically so it can be found and deleted
        # after the instance has been terminated.
alarm['AlarmName'] = ALARM_NAME_PREFIX + instance_id + ':' + alarm_key
yield alarm
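# Illustrative (hypothetical) alarm template as it might be stored in S3 and rendered
# above. The {{...}} placeholders are substituted before json.loads(), the numeric
# fields are cast in put_metric_alarm(), and AlarmName is set programmatically after
# parsing:
#
#   {
#     "AlarmDescription": "High CPU on {{instance.InstanceId}} in {{asg.AutoScalingGroupName}}",
#     "Namespace": "AWS/EC2",
#     "MetricName": "CPUUtilization",
#     "Dimensions": [{"Name": "InstanceId", "Value": "{{instance.InstanceId}}"}],
#     "Statistic": "Average",
#     "Period": "300",
#     "EvaluationPeriods": "2",
#     "Threshold": "90",
#     "ComparisonOperator": "GreaterThanThreshold"
#   }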
def get_s3_object_body(**kwargs):
"""
Returns the content of an object in S3.
"""
response = s3.get_object(**kwargs)
if response['ResponseMetadata']['HTTPStatusCode'] != 200:
raise Exception('ERROR: {}'.format(response))
return response['Body'].read().decode('utf-8')
def put_metric_alarm(**alarm):
"""
Creates a CloudWatch Metric Alarm.
"""
# Convert numeric fields into appropriate types.
alarm['EvaluationPeriods'] = int(alarm['EvaluationPeriods'])
alarm['Period'] = int(alarm['Period'])
alarm['Threshold'] = decimal.Decimal(alarm['Threshold'])
# Create the alarm.
response = cloudwatch.put_metric_alarm(**alarm)
if response['ResponseMetadata']['HTTPStatusCode'] != 200:
raise Exception('ERROR: {}'.format(response))
def lambda_handler(event, context):
print('Received event: {}'.format(event))
if event['detail-type'] == 'EC2 Instance Launch Successful':
asg_name = event['detail']['AutoScalingGroupName']
instance_id = event['detail']['EC2InstanceId']
create_instance_alarms(asg_name, instance_id)
elif event['detail-type'] == 'EC2 Instance State-change Notification':
if event['detail']['state'] not in ('pending', 'running'):
instance_id = event['detail']['instance-id']
delete_instance_alarms(instance_id)
else:
full_sweep()
|
11582916
|
import uvicore
from uvicore.typing import Any, Dict, Optional, List
from starlette.exceptions import HTTPException as _HTTPException
from uvicore.http import status
# This is how you could do it, if you wanted to log
#log = lambda : uvicore.log.name('uvicore.http')
# See https://www.restapitutorial.com/httpstatuscodes.html for a good list to follow
class HTTPException(_HTTPException):
"""Main Base HTTP Exception"""
    # Message is optional and defaults to the HTTP status code's standard phrase as defined in the Python http module
# Detail is a more detailed text description of the issue
# Extra lets you pass in a dict of options or extra information that some handlers may want to use
def __init__(self,
status_code: int,
detail: Optional[str] = None,
*,
message: Optional[str] = None,
exception: Optional[str] = None,
extra: Optional[Dict] = None,
headers: Optional[Dict[str, Any]] = None
) -> None:
# Call starlette exception where their detail is my message
super().__init__(status_code=status_code, detail=message)
# Swap starlette detail to my message
self.message = self.detail
self.detail = detail
self.exception = exception if uvicore.config.app.debug else None # Hidden unless in debug mode
self.extra = extra
self.headers = headers
class PermissionDenied(HTTPException):
"""Permission Denied Exception"""
def __init__(self,
permissions: Optional[List] = None,
detail: Optional[str] = None,
*,
extra: Optional[Dict] = None,
headers: Optional[Dict[str, Any]] = None
) -> None:
detail = "Permission denied"
if permissions:
if type(permissions) != list: permissions = [permissions]
detail += " to {}".format(permissions)
super().__init__(
status_code=status.HTTP_401_UNAUTHORIZED,
message='Permission Denied',
detail=detail,
extra=extra,
headers=headers,
)
class NotAuthenticated(HTTPException):
"""Not Authenticated Exception"""
def __init__(self,
detail: Optional[str] = None,
*,
extra: Optional[Dict] = None,
headers: Optional[Dict[str, Any]] = None
) -> None:
super().__init__(
status_code=status.HTTP_401_UNAUTHORIZED,
message='Not Authenticated',
detail=detail,
extra=extra,
headers=headers,
)
class InvalidCredentials(HTTPException):
"""Invalid Credentials Exception"""
def __init__(self,
detail: Optional[str] = None,
*,
extra: Optional[Dict] = None,
headers: Optional[Dict[str, Any]] = None
) -> None:
super().__init__(
status_code=status.HTTP_401_UNAUTHORIZED,
message='Invalid Credentials',
detail=detail,
extra=extra,
headers=headers,
)
class NotFound(HTTPException):
"""Not Found Exception"""
def __init__(self,
detail: Optional[str] = None,
*,
extra: Optional[Dict] = None,
headers: Optional[Dict[str, Any]] = None
) -> None:
super().__init__(
status_code=status.HTTP_404_NOT_FOUND,
message='Not Found',
detail=detail,
extra=extra,
headers=headers,
)
class BadParameter(HTTPException):
"""Bad Parameter"""
def __init__(self,
detail: Optional[str] = None,
*,
exception: Optional[str] = None,
extra: Optional[Dict] = None,
headers: Optional[Dict[str, Any]] = None
) -> None:
super().__init__(
status_code=status.HTTP_400_BAD_REQUEST,
message='Bad Parameter',
detail=detail,
exception=exception,
extra=extra,
headers=headers,
)
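# Hedged usage sketch (illustrative only; the route function and model below are
# hypothetical, not part of this module):
#
#   async def get_post(id: int):
#       post = await Post.query().find(id)
#       if not post:
#           raise NotFound(f"Post {id} not found")
#       return post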
|
11583025
|
from .dqn import DeepQNetwork
from .drqn import DeepRecurrentQNetwork
from .a2c import AdvantageActorCritic
|
11583044
|
from .Base import Base
from pegasus.tools import run_find_markers
class FindMarkers(Base):
"""
Find markers using gradient boosting.
Usage:
pegasus find_markers [options] <input_data_file> <output_spreadsheet>
pegasus find_markers -h
Arguments:
input_data_file Single cell data after running the de_analysis.
output_spreadsheet Output spreadsheet with LightGBM detected markers.
Options:
-p <threads> Use <threads> threads. [default: 1]
--labels <attr> <attr> used as cluster labels. [default: louvain_labels]
--de-key <key> Key for storing DE results in 'varm' field. [default: de_res]
--remove-ribo Remove ribosomal genes with either RPL or RPS as prefixes.
--min-gain <gain> Only report genes with a feature importance score (in gain) of at least <gain>. [default: 1.0]
--random-state <seed> Random state for initializing LightGBM and KMeans. [default: 0]
-h, --help Print out help information.
Outputs:
    output_spreadsheet        An Excel spreadsheet containing detected markers. Each cluster has one tab in the spreadsheet, and each tab has six columns: the strongly up-regulated, weakly up-regulated, and down-regulated markers, each paired with its associated LightGBM gain.
Examples:
pegasus find_markers --labels louvain_labels --remove-ribo --min-gain 10.0 -p 10 manton_bm.zarr.zip manton_bm.markers.xlsx
"""
def execute(self):
run_find_markers(
self.args["<input_data_file>"],
self.args["<output_spreadsheet>"],
self.args["--labels"],
de_key=self.args["--de-key"],
n_jobs=int(self.args["-p"]),
min_gain=float(self.args["--min-gain"]),
random_state=int(self.args["--random-state"]),
remove_ribo=self.args["--remove-ribo"],
)
|
11583053
|
import scanpy as sc
import numpy as np
import scipy as sp
from skmisc.loess import loess
from statsmodels.stats.multitest import multipletests
from scipy.stats import rankdata
import pandas as pd
import time
def score_cell(data,
gene_list,
gene_weight=None,
suffix='',
ctrl_opt='mean_match',
trs_opt='vst',
bc_opt='empi',
ctrlgene_list=None,
n_ctrl=1,
n_genebin=200,
cov_list=None,
random_seed=0,
verbose=False,
copy=False,
return_list=['trs_ep', 'trs_ez']):
"""Score cells based on the trait gene set
Args
----
data (n_cell, n_gene) : AnnData
adata.X should contain size-normalized log1p transformed count data
gene_list (n_trait_gene) : list
Trait gene list
gene_weight (n_trait_gene) : list/np.ndarray
Gene weights for genes in the gene_list.
        If gene_weight is None, all gene weights are set to one.
suffix : str
The name of the added cell-level annotations would be
['trs', 'trs_z', 'trs_tp', 'trs_ep', 'trs_ez']+suffix
ctrl_opt : str
        Option for selecting the null geneset
        None: do not use a null geneset
        'given': use the user-provided ctrlgene_list as the control geneset
        'random': size-matched random geneset
        'mean_match': size-and-mean-matched random geneset
        'mean_bvar_match': size-and-mean-and-bvar-matched random geneset, where bvar means biological variance
trs_opt : str
Option for computing TRS
'mean': average over the genes in the gene_list
'vst': weighted average with weights equal to 1/sqrt(technical_variance_of_logct)
'inv_std': weighted average with weights equal to 1/std
bc_opt : str
Option for cell-wise background correction
None: no correction.
'recipe_vision': normalize by cell-wise mean&var computed using all genes.
'empi': normalize by cell-wise mean&var stratified by mean bins.
ctrlgene_list (n_ctrl_gene) : list
List of control genes to use
n_ctrl : int
Number of control genesets
n_genebin : int
Number of gene bins (to divide the genes by expression)
Only useful when ctrl_opt is not None
cov_list : list of str
Covariates to control for.
The covariates are first centered and then regressed out.
Elements in cov_list should be present in data.obs.columns
random_seed : int
Random seed
copy : bool
If to make copy of the AnnData object
return_list : list
Items to return
Should be a subset of ['trs', 'trs_z', 'trs_tp', 'trs_ep', 'trs_ez']
Returns
-------
adata (n_cell, n_gene) : AnnData
Columns added to data.obs as specified by return_list
"""
np.random.seed(random_seed)
adata = data.copy() if copy else data
# Pre-compute statistics
var_set = set(['mean','var','var_tech'])
obs_set = set(['mean','var'])
if (len(var_set-set(adata.var.columns))>0) | (len(obs_set-set(adata.obs.columns))>0):
if verbose: print('# score_cell: recompute statistics using method.compute_stats')
compute_stats(adata)
# Check options
ctrl_opt_list = [None, 'given', 'random', 'mean_match', 'mean_bvar_match']
trs_opt_list = ['mean', 'vst', 'inv_std']
bc_opt_list = [None, 'recipe_vision', 'empi']
if ctrl_opt not in ctrl_opt_list:
raise ValueError('# score_cell: ctrl_opt not in [%s]'%', '.join([str(x) for x in ctrl_opt_list]))
if trs_opt not in trs_opt_list:
raise ValueError('# score_cell: trs_opt not in [%s]'%', '.join([str(x) for x in trs_opt_list]))
if bc_opt not in bc_opt_list:
raise ValueError('# score_cell: bc_opt not in [%s]'%', '.join([str(x) for x in bc_opt_list]))
if cov_list is not None:
temp_list = list(set(cov_list) - set(adata.obs.columns))
if len(temp_list)>0:
raise ValueError('# score_cell: covariates %s not in data.obs.columns'%','.join(temp_list))
if (len(cov_list)>0) & ('mean' not in cov_list):
raise ValueError('# score_cell: mean needs to be in cov_list')
if verbose:
print('# score_cell: suffix=%s, ctrl_opt=%s, trs_opt=%s, bc_opt=%s'%(suffix, ctrl_opt, trs_opt, bc_opt))
print('# score_cell: n_ctrl=%d, n_genebin=%d'%(n_ctrl, n_genebin))
# Gene-wise statistics
df_gene = pd.DataFrame(index=adata.var_names)
df_gene['gene'] = df_gene.index
df_gene['mean'] = adata.var['mean']
df_gene['var'] = adata.var['var'].values
df_gene['tvar'] = adata.var['var_tech'].values
df_gene['bvar'] = df_gene['var'].values - df_gene['tvar'].values
df_gene.drop_duplicates(subset='gene', inplace=True)
# Update gene_list
gene_list = list(gene_list)
n_gene_old = len(gene_list)
df_trait_gene = pd.DataFrame(index=gene_list, columns=['gene', 'gene_weight'], data=0)
df_trait_gene['gene'] = df_trait_gene.index
df_trait_gene['gene_weight'] = 1 if gene_weight is None else np.array(gene_weight)
df_trait_gene.drop_duplicates(subset='gene', inplace=True)
gene_list = list(set(df_gene['gene'].values) & set(gene_list))
gene_list.sort()
df_trait_gene = df_trait_gene.loc[gene_list].copy()
gene_weight = df_trait_gene['gene_weight'].values.copy()
if verbose:
print('# score_cell: %-15s %-15s %-20s'
%('trait geneset,', '%d/%d genes,'%(len(gene_list),n_gene_old),
'mean_exp=%0.2e'%df_gene.loc[gene_list, 'mean'].mean()))
# Select control genes: put all methods in _select_ctrl_geneset
dic_ctrl_list,dic_ctrl_weight = _select_ctrl_geneset(df_gene,
gene_list, gene_weight,
ctrl_opt, ctrlgene_list,
n_ctrl, n_genebin,
random_seed, verbose)
# Compute TRS: put all methods in _compute_trs
dic_trs = {}
dic_trs['trs'] = _compute_trs(adata, gene_list, gene_weight, trs_opt, cov_list=cov_list)
for i_list in dic_ctrl_list.keys():
dic_trs['trs_ctrl%d'%i_list] = _compute_trs(adata,
dic_ctrl_list[i_list],
dic_ctrl_weight[i_list],
trs_opt, cov_list=cov_list)
# Correct cell-specific and geneset-specific background: put all methods in _correct_background
_correct_background(adata, dic_trs, bc_opt)
# Get p-value
if 'trs_tp' in return_list:
dic_trs['trs_tp'] = 1 - sp.stats.norm.cdf(dic_trs['trs_z'])
if len(dic_ctrl_list.keys())>0:
v_ctrl_trs_z = []
for i_list in dic_ctrl_list.keys():
v_ctrl_trs_z += list(dic_trs['trs_ctrl%d_z'%i_list])
dic_trs['trs_ep'] = get_p_from_empi_null(dic_trs['trs_z'], v_ctrl_trs_z)
if 'trs_ez' in return_list:
dic_trs['trs_ez'] = -sp.stats.norm.ppf(dic_trs['trs_ep'])
dic_trs['trs_ez'] = dic_trs['trs_ez'].clip(min=-10,max=10)
for term in return_list:
if term in dic_trs.keys():
adata.obs['%s%s'%(term,suffix)] = dic_trs[term].copy()
else:
print('# score_cell: %s not computed'%term)
return adata if copy else None
def _select_ctrl_geneset(input_df_gene, gene_list, gene_weight,
ctrl_opt, ctrlgene_list,
n_ctrl, n_genebin, random_seed, verbose):
"""Subroutine for score_cell, select control genesets
Args
----
input_df_gene (adata.shape[1], n_statistic) : pd.DataFrame
Gene-wise statistics
gene_list (n_trait_gene) : list
Trait gene list
gene_weight (n_trait_gene) : list/np.ndarray
Gene weights for genes in the gene_list.
ctrl_opt : str
        Option for selecting the null geneset
        None: not using a null geneset
        'given': use the geneset given in ctrlgene_list
        'random': size-matched random geneset
        'mean_match': size-and-mean-matched random geneset
        'mean_bvar_match': size-and-mean-and-bvar-matched random geneset. bvar means biological variance.
ctrlgene_list (n_ctrl_gene) : list
List of control genes to use
n_ctrl : int
Number of control genesets
n_genebin : int
Number of gene bins (to divide the genes by expression)
Only useful when ctrl_opt is not None
random_seed : int
Random seed
Returns
-------
dic_ctrl_list : dictionary
dic_ctrl_list[i]: the i-th control gene list (a list)
dic_ctrl_weight : dictionary
dic_ctrl_weight[i]: weights for the i-th control gene list (a list)
"""
np.random.seed(random_seed)
df_gene = input_df_gene.copy()
gene_list = list(gene_list)
df_trait_gene = pd.DataFrame(index=gene_list, columns=['gene', 'gene_weight'], data=0)
df_trait_gene['gene'] = df_trait_gene.index
df_trait_gene['gene_weight'] = list(gene_weight)
dic_ctrl_list = {}
dic_ctrl_weight = {}
if ctrl_opt=='given':
dic_ctrl_list[0] = ctrlgene_list
dic_ctrl_weight[0] = np.ones(len(ctrlgene_list))
if ctrl_opt=='random':
for i_list in np.arange(n_ctrl):
ind_select = np.random.permutation(df_gene.shape[0])[:len(gene_list)]
dic_ctrl_list[i_list] = list(df_gene['gene'].values[ind_select])
dic_ctrl_weight[i_list] = df_trait_gene['gene_weight'].values.copy()
if ctrl_opt=='mean_match':
# Divide genes into bins based on their rank of mean expression
df_gene['qbin'] = pd.qcut(df_gene['mean'], q=n_genebin, labels=False)
df_gene_bin = df_gene.groupby('qbin').agg({'gene':set})
gene_list_as_set = set(gene_list)
for i_list in np.arange(n_ctrl):
dic_ctrl_list[i_list] = []
dic_ctrl_weight[i_list] = []
for bin_ in df_gene_bin.index:
temp_overlap_list = list(df_gene_bin.loc[bin_,'gene'] & gene_list_as_set)
temp_overlap_list.sort()
n_gene_in_bin = len(temp_overlap_list)
if n_gene_in_bin>0:
temp_list = list(df_gene_bin.loc[bin_, 'gene'])
temp_list.sort()
v_gene_bin = np.array(temp_list)
ind_select = np.random.permutation(v_gene_bin.shape[0])[0:n_gene_in_bin]
dic_ctrl_list[i_list] += list(v_gene_bin[ind_select])
dic_ctrl_weight[i_list] += list(df_trait_gene.loc[temp_overlap_list,'gene_weight'].values)
if ctrl_opt=='mean_bvar_match':
# Divide genes into bins based on their rank of mean expression and biological variance
n_qbin = int(np.ceil(np.sqrt(n_genebin)))
df_gene['mean_qbin'] = pd.qcut(df_gene['mean'], q=n_qbin, labels=False)
df_gene['qbin'] = ''
for bin_ in set(df_gene['mean_qbin']):
ind_select = (df_gene['mean_qbin']==bin_)
df_gene.loc[ind_select,'qbin'] = ['%d.%d'%(bin_,x) for x in pd.qcut(df_gene.loc[ind_select,'bvar'],
q=n_qbin, labels=False)]
df_gene_bin = df_gene.groupby('qbin').agg({'gene':set})
gene_list_as_set = set(gene_list)
for i_list in np.arange(n_ctrl):
dic_ctrl_list[i_list] = []
dic_ctrl_weight[i_list] = []
for bin_ in df_gene_bin.index:
temp_overlap_list = list(df_gene_bin.loc[bin_,'gene'] & gene_list_as_set)
temp_overlap_list.sort()
n_gene_in_bin = len(temp_overlap_list)
if n_gene_in_bin>0:
temp_list = list(df_gene_bin.loc[bin_, 'gene'])
temp_list.sort()
v_gene_bin = np.array(temp_list)
ind_select = np.random.permutation(v_gene_bin.shape[0])[0:n_gene_in_bin]
dic_ctrl_list[i_list] += list(v_gene_bin[ind_select])
dic_ctrl_weight[i_list] += list(df_trait_gene.loc[temp_overlap_list,'gene_weight'].values)
if verbose:
for i_list in dic_ctrl_list.keys():
print('# score_cell: %-15s %-15s %-20s'
%('ctrl%d geneset,'%i_list, '%d genes,'%len(dic_ctrl_list[i_list]),
'mean_exp=%0.2e'%df_gene.loc[dic_ctrl_list[i_list], 'mean'].mean()))
return dic_ctrl_list,dic_ctrl_weight
def _compute_trs(adata, gene_list, gene_weight, trs_opt, cov_list=None):
"""Compute TRS
Args
----
adata (n_cell, n_gene) : AnnData
adata.X should contain size-normalized log1p transformed count data
gene_list (n_trait_gene) : list
Trait gene list
gene_weight (n_trait_gene) : list/np.ndarray
Gene weights for genes in the gene_list
trs_opt : str
Option for computing TRS
'mean': average over the genes in the gene_list
'vst': weighted average with weights equal to 1/sqrt(technical_variance_of_logct)
'inv_std': weighted average with weights equal to 1/std
Returns
-------
v_trs (n_cell,) : np.ndarray
Raw TRS
"""
gene_list = list(gene_list)
gene_weight = np.ones(len(gene_list)) if gene_weight is None else np.array(gene_weight)
if trs_opt=='mean':
v_trs_weight = np.ones(len(gene_list))
v_trs_weight *= gene_weight
v_trs_weight /= v_trs_weight.sum()
temp_v = adata[:, gene_list].X.dot(v_trs_weight)
v_trs = np.array(temp_v, dtype=np.float64).reshape([-1])
if trs_opt=='vst':
# v_trs_weight = 1 / np.sqrt(adata.var.loc[gene_list,'var_tech'].values.clip(min=1e-1))
v_trs_weight = 1 / np.sqrt(adata.var.loc[gene_list,'var_tech'].values.clip(min=1e-2))
v_trs_weight *= gene_weight
v_trs_weight /= v_trs_weight.sum()
temp_v = adata[:, gene_list].X.dot(v_trs_weight)
v_trs = np.array(temp_v, dtype=np.float64).reshape([-1])
if trs_opt=='inv_std':
# v_trs_weight = 1 / np.sqrt(adata.var.loc[gene_list,'var'].values.clip(min=1e-1))
v_trs_weight = 1 / np.sqrt(adata.var.loc[gene_list,'var'].values.clip(min=1e-2))
v_trs_weight *= gene_weight
v_trs_weight /= v_trs_weight.sum()
temp_v = adata[:, gene_list].X.dot(v_trs_weight)
v_trs = np.array(temp_v, dtype=np.float64).reshape([-1])
# Regress out covariates if needed
if cov_list is not None:
mat_X = adata.obs[cov_list].values.copy()
mat_X = mat_X - mat_X.mean(axis=0)
v_trs = _reg_out(v_trs, mat_X)
return v_trs
def _reg_out(mat_Y, mat_X):
"""Regress mat_X out of mat_Y
Args
----
mat_Y (n_sample, n_response) : np.ndarray
Response variable
mat_X (n_sample, n_covariates) : np.ndarray
Covariates
Returns
-------
mat_Y_resid (n_sample, n_response) : np.ndarray
Response variable residual
"""
mat_X = np.array(mat_X)
if len(mat_X.shape)==1:
mat_X = mat_X.reshape([-1,1])
mat_Y = np.array(mat_Y)
if len(mat_Y.shape)==1:
mat_Y = mat_Y.reshape([-1,1])
n_sample = mat_Y.shape[0]
mat_xtx = np.dot(mat_X.T, mat_X)/n_sample
mat_xty = np.dot(mat_X.T, mat_Y)/n_sample
mat_coef = np.linalg.solve(mat_xtx, mat_xty)
mat_Y_resid = mat_Y - mat_X.dot(mat_coef)
if mat_Y_resid.shape[1]==1:
mat_Y_resid = mat_Y_resid.reshape([-1])
return mat_Y_resid
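# Illustrative usage sketch (added; not part of the original module). It shows
# _reg_out removing a centered covariate from a response vector, the same way
# score_cell regresses covariates out of the raw TRS. Assumes numpy as np.
def _demo_reg_out():
    rng = np.random.RandomState(0)
    v_cov = rng.normal(size=200)                      # covariate
    v_y = 2.0 * v_cov + rng.normal(size=200)          # response driven by the covariate
    v_resid = _reg_out(v_y, v_cov - v_cov.mean())     # center the covariate, then regress out
    return np.corrcoef(v_resid, v_cov)[0, 1]          # close to 0 after correction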
def _correct_background(adata, dic_trs, bc_opt):
"""Cell-wise and gene-wise background correction
Args
----
adata (n_cell, n_gene) : AnnData
adata.X should contain size-normalized log1p transformed count data
dic_trs : dictionary
Each element has dimension (n_cell,)
Trait TRS and control TRSs
bc_opt : str
Option for cell-wise background correction
None: no correction.
'recipe_vision': normalize by cell-wise mean&var computed using all genes.
'empi': normalize by cell-wise mean&var stratified by mean bins.
Returns
-------
    Adds 'trs_z' and 'trs_ctrl%d_z' to dic_trs; each is an (n_cell,) np.ndarray
        of normalized TRS z-scores
"""
# Cell-specific background correction
trs_ctrl_list = [x for x in dic_trs if 'ctrl' in x]
v_mean,v_std = adata.obs['mean'].values,np.sqrt(adata.obs['var'].values)
n_cell = adata.shape[0]
if bc_opt is None:
for trs_name in ['trs']+trs_ctrl_list:
dic_trs['%s_z'%trs_name] = dic_trs[trs_name]
if bc_opt == 'recipe_vision':
for trs_name in ['trs']+trs_ctrl_list:
dic_trs['%s_z'%trs_name] = (dic_trs[trs_name] - v_mean) / v_std
if bc_opt == 'empi':
# Using TRSs to estimate empirical cell-specific background TRS mean&std
if len(trs_ctrl_list)==0:
raise ValueError('# score_cell: bc_opt=%s only works when n_ctrl>0'%bc_opt)
df_cell = None
for trs_name in ['trs']+trs_ctrl_list:
temp_df = pd.DataFrame()
temp_df['mean'] = v_mean
temp_df['trs'] = dic_trs[trs_name]
if df_cell is None:
df_cell = temp_df.copy()
else:
df_cell = pd.concat([df_cell, temp_df], axis=0)
df_cell['qbin'] = pd.qcut(df_cell['mean'], q=100, labels=False)
# bin-specific mean and var
dic_bin_mean = {x:df_cell.loc[df_cell['qbin']==x, 'trs'].values.mean() for x in set(df_cell['qbin'])}
dic_bin_std = {x:df_cell.loc[df_cell['qbin']==x, 'trs'].values.std() for x in set(df_cell['qbin'])}
v_mean_ctrl = np.array([dic_bin_mean[x] for x in df_cell['qbin'][:n_cell]])
v_std_ctrl = np.array([dic_bin_std[x] for x in df_cell['qbin'][:n_cell]]).clip(min=1e-8)
for trs_name in ['trs']+trs_ctrl_list:
dic_trs['%s_z'%trs_name] = (dic_trs[trs_name] - v_mean_ctrl)/v_std_ctrl
# Z-transform each gene set (across cells)
for trs_name in ['trs']+trs_ctrl_list:
dic_trs['%s_z'%trs_name] = (dic_trs['%s_z'%trs_name] - dic_trs['%s_z'%trs_name].mean()) \
/ dic_trs['%s_z'%trs_name].std()
# Set cells with TRS=0 to the minimum TRS z-score value
trs_min = dic_trs['trs_z'].min()
for trs_name in trs_ctrl_list:
trs_min = min(trs_min, dic_trs['%s_z'%trs_name].min())
dic_trs['trs_z'][dic_trs['trs']==0] = trs_min-1e-8
for trs_name in trs_ctrl_list:
dic_trs['%s_z'%trs_name][dic_trs[trs_name]==0] = trs_min
return
def get_sparse_var(sparse_X, axis=0):
"""
Compute mean and var of a sparse matrix.
"""
v_mean = sparse_X.mean(axis=axis)
v_mean = np.array(v_mean).reshape([-1])
v_var = sparse_X.power(2).mean(axis=axis)
v_var = np.array(v_var).reshape([-1])
v_var = v_var - v_mean**2
return v_mean,v_var
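# Illustrative usage sketch (added; not part of the original module): check
# get_sparse_var against dense numpy statistics on a small scipy.sparse matrix.
def _demo_get_sparse_var():
    from scipy import sparse
    mat = sparse.csr_matrix(np.arange(12, dtype=float).reshape(4, 3))
    v_mean, v_var = get_sparse_var(mat, axis=0)       # per-column (gene) statistics
    assert np.allclose(v_mean, mat.toarray().mean(axis=0))
    assert np.allclose(v_var, mat.toarray().var(axis=0))
    return v_mean, v_var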
def compute_stats(data, copy=False):
"""
    Precompute mean & var (including the technical variance) for each gene and mean & var for each cell
"""
# Gene-wise statistics
adata = data.copy() if copy else data
adata.var['mean'],adata.var['var'] = get_sparse_var(adata.X, axis=0)
# Get the mean and var for the size-factor-normalized counts
# It is highly correlated to the non-size-factor-normalized counts
temp_X = adata.X.copy().expm1() # exp(X)-1 to get ct matrix from logct
adata.var['ct_mean'],adata.var['ct_var'] = get_sparse_var(temp_X, axis=0)
del temp_X
# Borrowed from scanpy _highly_variable_genes_seurat_v3
not_const = adata.var['ct_var'].values>0
estimat_var = np.zeros(adata.shape[1], dtype=np.float64)
y = np.log10(adata.var['ct_var'].values[not_const])
x = np.log10(adata.var['ct_mean'].values[not_const])
model = loess(x, y, span=0.3, degree=2)
model.fit()
estimat_var[not_const] = model.outputs.fitted_values
adata.var['ct_var_tech'] = 10**estimat_var
# Recipe from Frost Nucleic Acids Research 2020
adata.var['var_tech'] = adata.var['var']*adata.var['ct_var_tech']/adata.var['ct_var']
adata.var.loc[adata.var['var_tech'].isna(),'var_tech'] = 0
# Cell-wise statistics
adata.obs['mean'],adata.obs['var'] = get_sparse_var(adata.X, axis=1)
return adata if copy else None
def get_p_from_empi_null(v_t,v_t_null):
"""Compute p-value from empirical null
    For a score T and a set of null scores T_1, ..., T_N, the p-value is
        p = \frac{1}{N+1} \left[ 1 + \sum_{i=1}^N 1\{ T_i \geq T \} \right]
    If T, T_1, ..., T_N are i.i.d. variables following the null distribution,
    then p is super-uniform.
    The naive algorithm is O(N^2). Here we provide an O(N log N) algorithm to
    compute the p-value for each of the M elements in v_t.
Args
----
v_t (M,): np.ndarray
The observed score.
v_t_null (N,): np.ndarray
The null score.
Returns
-------
v_p: (M,): np.ndarray
P-value for each element in v_t
"""
v_t = np.array(v_t)
v_t_null = np.array(v_t_null)
v_t_null = np.sort(v_t_null)
v_pos = np.searchsorted(v_t_null, v_t, side='left')
v_p = (v_t_null.shape[0]-v_pos+1)/(v_t_null.shape[0]+1)
return v_p
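# Illustrative usage sketch (added; not part of the original module): empirical
# p-values from get_p_from_empi_null. A score deep in the right tail of the
# null gets a small p-value; p never reaches 0 because of the +1 correction.
def _demo_get_p_from_empi_null():
    rng = np.random.RandomState(0)
    v_null = rng.normal(size=1000)
    v_obs = np.array([-2.0, 0.0, 3.0])
    return get_p_from_empi_null(v_obs, v_null)        # roughly [0.98, 0.5, <0.01]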
##############################################################################
######################## Code for downstream analysis ########################
##############################################################################
def correlate_gene(data,
trs_name='trs_ez',
suffix='',
corr_opt='pearson',
cov_list=None,
copy=False):
"""Compute the correlation between gene expressions and TRS
Args
----
data (n_cell, n_gene) : AnnData
adata.X should contain size-normalized log1p transformed count data
trs_name : str
The variable to correlate gene expression with. Should be one column in data.obs.
suffix : str
The name of the added gene-wise correlation would be 'trs_corr'+suffix.
corr_opt : str
Option for computing the correlation
'pearson': Pearson's correlation
'spearman': Spearman's correlation
cov_list : list of str
Covariates to control for.
The covariates are first centered and then regressed out from
both trs_name and the gene expression before computing the correlation.
Elements in cov_list should be present in data.obs.columns
copy : bool
        Whether to make a copy of the AnnData object
Returns
-------
adata (AnnData):
Add the columns 'trs_corr'+suffix to data.var
"""
adata = data.copy() if copy else data
# Check options
corr_opt_list = ['pearson', 'spearman']
if corr_opt not in corr_opt_list:
raise ValueError('# compute_trs_corr: corr_opt not in [%s]'
%', '.join([str(x) for x in corr_opt_list]))
if trs_name not in adata.obs.columns:
raise ValueError('# compute_trs_corr: %s not in data.obs.columns'%trs_name)
if cov_list is not None:
temp_list = list(set(cov_list) - set(adata.obs.columns))
if len(temp_list)>0:
raise ValueError('# compute_trs_corr: covariates %s not in data.obs.columns'
%','.join(temp_list))
# Get data
mat_X = data.X.toarray()
v_trs = data.obs[trs_name].values.copy()
# Regress out covariates
if cov_list is not None:
mat_cov = adata.obs[cov_list].values.copy()
mat_cov = mat_cov - mat_cov.mean(axis=0)
v_trs = _reg_out(v_trs, mat_cov)
mat_X = _reg_out(mat_X, mat_cov)
# Compute correlation
if corr_opt=='pearson':
v_corr = _pearson_corr(mat_X, v_trs)
if corr_opt=='spearman':
v_corr = _spearman_corr(mat_X, v_trs)
adata.var['trs_corr'+suffix] = v_corr
return adata if copy else None
def _pearson_corr(mat_X, mat_Y):
"""Pearson's correlation between every columns in mat_X and mat_Y
Args
----
mat_X (N,M1): np.ndarray
mat_Y (N,M2): np.ndarray
Returns
-------
mat_corr: (M1,M2): np.ndarray
Correlation matrix
"""
# Reshape
if len(mat_X.shape)==1:
mat_X = mat_X.reshape([-1,1])
if len(mat_Y.shape)==1:
mat_Y = mat_Y.reshape([-1,1])
mat_X = (mat_X-mat_X.mean(axis=0))/mat_X.std(axis=0).clip(min=1e-8)
mat_Y = (mat_Y-mat_Y.mean(axis=0))/mat_Y.std(axis=0).clip(min=1e-8)
mat_corr = mat_X.T.dot(mat_Y)/mat_X.shape[0]
if mat_corr.shape[1]==1:
return mat_corr.reshape([-1])
else:
return mat_corr
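# Illustrative usage sketch (added; not part of the original module):
# _pearson_corr cross-checked against numpy's corrcoef, column by column.
def _demo_pearson_corr():
    rng = np.random.RandomState(0)
    mat_X = rng.normal(size=(50, 3))
    v_y = rng.normal(size=50)
    v_corr = _pearson_corr(mat_X, v_y)                # shape (3,)
    v_ref = np.array([np.corrcoef(mat_X[:, i], v_y)[0, 1] for i in range(3)])
    assert np.allclose(v_corr, v_ref)
    return v_corr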
def _spearman_corr(mat_X, mat_Y):
"""Spearman's correlation between every columns in mat_X and mat_Y
Args
----
mat_X (N,M1): np.ndarray
mat_Y (N,M2): np.ndarray
Returns
-------
mat_corr (M1,M2): np.ndarray
Correlation matrix
"""
# Reshape
if len(mat_X.shape)==1:
mat_X = mat_X.reshape([-1,1])
if len(mat_Y.shape)==1:
mat_Y = mat_Y.reshape([-1,1])
mat_X = _get_rank(mat_X, axis=0)
mat_Y = _get_rank(mat_Y, axis=0)
mat_X = (mat_X-mat_X.mean(axis=0))/mat_X.std(axis=0).clip(min=1e-8)
mat_Y = (mat_Y-mat_Y.mean(axis=0))/mat_Y.std(axis=0).clip(min=1e-8)
mat_corr = mat_X.T.dot(mat_Y)/mat_X.shape[0]
if mat_corr.shape[1]==1:
return mat_corr.reshape([-1])
else:
return mat_corr
def _get_rank(mat_X, axis=0):
"""Spearman's correlation between every columns in mat_X and mat_Y
Args
----
mat_X (N,M): np.ndarray
axis: int
axis=0: column-wise rank (across rows)
axis=1: row-wise rank (across columns)
Returns
-------
mat_rank (N,M): np.ndarray
Rank matrix
"""
if axis==0:
mat_X = np.argsort(mat_X, axis=0)
mat_rank = np.empty_like(mat_X)
temp_v = np.arange(mat_X.shape[0])
for i_col in range(mat_X.shape[1]):
mat_rank[mat_X[:,i_col], i_col] = temp_v
if axis==1:
mat_X = np.argsort(mat_X, axis=1)
mat_rank = np.empty_like(mat_X)
temp_v = np.arange(mat_X.shape[1])
for i_row in range(mat_X.shape[0]):
mat_rank[i_row, mat_X[i_row,:]] = temp_v
return mat_rank
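# Illustrative usage sketch (added; not part of the original module): column-wise
# ordinal ranks from _get_rank (0 = smallest value in the column; ties are broken
# by position because np.argsort is used).
def _demo_get_rank():
    mat = np.array([[3.0, 1.0],
                    [1.0, 2.0],
                    [2.0, 0.0]])
    return _get_rank(mat, axis=0)                     # expected [[2, 1], [0, 2], [1, 0]]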
##############################################################################
################################## Old code ##################################
##############################################################################
def score_cell_081520(data,
gene_list,
suffix='',
flag_correct_background=False,
verbose=True,
copy=False):
"""score cells based on the geneset
Args:
data (AnnData): AnnData object
adata.X should contain size-normalized log1p transformed count data
gene_list (list): gene list
suffix (str): 'trs_'+suffix+['', '_z', '_p', '_bhp'] would be the name
flag_correct_background (bool):
            Whether to normalize for background mean and std. If True, normalize by
            score = (score - mean)/std
    Returns:
        adata (AnnData): AnnData with TRS columns added to adata.obs
"""
adata = data.copy() if copy else data
gene_list_overlap = list(set(adata.var_names) & set(gene_list))
if verbose:
print('# score_cell: %d/%d gene_list genes also in adata'
%(len(gene_list), len(gene_list_overlap)))
print('# score_cell: suffix=%s, flag_correct_background=%s'
%(suffix, flag_correct_background))
trs_name = 'trs_%s'%suffix
if trs_name in adata.obs.columns:
print('# score_cell: overwrite original %s in adata.obs.columns'
%trs_name)
adata.obs[trs_name] = adata[:, gene_list_overlap].X.mean(axis=1)
if flag_correct_background:
v_mean,v_var = get_sparse_var(adata.X, axis=1)
v_std = np.sqrt(v_var)
adata.obs[trs_name] = (adata.obs[trs_name] - v_mean) / v_std * \
np.sqrt(len(gene_list_overlap))
# Add z_score, p_value, and fdr
temp_v = adata.obs[trs_name].values
adata.obs['%s_z'%trs_name] = (temp_v - temp_v.mean())/ temp_v.std()
adata.obs['%s_p'%trs_name] = 1 - sp.stats.norm.cdf(adata.obs['%s_z'%trs_name].values)
adata.obs['%s_bhp'%trs_name] = multipletests(adata.obs['%s_p'%trs_name].values,
method='fdr_bh')[1]
return adata if copy else None
def score_cell_kangcheng_072920(data,
gene_list,
suffix='',
flag_correct_background=False,
flag_specific_expressed=False,
verbose=True,
copy=False):
"""score cells based on the geneset
Args:
data (AnnData): AnnData object
adata.X should contain size-normalized log1p transformed count data
gene_list (list): gene list
suffix (str): 'trs_'+suffix+['', '_z', '_p', '_bhp'] would be the name
flag_correct_background (bool):
            Whether to normalize for background mean and std per cell. If True, normalize by
            score = (score - mean)/std, where mean and std are calculated within each cell
        flag_specific_expressed (bool):
            Whether to transform gene expression to identify specifically expressed genes.
            If True, for each gene, normalize score = (score - mean) / std, where mean and
            std are calculated across cells when computing the TRS score
    Returns:
        adata (AnnData): AnnData with TRS columns added to adata.obs
"""
adata = data.copy() if copy else data
gene_list_overlap = list(set(adata.var_names) & set(gene_list))
if verbose:
print('# score_cell: %d/%d gene_list genes also in adata'
%(len(gene_list), len(gene_list_overlap)))
print('# score_cell: suffix=%s, flag_correct_background=%s, flag_specific_expressed=%s'
%(suffix, flag_correct_background, flag_specific_expressed))
trs_name = 'trs_%s'%suffix
if trs_name in adata.obs.columns:
print('# score_cell: overwrite original %s in adata.obs.columns'
%trs_name)
adata.obs[trs_name] = adata[:, gene_list_overlap].X.mean(axis=1)
if flag_correct_background:
cell_mean,cell_var = get_sparse_var(adata.X, axis=1)
cell_std = np.sqrt(cell_var)
# reshape to (1, #cells) vector
cell_mean = cell_mean[:, np.newaxis]
cell_std = cell_std[:, np.newaxis]
gwas_mat = adata[:, gene_list_overlap].X
if flag_correct_background:
# normalize for each cell
gwas_mat = (gwas_mat - cell_mean) / cell_std
if flag_specific_expressed:
# normalize for each gene
gene_mean, gene_std = np.mean(gwas_mat, axis=0), np.std(gwas_mat, axis=0)
gwas_mat = (gwas_mat - gene_mean) / gene_std
adata.obs[trs_name] = gwas_mat.mean(axis=1)
# Add z_score, p_value, and fdr
temp_v = adata.obs[trs_name].values
adata.obs['%s_z'%trs_name] = (temp_v - temp_v.mean())/ temp_v.std()
adata.obs['%s_p'%trs_name] = 1 - sp.stats.norm.cdf(adata.obs['%s_z'%trs_name].values)
adata.obs['%s_bhp'%trs_name] = multipletests(adata.obs['%s_p'%trs_name].values,
method='fdr_bh')[1]
return adata if copy else None
def gearys_c(adata, val_obs, prefix, stratify_obs=None, copy=False):
"""
Interface of computing Geary's C statistics
Args:
adata: Anndata object
val_obs: the obs name to calculate this statistics
prefix: the name will be `prefix`_gearys_C
stratify_obs: Calculate the statistics using `stratify_obs` obs column,
must be a categorical variable
"""
adata = adata.copy() if copy else adata
if stratify_obs is not None:
assert adata.obs[stratify_obs].dtype.name == 'category', \
"`stratify_obs` must correspond to a Categorical column"
categories = adata.obs[stratify_obs].unique()
all_c_stats = np.zeros(adata.shape[0])
for cat in categories:
s_index = adata.obs[stratify_obs] == cat
all_c_stats[s_index] = _gearys_c(adata[s_index], adata[s_index].obs[val_obs])
else:
all_c_stats = _gearys_c(adata, adata.obs[val_obs])
gearys_C_name = prefix + '_gearys_C'
if gearys_C_name in adata.obs.columns:
print('# gearys_c: overwrite original %s in adata.obs.columns'
%gearys_C_name)
adata.obs[gearys_C_name] = all_c_stats
# adata.obs[gearys_C_name] = adata.obs[gearys_C_name].astype('category')
return adata if copy else None
def _gearys_c(adata, vals):
"""Compute Geary's C statistics for an AnnData
Adopted from https://github.com/ivirshup/scanpy/blob/metrics/scanpy/metrics/_gearys_c.py
C =
\frac{
(N - 1)\sum_{i,j} w_{i,j} (x_i - x_j)^2
}{
2W \sum_i (x_i - \bar{x})^2
}
Args:
adata (AnnData): AnnData object
adata.obsp["Connectivities] should contain the connectivity graph,
with shape `(n_obs, n_obs)`
vals (Array-like):
Values to calculate Geary's C for. If one dimensional, should have
shape `(n_obs,)`.
Returns:
C: the Geary's C statistics
"""
graph = adata.obsp["connectivities"]
assert graph.shape[0] == graph.shape[1]
graph_data = graph.data.astype(np.float_, copy=False)
assert graph.shape[0] == vals.shape[0]
assert(np.ndim(vals) == 1)
W = graph_data.sum()
N = len(graph.indptr) - 1
vals_bar = vals.mean()
vals = vals.astype(np.float_)
# numerators
total = 0.0
for i in range(N):
s = slice(graph.indptr[i], graph.indptr[i + 1])
# indices of corresponding neighbors
i_indices = graph.indices[s]
# corresponding connecting weights
i_data = graph_data[s]
total += np.sum(i_data * ((vals[i] - vals[i_indices]) ** 2))
numer = (N - 1) * total
denom = 2 * W * ((vals - vals_bar) ** 2).sum()
C = numer / denom
return C
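# Dense-numpy reference (added; illustrative only) for the Geary's C formula in
# the docstring above, using an explicit (n_obs, n_obs) weight matrix W instead
# of the sparse connectivity graph.
def _gearys_c_dense(W, vals):
    vals = np.asarray(vals, dtype=float)
    N = len(vals)
    numer = (N - 1) * np.sum(W * (vals[:, None] - vals[None, :]) ** 2)
    denom = 2 * W.sum() * np.sum((vals - vals.mean()) ** 2)
    return numer / denom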
def generate_null_genes_kh_081520(adata, gene_list, method, random_width=5):
"""
Generate null gene set
adata: AnnData
gene_list: original gene list, should be a list of gene names
method: One of 'mean_equal', 'mean_inflate'
return a list of null genes
"""
temp_df = pd.DataFrame(index=adata.var_names)
temp_df['mean'] = np.array(adata.X.mean(axis=0)).reshape([-1])
temp_df['rank'] = rankdata(temp_df['mean'], method='ordinal') - 1
temp_df = temp_df.sort_values('rank')
assert (method in ['mean_equal', 'mean_inflate']), "method must be in [mean_equal, mean_inflate]"
if method == 'mean_equal':
random_range = np.concatenate([np.arange(-random_width, 0), np.arange(1, random_width + 1)])
if method == 'mean_inflate':
random_range = np.arange(1, random_width + 1)
# ordered gene_list
gene_list_rank = sorted(temp_df.loc[gene_list, 'rank'].values)
gene_list_null = []
for rank in gene_list_rank:
choices = set(rank + random_range) - set(gene_list_rank) - set(gene_list_null)
gene_list_null.append(np.random.choice(list(choices)))
# in case there is replicate / intersect with the gene_list_overlap
gene_list_null = list(set(gene_list_null) - set(gene_list_rank))
gene_list_null = temp_df.index[gene_list_null]
return gene_list_null
def generate_null_dist_kh_081520(
adata,
gene_list,
flag_correct_background=False,
flag_nullgene=False,
random_seed=0,
verbose=True):
"""Generate null distributions
Args:
data (AnnData): AnnData object
adata.X should contain size-normalized log1p transformed count data
gene_list (list): gene list
flag_correct_background (bool):
            Whether to normalize for background mean and std. If True, normalize by
            score = (score - mean)/std
Returns:
A dict with different null distributions
"""
dic_null_dist = dict()
np.random.seed(random_seed)
gene_list_overlap = list(set(adata.var_names) & set(gene_list))
if verbose:
print('# generate_null_dist: %d/%d gene_list genes also in adata'
%(len(gene_list), len(gene_list_overlap)))
print('# generate_null_dist: flag_correct_background=%s'
%(flag_correct_background))
# Compute TRS with simple average
dic_null_dist['TRS'] = adata[:, gene_list_overlap].X.mean(axis=1).A1
if flag_nullgene:
temp_df = pd.DataFrame(index=adata.var_names)
temp_df['mean'] = np.array(adata.X.mean(axis=0)).reshape([-1])
# A random set
ind_select = np.random.permutation(adata.shape[1])[:len(gene_list_overlap)]
gene_list_null = list(adata.var_names[ind_select])
dic_null_dist['nullgene_random'] = adata[:, gene_list_null].X.mean(axis=1).A1
# Random set with matching mean expression
gene_list_null_me = generate_null_genes(adata, gene_list_overlap, method='mean_equal')
dic_null_dist['nullgene_mean_equal'] = adata[:, gene_list_null_me].X.mean(axis=1).A1
if verbose:
print('# generate_null_dist: %d trait genes with mean_exp=%0.3f'
%(len(gene_list_overlap), temp_df.loc[gene_list_overlap,'mean'].values.mean()))
print('# generate_null_dist: %d null_me genes with mean_exp=%0.3f'
%(len(gene_list_null_me), temp_df.loc[gene_list_null_me,'mean'].values.mean()))
# Cell background correction
if flag_correct_background:
v_mean,v_var = util.get_sparse_var(adata.X, axis=1)
v_std = np.sqrt(v_var)
dic_null_dist['TRS'] = (dic_null_dist['TRS'] - v_mean) / v_std * \
np.sqrt(len(gene_list_overlap))
if flag_nullgene:
dic_null_dist['nullgene_random'] = \
(dic_null_dist['nullgene_random'] - v_mean) / v_std * np.sqrt(len(gene_list_null))
dic_null_dist['nullgene_mean_equal'] = \
(dic_null_dist['nullgene_mean_equal'] - v_mean) / v_std * np.sqrt(len(gene_list_null_me))
return dic_null_dist
|
11583066
|
from item import Item
import pygame
import unittest
from buffalo import utils
import random
utils.init()
class TestItem:
def test_init(self):
i = Item("test")
assert i.name == "test"
assert i.quantity == 1
#assert i.info["maxQuantity"] == 99
def test_unknown_item(self):
        i = Item(str(random.random()))
assert i.surface is not None
def test_item_types(self):
from item import ItemType
assert ItemType.WEAPON == 0
assert ItemType.TOOL == 1
assert ItemType.QUEST == 2
assert ItemType.ARMOR == 3
assert ItemType.RESOURCE == 4
def test_item_render(self):
i = Item("test")
i.renderItemQuantity()
|
11583107
|
import unittest
class RandomSelfTestCase(unittest.TestCase):
def testRandomSelf(self):
import hnswlib
import numpy as np
dim = 16
num_elements = 10000
# Generating sample data
data = np.float32(np.random.random((num_elements, dim)))
# Declaring index
p = hnswlib.Index(space='l2', dim=dim) # possible options are l2, cosine or ip
# Initing index
# max_elements - the maximum number of elements, should be known beforehand
# (probably will be made optional in the future)
#
# ef_construction - controls index search speed/build speed tradeoff
# M - is tightly connected with internal dimensionality of the data
        # strongly affects the memory consumption
p.init_index(max_elements = num_elements, ef_construction = 100, M = 16, random_seed=45)
# Controlling the recall by setting ef:
# higher ef leads to better accuracy, but slower search
p.set_ef(10)
p.set_num_threads(4) # by default using all available cores
# We split the data in two batches:
data1 = data[:num_elements // 2]
data2 = data[num_elements // 2:]
print("Adding first batch of %d elements" % (len(data1)))
p.add_items(data1)
p.add_tags([1, 5, 100, 33], 8)
p.add_tags([2, 5, 66, 17], 66)
p.add_tags(list(range(1,4000, 3)), 3)
p.index_tagged(8)
p.index_tagged(66)
p.index_tagged(3, m=4)
p.index_cross_tagged([8, 66])
check_exception = False
try:
p.index_tagged(100)
except RuntimeError as e:
print('Correct exception:', e)
check_exception = True
        self.assertTrue(check_exception, 'did not throw an exception')
# Query the elements for themselves and measure recall:
labels, _ = p.knn_query(data1, k=1)
labels = np.array(labels)
self.assertAlmostEqual(np.mean(labels.reshape(-1) == np.arange(len(data1))),1.0,3)
# Serializing and deleting the index:
index_path='first_half.bin'
print("Saving index to '%s'" % index_path)
p.save_index("first_half.bin")
del p
# Reiniting, loading the index
        p = hnswlib.Index(space='l2', dim=dim)  # you can change the space
print("\nLoading index from 'first_half.bin'\n")
p.load_index("first_half.bin")
self.assertIn(8, p.get_tags(5))
self.assertIn(66, p.get_tags(5))
print("Adding the second batch of %d elements" % (len(data2)))
p.add_items(data2)
p.add_tags([1011, 6015], 18)
p.add_tags([7819], 22)
self.assertIn(18, p.get_tags(6015))
self.assertIn(18, p.get_tags(6015))
p.reset_tags()
p.add_tags([5], 1)
p.add_tags([5], 2)
p.add_tags([5], 3)
self.assertIn(2, p.get_tags(5))
self.assertNotIn(66, p.get_tags(5))
# Query the elements for themselves and measure recall:
labels, _ = p.knn_query(data, k=1)
labels = np.array(labels)
self.assertAlmostEqual(np.mean(labels.reshape(-1) == np.arange(len(data))),1.0,3)
if __name__ == "__main__":
unittest.main()
|
11583155
|
from setuptools import setup, find_packages
setup(
name="simulation_based_calibration",
version="0.0.1",
description='PyMC3 implementation of "Simulation Based Calibration"',
author="<NAME>",
url="http://github.com/colcarroll/simulation_based_calibration",
packages=find_packages(),
install_requires=["pymc3", "tqdm", "matplotlib", "numpy"],
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
include_package_data=True,
)
|
11583167
|
import torch
import itertools
# At pain of messing up a good thing, also collect standard deviation (total) -- divided by total items for average
def update_info_dict(info_dict, labels, preds, threshold=0.5, std=None):
preds = (torch.tensor(preds) > threshold).long()
labels = (torch.tensor(labels) > threshold).long()
# For backward compatibility -- if no std, assume it's zero -- and put it on CUDA if needed
if std is not None:
info_dict['std'] += torch.sum(torch.tensor(std)).float()
else:
info_dict['std'] += torch.sum((preds == 1) & (preds == 0)).float()
info_dict['tp'] += torch.sum((preds == 1) & (labels == 1)).float()
info_dict['tn'] += torch.sum((preds == 0) & (labels == 0)).float()
info_dict['fp'] += torch.sum((preds == 1) & (labels == 0)).float()
info_dict['fn'] += torch.sum((preds == 0) & (labels == 1)).float()
return info_dict
# Mis-nomer -- returns standard deviation per class.
def get_variance(tp, tn, fp, fn, std):
total = tp + tn + fp + fn
return std / total
# TODO: Also return variance per class (in multihead sense) as a metric
def get_metric(infos, metric=None, micro=False):
"""Essentially a case-switch for getting a metric"""
metrics = {
'acc' : get_accuracy,
'jacc' : get_jaccard_index,
'f1' : get_f1,
'mcc' : get_mcc,
'recall': get_recall,
'precision': get_precision,
'var' : get_variance
}
tp = tn = fp = fn = std = 0
if isinstance(infos, dict):
infos = [infos]
metric = metrics[infos[0].get('metric') or metric]
micro = infos[0].get('micro') or micro
stats = ['tp', 'tn', 'fp', 'fn', 'std']
if micro:
# micro averaging computes the metric after aggregating
# all of the parameters from sets being averaged
for info in infos:
tp += info['tp']
tn += info['tn']
fp += info['fp']
fn += info['fn']
std += info['std']
return metric(tp, tn, fp, fn, std)
else:
# macro averaging computes the metric on each set
# and averages the metrics afterward
individual_metrics = []
for info in infos:
individual_metrics.append(metric(*[info[s].item() for s in stats]))
return sum(individual_metrics) / len(individual_metrics)
# Metrics as functions of true positive, true negative,
# false positive, false negative, standard deviation
def get_precision(tp, tn, fp, fn, std):
if tp == 0:
return 0
return tp / (tp + fp)
def get_recall(tp, tn, fp, fn, std):
if tp == 0:
return 0
return tp / (tp + fn)
def get_jaccard_index(tp, tn, fp, fn, std):
if tp == 0:
return 0
return (tp) / (tp + fp + fn)
def get_accuracy(tp, tn, fp, fn, std):
return (tp + tn) / (tp + tn + fp + fn)
def get_f1(tp, tn, fp, fn, std):
if tp == 0:
return 0
return 2.0 * tp / (2 * tp + fp + fn)
def get_mcc(tp, tn, fp, fn, std):
total = (tp + tn + fp + fn)
    # rebind the normalized counts (the original loop only modified the loop variable)
    tp, tn, fp, fn = (v / total for v in (tp, tn, fp, fn))
denom = ((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)) ** 0.5
denom = denom if denom > 1e-8 else 1
return (tp * tn - fp * fn) / denom
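# Illustrative usage sketch (added; not part of the original module): micro vs.
# macro averaging with the helpers above. Two dummy prediction sets are either
# pooled before computing F1 (micro) or averaged after (macro).
def _demo_micro_vs_macro_f1():
    info_a = {k: torch.tensor(0.0) for k in ('tp', 'tn', 'fp', 'fn', 'std')}
    info_b = {k: torch.tensor(0.0) for k in ('tp', 'tn', 'fp', 'fn', 'std')}
    update_info_dict(info_a, labels=[1, 1, 0, 0], preds=[0.9, 0.2, 0.8, 0.1])
    update_info_dict(info_b, labels=[1, 0, 0, 0], preds=[0.7, 0.1, 0.2, 0.3])
    micro_f1 = get_metric([info_a, info_b], metric='f1', micro=True)   # pooled counts
    macro_f1 = get_metric([info_a, info_b], metric='f1', micro=False)  # mean of per-set F1
    return float(micro_f1), float(macro_f1)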
|
11583168
|
from Compiler.types import *
from Compiler.instructions import *
from Compiler.util import tuplify,untuplify
from Compiler import instructions,instructions_base,comparison,program
import inspect,math
import random
import collections
from Compiler.library import *
from Compiler.types_gc import *
from operator import itemgetter
import numpy as np
def get_diff_types(data_list):
cint_data = [d for d in data_list if type(d) == cint]
pint_data = [(d, d.pid) for d in data_list if type(d) == pint]
sint_data = [d for d in data_list if type(d) == sint]
if len(pint_data) > 1:
pint_data = sorted(pint_data, key=itemgetter(1))
return (cint_data, pint_data, sint_data)
# This is not parallelized
def int_add(data_list, nparallel=1):
(cint_data, pint_data, sint_data) = get_diff_types(data_list)
c_res = cint(0)
for cd in cint_data:
c_res += cd
pd_res = []
current_pid = None
for (pd, pid) in pint_data:
if pid != current_pid:
current_pid = pid
pd_res.append(pint(0))
pd_res[-1] += pd
res = cint(0)
res += c_res
for pd in pd_res:
res += pd
for sd in sint_data:
res += sd
return res
def sum_lib(lst):
flattened_lst = []
for i in range(len(lst)):
print "TYPE?", type(lst[i])
if type(lst[i]) in (sfixMatrix, cfixMatrix, sfixMatrixGC, cfixMatrixGC):
flattened_lst += flatten(lst[i])
print flattened_lst
else:
flattened_lst.append(lst[i])
return sum(flattened_lst)
def max_lib(lst):
flattened_lst = []
for i in range(len(lst)):
print "TYPE?", type(lst[i])
if type(lst[i]) in (sfixMatrix, cfixMatrix, sfixMatrixGC, cfixMatrixGC):
flattened_lst += flatten(lst[i])
print flattened_lst
else:
flattened_lst.append(lst[i])
return max(flattened_lst)
def min_lib(lst):
flattened_lst = []
for i in range(len(lst)):
print "TYPE?", type(lst[i])
if type(lst[i]) in (sfixMatrix, cfixMatrix, sfixMatrixGC, cfixMatrixGC):
flattened_lst += flatten(lst[i])
print flattened_lst
else:
flattened_lst.append(lst[i])
return min(flattened_lst)
def flatten(A):
lst = []
if type(A) in (sfixMatrix, sfixMatrixGC, cfixMatrix, cfixMatrixGC):
for i in range(A.rows):
for j in range(A.columns):
lst.append(A[i][j])
return lst
import functools
def reduce_lib(lst, reduce_fn):
flattened_lst = []
for i in range(len(lst)):
if type(lst[i]) in(sfixMatrix, cfixMatrix, sfixMatrixGC, cfixMatrixGC):
flattened_lst += flatten(lst[i])
else:
flattened_lst.append(lst[i])
return reduce(reduce_fn, flattened_lst)
# Copy a portion of the large matrix to the small matrix.
def copy_matrix(dest, src, rows, cols, index):
for i in range(rows):
for j in range(cols):
            dest[i][j] = src[index * rows + i][j]
# Tree-based multiplication
def int_multiply(data_list, nparallel=2):
length = len(data_list)
data = []
data.append(Array(length, sint))
for i in range(length):
data[0][i] = data_list[i]
while length > 1:
length = (length / 2) + (length % 2)
data.append(Array(length, sint))
@for_range(length)
def f(i):
data[-1][i] = sint(0)
level = 0
for x in range(len(data) - 1):
print("level = {}, length = {}".format(level+1, data[level+1].length))
exec_len = data[level].length / 2
@for_range_multithread(nparallel, exec_len, exec_len)
def _multiply(i):
data[level+1][i] = data[level][2 * i] * data[level][2 * i + 1]
if data[level].length % 2 > 0:
data[level+1][data[level+1].length - 1] = data[level][data[level].length - 1]
level += 1
return data[-1][0]
def _transpose(A, B):
@for_range(A.rows)
def f(i):
@for_range(A.columns)
def g(j):
B[j][i] = A[i][j]
def _transpose_gc(A, B):
for i in range(A.rows):
for j in range(A.columns):
B[j][i] = A[i][j]
def transpose(A):
if isinstance(A, np.ndarray):
return A.transpose()
if not isinstance(A, (Matrix, MatrixGC)):
raise ValueError("Only matrix can be transposed")
if isinstance(A, (sintMatrix, sfixMatrix, cintMatrix, cfixMatrix)):
B = A.__class__(A.columns, A.rows)
_transpose(A, B)
return B
elif isinstance(A, (sintMatrixGC, sfixMatrixGC)):
B = A.__class__(A.columns, A.rows)
_transpose_gc(A, B)
return B
else:
raise NotImplementedError
def _matmul(A, B, C, D, int_type, nparallel=1):
total = A.rows * B.columns * A.columns
@for_range_multithread(nparallel, total, total)
def _multiply(i):
i_index = i / (B.columns * A.columns)
j_index = i % (B.columns * A.columns) / (A.columns)
k_index = i % A.columns
D[i] = A[i_index][k_index] * B[k_index][j_index]
@for_range_multithread(nparallel, A.rows * B.columns, A.rows * B.columns)
def _add(i):
i_index = i / B.columns
j_index = i % B.columns
C[i_index][j_index] = int_type(0)
@for_range(A.columns)
def _add_element(j):
C[i_index][j_index] += D[i * A.columns + j]
return C
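# Sanity-check sketch (added; plain Python, runs outside the MPC runtime): the
# flattened-index arithmetic used by _matmul above. For work item i out of
# rows * cols * inner, the three decoded indices cover every (row, col, inner)
# combination exactly once.
def _demo_matmul_indexing(rows=2, inner=3, cols=4):
    triples = []
    for i in range(rows * cols * inner):
        i_index = i // (cols * inner)
        j_index = (i % (cols * inner)) // inner
        k_index = i % inner
        triples.append((i_index, j_index, k_index))
    assert sorted(triples) == [(r, c, k) for r in range(rows)
                               for c in range(cols) for k in range(inner)]
    return triples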
# Not parallelized
def _matmul_mix(A, B, nparallel=1):
    C = MixMatrix(A.rows, B.columns)
    @for_range(A.rows * B.columns)
    def f(i):
        # decode the flattened output index into (row, column)
        row = i / B.columns
        col = i % B.columns
        @for_range(A.columns)
        def g(j):
            v = C.get(i)
            v += A.get(row * A.columns + j) * B.get(j * B.columns + col)
            C.set(i, v)
    return C
def _matmul_gc(A, B, C):
for i in range(A.rows):
for j in range(B.columns):
v = A[i][0] * B[0][j]
for k in range(1, A.columns):
v += A[i][k] * B[k][j]
C[i][j] = v
def matmul(A, B, left_rows, left_cols, right_rows, right_cols, mat_type, nparallel=1):
if isinstance(A, np.ndarray) and isinstance(B, np.ndarray):
return np.matmul(A, B)
# Tentative, very janky. Yep, this doesn't work :(. Buyer BEWARE!
if isinstance(A, sintMatrix) and isinstance(B, sintMatrix):
C = sintMatrix(A.rows, B.columns)
D = sintArray(A.rows * B.columns * A.columns)
return _matmul(A, B, C, D, sint, nparallel)
#C = sintMatrix(left_rows, right_cols)
#D = sintArray(left_rows * right_cols * left_cols)
#return _matmul(A, B, C, D, sint, nparallel)
elif isinstance(A, cintMatrix) and isinstance(B, cintMatrix):
C = cintMatrix(A.rows, B.columns)
D = cintArray(A.rows * B.columns * A.columns)
return _matmul(A, B, C, D, cint, nparallel)
elif isinstance(A, sfixMatrix) and isinstance(B, sfixMatrix):
C = sfixMatrix(A.rows, B.columns)
D = sfixArray(A.rows * B.columns * A.columns)
return _matmul(A, B, C, D, sfix, nparallel)
elif isinstance(A, cfixMatrixGC) or isinstance(B, cfixMatrixGC):
C = cfixMatrixGC(A.rows, B.columns)
_matmul_gc(A, B, C)
return C
elif isinstance(A, sfixMatrixGC) or isinstance(B, sfixMatrixGC):
C = sfixMatrixGC(A.rows, B.columns)
_matmul_gc(A, B, C)
return C
elif isinstance(A, MixMatrix) and isinstance(B, MixMatrix):
return _matmul_mix(A, B, nparallel)
elif isinstance(A, (sintMatrix, cintMatrix, cfixMatrix, sfixMatrix)) and isinstance(B, (sintMatrix, cintMatrix, cfixMatrix, sfixMatrix)):
C = sintMatrix(A.rows, B.columns)
D = sintArray(A.rows * B.columns * A.columns)
return _matmul(A, B, C, D, sint, nparallel)
else:
raise NotImplementedError
def _matadd(A, B, C, int_type, nparallel=1):
@for_range_multithread(nparallel, A.rows * A.columns, A.rows * A.columns)
def _add(i):
i_index = i / A.columns
j_index = i % A.columns
C[i_index][j_index] = A[i_index][j_index] + B[i_index][j_index]
def matadd(A, B, nparallel=1):
if isinstance(A, np.ndarray) and isinstance(B, np.ndarray):
return np.add(A, B)
if A.rows != B.rows or A.columns != B.columns:
raise NotImplementedError
if isinstance(A, cintMatrix) and isinstance(B, cintMatrix):
C = cintMatrix(A.rows, A.columns)
_matadd(A, B, C, cint, nparallel)
return C
elif isinstance(A, sintMatrix) and isinstance(B, sintMatrix):
C = sintMatrix(A.rows, A.columns)
_matadd(A, B, C, sint, nparallel)
return C
elif isinstance(A, sfixMatrix) and isinstance(B, sfixMatrix):
C = sfixMatrix(A.rows, A.columns)
_matadd(A, B, C, sfix, nparallel)
return C
elif type(A) in (sfixMatrix, cfixMatrix) and type(B) in (sfixMatrix, cfixMatrix):
C = sfixMatrix(A.rows, A.columns)
_matadd(A, B, C, sfix, nparallel)
return C
elif type(A) in (sfixMatrixGC, cfixMatrixGC) and type(B) in (sfixMatrixGC, cfixMatrixGC):
C = cfixMatrixGC(A.rows, A.columns, cfix_gc)
_matadd(A, B, C, cfix_gc, nparallel)
return C
def _matsub(A, B, C, int_type, nparallel=1):
@for_range_multithread(nparallel, A.rows * A.columns, A.rows * A.columns)
def _add(i):
i_index = i / A.columns
j_index = i % A.columns
C[i_index][j_index] = A[i_index][j_index] - B[i_index][j_index]
def _matsub_gc(A, B, C):
for i in range(A.rows):
for j in range(A.columns):
C[i][j] = A[i][j] - B[i][j]
def matsub(A, B, nparallel=1):
if isinstance(A, np.ndarray) and isinstance(B, np.ndarray):
return np.subtract(A, B)
if A.rows != B.rows or A.columns != B.columns:
raise ValueError("[matsub] Matrices must have the same sizes")
if isinstance(A, cintMatrix) and isinstance(B, cintMatrix):
C = cintMatrix(A.rows, A.columns)
_matsub(A, B, C, cint, nparallel)
return C
elif isinstance(A, sintMatrix) and isinstance(B, sintMatrix):
C = sintMatrix(A.rows, A.columns)
_matsub(A, B, C, sint, nparallel)
return C
elif isinstance(A, sfixMatrix) and isinstance(B, sfixMatrix):
C = sfixMatrix(A.rows, A.columns)
_matsub(A, B, C, sfix, nparallel)
return C
elif isinstance(A, sfixMatrixGC) and isinstance(B, sfixMatrixGC):
C = sfixMatrixGC(A.rows, A.columns)
_matsub_gc(A, B, C)
return C
else:
raise NotImplementedError
# Vertically stack the input matrices (they must all have the same number of columns)
def matstack_int(matrices):
pid = None
s = set([m.columns for m in matrices])
    if len(s) > 1:
raise ValueError("Can only stack matrices with the same number of columns")
num_rows_list = [m.rows for m in matrices]
M_rows = sum(num_rows_list)
M_columns = s.pop()
M = cintMatrix(M_rows, M_columns)
int_type = cint
pid = 0
s = set(type(m) for m in matrices)
if len(s) == 1 and cintMatrix in s:
M = cintMatrix(M_rows, M_columns)
int_type = cint
elif len(s) == 1 and pintMatrix in s:
parties = set([m.pid for m in matrices])
if len(parties) == 1:
pid = parties.pop()
M = pintMatrix(pid, M_rows, M_columns)
int_type = pint
else:
M = sintMatrix(M_rows, M_columns)
int_type = sint
else:
M = sintMatrix(M_rows, M_columns)
int_type = sint
row_count = 0
for m in matrices:
@for_range(m.rows)
def f(i):
@for_range(m.columns)
def g(j):
if int_type == pint:
M[row_count + i][j] = pint(pid, 0)
else:
M[row_count + i][j] = int_type(0)
M[row_count + i][j] += m[i][j]
return M
def matstack(matrices):
if isinstance(matrices[0], (cintMatrix, pintMatrix, sintMatrix)):
return matstack_int(matrices)
else:
raise NotImplementedError
def _sigmoid_sfix(v):
sign_v = cfix(1) - cfix(2) * (v < 0)
denom = (v * sign_v) + sfix(1)
res = v / denom
return res
def _sigmoid_sfix_gc(v):
abs_v = v.absolute()
denom = abs_v + cfix_gc(1)
res = v / denom
return res
def sigmoid(v, nparallel=1):
if isinstance(v, sfix):
return _sigmoid_sfix(v)
elif isinstance(v, (sfixMatrix)):
res = v.__class__(v.rows, v.columns)
@for_range_multithread(nparallel, v.rows, v.rows)
def a(i):
@for_range_multithread(nparallel, v.columns, v.columns)
def b(j):
res[i][j] = _sigmoid_sfix(v[i][j])
return res
elif isinstance(v, sfixMatrixGC):
res = v.__class__(v.rows, v.columns)
for i in range(v.rows):
for j in range(v.columns):
res[i][j] = _sigmoid_sfix_gc(v[i][j])
return res
else:
raise NotImplementedError
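# Plain-float reference (added; illustrative only) of the approximation used by
# sigmoid above: it is the "fast sigmoid" v / (1 + |v|), not the exact logistic
# function 1 / (1 + exp(-v)).
def _sigmoid_float_reference(v):
    return v / (1.0 + abs(v))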
def mat_const_mul(c, m, nparallel=1):
if isinstance(m, np.ndarray):
if type(c) in (float, int):
return c * m
else:
raise ValueError("Type of constant is: {0} when expected float and int.".format(type(c)))
if isinstance(m, sfixMatrix) or isinstance(m, cfixMatrix):
if isinstance(m, sfixMatrix):
res = sfixMatrix(m.rows, m.columns)
else:
res = cfixMatrix(m.rows, m.columns)
"""
@for_range_multithread(nparallel, m.rows * m.columns, m.rows * m.columns)
def f(i):
@for_range_multithread(nparallel, m.columns, m.columns)
def g(j):
res[i][j] = c * m[i][j]
"""
@for_range_multithread(nparallel, m.rows * m.columns, m.rows * m.columns)
def loop(i):
i_index = i / m.columns
j_index = i % m.columns
res[i_index][j_index] = c * m[i_index][j_index]
return res
elif isinstance(m, sfixMatrixGC) or isinstance(m, cfixMatrixGC):
if isinstance(m, sfixMatrixGC):
res = sfixMatrixGC(m.rows, m.columns)
else:
res = cfixMatrixGC(m.rows, m.columns)
for i in range(m.rows):
for j in range(m.columns):
res[i][j] = c * m[i][j]
return res
else:
raise NotImplementedError
def mat_assign(o, i, nparallel=1):
if isinstance(i, (Array, ArrayGC)):
if o.length != i.length:
raise ValueError("Arrays must be of the same sizes")
if isinstance(i, Array):
@for_range(i.length)
def f(u):
o[u] = i[u]
elif isinstance(i, ArrayGC):
for u in range(i.length):
o[u] = i[u]
elif isinstance(i, (Matrix, MatrixGC)):
if o.rows != i.rows or o.columns != i.columns:
raise ValueError("Matrices must be of the same sizes")
if isinstance(i, Matrix):
@for_range_multithread(nparallel, i.rows, i.rows)
def f(u):
@for_range_multithread(nparallel, i.columns, i.columns)
def g(v):
o[u][v] = i[u][v]
elif isinstance(i, MatrixGC):
for u in range(i.rows):
for v in range(i.columns):
o[u][v] = i[u][v]
elif isinstance(i, list):
for u in range(len(i)):
o[u] = i[u]
else:
raise NotImplementedError
def array_index_secret_load_if(condition, l, index_1, index_2, nparallel=1):
supported_types_a = (sint, sfix)
supported_types_b = (sint_gc, sfix_gc)
if isinstance(index_1, supported_types_a) and isinstance(index_2, supported_types_a):
index = ((1 - condition) * index_1) + (condition * index_2)
return array_index_secret_load_a(l, index, nparallel=nparallel)
elif isinstance(index_1, supported_types_b) and isinstance(index_2, supported_types_b):
index = ((~condition) & index_1).__xor__(condition & index_2)
return array_index_secret_load_gc(l, index)
else:
raise NotImplementedError
def get_identity_matrix(value_type, n):
if isinstance(value_type, (sfix, sfixMatrix)):
ret = sfixMatrix(n, n)
@for_range(n)
def f(i):
@for_range(n)
def g(j):
v = (i == j)
v = sint(v)
vfix = sfix.load_sint(v)
ret[i][j] = vfix
return ret
elif isinstance(value_type, (sfix_gc, sfixMatrixGC, cfix_gc, cfixMatrixGC)):
ret = sfixMatrixGC(n, n)
for i in range(n):
for j in range(n):
ret[i][j] = cfix_gc(int(i == j))
return ret
else:
raise NotImplementedError
def cond_assign(cond, val1, val2):
res = ((~cond) & val1).__xor__(cond & val2)
return res
def matinv(A, nparallel=1):
if isinstance(A, np.ndarray):
return np.linalg.inv(A)
#if not isinstance(A, sfixMatrix) and not isinstance(A, cfixMatrix):
#raise NotImplementedError
n = A.rows
X = A.__class__(A.rows, A.columns, cfix_gc)
mat_assign(X, A)
I = get_identity_matrix(A, A.rows)
for j in range(n):
for i in range(j, n):
b1 = X[i][j].__lt__(cfix_gc(0.00001))
b2 = X[i][j].__gt__(cfix_gc(-0.00001))
b = ~(b1 & b2) #1 - b1 * b2
X[i][j] = b & X[i][j]
for k in range(n):
a1 = X[j][k]
a2 = X[i][k]
X[j][k] = cond_assign(b, a2, a1)
X[i][k] = cond_assign(b, a1, a2)
a1 = I[j][k]
a2 = I[i][k]
I[j][k] = cond_assign(b, a2, a1)
I[i][k] = cond_assign(b, a1, a2)
xjj_inv = cfix_gc(1).__div__(X[j][j])
t = cond_assign(b, xjj_inv, cfix_gc(1))
for k in range(n):
X[j][k] = t * X[j][k]
I[j][k] = t * I[j][k]
for L in range(j):
t = cfix_gc(-1) * X[L][j]
for k in range(n):
a1 = X[L][k] + t * X[j][k]
a2 = X[L][k]
b1 = I[L][k] + t * I[j][k]
b2 = I[L][k]
X[L][k] = cond_assign(b, a1, a2)
I[L][k] = cond_assign(b, b1, b2)
for L in range(j+1, n):
# from j+1 to n
t = cfix_gc(-1) * X[L][j]
for k in range(n):
a1 = X[L][k] + t * X[j][k]
a2 = X[L][k]
b1 = I[L][k] + t * I[j][k]
b2 = I[L][k]
X[L][k] = cond_assign(b, a1, a2)
I[L][k] = cond_assign(b, b1, b2)
return I
"""
@for_range(n)
def f0(j):
#@for_range(j, n)
@for_range(n)
def f1(i):
@if_(i >= j)
def h():
b1 = X[i][j].__lt__(sfix(0.00001))
b2 = X[i][j].__gt__(sfix(-0.00001))
b = 1 - b1 * b2
X[i][j] = b * X[i][j]
@for_range_multithread(nparallel, n, n)
def f2(k):
a1 = X[j][k]
a2 = X[i][k]
X[j][k] = cond_assign_a(b, a2, a1)
X[i][k] = cond_assign_a(b, a1, a2)
a1 = I[j][k]
a2 = I[i][k]
I[j][k] = cond_assign_a(b, a2, a1)
I[i][k] = cond_assign_a(b, a1, a2)
xjj_inv = sfix(1).__div__(X[j][j])
t = cond_assign_a(b, xjj_inv, sfix(1))
@for_range_multithread(nparallel, n, n)
def f3(k):
X[j][k] = t * X[j][k]
I[j][k] = t * I[j][k]
@for_range(n)
def f4(L):
@if_(L < j)
def h():
t = sfix(-1) * X[L][j]
@for_range_multithread(nparallel, n, n)
def g0(k):
a1 = X[L][k] + t * X[j][k]
a2 = X[L][k]
b1 = I[L][k] + t * I[j][k]
b2 = I[L][k]
X[L][k] = cond_assign_a(b, a1, a2)
I[L][k] = cond_assign_a(b, b1, b2)
# from j+1 to n
@for_range(n)
def f5(L):
@if_(L > j)
def h():
t = sfix(-1) * X[L][j]
@for_range_multithread(nparallel, n, n)
def g0(k):
a1 = X[L][k] + t * X[j][k]
a2 = X[L][k]
b1 = I[L][k] + t * I[j][k]
b2 = I[L][k]
X[L][k] = cond_assign_a(b, a1, a2)
I[L][k] = cond_assign_a(b, b1, b2)
return I
"""
# Assumes that the piecewise function is public for now
# Format: bounds in the form of [lower, upper]
# Function in the form of a*x + b
class Piecewise(object):
def __init__(self, num_boundaries):
self.lower_bound = sfixArray(3)
self.upper_bound = sfixArray(3)
self.boundary_points = sfixMatrix(num_boundaries - 2, 4)
self.counter = regint(0)
def add_boundary(self, lower, upper, a, b):
if lower is None:
self.lower_bound[0] = upper
self.lower_bound[1] = a
self.lower_bound[2] = b
elif upper is None:
self.upper_bound[0] = lower
self.upper_bound[1] = a
self.upper_bound[2] = b
else:
self.boundary_points[self.counter][0] = lower
self.boundary_points[self.counter][1] = upper
self.boundary_points[self.counter][2] = a
self.boundary_points[self.counter][3] = b
self.counter += regint(1)
# For debugging purposes only
def debug(self):
print_ln("[-inf, %s],: %s * x + %s", self.lower_bound[0].reveal(), self.lower_bound[1].reveal(), self.lower_bound[2].reveal())
@for_range(self.boundary_points.rows)
def f(i):
print_ln("[%s, %s]: %s * x + %s", self.boundary_points[i][0].reveal(), self.boundary_points[i][1].reveal(), self.boundary_points[i][2].reveal(), self.boundary_points[i][3].reveal())
print_ln("[%s, inf],: %s * x + %s", self.upper_bound[0].reveal(), self.upper_bound[1].reveal(), self.upper_bound[2].reveal())
def evaluate(self, x):
coefs = sfixArray(2)
coefs[0] = sfix(0)
coefs[1] = sfix(0)
# Check for lower bound
b = x.__le__(self.lower_bound[0])
coefs[0] += b * self.lower_bound[1]
coefs[1] += b * self.lower_bound[2]
@for_range(self.boundary_points.rows)
def f(i):
lower = self.boundary_points[i][0]
upper = self.boundary_points[i][1]
b1 = x.__gt__(lower)
b2 = x.__le__(upper)
b = b1 * b2
coefs[0] += b * self.boundary_points[i][2]
coefs[1] += b * self.boundary_points[i][3]
# Check for upper bound
b = x.__gt__(self.upper_bound[0])
coefs[0] += b * self.upper_bound[1]
coefs[1] += b * self.upper_bound[2]
res = coefs[0] * x + coefs[1]
return res
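# Illustrative usage sketch (added; mirrors soft_threshold_vec below): encoding
# clipping to [-1, 1] as three public Piecewise segments. Assumes sfix constants
# behave as in the rest of this file.
def _demo_piecewise_clip(x):
    clip_fn = Piecewise(3)
    clip_fn.add_boundary(None, sfix(-1), sfix(0), sfix(-1))    # x <= -1      ->  0*x - 1
    clip_fn.add_boundary(sfix(-1), sfix(1), sfix(1), sfix(0))  # -1 < x <= 1  ->  1*x + 0
    clip_fn.add_boundary(sfix(1), None, sfix(0), sfix(1))      # x > 1        ->  0*x + 1
    return clip_fn.evaluate(x)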
def LogisticRegression(X, y, batch_size, sgd_iters, dim):
assert(isinstance(X, Matrix))
assert(isinstance(y, Matrix))
    if batch_size * sgd_iters >= X.rows:
        raise ValueError("batch_size * sgd_iters = {0} * {1} >= # of rows in X: {2}".format(batch_size, sgd_iters, X.rows))
    if batch_size * sgd_iters >= y.rows:
        raise ValueError("batch_size * sgd_iters = {0} * {1} >= # of rows in y: {2}".format(batch_size, sgd_iters, y.rows))
if isinstance(X, sfixMatrix):
w = sfixMatrix(dim, 1)
#alpha_B = cfix(0.01 / batch_size) currently cfix and sfix multiplying doesn't work
alpha_B = cfix(0.01 / batch_size)
XB = sfixMatrix(batch_size, dim)
yB = sfixMatrix(batch_size, 1)
else:
w = sfixMatrixGC(dim, 1)
alpha_B = cfix_gc(0.01 / batch_size)
XB = sfixMatrixGC(batch_size, dim)
yB = sfixMatrixGC(batch_size, 1)
for i in range(sgd_iters):
batch_low = i * batch_size
batch_high = (i + 1) * batch_size
for j in range(batch_size):
for d in range(dim):
XB[j][d] = X[batch_low + j][d]
yB[j][0] = y[batch_low + j][0]
w_ret = matmul(XB, w, batch_size, dim, dim, 1, sfix)
#reveal_all(w_ret, "w_ret")
w_sigmoid = sigmoid(w_ret)
#reveal_all(w_sigmoid, "w_sigmoid")
w_sub = matsub(w_sigmoid, yB)
XB_T = transpose(XB)
w_1 = matmul(XB_T, w_sub, dim, batch_size, batch_size, 1, sfix)
#reveal_all(w_1, "w_1")
w_2 = mat_const_mul(alpha_B, w_1)
#reveal_all(w_2, "w_2")
w_res = matsub(w, w_2)
mat_assign(w, w_res)
#print_ln("Iter: %s", i)
return w
def DecisionTree(tree, x, levels):
    # x is the feature vector being classified; each node w stores
    # [feature index, split value, left child, right child]
    w = tree[0]
for i in range(levels-1):
index = w[0]
split = w[1]
left_child = w[2]
right_child = w[3]
f = x[index]
cond = (f < split)
w_res = array_index_secret_load_if(cond, tree, left_child, right_child)
mat_assign(w, w_res)
# Return the final prediction class.
return w[1]
def get_ith_matrix(mat, index, rows, cols, mat_type=sfixMatrix):
#ret = s_fix_mat(rows, cols)
#ret = sfixMatrix(rows, cols)
ret = mat_type(rows, cols)
for i in range(rows):
for j in range(cols):
ret[i][j] = mat[index * rows + i][j]
return ret
def copy_ith_matrix(dest, src, index, rows, cols):
for i in range(rows):
for j in range(cols):
dest[index * rows + i][j] = src[i][j]
# Local computation of weight vector.
def admm_local(XXinv, Xy, u, z, rho, num_cols):
temp = matsub(z, u)
z_u = mat_const_mul(rho, temp)
#for i in range(z_u.rows):
#print_ln("Admm local z: %s, temp: %s", z_u[i][0].reveal(), temp[i][0].reveal())
second_term = matadd(Xy, z_u) #add_matrices(Xy, z_u, NUM_COLS, 1)
w = matmul(XXinv, second_term, num_cols, num_cols, num_cols, 1, sfix)
return w
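# Plain-numpy reference (added; illustrative only) of the ADMM local update
# implemented above: w = (X^T X + rho*I)^{-1} (X^T y + rho*(z - u)), where
# XXinv already holds the inverse as produced by ADMM_preprocess below.
def _admm_local_np(XXinv, Xy, u, z, rho):
    return XXinv.dot(Xy + rho * (z - u))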
def soft_threshold_vec(threshold, vec, num_cols, mat_type=sfixMatrix):
#vec_new = s_fix_mat(NUM_COLS, 1)
#vec_new = sfixMatrix(num_cols, 1)
vec_new = mat_type(num_cols, 1)
neg_threshold = sfix(-1) * threshold
#neg_threshold = threshold.__neg__()
for i in range(num_cols):
threshold_fn = Piecewise(3)
threshold_fn.add_boundary(None, neg_threshold, sfix(0), vec[i][0] + threshold)
#threshold_fn.add_boundary(None, neg_threshold, c_fix(0), vec[i][0] + threshold)
threshold_fn.add_boundary(neg_threshold, threshold, sfix(0), sfix(0))
#threshold_fn.add_boundary(neg_threshold, threshold, c_fix(0), c_fix(0))
threshold_fn.add_boundary(threshold, None, sfix(0), vec[i][0] - threshold)
#threshold_fn.add_boundary(threshold, None, c_fix(0), vec[i][0] - threshold)
val = threshold_fn.evaluate(vec[i][0])
vec_new[i][0] = val
return vec_new
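# Plain-numpy reference (added; illustrative only) of the soft-thresholding
# operator built from Piecewise segments above:
# S_t(v) = sign(v) * max(|v| - t, 0), applied element-wise.
def _soft_threshold_np(threshold, vec):
    return np.sign(vec) * np.maximum(np.abs(vec) - threshold, 0.0)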
def admm_coordinate(w_list, u_list, z, rho, l, num_cols, num_parties, mat_type=sfixMatrix):
#w_avg = s_fix_mat(num_cols, 1)
#u_avg = s_fix_mat(num_cols, 1)
#w_avg = sfixMatrix(num_cols, 1)
#u_avg = sfixMatrix(num_cols, 1)
w_avg = mat_type(num_cols, 1)
u_avg = mat_type(num_cols, 1)
w_avg = mat_const_mul(cfix(0), w_avg)
u_avg = mat_const_mul(cfix(0), u_avg)
for i in range(num_parties):
w = get_ith_matrix(w_list, i, num_cols, 1, mat_type)
u = get_ith_matrix(u_list, i, num_cols, 1, mat_type)
new_w_avg = matadd(w_avg, w) #add_matrices(w_avg, w, NUM_COLS, 1)
new_u_avg = matadd(u_avg, u) #add_matrices(u_avg, u, NUM_COLS, 1)
mat_assign(w_avg, new_w_avg)
mat_assign(u_avg, new_u_avg)
#avg = c_fix(1.0 / NUM_PARTIES) cfix multiplication doesn't work
if mat_type in [sfixMatrix, sintMatrix]:
avg = sfix(1.0 / num_parties) # Changing THIS line to cfix completely breaks everything wtf.
threshold = l / (rho * num_parties) #sfix(l/(rho * num_parties))
else:
avg = sfix_gc(1.0 / num_parties)
threshold = sfix_gc(l/(rho * num_parties))
"""
for i in range(w_avg.rows):
print_ln("w_avg_mul: %s, w_avg: %s", (w_avg[i][0] * cfix(1.0 / num_parties)).reveal(), w_avg[i][0].reveal())
print_ln("u_avg_mul: %s, u_avg: %s", (u_avg[i][0] * cfix(1.0 / num_parties)).reveal(), u_avg[i][0].reveal())
"""
new_w_avg = mat_const_mul(avg, w_avg)
new_u_avg = mat_const_mul(avg, u_avg)
mat_assign(w_avg, new_w_avg)
mat_assign(u_avg, new_u_avg)
# Applying thresholding
u_plus_w = matadd(w_avg, u_avg)
z_new = soft_threshold_vec(threshold, u_plus_w, num_cols, mat_type)
#u_list_new = s_fix_mat(num_parties * num_cols, 1)
#neg_z = s_fix_mat(num_cols, 1)
#u_list_new = sfixMatrix(num_parties * num_cols, 1)
#neg_z = sfixMatrix(num_cols, 1)
u_list_new = mat_type(num_parties * num_cols, 1)
neg_z = mat_type(num_cols, 1)
for i in range(z_new.rows):
for j in range(z_new.columns):
neg_z[i][j] = z_new[i][j].__neg__()
for i in range(num_parties):
u_i = get_ith_matrix(u_list, i, num_cols, 1, mat_type)
w_i = get_ith_matrix(w_list, i, num_cols, 1, mat_type)
intermediate_vec = matadd(u_i, w_i) #add_matrices(u_i, w_i, NUM_COLS, 1)
sum_vec = matadd(intermediate_vec, neg_z) #add_matrices(intermediate_vec, neg_z, NUM_COLS, 1)
copy_ith_matrix(u_list_new, sum_vec, i, num_cols, 1)
#reveal_all(z_new, "intermediate_weights")
return u_list_new, z_new
def ADMM_preprocess(x_data, y_data, rho, num_parties, num_rows, num_cols, mat_type=sfixMatrix):
#XTX_inv_lst = s_fix_mat(NUM_PARTIES * NUM_COLS, NUM_COLS)
#XTy_lst = s_fix_mat(NUM_PARTIES * NUM_COLS, 1)
#XTX_inv_lst = sfixMatrix(num_parties * num_cols, num_cols)
#XTy_lst = sfixMatrix(num_parties * num_cols, 1)
XTX_inv_lst = mat_type(num_parties * num_cols, num_cols)
XTy_lst = mat_type(num_parties * num_cols, 1)
for i in range(num_parties):
x_i = get_ith_matrix(x_data, i, num_rows, num_cols, mat_type)
y_i = get_ith_matrix(y_data, i, num_rows, 1, mat_type)
X_T = transpose(x_i)
XTy = matmul(X_T, y_i, num_cols, num_rows, num_rows, 1, sfix)
XTX = matmul(X_T, x_i, num_cols, num_rows, num_rows, num_cols, sfix)
#rho_identity = s_fix_mat(NUM_COLS, NUM_COLS)
#rho_identity = sfixMatrix(num_cols, num_cols)
rho_identity = mat_type(num_cols, num_cols)
rho_identity = mat_const_mul(cfix(0), rho_identity)
for j in range(num_cols):
rho_identity[j][j] = rho #rho_val #sfix(rho_val)
XTX_rho_identity = matadd(XTX, rho_identity) #add_matrices(XTX, rho_identity, NUM_COLS, NUM_COLS)
XTX_inv = matinv(XTX_rho_identity)
copy_ith_matrix(XTX_inv_lst, XTX_inv, i, num_cols, num_cols)
copy_ith_matrix(XTy_lst, XTy, i, num_cols, 1)
return XTX_inv_lst, XTy_lst
def ADMM(XTX_inv_lst, XTy_lst, admm_iter, num_parties, num_cols, rho, l):
    #XTX_inv_lst, XTy_lst = local_compute(x_data, y_data, num_parties, num_rows, num_cols)
#w_list = s_fix_mat(num_parties * num_cols, 1)
mat_type = None
if isinstance(XTX_inv_lst, sfixMatrix):
mat_type = sfixMatrix
elif isinstance(XTX_inv_lst, sfixMatrixGC):
mat_type = sfixMatrixGC
elif isinstance(XTX_inv_lst, sintMatrix):
mat_type = sintMatrix
else:
        raise ValueError("Type of matrix: {0} does not correspond to anything supported by this library".format(type(XTX_inv_lst)))
#w_list = sfixMatrix(num_parties * num_cols, 1)
#u_list = sfixMatrix(num_parties * num_cols, 1)
#z = sfixMatrix(num_cols, 1)
w_list = mat_type(num_parties * num_cols, 1)
u_list = mat_type(num_parties * num_cols, 1)
z = mat_type(num_cols, 1)
w_list = mat_const_mul(cfix(0), w_list)
u_list = mat_const_mul(cfix(0), u_list)
z = mat_const_mul(cfix(0), z)
"""
for i in range(w_list.rows):
for j in range(w_list.columns):
print_ln("%s, %s", w_list[i][j].reveal(), u_list[i][j].reveal())
"""
for i in range(admm_iter):
for j in range(num_parties):
XTX_inv = get_ith_matrix(XTX_inv_lst, j, num_cols, num_cols, mat_type)
XTy = get_ith_matrix(XTy_lst, j, num_cols, 1, mat_type)
u = get_ith_matrix(u_list, j, num_cols, 1, mat_type)
w = admm_local(XTX_inv, XTy, u, z, rho, num_cols)
#reveal_all(w, "local_weight")
copy_ith_matrix(w_list, w, j, num_cols, 1)
new_u_lst, new_z = admm_coordinate(w_list, u_list, z, rho, l, num_cols, num_parties, mat_type)
mat_assign(u_list, new_u_lst)
mat_assign(z, new_z)
return z
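# Hedged summary (not part of this library) of the cleartext ADMM lasso
# iteration that the secure loop above mirrors, with P = num_parties and
# S_t the soft-thresholding operator used in soft_threshold_vec:
#   w_j = (X_j^T X_j + rho*I)^{-1} (X_j^T y_j + rho*(z - u_j))   # admm_local
#   z   = S_{l/(rho*P)}( mean_j(w_j) + mean_j(u_j) )             # admm_coordinate
#   u_j = u_j + w_j - z                                          # admm_coordinate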
|
11583218
|
from django import template
from socialregistration.templatetags import resolve, get_bits
register = template.Library()
@register.tag
def openid_form(parser, token):
"""
Render OpenID form. Allows to pre set the provider::
{% openid_form "https://www.google.com/accounts/o8/id" %}
Also creates custom button URLs by concatenating all arguments
after the provider's URL
{% openid_form "https://www.google.com/accounts/o8/id" STATIC_URL "image/for/google.jpg" %}
"""
bits = get_bits(token)
if len(bits) > 1:
return FormNode(bits[0], bits[1:])
if len(bits) == 1:
return FormNode(bits[0])
return FormNode(None)
class FormNode(template.Node):
    def __init__(self, provider, params=None):
        self.provider = provider
        self.params = params or []
def render(self, context):
if self.provider:
provider = resolve(self.provider, context)
else:
provider = None
if self.params:
button = ''.join([resolve(bit, context) for bit in self.params])
else:
button = None
return template.loader.render_to_string(
'socialregistration/openid/form.html',{
'provider': provider,
'button': button},
context_instance = context)
|
11583231
|
from django.test import SimpleTestCase
from mock import Mock, patch
from cms.serializers import LandingPageSerializer
class LandingPageSerializerTestCase(SimpleTestCase):
def test_serialize(self):
landing_page = Mock()
landing_page.fields = {
'navbar_title_value': 'a',
'navbar_subtitle_value': 'b',
'demo_video_text_value': 'What is CPDP?'
}
serializer = LandingPageSerializer(landing_page)
fields = {
field['name']: field
for field in serializer.data['fields']
}
self.assertDictEqual(fields['navbar_title'], {
'name': 'navbar_title',
'type': 'rich_text',
'value': 'a'
})
self.assertDictEqual(fields['navbar_subtitle'], {
'name': 'navbar_subtitle',
'type': 'rich_text',
'value': 'b'
})
self.assertDictEqual(fields['demo_video_text'], {
'name': 'demo_video_text',
'type': 'rich_text',
'value': 'What is CPDP?'
})
def test_update(self):
data = {
'fields': [{
'name': 'navbar_title',
'type': 'rich_text',
'value': {
'blocks': [
{
'data': {},
'depth': 0,
'entityRanges': [],
'inlineStyleRanges': [],
'key': 'abc12',
'text': 'text',
'type': 'unstyled'
}
],
'entityMap': {}
}
}]
}
landing_page = Mock()
landing_page.save = Mock()
landing_page.fields = dict()
serializer = LandingPageSerializer(landing_page, data=data)
self.assertTrue(serializer.is_valid())
serializer.save()
landing_page.save.assert_called()
self.assertDictEqual(landing_page.fields, {
'navbar_title_type': 'rich_text',
'navbar_title_value': {
'blocks': [
{
'data': {},
'depth': 0,
'entityRanges': [],
'inlineStyleRanges': [],
'key': 'abc12',
'text': 'text',
'type': 'unstyled'
}
],
'entityMap': {}
}
})
def test_create(self):
data = {
'fields': [{
'name': 'navbar_title',
'type': 'rich_text',
'value': {
'blocks': [
{
'data': {},
'depth': 0,
'entityRanges': [],
'inlineStyleRanges': [],
'key': 'abc12',
'text': 'text',
'type': 'unstyled'
}
],
'entityMap': {}
}
}]
}
with patch('cms.serializers.SlugPage.objects.create') as mock_func:
serializer = LandingPageSerializer(data=data)
self.assertTrue(serializer.is_valid())
serializer.save()
mock_func.assert_called_with(**{
'fields': {
'navbar_title_type': 'rich_text',
'navbar_title_value': {
'blocks': [
{
'data': {},
'depth': 0,
'entityRanges': [],
'inlineStyleRanges': [],
'key': 'abc12',
'text': 'text',
'type': 'unstyled'
}
],
'entityMap': {}
}
},
'slug': 'landing-page',
'serializer_class': 'LandingPageSerializer'
})
|
11583246
|
import torch
def bbox_is_valid_vectorized(bbox: torch.Tensor):
validity = bbox[..., :2] < bbox[..., 2:]
return torch.logical_and(validity[..., 0], validity[..., 1])
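# Hedged usage sketch (not from the original file). It assumes boxes are given
# as (x1, y1, x2, y2), so a box is valid exactly when x1 < x2 and y1 < y2.
if __name__ == "__main__":
    boxes = torch.tensor([[0.0, 0.0, 4.0, 3.0],   # valid
                          [5.0, 5.0, 2.0, 6.0]])  # invalid: x1 >= x2
    print(bbox_is_valid_vectorized(boxes))  # tensor([ True, False])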
|
11583265
|
import diffractsim
diffractsim.set_backend("CPU") #Change the string to "CUDA" to use GPU acceleration
from diffractsim import MonochromaticField, GaussianBeam,Lens,ApertureFromImage, nm, mm, cm
F = MonochromaticField(
wavelength=488 * nm, extent_x=19. * mm, extent_y=19. * mm, Nx=2000, Ny=2000,intensity = 0.2
)
F.add(GaussianBeam(4*mm))
F.add(Lens(f = 60*cm))
F.propagate(30*cm)
F.add(ApertureFromImage("./apertures/QWT.png", image_size = (10. * mm, 10. * mm), simulation = F))
F.propagate(30*cm)
rgb = F.get_colors()
F.plot_colors(rgb, xlim=[-6.0*mm,6.0*mm], ylim=[-6.0*mm,6.0*mm])
|
11583290
|
import socket
from queue import Queue
import turtle
from turtle import Turtle
from threading import Thread, current_thread
from port import PORT
serverSocket = None
cmd_queue = Queue()
class Move:
def __init__(self, parts):
self.name = parts[0]
self.x = int(parts[1])
self.y = int(parts[2])
self.color = parts[3]
turtles = {} # Client name -> turtle
def handle_client(client_socket, address):
print('handle_client on ' + current_thread().name)
client_socket.send("Hi there\n".encode())
while True:
line = client_socket.recv(1024).decode()
parts = line.split('\t')
if len(parts) == 4:
cmd_queue.put(Move(parts))
print(address, line)
client_socket.send('Thanks for that\n'.encode())
def listen_thread():
global serverSocket
print('listen_thread on ' + current_thread().name)
serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serverSocket.bind(('', PORT))
serverSocket.listen(100)
while True:
print('Game server waiting for connection')
(client_socket, address) = serverSocket.accept()
print('How exciting! A connection from', address)
Thread(target=handle_client, args=(client_socket, address)).start()
Thread(target=listen_thread).start()
turtle_shapes = ('arrow', 'circle', 'square', 'triangle', 'classic', 'turtle')
def add_turtle(name):
t = Turtle()
t.speed('fast')
#t.penup()
t.shape(turtle_shapes[len(turtles) % len(turtle_shapes)])
turtles[name] = t
t.setheading(90) # Point up
return t
try:
turtle.setup(650, 650)
turtle.hideturtle()
for n in range(100000):
cmd = cmd_queue.get()
client_turtle = turtles.get(cmd.name) or add_turtle(cmd.name)
        if n % 50 == 0:  # Clear all turtles periodically
            for t in turtles.values():  # avoid shadowing the `turtle` module
                t.clear()
client_turtle.pencolor(cmd.color)
client_turtle.goto(cmd.x, cmd.y)
except KeyboardInterrupt:
print('Stopping server')
serverSocket.close()
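# Hedged client-side sketch (an assumption, not part of this server script):
# each command is one tab-separated message "name\tx\ty\tcolor", matching the
# Move parser in handle_client above, with PORT taken from the same port.py.
#
#   import socket
#   from port import PORT
#   s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   s.connect(('localhost', PORT))
#   s.recv(1024)                               # greeting: "Hi there"
#   s.send('alice\t100\t-50\tred'.encode())    # draw alice's turtle at (100, -50)
#   print(s.recv(1024).decode())               # "Thanks for that"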
|
11583298
|
from . import *
# Declaration of models available
__all__=[
'oicr_lambda_log_distillation',
]
|
11583338
|
import os  # os.path.basename is used below when opening and saving files
from tkinter import *
from tkinter import filedialog, colorchooser, font
from tkinter.messagebox import *
from tkinter.filedialog import *
def change_color():
color = colorchooser.askcolor(title="pick a color...or else")
text_area.config(fg=color[1])
def change_font(*args):
text_area.config(font=(font_name.get(), size_box.get()))
def new_file():
window.title("Untitled")
text_area.delete(1.0, END)
def open_file():
    file = askopenfilename(defaultextension=".txt",
                           filetypes=[("All Files", "*.*"),
                                      ("Text Documents", "*.txt")])
    if not file:
return
else:
try:
window.title(os.path.basename(file))
text_area.delete(1.0, END)
file = open(file, "r")
text_area.insert(1.0, file.read())
except Exception:
print("couldn't read file")
finally:
file.close()
def save_file():
file = filedialog.asksaveasfilename(initialfile='unititled.txt',
defaultextension=".txt",
filetypes=[("All Files", "*.*"),
("Text Documents", "*.txt")])
    if not file:
return
else:
try:
window.title(os.path.basename(file))
file = open(file, "w")
file.write(text_area.get(1.0, END))
except Exception:
print("couldn't save file")
finally:
file.close()
def cut():
text_area.event_generate("<<Cut>>")
def copy():
text_area.event_generate("<<Copy>>")
def paste():
text_area.event_generate("<<Paste>>")
def about():
showinfo("About this program", "This is a program written by YOUUUUU!!!")
def quit():
window.destroy()
window = Tk()
window.title("Text editor program")
file = None
window_width = 500
window_height = 500
screen_width = window.winfo_screenwidth()
screen_height = window.winfo_screenheight()
x = int((screen_width / 2) - (window_width / 2))
y = int((screen_height / 2) - (window_height / 2))
window.geometry("{}x{}+{}+{}".format(window_width, window_height, x, y))
font_name = StringVar(window)
font_name.set("Arial")
font_size = StringVar(window)
font_size.set("25")
text_area = Text(window, font=(font_name.get(), font_size.get()))
scroll_bar = Scrollbar(text_area)
window.grid_rowconfigure(0, weight=1)
window.grid_columnconfigure(0, weight=1)
text_area.grid(sticky=N + E + S + W)
scroll_bar.pack(side=RIGHT, fill=Y)
text_area.config(yscrollcommand=scroll_bar.set)
frame = Frame(window)
frame.grid()
color_button = Button(frame, text="color", command=change_color)
color_button.grid(row=0, column=0)
font_box = OptionMenu(frame, font_name, *font.families(), command=change_font)
font_box.grid(row=0, column=1)
size_box = Spinbox(frame, from_=1, to=100, textvariable=font_size, command=change_font)
size_box.grid(row=0, column=2)
menu_bar = Menu(window)
window.config(menu=menu_bar)
file_menu = Menu(menu_bar, tearoff=0)
menu_bar.add_cascade(label="File", menu=file_menu)
file_menu.add_command(label="New", command=new_file)
file_menu.add_command(label="Open", command=open_file)
file_menu.add_command(label="Save", command=save_file)
file_menu.add_separator()
file_menu.add_command(label="Exit", command=quit)
edit_menu = Menu(menu_bar, tearoff=0)
menu_bar.add_cascade(label="Edit", menu=edit_menu)
edit_menu.add_command(label="Cut", command=cut)
edit_menu.add_command(label="Copy", command=copy)
edit_menu.add_command(label="Paste", command=paste)
help_menu = Menu(menu_bar, tearoff=0)
menu_bar.add_cascade(label="Help", menu=help_menu)
help_menu.add_command(label="About", command=about)
window.mainloop()
|
11583373
|
import numpy as np
import pytest
import scipy.stats as st
from sklearn.decomposition import PCA
from Starfish.emulator._utils import (
get_w_hat,
get_phi_squared,
get_altered_prior_factors,
Gamma,
)
class TestEmulatorUtils:
@pytest.fixture
def grid_setup(self, mock_hdf5_interface):
fluxes = np.array(list(mock_hdf5_interface.fluxes))
# Normalize to an average of 1 to remove uninteresting correlation
fluxes /= fluxes.mean(1, keepdims=True)
# Center and whiten
flux_mean = fluxes.mean(0)
fluxes -= flux_mean
flux_std = fluxes.std(0)
fluxes /= flux_std
# Perform PCA using sklearn
default_pca_kwargs = dict(n_components=0.99, svd_solver="full")
pca = PCA(**default_pca_kwargs)
weights = pca.fit_transform(fluxes)
eigenspectra = pca.components_
yield eigenspectra, fluxes
def test_altered_lambda_xi(self, grid_setup):
a_p, b_p = get_altered_prior_factors(*grid_setup)
assert np.isfinite(a_p)
assert np.isfinite(b_p)
def test_w_hat(self, grid_setup):
eigs, fluxes = grid_setup
w_hat = get_w_hat(eigs, fluxes)
assert len(w_hat) == len(fluxes) * len(eigs)
assert np.all(np.isfinite(w_hat))
def test_phi_squared(self, grid_setup):
eigs, fluxes = grid_setup
M = len(fluxes)
m = len(eigs)
phi2 = get_phi_squared(eigs, M)
assert phi2.shape == (M * m, M * m)
assert np.all(np.isfinite(phi2))
@pytest.mark.parametrize("params", [(1, 0.001), (2, 0.075)])
def test_gamma_dist(self, params):
a, b = params
mine = Gamma(a, b)
theirs = st.gamma(a, scale=1 / b)
x = np.linspace(1e-6, 1e4)
assert np.allclose(mine.logpdf(x), theirs.logpdf(x))
|
11583378
|
from django.contrib.contenttypes.models import ContentType
from django import forms
from hyperadmin.resources.models.resources import InlineModelResource
class GenericInlineModelResource(InlineModelResource):
model = None
ct_field = "content_type"
ct_fk_field = "object_id"
def post_register(self):
self._ct_field = self.opts.get_field(self.ct_field)
self._ct_fk_field = self.opts.get_field(self.ct_fk_field)
if self.rel_name is None:
self.rel_name = '-'.join((
self.opts.app_label, self.opts.object_name.lower(),
self.ct_field, self.ct_fk_field,
))
super(InlineModelResource, self).post_register()
@property
def content_type(self):
return ContentType.objects.get_for_model(self.model)
def get_queryset(self, parent):
queryset = self.resource_adaptor.objects.all()
queryset = queryset.filter(**{
self.ct_field: self.content_type,
self.ct_fk_field: parent.pk,
})
if not self.has_create_permission():
queryset = queryset.none()
return queryset
def get_primary_query(self, **kwargs):
return self.get_queryset(parent=self.state['parent'].instance)
def get_form_class(self):
if self.form_class:
return self.form_class
resource = self
class AdminForm(forms.ModelForm):
state = self.state
def save(self, commit=True):
instance = super(AdminForm, self).save(commit=False)
setattr(instance, resource._ct_field.get_attname(), resource.content_type.pk)
setattr(instance, resource._ct_fk_field.get_attname(), self.state['parent'].instance.pk)
if commit:
instance.save()
return instance
class Meta:
model = self.model
exclude = self.get_exclude() + [self.ct_field, self.ct_fk_field]
#TODO formfield overides
#TODO fields
return AdminForm
|
11583383
|
from numpy.random import random_integers
from numpy.random import randn
import numpy as np
import timeit
import argparse
import matplotlib.pyplot as plt
from joblib import Parallel
from joblib import delayed
import multiprocessing as mp
def simulate(size):
n = 0
mean = 0
M2 = 0
speed = randn(10000)
for i in range(1000):
n = n + 1
indices = random_integers(0, len(speed)-1, size=size)
x = (1 + speed[indices]).prod()
delta = x - mean
mean = mean + delta/n
M2 = M2 + delta*(x - mean)
return mean
def serial():
start = timeit.default_timer()
for i in range(10, 50):
simulate(i)
end = timeit.default_timer() - start
print("Serial time", end)
return end
def parallel(nprocs):
start = timeit.default_timer()
Parallel(nprocs)(delayed(simulate)(i) for i in range(10, 50))
end = timeit.default_timer() - start
print(nprocs, "Parallel time", end)
return end
if __name__ == "__main__":
ratios = []
baseline = serial()
for i in range(1, mp.cpu_count()):
ratios.append(baseline/parallel(i))
plt.xlabel('# processes')
plt.ylabel('Serial/Parallel')
plt.plot(np.arange(1, mp.cpu_count()), ratios)
plt.grid(True)
plt.show()
|
11583387
|
from debpackager.packages.general_package import GeneralPackage
from debpackager.utils.general import create_virtual_env, \
install_deb_dependencies
import debpackager.packages.conf.configurations as cfg
class Python(GeneralPackage):
def __init__(self, kwargs):
super(Python, self).__init__(**kwargs)
self.install_debians = kwargs.get('install_dependencies')
def build(self):
if self.install_debians:
install_deb_dependencies(self.extra_args)
for debian in self.extra_args.get('pom').project.get('debians', []):
install_path = debian.get('install_path')
ve_args = debian.get('ve_args', [])
create_virtual_env(self.project_path, install_path, ve_args)
super(Python, self).build()
# virtualenv dir will be deleted if --no-clean flag is given
self.extra_files.append(cfg.VIRTUAL_ENV_PATH)
super(Python, self).build()
|
11583393
|
import asyncio
import os
import pytest
import sys
import tempfile
import time
import ray
from ray._private.test_utils import Semaphore
def test_nested_tasks(shutdown_only):
ray.init(num_cpus=1)
@ray.remote
class Counter:
def __init__(self):
self.count = 0
def inc(self):
self.count += 1
            # Since we relax the cap after a timeout we can have slightly more
            # than 1 task. We should never reach 20, though, since that would
            # take 2^20 * 10ms.
assert self.count < 20
def dec(self):
self.count -= 1
counter = Counter.remote()
@ray.remote(num_cpus=1)
def g():
return None
@ray.remote(num_cpus=1)
def f():
ray.get(counter.inc.remote())
res = ray.get(g.remote())
ray.get(counter.dec.remote())
return res
ready, _ = ray.wait(
[f.remote() for _ in range(1000)], timeout=60.0, num_returns=1000
)
assert len(ready) == 1000, len(ready)
# Ensure the assertion in `inc` didn't fail.
ray.get(ready)
def test_recursion(shutdown_only):
ray.init(num_cpus=1)
@ray.remote
def summer(n):
if n == 0:
return 0
return n + ray.get(summer.remote(n - 1))
assert ray.get(summer.remote(10)) == sum(range(11))
def test_out_of_order_scheduling(shutdown_only):
"""Ensure that when a task runs before its dependency, and they're of the same
scheduling class, the dependency is eventually able to run."""
ray.init(num_cpus=1)
@ray.remote
def foo(arg, path):
(ref,) = arg
should_die = not os.path.exists(path)
with open(path, "w") as f:
f.write("")
if should_die:
print("dying!!!")
os._exit(-1)
if ref:
print("hogging the only available slot for a while")
ray.get(ref)
return "done!"
with tempfile.TemporaryDirectory() as tmpdir:
path = f"{tmpdir}/temp.txt"
first = foo.remote((None,), path)
second = foo.remote((first,), path)
print(ray.get(second))
def test_limit_concurrency(shutdown_only):
ray.init(num_cpus=1)
block_task = Semaphore.remote(0)
block_driver = Semaphore.remote(0)
ray.get([block_task.locked.remote(), block_driver.locked.remote()])
@ray.remote(num_cpus=1)
def foo():
ray.get(block_driver.release.remote())
ray.get(block_task.acquire.remote())
refs = [foo.remote() for _ in range(20)]
block_driver_refs = [block_driver.acquire.remote() for _ in range(20)]
# Some of the tasks will run since we relax the cap, but not all because it
# should take exponentially long for the cap to be increased.
ready, not_ready = ray.wait(block_driver_refs, timeout=10, num_returns=20)
assert len(not_ready) >= 1
# Now the first instance of foo finishes, so the second starts to run.
ray.get([block_task.release.remote() for _ in range(19)])
ready, not_ready = ray.wait(block_driver_refs, timeout=10, num_returns=20)
assert len(not_ready) == 0
ready, not_ready = ray.wait(refs, num_returns=20, timeout=15)
assert len(ready) == 19
assert len(not_ready) == 1
def test_zero_cpu_scheduling(shutdown_only):
ray.init(num_cpus=1)
block_task = Semaphore.remote(0)
block_driver = Semaphore.remote(0)
@ray.remote(num_cpus=0)
def foo():
ray.get(block_driver.release.remote())
ray.get(block_task.acquire.remote())
foo.remote()
foo.remote()
ray.get(block_driver.acquire.remote())
block_driver_ref = block_driver.acquire.remote()
# Both tasks should be running, so the driver should be unblocked.
timeout_value = 5 if sys.platform == "win32" else 1
_, not_ready = ray.wait([block_driver_ref], timeout=timeout_value)
assert len(not_ready) == 0
def test_exponential_wait(shutdown_only):
ray.init(num_cpus=2)
num_tasks = 6
@ray.remote(num_cpus=0)
class Barrier:
def __init__(self, limit):
self.i = 0
self.limit = limit
async def join(self):
self.i += 1
while self.i < self.limit:
await asyncio.sleep(1)
b = Barrier.remote(num_tasks)
@ray.remote
def f(i, start):
delta = time.time() - start
print("Launch", i, delta)
ray.get(b.join.remote())
return delta
start = time.time()
results = ray.get([f.remote(i, start) for i in range(num_tasks)])
last_wait = results[-1] - results[-2]
second_last = results[-2] - results[-3]
    # Assert that last_wait / second_last ~= 2, with a healthy buffer since CI
    # is noisy.
assert second_last < last_wait < 4 * second_last
assert 7 < last_wait
if __name__ == "__main__":
os.environ["RAY_worker_cap_enabled"] = "true"
sys.exit(pytest.main(["-v", __file__]))
|
11583405
|
from collections import Counter
# Score categories
ONES = "ONES"
TWOS = "TWOS"
THREES = "THREES"
FOURS = "FOURS"
FIVES = "FIVES"
SIXES = "SIXES"
FULL_HOUSE = "FULL_HOUSE"
FOUR_OF_A_KIND = "FOUR_OF_A_KIND"
STRAIGHT = "STRAIGHT"
LITTLE_STRAIGHT = "LITTLE_STRAIGHT"
BIG_STRAIGHT = "BIG_STRAIGHT"
CHOICE = "CHOICE"
YACHT = "YACHT"
# Score per category
scores = {
ONES: 1,
TWOS: 2,
THREES: 3,
FOURS: 4,
FIVES: 5,
SIXES: 6,
STRAIGHT: 30,
YACHT: 50,
}
def score_ones(dice):
return scores[ONES] * dice.count(1)
def score_twos(dice):
return scores[TWOS] * dice.count(2)
def score_threes(dice):
return scores[THREES] * dice.count(3)
def score_fours(dice):
return scores[FOURS] * dice.count(4)
def score_fives(dice):
return scores[FIVES] * dice.count(5)
def score_sixes(dice):
return scores[SIXES] * dice.count(6)
def score_full_house(dice):
return sum(dice) if sorted(set(Counter(dice).values())) == [2, 3] else 0
def score_four_of_a_kind(dice):
counter = Counter(dice)
if 4 in set(counter.values()):
for k, v in counter.items():
if v == 4:
return k * 4
if 5 in set(counter.values()):
return dice[0] * 4
return 0
def score_little_straight(dice):
return scores[STRAIGHT] if sorted(dice) == [1, 2, 3, 4, 5] else 0
def score_big_straight(dice):
return scores[STRAIGHT] if sorted(dice) == [2, 3, 4, 5, 6] else 0
def score_choice(dice):
return sum(dice)
def score_yacht(dice):
return scores[YACHT] if len(set(dice)) == 1 else 0
def score(dice, category):
if (category == ONES):
return score_ones(dice)
if (category == TWOS):
return score_twos(dice)
if (category == THREES):
return score_threes(dice)
if (category == FOURS):
return score_fours(dice)
if (category == FIVES):
return score_fives(dice)
if (category == SIXES):
return score_sixes(dice)
if (category == FULL_HOUSE):
return score_full_house(dice)
if (category == FOUR_OF_A_KIND):
return score_four_of_a_kind(dice)
if (category == LITTLE_STRAIGHT):
return score_little_straight(dice)
if (category == BIG_STRAIGHT):
return score_big_straight(dice)
if (category == CHOICE):
return score_choice(dice)
if (category == YACHT):
return score_yacht(dice)
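# Hedged usage examples (not part of the original module), exercising the
# dispatch above on five-dice rolls:
if __name__ == "__main__":
    assert score([1, 1, 1, 1, 1], YACHT) == 50
    assert score([2, 2, 3, 3, 3], FULL_HOUSE) == 13
    assert score([6, 6, 6, 6, 3], FOUR_OF_A_KIND) == 24
    assert score([1, 2, 3, 4, 5], LITTLE_STRAIGHT) == 30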
|
11583451
|
from gen import *
from pdataset import *
import numpy as np
import argparse
from tempfile import TemporaryFile
from fg import Foreground, FGTextureType
import time
def save_to_file(npy_file_name, n_examples, dataset, use_patch_centers=False, e=16):
#The pentomino images
np_data = np.array(np.zeros(e**2))
#Target variables
np_targets = np.array(np.zeros(1))
if use_patch_centers:
np_patch_centers = np.array(np.zeros(64))
n_count = 0
for data in dataset:
if n_count == n_examples:
break
np_data = np.vstack((np_data, data[0]))
np_targets = np.vstack((np_targets, data[1]))
if use_patch_centers:
np_patch_centers = np.vstack((np_patch_centers, data[2]))
n_count += 1
np_data = np_data[1:]
np_data = np.float32(np_data)
np_targets = np_targets[1:]
np_targets = np.uint8(np_targets)
if use_patch_centers:
np_patch_centers = np_patch_centers[1:]
np_patch_centers = np.int8(np_patch_centers)
np.savez(npy_file_name, data=np_data, targets=np_targets, patch_centers=np_patch_centers)
else:
np.savez(npy_file_name, data=np_data, targets=np_targets)
print "Converted %s to a numpy array." % npy_file_name
if __name__=="__main__":
parser = argparse.ArgumentParser(description="premade script")
parser.add_argument("--seed", action="store", help="seed for the random number generator", type=int)
parser.add_argument("--no-of-exs", action="store", help="the number of examples", type=int, required=True)
parser.add_argument("--bg-texture-type",
action="store",
choices=["perlin", "hilbert", "plain"],
help="Determine the type of the texture for the background. Default is plain binary.")
parser.add_argument("--task",
action="store",
choices=[1, 2, 3, 4],
help="Determine the type of the texture for the background. Default is plain binary.",
default=1,
type=int)
parser.add_argument("--fg-texture-type", action="store",
choices=["gradient_rad", "gradient_lin", "plain"],
help="Determine the type of the texture for the foreground. Default is plain binary.")
parser.add_argument("--out-file-name", action="store", help="The output file name.", required=True)
parser.add_argument("--center-objects", action="store", choices=[0, 1], help="To center the \
objects set this flag to 1", default=0, type=int)
args = parser.parse_args()
seed = args.seed
task = args.task
no_of_exs = args.no_of_exs
texture_type = args.fg_texture_type
bg_texture_type = args.bg_texture_type
out_file_name = args.out_file_name
center_objs = args.center_objects
patch_size = (8, 8)
if out_file_name is None:
raise Exception("The output file name can not be empty.")
if seed is None:
seed = int(time.time())
if no_of_exs is None:
no_of_exs = 20000
if texture_type is None:
texture_type = "plain"
if texture_type == "plain":
fg = Foreground(size=patch_size, texture_type=FGTextureType.PlainBin)
elif texture_type == "gradient_rad":
fg = Foreground(size=patch_size, texture_type=FGTextureType.GradientRadial)
elif texture_type == "gradient_lin":
fg = Foreground(size=patch_size, texture_type=FGTextureType.GradientLinear)
texture = fg.generate_texture()
# PENTOMINO
pentomino_gen = lambda w, h: TwoGroups("pentl/pentn/pentp/pentf/penty/pentj/pentn2/pentq/pentf2/penty2",
seed,
w,
h,
use_patch_centers=True,
n1=1,
n2=2,
rot=True,
texture=texture,
scale=True,
center_objects=center_objs,
patch_size=patch_size,
task=task)
if bg_texture_type == "perlin":
enable_perlin = True
else:
enable_perlin = False
pentomino = lambda w, h: SpritePlacer(pentomino_gen(w, h), collision_check=True, enable_perlin=enable_perlin)
pentomino64x64 = pentomino(64, 64)
pentomino_dir = "./"
    assert pentomino_dir is not None, "Please specify the dataset path where you want to save your files."
pentomino64x64_file = pentomino_dir + out_file_name + "_seed_" + str(seed) + "_64patches" + ".npz"
print "Started saving pentomino64x64"
save_to_file(pentomino64x64_file, no_of_exs, pentomino64x64, use_patch_centers=True, e=64)
|
11583452
|
import random
def roll_dice():
dice_number = random.randint(1, 6)
return dice_number
print("===== Welcome to Dice Rolling Simulator =====")
while 1:
choice = input("Do you wanna roll a dice (y/n)")
if 'y' in choice.lower():
print("Rolling dice...")
number = roll_dice()
print("Dice has the number:", number)
elif 'n' in choice.lower():
print('Exiting...')
break
else:
print("Invalid input...please try again")
|
11583483
|
from django.conf.urls import url
from django_prometheus_metrics.views import MetricsView
urlpatterns = [
url(r'^metrics$', MetricsView.as_view(), name='prometheus-django-metrics')
]
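# Hedged note: django.conf.urls.url() is deprecated in recent Django releases
# and was removed in Django 4.0; on newer versions the equivalent pattern
# would be (sketch, assuming nothing else about this project):
#
#   from django.urls import re_path
#   urlpatterns = [
#       re_path(r'^metrics$', MetricsView.as_view(), name='prometheus-django-metrics'),
#   ]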
|
11583507
|
import numpy as np
import pytest
import sklearn
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.utils.validation import check_is_fitted
from sklearn.exceptions import NotFittedError
from distutils.version import LooseVersion
from dirty_cat import SuperVectorizer
from dirty_cat import GapEncoder
def check_same_transformers(expected_transformers: dict, actual_transformers: list):
# Construct the dict from the actual transformers
    actual_transformers_dict = {name: cols for name, trans, cols in actual_transformers}
assert actual_transformers_dict == expected_transformers
def _get_clean_dataframe():
"""
Creates a simple DataFrame with various types of data,
and without missing values.
"""
return pd.DataFrame({
'int': pd.Series([15, 56, 63, 12, 44], dtype='int'),
'float': pd.Series([5.2, 2.4, 6.2, 10.45, 9.], dtype='float'),
'str1': pd.Series(['public', 'private', 'private', 'private', 'public'], dtype='string'),
'str2': pd.Series(['officer', 'manager', 'lawyer', 'chef', 'teacher'], dtype='string'),
'cat1': pd.Series(['yes', 'yes', 'no', 'yes', 'no'], dtype='category'),
'cat2': pd.Series(['20K+', '40K+', '60K+', '30K+', '50K+'], dtype='category'),
})
def _get_dirty_dataframe():
"""
Creates a simple DataFrame with some missing values.
We'll use different types of missing values (np.nan, pd.NA, None)
to see how robust the vectorizer is.
"""
return pd.DataFrame({
'int': pd.Series([15, 56, pd.NA, 12, 44], dtype='Int64'),
'float': pd.Series([5.2, 2.4, 6.2, 10.45, np.nan], dtype='Float64'),
'str1': pd.Series(['public', np.nan, 'private', 'private', 'public'], dtype='object'),
'str2': pd.Series(['officer', 'manager', None, 'chef', 'teacher'], dtype='object'),
'cat1': pd.Series([np.nan, 'yes', 'no', 'yes', 'no'], dtype='object'),
'cat2': pd.Series(['20K+', '40K+', '60K+', '30K+', np.nan], dtype='object'),
})
def _test_possibilities(
X,
expected_transformers_df,
expected_transformers_2,
expected_transformers_np_no_cast,
expected_transformers_series,
expected_transformers_plain,
expected_transformers_np_cast,
):
"""
Do a bunch of tests with the SuperVectorizer.
We take some expected transformers results as argument. They're usually
lists or dictionaries.
"""
# Test with low cardinality and a StandardScaler for the numeric columns
vectorizer_base = SuperVectorizer(
cardinality_threshold=4,
# we must have n_samples = 5 >= n_components
high_card_cat_transformer=GapEncoder(n_components=2),
numerical_transformer=StandardScaler(),
)
    # Warning: order-dependent
vectorizer_base.fit_transform(X)
check_same_transformers(expected_transformers_df, vectorizer_base.transformers)
# Test with higher cardinality threshold and no numeric transformer
vectorizer_default = SuperVectorizer() # Using default values
vectorizer_default.fit_transform(X)
check_same_transformers(expected_transformers_2, vectorizer_default.transformers)
# Test with a numpy array
arr = X.to_numpy()
# Instead of the columns names, we'll have the column indices.
vectorizer_base.fit_transform(arr)
check_same_transformers(expected_transformers_np_no_cast, vectorizer_base.transformers)
# Test with pandas series
vectorizer_base.fit_transform(X['cat1'])
check_same_transformers(expected_transformers_series, vectorizer_base.transformers)
# Test casting values
vectorizer_cast = SuperVectorizer(
cardinality_threshold=4,
# we must have n_samples = 5 >= n_components
high_card_cat_transformer=GapEncoder(n_components=2),
numerical_transformer=StandardScaler(),
)
X_str = X.astype('object')
# With pandas
vectorizer_cast.fit_transform(X_str)
check_same_transformers(expected_transformers_plain, vectorizer_cast.transformers)
# With numpy
vectorizer_cast.fit_transform(X_str.to_numpy())
check_same_transformers(expected_transformers_np_cast, vectorizer_cast.transformers)
def test_with_clean_data():
"""
Defines the expected returns of the vectorizer in different settings,
and runs the tests with a clean dataset.
"""
X = _get_clean_dataframe()
# Define the transformers we'll use throughout the test.
expected_transformers_df = {
'numeric': ['int', 'float'],
'low_card_cat': ['str1', 'cat1'],
'high_card_cat': ['str2', 'cat2'],
}
expected_transformers_2 = {
'low_card_cat': ['str1', 'str2', 'cat1', 'cat2'],
}
expected_transformers_np_no_cast = {
'low_card_cat': [2, 4],
'high_card_cat': [3, 5],
'numeric': [0, 1]
}
expected_transformers_series = {
'low_card_cat': ['cat1'],
}
expected_transformers_plain = {
'high_card_cat': ['str2', 'cat2'],
'low_card_cat': ['str1', 'cat1'],
'numeric': ['int', 'float']
}
expected_transformers_np_cast = {
'numeric': [0, 1],
'low_card_cat': [2, 4],
'high_card_cat': [3, 5],
}
_test_possibilities(
X,
expected_transformers_df,
expected_transformers_2,
expected_transformers_np_no_cast,
expected_transformers_series,
expected_transformers_plain,
expected_transformers_np_cast,
)
def test_with_dirty_data():
"""
Defines the expected returns of the vectorizer in different settings,
and runs the tests with a dataset containing missing values.
"""
X = _get_dirty_dataframe()
# Define the transformers we'll use throughout the test.
expected_transformers_df = {
'numeric': ['int', 'float'],
'low_card_cat': ['str1', 'cat1'],
'high_card_cat': ['str2', 'cat2'],
}
expected_transformers_2 = {
'low_card_cat': ['str1', 'str2', 'cat1', 'cat2'],
}
expected_transformers_np_no_cast = {
'low_card_cat': [2, 4],
'high_card_cat': [3, 5],
'numeric': [0, 1],
}
expected_transformers_series = {
'low_card_cat': ['cat1'],
}
expected_transformers_plain = {
'high_card_cat': ['str2', 'cat2'],
'low_card_cat': ['str1', 'cat1'],
'numeric': ['int', 'float']
}
expected_transformers_np_cast = {
'numeric': [0, 1],
'low_card_cat': [2, 4],
'high_card_cat': [3, 5],
}
_test_possibilities(
X,
expected_transformers_df,
expected_transformers_2,
expected_transformers_np_no_cast,
expected_transformers_series,
expected_transformers_plain,
expected_transformers_np_cast,
)
def test_get_feature_names():
X = _get_clean_dataframe()
vectorizer_w_pass = SuperVectorizer(remainder='passthrough')
vectorizer_w_pass.fit(X)
if LooseVersion(sklearn.__version__) < LooseVersion('0.23'):
with pytest.raises(NotImplementedError):
# Prior to sklearn 0.23, ColumnTransformer.get_feature_names
# with "passthrough" transformer(s) raises a NotImplementedError
assert vectorizer_w_pass.get_feature_names()
assert vectorizer_w_pass.get_feature_names_out()
else:
expected_feature_names_pass = [ # Order matters. If it doesn't, convert to set.
'str1_private', 'str1_public',
'str2_chef', 'str2_lawyer', 'str2_manager', 'str2_officer', 'str2_teacher',
'cat1_no', 'cat1_yes', 'cat2_20K+', 'cat2_30K+', 'cat2_40K+', 'cat2_50K+', 'cat2_60K+',
'int', 'float'
]
assert vectorizer_w_pass.get_feature_names() == expected_feature_names_pass
assert vectorizer_w_pass.get_feature_names_out() == expected_feature_names_pass
vectorizer_w_drop = SuperVectorizer(remainder='drop')
vectorizer_w_drop.fit(X)
expected_feature_names_drop = [ # Order matters. If it doesn't, convert to set.
'str1_private', 'str1_public',
'str2_chef', 'str2_lawyer', 'str2_manager', 'str2_officer', 'str2_teacher',
'cat1_no', 'cat1_yes', 'cat2_20K+', 'cat2_30K+', 'cat2_40K+', 'cat2_50K+', 'cat2_60K+'
]
assert vectorizer_w_drop.get_feature_names() == expected_feature_names_drop
assert vectorizer_w_drop.get_feature_names_out() == expected_feature_names_drop
def test_fit():
# Simply checks sklearn's `check_is_fitted` function raises an error if
# the SuperVectorizer is instantiated but not fitted.
# See GH#193
sup_vec = SuperVectorizer()
with pytest.raises(NotFittedError):
if LooseVersion(sklearn.__version__) >= LooseVersion('0.22'):
assert check_is_fitted(sup_vec)
else:
assert check_is_fitted(sup_vec, attributes=dir(sup_vec))
def test_transform():
X = _get_clean_dataframe()
sup_vec = SuperVectorizer()
sup_vec.fit(X)
s = [34, 5.5, 'private', 'manager', 'yes', '60K+']
x = np.array(s).reshape(1, -1)
x_trans = sup_vec.transform(x)
assert (x_trans == [[1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 34, 5.5]]).all()
def fit_transform_equiv():
"""
We will test the equivalence between using `.fit_transform(X)`
    and `.fit(X).transform(X)`.
"""
X1 = _get_clean_dataframe()
X2 = _get_dirty_dataframe()
sup_vec1 = SuperVectorizer()
sup_vec2 = SuperVectorizer()
sup_vec3 = SuperVectorizer()
sup_vec4 = SuperVectorizer()
enc1_x1 = sup_vec1.fit_transform(X1)
enc2_x1 = sup_vec2.fit(X1).transform(X1)
enc1_x2 = sup_vec3.fit_transform(X2)
enc2_x2 = sup_vec4.fit(X2).transform(X2)
assert enc1_x1 == enc2_x1
assert sup_vec1 == sup_vec2
assert enc1_x2 == enc2_x2
assert sup_vec3 == sup_vec4
if __name__ == '__main__':
print('start test_super_vectorizer with clean df')
test_with_clean_data()
print('test_super_vectorizer with clean df passed')
print('start test_super_vectorizer with dirty df')
test_with_dirty_data()
print('test_super_vectorizer with dirty df passed')
print('start test_get_feature_names')
test_get_feature_names()
print('test_get_feature_names passed')
print('start test_fit')
test_fit()
print('test_fit passed')
print('start fit_transform_equiv')
fit_transform_equiv()
print('fit_transform_equiv passed')
print('Done')
|
11583576
|
from abc import ABCMeta, abstractmethod
class IperfPort:
pass
class TrafficStream:
def __init__(self, source, destination):
self.rate = None
self.frame_size = 1518
self.vlan = 0
self.l4stack = None
self.source = source
self.destination = destination
class L3Device:
def __init__(self, ip, mask, gateway, parent):
self.ip = ip
self.mask = mask
self.gateway = gateway
self.parent = parent
class Endpoint:
def __init__(self, name, comm):
self.name = name
self.comm = comm
self.devices = list()
class TrafficTester(metaclass=ABCMeta):
def __init__(self, streams):
self.traffic_streams = streams
self.duration = 0
self.warmup = 5
@abstractmethod
def config(self):
pass
@abstractmethod
def test(self):
pass
@abstractmethod
def cleanup(self):
pass
@staticmethod
def create(streams):
        # Factory that builds the concrete TrafficTester instance.
pass
class IperfTrafficTester(TrafficTester):
def config(self):
        # Implement the iperf configuration logic here.
pass
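# Hedged sketch of the factory referenced by TrafficTester.create (an
# assumption, not the project's actual dispatch logic). Note that
# IperfTrafficTester still needs concrete test()/cleanup() implementations
# before it can be instantiated, since those are declared @abstractmethod.
#
#   @staticmethod
#   def create(streams):
#       # With iperf as the only backend so far, dispatch is trivial.
#       return IperfTrafficTester(streams)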
|
11583607
|
import torch
import torch.nn as nn
from torch.autograd import Variable
import time
import numpy as np
import sys
sys.path.append('../')
sys.path.append('../../')
sys.path.append('../../../')
from table import get_occupancy_table
from functions.occupancy_to_topology import OccupancyToTopology
from parse_args import parse_args
# look-up-tables
acceptTopology = np.arange(256)
vertexTable=[ [0, 1, 0],
[1, 1, 0],
[1, 0, 0],
[0, 0, 0],
[0, 1, 1],
[1, 1, 1],
[1, 0, 1],
[0, 0, 1] ]
occupancyTable=get_occupancy_table()
# check the cuda extension or c extension
args = parse_args()
if args.with_cuda:
print "Testing CUDA extension..."
dtype = torch.cuda.FloatTensor
else:
print "Testing C extension..."
dtype = torch.FloatTensor
# get (WxH)xT topology map from (W+1)x(Hx1) occupancy map
# note here T=14 because of the inside/outside distinction
def occupancy_to_topology(occ):
W = occ.size()[0]-1
H = occ.size()[1]-1
D = occ.size()[2]-1
T = len(acceptTopology)
topology = Variable(torch.zeros(W*H*D, T)).type(torch.FloatTensor)
xv, yv, zv = np.meshgrid(range(W), range(H), range(D), indexing='ij')
xv = xv.flatten()
yv = yv.flatten()
zv = zv.flatten()
for i,j,k in zip(xv, yv, zv):
p_occ = []
for v in range(8):
p_occ.append( occ[i+vertexTable[v][0], j+vertexTable[v][1], k+vertexTable[v][2]] )
p_occ.append( 1 - occ[i+vertexTable[v][0], j+vertexTable[v][1], k+vertexTable[v][2]] )
for t in range(T):
topology_ind = acceptTopology[t]
p_accumu = 1
for v in range(8):
p_accumu = p_accumu*p_occ[ v*2 + int(occupancyTable[topology_ind][v]) ]
topology[i*H*D+j*D+k, t] = p_accumu
return topology
if __name__ == '__main__':
W = H = D = args.num_cells
T = 256
print "=========== Input ============="
occupancy = Variable(torch.rand(W+1, H+1, D+1).type(dtype), requires_grad=True)
rnd_weights = Variable(torch.rand(W*H*D, T).type(dtype))
print occupancy
print "============= cffi ============"
# forward
topology = OccupancyToTopology()(occupancy)
tf_c = time.time()
topology = OccupancyToTopology()(occupancy)
tf_c = time.time() - tf_c
print "cffi forward time: ", tf_c
print topology
# backward
tb_c = time.time()
torch.sum(torch.mul(topology, rnd_weights)).backward()
tb_c = time.time() - tb_c
print "cffi backward time: ", tb_c
grad_np = np.copy(occupancy.grad.data.cpu().numpy())
print grad_np
print "============= auto ============"
occupancy = Variable(occupancy.data.cpu(), requires_grad=True)
rnd_weights = Variable(rnd_weights.data.cpu())
# forward
tf_py = time.time()
topology_auto = occupancy_to_topology(occupancy)
tf_py = time.time()-tf_py
print "auto forward time: ", tf_py
print topology_auto
# backward
#occupancy.grad.data.zero_()
tb_py = time.time()
torch.sum(torch.mul(topology_auto, rnd_weights)).backward()
tb_py = time.time()-tb_py
print "auto backward time: ", tf_py
grad_auto_np = np.copy(occupancy.grad.data.cpu().numpy())
print grad_auto_np
print "========== summary ==========="
print "Forward difference between cffi and auto: ", np.sum(np.abs(topology.data.cpu().numpy()-topology_auto.data.numpy()))
print "Backward difference between cffi and auto: ", np.sum(np.abs(grad_np-grad_auto_np))
print "cffi forward time: %f, backward time: %f, full time: %f " % (tf_c, tb_c, tf_c+tb_c)
print "auto forward time: %f, backward time: %f, full time: %f " % (tf_py, tb_py, tf_py+tb_py)
print "ratio: ", (tf_py+tb_py)/(tf_c + tb_c)
|
11583612
|
import os
import ray
import sys
RAY_VERSION = "RAY_VERSION"
RAY_COMMIT = "RAY_HASH"
ray_version = os.getenv(RAY_VERSION)
ray_commit = os.getenv(RAY_COMMIT)
if __name__ == "__main__":
print("Sanity check python version: {}".format(sys.version))
assert (
ray_version == ray.__version__
), "Given Ray version {} is not matching with downloaded " "version {}".format(
ray_version, ray.__version__
)
assert (
ray_commit == ray.__commit__
), "Given Ray commit {} is not matching with downloaded " "version {}".format(
ray_commit, ray.__commit__
)
assert ray.__file__ is not None
ray.init()
assert ray.is_initialized()
@ray.remote
def return_arg(arg):
return arg
val = 3
print("Running basic sanity check.")
assert ray.get(return_arg.remote(val)) == val
ray.shutdown()
|
11583631
|
from pyradioconfig.parts.ocelot.profiles.Profile_Base import Profile_Base_Ocelot
from pyradioconfig.calculator_model_framework.interfaces.iprofile import IProfile
from pyradioconfig.parts.ocelot.profiles.frame_profile_inputs_common import frame_profile_inputs_common_ocelot
from pyradioconfig.parts.ocelot.profiles.sw_profile_outputs_common import sw_profile_outputs_common_ocelot
class Profile_Connect_Ocelot(Profile_Base_Ocelot):
def __init__(self):
self._profileName = "Connect"
self._readable_name = "Connect Profile"
self._category = ""
self._description = "Profile used for Connect phys"
self._default = False
self._activation_logic = ""
self._family = "ocelot"
self._frame_profile_inputs_common = frame_profile_inputs_common_ocelot()
self._sw_profile_outputs_common = sw_profile_outputs_common_ocelot()
"""
Builds inputs, forced, outputs into modem model
"""
def buildProfileModel(self, model):
# Start with base profile
profile = super(Profile_Connect_Ocelot, self).buildProfileModel(model)
# Start with a profile that has lots of knobs, and remove most of them.
# Remove those that are not in a certain category
for input in profile.inputs:
# Force things in these categories
if (input.category.startswith('frame_')) \
or (input.category == 'crc') \
or (input.category == 'whitening'):
# Don't force these specific inputs in the categories above
if (input._var._name == "crc_byte_endian") \
or (input._var._name == "crc_bit_endian") \
or (input._var._name == "white_poly"):
continue
self._removeVariableFromInputs(profile, input._var, input.default)
#Hidden inputs to allow for fixed frame length testing
IProfile.make_hidden_input(profile, model.vars.frame_length_type, 'frame_general',
readable_name="Frame Length Algorithm")
IProfile.make_hidden_input(profile, model.vars.fixed_length_size, category='frame_fixed_length',
readable_name="Fixed Payload Size", value_limit_min=0, value_limit_max=0x7fffffff)
return profile
def profile_calculate(self, model):
# frame_general
model.vars.frame_bitendian.value_forced = model.vars.frame_bitendian.var_enum.LSB_FIRST
model.vars.frame_length_type.value_forced = model.vars.frame_length_type.var_enum.VARIABLE_LENGTH
model.vars.header_en.value_forced = True
model.vars.frame_coding.value_forced = model.vars.frame_coding.var_enum.NONE
# frame_payload
model.vars.payload_white_en.value_forced = False
model.vars.payload_crc_en.value_forced = True
# frame_header
model.vars.header_size.value_forced = 1
model.vars.header_calc_crc.value_forced = False
model.vars.header_white_en.value_forced = False
# frame_fixed_length
model.vars.fixed_length_size.value_forced = 16
# frame_var_length
model.vars.var_length_numbits.value_forced = 7
model.vars.var_length_bitendian.value_forced = model.vars.var_length_bitendian.var_enum.LSB_FIRST
model.vars.var_length_shift.value_forced = 0
model.vars.var_length_minlength.value_forced = 5
model.vars.var_length_maxlength.value_forced = 127
model.vars.var_length_includecrc.value_forced = True
model.vars.var_length_adjust.value_forced = 0
# frame_type_length
model.vars.frame_type_loc.value_forced = 0
model.vars.frame_type_bits.value_forced = 3
model.vars.frame_type_lsbit.value_forced = 0
model.vars.frame_type_0_length.value_forced = 0
model.vars.frame_type_1_length.value_forced = 0
model.vars.frame_type_2_length.value_forced = 0
model.vars.frame_type_3_length.value_forced = 0
model.vars.frame_type_4_length.value_forced = 0
model.vars.frame_type_5_length.value_forced = 0
model.vars.frame_type_6_length.value_forced = 0
model.vars.frame_type_7_length.value_forced = 0
model.vars.frame_type_0_valid.value_forced = False
model.vars.frame_type_1_valid.value_forced = False
model.vars.frame_type_2_valid.value_forced = False
model.vars.frame_type_3_valid.value_forced = False
model.vars.frame_type_4_valid.value_forced = False
model.vars.frame_type_5_valid.value_forced = False
model.vars.frame_type_6_valid.value_forced = False
model.vars.frame_type_7_valid.value_forced = False
# crc
model.vars.crc_poly.value_forced = model.vars.crc_poly.var_enum.CCITT_16
model.vars.crc_seed.value_forced = 0x00000000
model.vars.crc_pad_input.value_forced = False
model.vars.crc_input_order.value_forced = model.vars.crc_input_order.var_enum.LSB_FIRST
model.vars.crc_invert.value_forced = False
# whitening
model.vars.white_seed.value_forced = 0
model.vars.white_output_bit.value_forced = 0
|