| code (string, lengths 22 to 1.05M) | apis (list, lengths 1 to 3.31k) | extract_api (string, lengths 75 to 3.25M) |
|---|---|---|
import os
from argparse import ArgumentParser
from dodo_commands import Dodo
def _args():
parser = ArgumentParser(
description=(
"Writes (or removes) a small script that activates the latest "
+ "Dodo Commands project"
)
)
parser.add_argument("status", choices=["on", "off"])
return Dodo.parse_args(parser)
if Dodo.is_main(__name__, safe=False):
args = _args()
for shell, activate_cmd in (
("bash", "$(dodo env --latest --shell=bash) &&"),
("fish", "eval (dodo env --latest --shell=fish); and"),
):
confd_dir = os.path.expanduser("~/.config/%s/conf.d" % shell)
if not os.path.exists(confd_dir):
Dodo.run(["mkdir", "-p", confd_dir])
script = os.path.join(confd_dir, "dodo_autostart." + shell)
if args.status == "on" and not os.path.exists(script):
with open(script, "w") as f:
f.write("# NOTE: automatically generated file, don't edit.\n")
f.write("%s dodo check-version --dodo --config\n" % activate_cmd)
if args.status == "off" and os.path.exists(script):
os.unlink(script)
|
[
"os.path.expanduser",
"argparse.ArgumentParser",
"os.unlink",
"os.path.exists",
"dodo_commands.Dodo.parse_args",
"os.path.join",
"dodo_commands.Dodo.is_main",
"dodo_commands.Dodo.run"
] |
[((371, 405), 'dodo_commands.Dodo.is_main', 'Dodo.is_main', (['__name__'], {'safe': '(False)'}), '(__name__, safe=False)\n', (383, 405), False, 'from dodo_commands import Dodo\n'), ((106, 232), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': "('Writes (or removes) a small script that activates the latest ' +\n 'Dodo Commands project')"}), "(description=\n 'Writes (or removes) a small script that activates the latest ' +\n 'Dodo Commands project')\n", (120, 232), False, 'from argparse import ArgumentParser\n'), ((342, 365), 'dodo_commands.Dodo.parse_args', 'Dodo.parse_args', (['parser'], {}), '(parser)\n', (357, 365), False, 'from dodo_commands import Dodo\n'), ((610, 659), 'os.path.expanduser', 'os.path.expanduser', (["('~/.config/%s/conf.d' % shell)"], {}), "('~/.config/%s/conf.d' % shell)\n", (628, 659), False, 'import os\n'), ((768, 818), 'os.path.join', 'os.path.join', (['confd_dir', "('dodo_autostart.' + shell)"], {}), "(confd_dir, 'dodo_autostart.' + shell)\n", (780, 818), False, 'import os\n'), ((675, 700), 'os.path.exists', 'os.path.exists', (['confd_dir'], {}), '(confd_dir)\n', (689, 700), False, 'import os\n'), ((714, 750), 'dodo_commands.Dodo.run', 'Dodo.run', (["['mkdir', '-p', confd_dir]"], {}), "(['mkdir', '-p', confd_dir])\n", (722, 750), False, 'from dodo_commands import Dodo\n'), ((1120, 1142), 'os.path.exists', 'os.path.exists', (['script'], {}), '(script)\n', (1134, 1142), False, 'import os\n'), ((1156, 1173), 'os.unlink', 'os.unlink', (['script'], {}), '(script)\n', (1165, 1173), False, 'import os\n'), ((858, 880), 'os.path.exists', 'os.path.exists', (['script'], {}), '(script)\n', (872, 880), False, 'import os\n')]
|
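For concreteness, a minimal sketch (not part of the dodo_commands API; the path and the two lines follow directly from the `f.write` calls above) of what the loop produces for bash when `status == "on"`:

```python
import os

# Path of the generated activation script for the bash shell.
script = os.path.expanduser("~/.config/bash/conf.d/dodo_autostart.bash")

# Contents written by the two f.write calls above.
expected_contents = (
    "# NOTE: automatically generated file, don't edit.\n"
    "$(dodo env --latest --shell=bash) && dodo check-version --dodo --config\n"
)
```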
import numpy as np
import jetyak
import jviz
import sensors
import shapefile
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import utm
from mpl_toolkits.basemap import Basemap
import mpl_toolkits.basemap as mb
from scipy import stats
def lat2str(deg):
    # Convert a decimal latitude to a degrees + decimal-minutes tick label.
    min = 60 * (deg - np.floor(deg))
    deg = np.floor(deg)
    dir = 'N'
    if deg < 0:
        if min != 0.0:
            deg += 1.0
            min -= 60.0
        dir = 'S'
    return (r"%d$\degree$ %g' %s") % (np.abs(deg), np.abs(min), dir)
def lon2str(deg):
    # Convert a decimal longitude to a degrees + decimal-minutes tick label.
    min = 60 * (deg - np.floor(deg))
    deg = np.floor(deg)
    dir = 'E'
    if deg < 0:
        if min != 0.0:
            deg += 1.0
            min -= 60.0
        dir = 'W'
    return (r"%d$\degree$ %g' %s") % (np.abs(deg), np.abs(min), dir)
if __name__ == '__main__':
#69.121595, -105.019215
base = Basemap(llcrnrlon=-170, llcrnrlat=0, urcrnrlon=-30, urcrnrlat=80,
resolution='l', projection='merc', suppress_ticks=True)
# base = Basemap(llcrnrlon=-120, llcrnrlat=68, urcrnrlon=-100, urcrnrlat=74,
# resolution='h', projection='merc', suppress_ticks=True)
# base.arcgisimage(service='World_Topo_Map', xpixels=1500, verbose=True)
base.drawcoastlines()
base.drawcountries()
# base.drawlakes()
# base.fillcontinents(color='coral',lake_color='aqua')
# base.drawlsmask(land_color='coral', ocean_color='aqua', lakes=True)
# base.drawparallels(np.arange(-80.,81.,2.),labels=[True,True,False,False],dashes=[2,2],color='white')
# base.drawmeridians(np.arange(-180.,181.,10.),labels=[True,True,True,False],dashes=[2,2],color='white')
# base.drawmapboundary(fill_color='aqua')
# base.drawrivers(linewidth=0.5, linestyle='solid', color='blue')
base.drawparallels(np.arange(-90.,91.,10.),labels=[True,True,False,False],dashes=[2,2],color='white')
base.drawmeridians(np.arange(-180.,181.,30.),labels=[False,False,False,True],dashes=[2,2],color='white')
base.drawparallels(np.arange(66.,67., 100.),labels=[False,False,False,True],dashes=[2,2],color='red')
base.drawstates(linewidth=2., color='grey')
base.bluemarble()
plt.show()
# base.scatter(dock_reference[1], dock_reference[0], s=500, marker='*', label='Freshwater Creek Mouth', zorder=10, edgecolor='k', facecolor='r')
# for radius in [500*i for i in range(10)]:
# lats, lons = getCircle(dock_reference[0], dock_reference[1], radius)
# base.plot(lons, lats, c='grey')
# if radius == 0:
# pass
# # plt.gca().annotate('Embayment', xy=(lons[270], lats[270]+0.001), xytext=(lons[270]+0.0005, lats[270]+0.002), fontsize=22, ha='center')
# # plt.gca().annotate('Freshwater Creek Mouth', xy=(lons[270], lats[270]+0.0005), fontsize=10, ha='right')
# else:
# plt.gca().annotate(str(radius)+'m', xy=(lons[270], lats[270]+0.0003), fontsize=22, ha='center')
# colors = np.flip(plt.cm.viridis(np.linspace(0,1,5)), axis=0)
# for i, m in enumerate(jy.mission[0:5]):
# base.scatter(m['Longitude'], m['Latitude'], label=date_labels[i], s=1, c=colors[i], zorder=10-i, lw=0)
# lgnd = plt.legend(loc='upper left')
# for handle in lgnd.legendHandles[1:]:
# handle.set_sizes([200])
# ax = plt.gca()
# def xformat(x, pos=None): return lon2str(x)
# def yformat(x, pos=None): return lat2str(x)
# ax.xaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(xformat))
# ax.yaxis.set_major_formatter(matplotlib.ticker.FuncFormatter(yformat))
# plt.show()
# plt.close()
|
[
"numpy.abs",
"matplotlib.pyplot.show",
"numpy.floor",
"numpy.arange",
"mpl_toolkits.basemap.Basemap"
] |
[((323, 336), 'numpy.floor', 'np.floor', (['deg'], {}), '(deg)\n', (331, 336), True, 'import numpy as np\n'), ((582, 595), 'numpy.floor', 'np.floor', (['deg'], {}), '(deg)\n', (590, 595), True, 'import numpy as np\n'), ((844, 969), 'mpl_toolkits.basemap.Basemap', 'Basemap', ([], {'llcrnrlon': '(-170)', 'llcrnrlat': '(0)', 'urcrnrlon': '(-30)', 'urcrnrlat': '(80)', 'resolution': '"""l"""', 'projection': '"""merc"""', 'suppress_ticks': '(True)'}), "(llcrnrlon=-170, llcrnrlat=0, urcrnrlon=-30, urcrnrlat=80,\n resolution='l', projection='merc', suppress_ticks=True)\n", (851, 969), False, 'from mpl_toolkits.basemap import Basemap\n'), ((2157, 2167), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2165, 2167), True, 'import matplotlib.pyplot as plt\n'), ((1785, 1813), 'numpy.arange', 'np.arange', (['(-90.0)', '(91.0)', '(10.0)'], {}), '(-90.0, 91.0, 10.0)\n', (1794, 1813), True, 'import numpy as np\n'), ((1891, 1921), 'numpy.arange', 'np.arange', (['(-180.0)', '(181.0)', '(30.0)'], {}), '(-180.0, 181.0, 30.0)\n', (1900, 1921), True, 'import numpy as np\n'), ((2000, 2028), 'numpy.arange', 'np.arange', (['(66.0)', '(67.0)', '(100.0)'], {}), '(66.0, 67.0, 100.0)\n', (2009, 2028), True, 'import numpy as np\n'), ((298, 311), 'numpy.floor', 'np.floor', (['deg'], {}), '(deg)\n', (306, 311), True, 'import numpy as np\n'), ((491, 502), 'numpy.abs', 'np.abs', (['deg'], {}), '(deg)\n', (497, 502), True, 'import numpy as np\n'), ((503, 514), 'numpy.abs', 'np.abs', (['min'], {}), '(min)\n', (509, 514), True, 'import numpy as np\n'), ((557, 570), 'numpy.floor', 'np.floor', (['deg'], {}), '(deg)\n', (565, 570), True, 'import numpy as np\n'), ((750, 761), 'numpy.abs', 'np.abs', (['deg'], {}), '(deg)\n', (756, 761), True, 'import numpy as np\n'), ((762, 773), 'numpy.abs', 'np.abs', (['min'], {}), '(min)\n', (768, 773), True, 'import numpy as np\n')]
|
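As a quick sanity check on the tick-label helpers defined in the script above (a minimal sketch; 69.121595 is the latitude noted in the script's own comment, and the `$\degree$` markup is matplotlib mathtext):

```python
# Assuming lat2str from the script above is in scope:
# 69.121595 -> 69 degrees and 60 * 0.121595 = 7.2957 minutes, northern hemisphere.
print(lat2str(69.121595))   # 69$\degree$ 7.2957' N
```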
# -*- coding: utf-8 -*-
"""
ShrinkageLoss
"""
import torch
import torch.nn as nn
class ShrinkageLoss(nn.Module):
""" ShrinkageLoss class.
Modified version of shrinkage loss tailored to images:
http://openaccess.thecvf.com/content_ECCV_2018/papers/Xiankai_Lu_Deep_Regression_Tracking_ECCV_2018_paper.pdf
It basically computes a point-wise shrinkage loss.
"""
def __init__(self, speed=10.0, loc=0.2, verbose=False):
""" Initialize ShrinkageLoss class with user-defined parameters.
Arguments:
            speed (float): Shrinkage speed, i.e., weight assigned to hard samples.
            loc (float): Shrinkage localization, i.e., threshold for hard mining.
verbose (bool): Whether the log will be shown in the shell.
"""
nn.Module.__init__(self)
self.shrink_speed = speed
self.shrink_loc = loc
def forward(self, estimate, ground_truth):
""" Calculate shrinkage loss between the estimate and grount truth, if any.
Otherwise, the loss is computed using the estimate, which is already
the difference to the ground truth or the parameters.
Arguments:
estimate (tensor): Estimate or delta (MxC, where M, C are
the number of points and channels, respectively).
ground_truth (tensor): Ground truth (optional). MxC, where M, C are
the number of points and channels, respectively
Return:
Mean per-point shrinkage loss (float)
"""
# Compute point errors (l2 norm).
l2_loss = torch.norm(estimate - ground_truth, p=2, dim=1)
# Compute mean shrinkage loss.
shrink_loss = torch.mul(l2_loss,l2_loss)/(
1.0 + torch.exp(self.shrink_speed*(self.shrink_loc - l2_loss)))
return torch.mean(shrink_loss)
|
[
"torch.mean",
"torch.norm",
"torch.mul",
"torch.exp",
"torch.nn.Module.__init__"
] |
[((823, 847), 'torch.nn.Module.__init__', 'nn.Module.__init__', (['self'], {}), '(self)\n', (841, 847), True, 'import torch.nn as nn\n'), ((1678, 1725), 'torch.norm', 'torch.norm', (['(estimate - ground_truth)'], {'p': '(2)', 'dim': '(1)'}), '(estimate - ground_truth, p=2, dim=1)\n', (1688, 1725), False, 'import torch\n'), ((1916, 1939), 'torch.mean', 'torch.mean', (['shrink_loss'], {}), '(shrink_loss)\n', (1926, 1939), False, 'import torch\n'), ((1796, 1823), 'torch.mul', 'torch.mul', (['l2_loss', 'l2_loss'], {}), '(l2_loss, l2_loss)\n', (1805, 1823), False, 'import torch\n'), ((1843, 1901), 'torch.exp', 'torch.exp', (['(self.shrink_speed * (self.shrink_loc - l2_loss))'], {}), '(self.shrink_speed * (self.shrink_loc - l2_loss))\n', (1852, 1901), False, 'import torch\n')]
|
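For reference, the quantity computed in `forward` above is the mean per-point shrinkage loss, with a = `shrink_speed`, c = `shrink_loc`, and l_m the per-point L2 error between estimate and ground truth:

```latex
\mathcal{L} = \frac{1}{M}\sum_{m=1}^{M} \frac{l_m^{2}}{1 + \exp\!\big(a\,(c - l_m)\big)},
\qquad l_m = \lVert \hat{y}_m - y_m \rVert_2 .
```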
import logging
from rest_framework import serializers
from rest_flex_fields.serializers import FlexFieldsSerializerMixin
from ..models import WorkflowTaskInstanceExecution
from .workflow_task_instance_execution_base_serializer import WorkflowTaskInstanceExecutionBaseSerializer
from .serializer_helpers import SerializerHelpers
from .task_execution_serializer import TaskExecutionSerializer
logger = logging.getLogger(__name__)
class WorkflowTaskInstanceExecutionSerializer(SerializerHelpers,
FlexFieldsSerializerMixin, WorkflowTaskInstanceExecutionBaseSerializer):
"""
WorkflowTaskInstanceExecutions hold the execution information
for a WorkflowTaskInstance (which holds a Task) for a specific
    WorkflowExecution (run of a Workflow).
"""
class Meta:
model = WorkflowTaskInstanceExecution
fields = ('uuid', 'workflow_execution',
'workflow_task_instance', 'task_execution',
'is_latest', 'created_at')
read_only_fields = [
'uuid', 'workflow_execution',
'workflow_task_instance', 'task_execution',
'is_latest', 'created_at',
]
task_execution = TaskExecutionSerializer(read_only=True)
|
[
"logging.getLogger"
] |
[((417, 444), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (434, 444), False, 'import logging\n')]
|
# target: export keras model we used as tensorflow model
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
import os
import os.path as osp
from keras import backend as K
import tensorflow as tf
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = 0.499
sess = tf.Session(config=config)
set_session(sess)
import sys
sys.path.append('.')
from apps.pspnet.pkg.pspnet import deeplearning as dpl
from apps.pspnet.pkg.pspnet.psp_tf.pspnet import PSPNet50
model=PSPNet50(
nb_classes=150,
input_shape=(473, 473),
weights="pspnet50_ade20k",
path="./pspnet/weights")
net_model=model.model
print('input is :', net_model.input.name)
print ('output is:', net_model.output.name)
output_graph_name = 'tensor_model.pb'
output_fld = './tensorflow_model/'
def freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True):
"""
    Freezes the state of a session into a pruned computation graph.
    Creates a new computation graph where variable nodes are replaced by
    constants taking their current value in the session. The new graph will be
    pruned so subgraphs that are not necessary to compute the requested
outputs are removed.
@param session The TensorFlow session to be frozen.
@param keep_var_names A list of variable names that should not be frozen,
or None to freeze all the variables in the graph.
@param output_names Names of the relevant graph outputs.
@param clear_devices Remove the device directives from the graph for better portability.
@return The frozen graph definition.
"""
from tensorflow.python.framework.graph_util import convert_variables_to_constants
graph = session.graph
with graph.as_default():
freeze_var_names = list(set(v.op.name for v in tf.global_variables()).difference(keep_var_names or []))
output_names = output_names or []
output_names += [v.op.name for v in tf.global_variables()]
input_graph_def = graph.as_graph_def()
if clear_devices:
for node in input_graph_def.node:
node.device = ""
frozen_graph = convert_variables_to_constants(session, input_graph_def,
output_names, freeze_var_names)
return frozen_graph
frozen_graph = freeze_session(K.get_session(), output_names=[net_model.output.op.name])
from tensorflow.python.framework import graph_io
graph_io.write_graph(frozen_graph, output_fld, output_graph_name, as_text=False)
print('saved the constant graph (ready for inference) at: ', osp.join(output_fld, output_graph_name))
|
[
"sys.path.append",
"keras.backend.get_session",
"apps.pspnet.pkg.pspnet.psp_tf.pspnet.PSPNet50",
"tensorflow.Session",
"tensorflow.ConfigProto",
"tensorflow.global_variables",
"tensorflow.python.framework.graph_util.convert_variables_to_constants",
"tensorflow.python.framework.graph_io.write_graph",
"keras.backend.tensorflow_backend.set_session",
"os.path.join"
] |
[((243, 259), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (257, 259), True, 'import tensorflow as tf\n'), ((368, 393), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (378, 393), True, 'import tensorflow as tf\n'), ((395, 412), 'keras.backend.tensorflow_backend.set_session', 'set_session', (['sess'], {}), '(sess)\n', (406, 412), False, 'from keras.backend.tensorflow_backend import set_session\n'), ((426, 446), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (441, 446), False, 'import sys\n'), ((571, 675), 'apps.pspnet.pkg.pspnet.psp_tf.pspnet.PSPNet50', 'PSPNet50', ([], {'nb_classes': '(150)', 'input_shape': '(473, 473)', 'weights': '"""pspnet50_ade20k"""', 'path': '"""./pspnet/weights"""'}), "(nb_classes=150, input_shape=(473, 473), weights='pspnet50_ade20k',\n path='./pspnet/weights')\n", (579, 675), False, 'from apps.pspnet.pkg.pspnet.psp_tf.pspnet import PSPNet50\n'), ((2626, 2711), 'tensorflow.python.framework.graph_io.write_graph', 'graph_io.write_graph', (['frozen_graph', 'output_fld', 'output_graph_name'], {'as_text': '(False)'}), '(frozen_graph, output_fld, output_graph_name, as_text=False\n )\n', (2646, 2711), False, 'from tensorflow.python.framework import graph_io\n'), ((2517, 2532), 'keras.backend.get_session', 'K.get_session', ([], {}), '()\n', (2530, 2532), True, 'from keras import backend as K\n'), ((2769, 2808), 'os.path.join', 'osp.join', (['output_fld', 'output_graph_name'], {}), '(output_fld, output_graph_name)\n', (2777, 2808), True, 'import os.path as osp\n'), ((2311, 2403), 'tensorflow.python.framework.graph_util.convert_variables_to_constants', 'convert_variables_to_constants', (['session', 'input_graph_def', 'output_names', 'freeze_var_names'], {}), '(session, input_graph_def, output_names,\n freeze_var_names)\n', (2341, 2403), False, 'from tensorflow.python.framework.graph_util import convert_variables_to_constants\n'), ((2108, 2129), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (2127, 2129), True, 'import tensorflow as tf\n'), ((1963, 1984), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (1982, 1984), True, 'import tensorflow as tf\n')]
|
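A minimal sketch (assuming a TensorFlow 1.x environment, consistent with the `tf.Session`/`tf.ConfigProto` usage above) of loading the exported `tensor_model.pb` back for inference; the input and output tensor names are whatever the script's `print` statements report and are left as placeholders here:

```python
import tensorflow as tf

# Read the frozen graph written by graph_io.write_graph above.
with tf.gfile.GFile('./tensorflow_model/tensor_model.pb', 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name='')
    # Placeholders: use the names printed by the export script
    # (net_model.input.name / net_model.output.name).
    # input_tensor = graph.get_tensor_by_name('<input name>:0')
    # output_tensor = graph.get_tensor_by_name('<output name>:0')
```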
# -*- coding: utf-8 -*-
from data.reader import wiki_from_pickles, corpus_to_pickle
from data.corpus import Sentences
from stats.stat_functions import compute_freqs, merge_to_joint
from stats.entropy import typicality
from filtering.typicality import setup_filtering, filter_typicality_incremental
from operator import lt, gt
import argparse
def parse_args():
p = argparse.ArgumentParser()
p.add_argument("--lang", type=str)
p.add_argument("--n_tokens", type=int)
p.add_argument("--factor", type=float,
help="The factor to multiply epsilon with; determines"
"the degree of atypicality.")
args = p.parse_args()
return args.lang, args.n_tokens, args.factor
if __name__ == "__main__":
lang, n, factor = parse_args()
big_n = lambda wiki: len([w for a in wiki for s in a for w in s])*.49
setup_m = 100
m = 10
wiki = list(wiki_from_pickles("data/"+lang+"_pkl"))
sents = [s for a in wiki for s in a]
zipf_model, rank_dict, mean_typ, std_typ, auto_typ = setup_filtering(wiki,
big_n(wiki),
n,
setup_m)
mean_corrected = abs(mean_typ - auto_typ)
epsilon_f_plus = mean_corrected + std_typ*factor
epsilon_f_minus = - epsilon_f_plus
print("\nModel and Epsilon established")
print(auto_typ, mean_typ, std_typ)
print(epsilon_f_minus, epsilon_f_plus)
for m_i in range(m):
print("started ", m_i)
filtered = list(filter_typicality_incremental(sents, zipf_model,
rank_dict, auto_typ, n, epsilon_f_minus, lt))
filtered_freqs = compute_freqs(Sentences(filtered))
print("filtered ", m_i, " typicality: ",
typicality(zipf_model, merge_to_joint(rank_dict, filtered_freqs)))
name = "_".join((str(n), str(factor), str(m_i)))
corpus_to_pickle(filtered, "results/" + lang + "/TF", name)
|
[
"argparse.ArgumentParser",
"stats.stat_functions.merge_to_joint",
"filtering.typicality.filter_typicality_incremental",
"data.corpus.Sentences",
"data.reader.corpus_to_pickle",
"data.reader.wiki_from_pickles"
] |
[((372, 397), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (395, 397), False, 'import argparse\n'), ((918, 960), 'data.reader.wiki_from_pickles', 'wiki_from_pickles', (["('data/' + lang + '_pkl')"], {}), "('data/' + lang + '_pkl')\n", (935, 960), False, 'from data.reader import wiki_from_pickles, corpus_to_pickle\n'), ((2085, 2144), 'data.reader.corpus_to_pickle', 'corpus_to_pickle', (['filtered', "('results/' + lang + '/TF')", 'name'], {}), "(filtered, 'results/' + lang + '/TF', name)\n", (2101, 2144), False, 'from data.reader import wiki_from_pickles, corpus_to_pickle\n'), ((1699, 1796), 'filtering.typicality.filter_typicality_incremental', 'filter_typicality_incremental', (['sents', 'zipf_model', 'rank_dict', 'auto_typ', 'n', 'epsilon_f_minus', 'lt'], {}), '(sents, zipf_model, rank_dict, auto_typ, n,\n epsilon_f_minus, lt)\n', (1728, 1796), False, 'from filtering.typicality import setup_filtering, filter_typicality_incremental\n'), ((1858, 1877), 'data.corpus.Sentences', 'Sentences', (['filtered'], {}), '(filtered)\n', (1867, 1877), False, 'from data.corpus import Sentences\n'), ((1966, 2007), 'stats.stat_functions.merge_to_joint', 'merge_to_joint', (['rank_dict', 'filtered_freqs'], {}), '(rank_dict, filtered_freqs)\n', (1980, 2007), False, 'from stats.stat_functions import compute_freqs, merge_to_joint\n')]
|
import torch
import torch.utils.data as data
from torchvision.datasets.folder import has_file_allowed_extension, is_image_file, IMG_EXTENSIONS, pil_loader, accimage_loader,default_loader
from PIL import Image
import sys
import os
import os.path
import numpy as np
from random import shuffle
REGIONS_DICT={'Alabama': 'South', 'Arizona': 'SW',
'California': 'Pacific',
'Florida': 'South',
'Indiana': 'MW',
'Iowa': 'MW',
'Kansas': 'MW',
'Massachusetts': 'NE',
'Michigan': 'MW',
'Missouri': 'South',
'Montana': 'RM',
'New-York': 'MA',
'North-Carolina': 'South',
'Ohio': 'MW',
'Oklahoma': 'SW',
'Oregon': 'Pacific',
'Pennsylvania': 'MA',
'South-Carolina': 'South',
'South-Dakota': 'MW',
'Texas': 'SW',
'Utah': 'RM',
'Vermont': 'NE',
'Virginia': 'South',
'Washington': 'Pacific',
'Wyoming': 'RM'}
REGIONS_TO_IDX={'RM': 6,'MA': 1,'NE': 2,'South': 3, 'Pacific': 4, 'MW': 0 , 'SW': 5}
IDX_TO_REGIONS={ 6:'RM',1:'MA',2:'NE',3:'South',4: 'Pacific', 0:'MW', 5:'SW'}
def make_dataset(dir, class_to_idx, extensions, domains,start=1934):
images = []
meta = []
dir = os.path.expanduser(dir)
for target in sorted(os.listdir(dir)):
d = os.path.join(dir, target)
if not os.path.isdir(d):
continue
for root, _, fnames in sorted(os.walk(d)):
for fname in sorted(fnames):
if has_file_allowed_extension(fname, extensions):
path = os.path.join(root, fname)
year=int(path.split('/')[-1].split('_')[0])
city=(path.split('/')[-1].split('_')[1])
region=REGIONS_DICT[city]
pivot_year=start+(year-start)//10*10
if (pivot_year, region) in domains:
item = (path, class_to_idx[target])
images.append(item)
meta.append([year,region])
return images, meta
class MNIST(data.Dataset):
def __init__(self, root, transform=None, target_transform=None,domains=[]):
extensions = IMG_EXTENSIONS
loader = default_loader
# classes, class_to_idx = self._find_classes(root)
# samples, self.meta = make_dataset(root, class_to_idx, extensions, domains)
# if len(samples) == 0:
# raise(RuntimeError("Found 0 files in subfolders of: " + root + "\n"
# "Supported extensions are: " + ",".join(extensions)))
self.root = root
X = np.load("{}/X.npy".format(self.root))
Y = np.load("{}/Y.npy".format(self.root))
A = np.load("{}/A.npy".format(self.root))
U = np.load("{}/U.npy".format(self.root))
# print(domains)
U_ = (U*6).astype('d')
indices = []
for d in domains:
# print(d)
indices += [i for i, x in enumerate(U_) if x == d[0]]
# print(len(indices))
self.X = X[indices]
self.Y = Y[indices]
self.U = U[indices]
self.A = A[indices]
self.loader = loader
# self.extensions = extensions
# self.classes = classes
# self.class_to_idx = class_to_idx
# self.samples = samples
# self.transform = transform
# self.target_transform = target_transform
# self.imgs = self.samples
def _find_classes(self, dir):
if sys.version_info >= (3, 5):
# Faster and available in Python 3.5 and above
classes = [d.name for d in os.scandir(dir) if d.is_dir()]
else:
classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]
classes.sort()
class_to_idx = {classes[i]: i for i in range(len(classes))}
return classes, class_to_idx
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (sample, target) where target is class_index of the target class.
"""
# path, target = self.samples[index]
sample = self.X[index]
target = self.Y[index]
# print(sample.shape)
# if self.transform is not None:
# sample = self.transform(sample)
# if self.target_transform is not None:
# target = self.target_transform(target)
y,p = self.U[index], self.A[index]
return np.repeat(sample,3,axis=0).astype('f'), int(y*6), target
def get_meta(self):
return np.array(self.meta)
def __len__(self):
return len(self.X)
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
tmp = ' Target Transforms (if any): '
fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
class MNISTSampler(torch.utils.data.sampler.Sampler):
r"""Base class for all Samplers.
Every Sampler subclass has to provide an __iter__ method, providing a way
to iterate over indices of dataset elements, and a __len__ method that
returns the length of the returned iterators.
"""
def __init__(self, data_source, bs):
self.data_source=data_source
self.meta=self.data_source.U
self.dict_meta={}
self.indeces={}
self.keys=[]
self.bs=bs
for idx, u in enumerate(self.meta):
try:
self.dict_meta[u].append(idx)
except:
self.dict_meta[u]=[idx]
self.keys.append(u)
self.indeces[u]=0
for idx in self.keys:
shuffle(self.dict_meta[idx])
def _sampling(self,idx, n):
if self.indeces[idx]+n>=len(self.dict_meta[idx]):
self.dict_meta[idx]=self.dict_meta[idx]+self.dict_meta[idx]
self.indeces[idx]=self.indeces[idx]+n
return self.dict_meta[idx][self.indeces[idx]-n:self.indeces[idx]]
def _shuffle(self):
order=np.random.randint(len(self.keys),size=(len(self.data_source)//(self.bs)))
sIdx=[]
for i in order:
sIdx=sIdx+self._sampling(self.keys[i],self.bs)
return np.array(sIdx)
def __iter__(self):
return iter(self._shuffle())
def __len__(self):
        return len(self.data_source) // self.bs * self.bs  # __len__ must return an int
|
[
"os.path.expanduser",
"torchvision.datasets.folder.has_file_allowed_extension",
"os.path.isdir",
"random.shuffle",
"os.walk",
"numpy.array",
"os.path.join",
"os.listdir",
"os.scandir",
"numpy.repeat"
] |
[((1095, 1118), 'os.path.expanduser', 'os.path.expanduser', (['dir'], {}), '(dir)\n', (1113, 1118), False, 'import os\n'), ((1144, 1159), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (1154, 1159), False, 'import os\n'), ((1174, 1199), 'os.path.join', 'os.path.join', (['dir', 'target'], {}), '(dir, target)\n', (1186, 1199), False, 'import os\n'), ((4443, 4462), 'numpy.array', 'np.array', (['self.meta'], {}), '(self.meta)\n', (4451, 4462), True, 'import numpy as np\n'), ((6402, 6416), 'numpy.array', 'np.array', (['sIdx'], {}), '(sIdx)\n', (6410, 6416), True, 'import numpy as np\n'), ((1215, 1231), 'os.path.isdir', 'os.path.isdir', (['d'], {}), '(d)\n', (1228, 1231), False, 'import os\n'), ((1293, 1303), 'os.walk', 'os.walk', (['d'], {}), '(d)\n', (1300, 1303), False, 'import os\n'), ((5861, 5889), 'random.shuffle', 'shuffle', (['self.dict_meta[idx]'], {}), '(self.dict_meta[idx])\n', (5868, 5889), False, 'from random import shuffle\n'), ((1366, 1411), 'torchvision.datasets.folder.has_file_allowed_extension', 'has_file_allowed_extension', (['fname', 'extensions'], {}), '(fname, extensions)\n', (1392, 1411), False, 'from torchvision.datasets.folder import has_file_allowed_extension, is_image_file, IMG_EXTENSIONS, pil_loader, accimage_loader, default_loader\n'), ((1440, 1465), 'os.path.join', 'os.path.join', (['root', 'fname'], {}), '(root, fname)\n', (1452, 1465), False, 'import os\n'), ((3496, 3511), 'os.scandir', 'os.scandir', (['dir'], {}), '(dir)\n', (3506, 3511), False, 'import os\n'), ((3575, 3590), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (3585, 3590), False, 'import os\n'), ((4346, 4374), 'numpy.repeat', 'np.repeat', (['sample', '(3)'], {'axis': '(0)'}), '(sample, 3, axis=0)\n', (4355, 4374), True, 'import numpy as np\n'), ((3608, 3628), 'os.path.join', 'os.path.join', (['dir', 'd'], {}), '(dir, d)\n', (3620, 3628), False, 'import os\n')]
|
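A worked example of the decade bucketing done in `make_dataset` above (the year and city are hypothetical; the region comes from `REGIONS_DICT`):

```python
start = 1934
year = 1957                                      # parsed from the file name prefix
pivot_year = start + (year - start) // 10 * 10   # 1934 + (23 // 10) * 10 = 1954
# A 1957 image from 'Iowa' (region 'MW') therefore belongs to the (1954, 'MW') domain
# and is kept only if that pair appears in the `domains` argument.
```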
from django.urls import path, include
from django.contrib.auth import views as auth_views
from .views import (
PostlistView,
PostCreateView,
PostDetailView,
PostUpdateView,
PostDeleteView,
saved_posts,
PostLikeToggle,
)
app_name = 'insta'
urlpatterns = [
#local : http://127.0.0.1:8000/
path('', PostlistView.as_view(), name='post_list'),
path('new/', PostCreateView.as_view(), name='post_create'),
path('<int:id>', PostDetailView.as_view(), name='post_detail'),
path('new/', PostCreateView.as_view(), name='post_new'),
path('<int:id>/update/', PostUpdateView.as_view(), name='post_update'),
path('<int:id>/delete/', PostDeleteView.as_view(), name='post_delete'),
path('<int:id>/likes/', PostLikeToggle.as_view(), name='like_toggle'),
path('saved/', saved_posts, name='saved_posts'),
# path('login/', auth_views.login, name='login'),
# path('user_profile/', auth_views.user_profile, name='user_profile'),
]
|
[
"django.urls.path"
] |
[((807, 854), 'django.urls.path', 'path', (['"""saved/"""', 'saved_posts'], {'name': '"""saved_posts"""'}), "('saved/', saved_posts, name='saved_posts')\n", (811, 854), False, 'from django.urls import path, include\n')]
|
#!/usr/bin/env python3
import argparse
import csv
import logging
import os
import sys
import typing
from typing import Dict, Iterator, Optional, Tuple
from common import IncludeChange
from include_analysis import ParseError, parse_raw_include_analysis_output
from utils import (
get_include_analysis_edges_centrality,
get_include_analysis_edge_prevalence,
get_include_analysis_edge_sizes,
load_config,
)
def set_edge_weights(
changes_file: typing.TextIO, edge_weights: Dict[str, Dict[str, int]]
) -> Iterator[Tuple[IncludeChange, int, str, str, Optional[int]]]:
"""Set edge weights in the include changes output"""
change_type_value: str
for change_type_value, line, filename, header, *_ in csv.reader(changes_file):
change_type = IncludeChange.from_value(change_type_value)
change = (line, filename, header)
if change_type is IncludeChange.REMOVE:
# For now, only removes have edge weights
if filename not in edge_weights:
logging.warning(f"Skipping filename not found in weights, file may be removed: {filename}")
elif header not in edge_weights[filename]:
logging.warning(f"Skipping edge not found in weights: {filename},{header}")
else:
change = change + (edge_weights[filename][header],)
elif change_type is IncludeChange.ADD:
# TODO - Some metric for how important they are to add, if there
# is one? Maybe something like the ratio of occurrences to
# direct includes, suggesting it's used a lot, but has lots
# of missing includes? That metric wouldn't really work well
# since leaf headers of commonly included headers would end
# up with a high ratio, despite not really being important to
# add anywhere. Maybe there's no metric here and instead an
# analysis is done at the end to rank headers by how many
# suggested includes there are for that file.
pass
full_change: Tuple[IncludeChange, int, str, str, Optional[int]] = (change_type_value, *change)
yield full_change
def main():
parser = argparse.ArgumentParser(description="Set edge weights in include changes output")
parser.add_argument(
"changes_file",
type=argparse.FileType("r"),
help="CSV of include changes to set edge weights for.",
)
parser.add_argument(
"include_analysis_output",
type=argparse.FileType("r"),
help="The include analysis output to use.",
)
parser.add_argument(
"--metric",
choices=["centrality", "input_size", "prevalence"],
default="input_size",
help="Metric to use for edge weights.",
)
parser.add_argument("--config", help="Name of config file to use.")
parser.add_argument("--verbose", action="store_true", default=False, help="Enable verbose logging.")
args = parser.parse_args()
try:
include_analysis = parse_raw_include_analysis_output(args.include_analysis_output.read())
except ParseError as e:
message = str(e)
print("error: Could not parse include analysis output file")
if message:
print(message)
return 2
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
config = None
if args.config:
config = load_config(args.config)
csv_writer = csv.writer(sys.stdout)
if args.metric == "input_size":
edge_weights = get_include_analysis_edge_sizes(include_analysis, config.includeDirs if config else None)
elif args.metric == "centrality":
edge_weights = get_include_analysis_edges_centrality(include_analysis, config.includeDirs if config else None)
elif args.metric == "prevalence":
edge_weights = get_include_analysis_edge_prevalence(include_analysis, config.includeDirs if config else None)
try:
for row in set_edge_weights(args.changes_file, edge_weights):
csv_writer.writerow(row)
sys.stdout.flush()
except BrokenPipeError:
devnull = os.open(os.devnull, os.O_WRONLY)
os.dup2(devnull, sys.stdout.fileno())
sys.exit(1)
return 0
if __name__ == "__main__":
try:
sys.exit(main())
except KeyboardInterrupt:
pass # Don't show the user anything
|
[
"os.open",
"csv.reader",
"csv.writer",
"argparse.ArgumentParser",
"logging.basicConfig",
"utils.get_include_analysis_edges_centrality",
"logging.warning",
"sys.stdout.fileno",
"utils.get_include_analysis_edge_sizes",
"utils.get_include_analysis_edge_prevalence",
"utils.load_config",
"sys.stdout.flush",
"common.IncludeChange.from_value",
"argparse.FileType",
"sys.exit"
] |
[((729, 753), 'csv.reader', 'csv.reader', (['changes_file'], {}), '(changes_file)\n', (739, 753), False, 'import csv\n'), ((2268, 2354), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Set edge weights in include changes output"""'}), "(description=\n 'Set edge weights in include changes output')\n", (2291, 2354), False, 'import argparse\n'), ((3523, 3545), 'csv.writer', 'csv.writer', (['sys.stdout'], {}), '(sys.stdout)\n', (3533, 3545), False, 'import csv\n'), ((777, 820), 'common.IncludeChange.from_value', 'IncludeChange.from_value', (['change_type_value'], {}), '(change_type_value)\n', (801, 820), False, 'from common import IncludeChange\n'), ((3382, 3422), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (3401, 3422), False, 'import logging\n'), ((3480, 3504), 'utils.load_config', 'load_config', (['args.config'], {}), '(args.config)\n', (3491, 3504), False, 'from utils import get_include_analysis_edges_centrality, get_include_analysis_edge_prevalence, get_include_analysis_edge_sizes, load_config\n'), ((3606, 3699), 'utils.get_include_analysis_edge_sizes', 'get_include_analysis_edge_sizes', (['include_analysis', '(config.includeDirs if config else None)'], {}), '(include_analysis, config.includeDirs if\n config else None)\n', (3637, 3699), False, 'from utils import get_include_analysis_edges_centrality, get_include_analysis_edge_prevalence, get_include_analysis_edge_sizes, load_config\n'), ((4135, 4153), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4151, 4153), False, 'import sys\n'), ((2412, 2434), 'argparse.FileType', 'argparse.FileType', (['"""r"""'], {}), "('r')\n", (2429, 2434), False, 'import argparse\n'), ((2579, 2601), 'argparse.FileType', 'argparse.FileType', (['"""r"""'], {}), "('r')\n", (2596, 2601), False, 'import argparse\n'), ((3757, 3856), 'utils.get_include_analysis_edges_centrality', 'get_include_analysis_edges_centrality', (['include_analysis', '(config.includeDirs if config else None)'], {}), '(include_analysis, config.includeDirs if\n config else None)\n', (3794, 3856), False, 'from utils import get_include_analysis_edges_centrality, get_include_analysis_edge_prevalence, get_include_analysis_edge_sizes, load_config\n'), ((4200, 4232), 'os.open', 'os.open', (['os.devnull', 'os.O_WRONLY'], {}), '(os.devnull, os.O_WRONLY)\n', (4207, 4232), False, 'import os\n'), ((4287, 4298), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4295, 4298), False, 'import sys\n'), ((1027, 1123), 'logging.warning', 'logging.warning', (['f"""Skipping filename not found in weights, file may be removed: {filename}"""'], {}), "(\n f'Skipping filename not found in weights, file may be removed: {filename}')\n", (1042, 1123), False, 'import logging\n'), ((3914, 4012), 'utils.get_include_analysis_edge_prevalence', 'get_include_analysis_edge_prevalence', (['include_analysis', '(config.includeDirs if config else None)'], {}), '(include_analysis, config.includeDirs if\n config else None)\n', (3950, 4012), False, 'from utils import get_include_analysis_edges_centrality, get_include_analysis_edge_prevalence, get_include_analysis_edge_sizes, load_config\n'), ((4258, 4277), 'sys.stdout.fileno', 'sys.stdout.fileno', ([], {}), '()\n', (4275, 4277), False, 'import sys\n'), ((1190, 1265), 'logging.warning', 'logging.warning', (['f"""Skipping edge not found in weights: {filename},{header}"""'], {}), "(f'Skipping edge not found in weights: {filename},{header}')\n", (1205, 1265), False, 'import logging\n')]
|
import argparse
import json
import os
import subprocess
import sys
top_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
parser = argparse.ArgumentParser(description='''
Script to generate a list of service accounts along with their privilege levels
''')
parser.add_argument('org',
help='The organization resource ID I.e. organizations/999999999999')
parser.add_argument('--project_labels',
help='A set of labels to filter projects on \n' +
'I.e. env:dev,project:foo')
parser.add_argument('--data_dir', help='location of raw JSON data', default='data')
parser.add_argument('--limit', default=10, type=int,
help='the max number of accounts to return')
parser.add_argument('--member_type',
choices=['service_account','user_account','group'],
help='the type of member to filter results by')
parser.add_argument('--skip_collect', action='store_true',
help='gather data from Google APIs')
parser.add_argument('--sort_type', default='total_sum',
choices=['total_sum', 'top_sum'],
help='the sort function used to order the members')
def main():
parsed_args = parser.parse_args()
if not parsed_args.skip_collect:
gather_data(parsed_args.org, parsed_args.project_labels, parsed_args.data_dir)
with open(os.path.join(parsed_args.data_dir, 'members.json')) as f:
members = json.load(f)
service_accounts = filter_service_accounts(members, parsed_args.member_type)
sort_fn = getattr(sys.modules[__name__], parsed_args.sort_type)
sorted_members = sorted(service_accounts, key=sort_fn, reverse=True)
print_permissions(sorted_members[:parsed_args.limit])
def print_permissions(permissions):
for item in permissions:
email, data = item
print("\n{}:".format(email))
for resource in data['resources']:
roles = [
"{} ({} permissions)".format(role['name'], role['permission_count'])
for role in resource['roles']
]
print(" {}: {}".format(resource['name'], ",".join(roles)))
def gather_data(org, project_labels, data_dir):
proc_args = [
"go",
"run",
f'{top_path}/cmd/checker/main.go',
f'-org={org}',
]
if project_labels:
proc_args.append(f'-project_labels={project_labels}')
if data_dir:
proc_args.append(f'-data={data_dir}')
subprocess.run(proc_args, check=True)
def filter_service_accounts(members, member_type):
startswith_map = {
'service_account': 'serviceAccount:',
'user_account': 'user:',
'group': 'group:',
None: ''
}
return (
(memberEmail, member) for memberEmail, member in members.items()
if memberEmail.startswith(startswith_map[member_type])
)
def total_sum(data):
_, member = data
return sum(
sum(
role['permission_count'] for role in resource['roles']
)
for resource in member['resources']
)
def top_sum(data):
_, member = data
return max(
sum(
role['permission_count'] for role in resource['roles']
)
for resource in member['resources']
)
if __name__ == '__main__':
main()
|
[
"subprocess.run",
"json.load",
"argparse.ArgumentParser",
"os.path.realpath",
"os.path.join"
] |
[((150, 284), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""\nScript to generate a list of service accounts along with their privilege levels\n"""'}), '(description=\n """\nScript to generate a list of service accounts along with their privilege levels\n"""\n )\n', (173, 284), False, 'import argparse\n'), ((2530, 2567), 'subprocess.run', 'subprocess.run', (['proc_args'], {'check': '(True)'}), '(proc_args, check=True)\n', (2544, 2567), False, 'import subprocess\n'), ((111, 137), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (127, 137), False, 'import os\n'), ((1499, 1511), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1508, 1511), False, 'import json\n'), ((1423, 1473), 'os.path.join', 'os.path.join', (['parsed_args.data_dir', '"""members.json"""'], {}), "(parsed_args.data_dir, 'members.json')\n", (1435, 1473), False, 'import os\n')]
|
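A small sketch of the member structure these helpers expect, with hypothetical emails, resources, and counts (the field names are exactly the ones read by `print_permissions`, `total_sum`, and `top_sum`):

```python
# Hypothetical entry shaped like data/members.json.
members = {
    "serviceAccount:ci@example-project.iam.gserviceaccount.com": {
        "resources": [
            {
                "name": "projects/example-project",
                "roles": [
                    {"name": "roles/editor", "permission_count": 3000},
                ],
            },
        ],
    },
}

service_accounts = list(filter_service_accounts(members, "service_account"))
print(total_sum(service_accounts[0]))   # 3000
```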
import random
import numpy as np
from bayesnet.network import Network
def hmc(model, call_args, parameter=None, sample_size=100, step_size=1e-3, n_step=10):
"""
Hamiltonian Monte Carlo sampling aka Hybrid Monte Carlo sampling
Parameters
----------
model : Network
bayesian network
call_args : tuple or dict
observations of the model
parameter : dict
dict of parameter to be sampled
sample_size : int
number of samples to be generated
    step_size : float
        step size of each leapfrog update of the parameters
    n_step : int
        number of leapfrog updates performed per sample
Returns
-------
sample : dict of list of np.ndarray
samples from the model given observations
"""
if not isinstance(model, Network):
raise TypeError("model must be Network object")
if not isinstance(sample_size, int):
raise TypeError(f"sample_size must be int, not {type(sample_size)}")
if not isinstance(step_size, (int, float)):
raise TypeError(f"step_size must be float, not {type(step_size)}")
if not isinstance(n_step, int):
raise TypeError(f"n_step must be int, not {type(n_step)}")
def run_model():
model.clear()
if isinstance(call_args, tuple):
model(*call_args)
elif isinstance(call_args, dict):
model(**call_args)
else:
raise TypeError("call_args must be tuple or dict")
sample = dict()
previous = dict()
velocity = dict()
if parameter is not None:
if not isinstance(parameter, dict):
raise TypeError("parameter must be dict")
for key, p in parameter.items():
if p is not model.parameter[key]:
raise ValueError("parameter must be defined in the model")
variable = parameter
else:
variable = model.parameter
for key in variable:
sample[key] = []
for _ in range(sample_size):
run_model()
log_posterior = model.log_pdf()
log_posterior.backward()
kinetic_energy = 0
for key, v in variable.items():
previous[key] = v.value
velocity[key] = np.random.normal(size=v.shape)
kinetic_energy += 0.5 * np.square(velocity[key]).sum()
velocity[key] += 0.5 * v.grad * step_size
v.value = v.value + step_size * velocity[key]
hamiltonian = kinetic_energy - log_posterior.value
for _ in range(n_step):
run_model()
model.log_pdf().backward()
for key, v in variable.items():
velocity[key] += step_size * v.grad
v.value += step_size * velocity[key]
run_model()
log_posterior_new = model.log_pdf()
log_posterior_new.backward()
kinetic_energy_new = 0
for key, v in velocity.items():
v += 0.5 * step_size * variable[key].grad
kinetic_energy_new += 0.5 * np.square(v).sum()
hamiltonian_new = kinetic_energy_new - log_posterior_new.value
accept_proba = np.exp(hamiltonian - hamiltonian_new)
if random.random() < accept_proba:
for key, v in variable.items():
sample[key].append(v.value)
else:
for key, v in variable.items():
v.value = previous[key]
sample[key].append(v.value)
return sample
|
[
"random.random",
"numpy.square",
"numpy.exp",
"numpy.random.normal"
] |
[((3079, 3116), 'numpy.exp', 'np.exp', (['(hamiltonian - hamiltonian_new)'], {}), '(hamiltonian - hamiltonian_new)\n', (3085, 3116), True, 'import numpy as np\n'), ((2184, 2214), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'v.shape'}), '(size=v.shape)\n', (2200, 2214), True, 'import numpy as np\n'), ((3129, 3144), 'random.random', 'random.random', ([], {}), '()\n', (3142, 3144), False, 'import random\n'), ((2251, 2275), 'numpy.square', 'np.square', (['velocity[key]'], {}), '(velocity[key])\n', (2260, 2275), True, 'import numpy as np\n'), ((2965, 2977), 'numpy.square', 'np.square', (['v'], {}), '(v)\n', (2974, 2977), True, 'import numpy as np\n')]
|
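For orientation, the loop above is the standard leapfrog-plus-Metropolis scheme: with step size ε, the momentum v gets half steps at the ends and full steps in between, the parameters θ get full steps, and the proposal is accepted with probability governed by the change in the Hamiltonian H (kinetic energy minus log posterior):

```latex
v \leftarrow v + \tfrac{\varepsilon}{2}\,\nabla_{\theta}\log p(\theta), \qquad
\theta \leftarrow \theta + \varepsilon\, v, \qquad
P(\text{accept}) = \min\!\big(1,\; \exp(H_{\mathrm{old}} - H_{\mathrm{new}})\big).
```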
#!/usr/bin/env python
import binascii
import hashlib
import random
class Xbin(object):
# def __init__(self):
# get random hex by length
def get_random_hex(self, length=1, is_bytes=0):
random_hex = ''
for _ in range(0, length):
random_hex += "{:0>2x}".format(random.randrange(0, 255))
if is_bytes:
return bytes().fromhex(random_hex)
else:
return random_hex
    @staticmethod
    def get_md5_value(src, is_bytes=0):
md5 = hashlib.md5()
md5.update(src)
md5_digest = md5.hexdigest()
if is_bytes:
return bytes().fromhex(md5_digest)
else:
return md5_digest
|
[
"hashlib.md5",
"random.randrange"
] |
[((496, 509), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (507, 509), False, 'import hashlib\n'), ((302, 326), 'random.randrange', 'random.randrange', (['(0)', '(255)'], {}), '(0, 255)\n', (318, 326), False, 'import random\n')]
|
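A short usage sketch for the helpers above (values are illustrative; note that `get_md5_value` takes the data to hash as its first argument, so it is called through the class here):

```python
x = Xbin()
token = x.get_random_hex(length=4)           # 8 hex characters, e.g. '9f03b2c1'
digest = Xbin.get_md5_value(token.encode())  # hex MD5 digest of the token
print(token, digest)
```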
# This file contains objects which represent QT models to encapsulate browser state
# (see BrowserState.py) which is NOT contained in any database (for that, see DbEntries.py and
# DbModel.py). Note that the models do not automatically react to
# any changes originating reloading the database entries, the controller must call the respective functions.
# Note that all these models are NOT editable, they only change by outside command.
from PyQt5 import QtCore, QtGui, QtWidgets
from . import BrowserState
class InvisibleFieldsModel(QtCore.QAbstractListModel):
def __init__(self,fields):
super().__init__()
self._fields = fields # an instance of BrowserState.Fields
self._fields.invisible_fields_to_be_changed.connect(self.slot_invisible_fields_to_be_changed)
self._fields.invisible_fields_changed.connect(self.slot_invisible_fields_changed)
def rowCount(self,idx):
assert not idx.isValid() # we only have top level data
return self._fields.invisible_fields_count()
def data(self,index,role):
row = index.row() # only relevant thing
if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.ToolTipRole:
return self._fields.get_invisible_fields()[row][1] # remove the type
else:
return None
def slot_invisible_fields_to_be_changed(self,change_data):
# for the interpretation of change_data see BrowserState.py
if change_data.tp == BrowserState.Fields.ChangeType.Reset:
self.beginResetModel()
elif change_data.tp == BrowserState.Fields.ChangeType.Content:
pass
elif change_data.tp == BrowserState.Fields.ChangeType.Insert:
first = change_data.info[0]
last = change_data.info[0] + change_data.info[1] - 1
self.beginInsertRows(QtCore.QModelIndex(),first,last)
elif change_data.tp == BrowserState.Fields.ChangeType.Remove:
first = change_data.info[0]
last = change_data.info[0] + change_data.info[1] - 1
self.beginRemoveRows(QtCore.QModelIndex(),first,last)
def slot_invisible_fields_changed(self,new_fields,change_data):
# for the interpretation of change_data see BrowserState.py
if change_data.tp == BrowserState.Fields.ChangeType.Reset:
self.endResetModel()
elif change_data.tp == BrowserState.Fields.ChangeType.Content:
for row in change_data.info:
idx = self.index(row,0,QtCore.QModelIndex())
self.dataChanged.emit(idx,idx)
elif change_data.tp == BrowserState.Fields.ChangeType.Insert:
self.endInsertRows()
elif change_data.tp == BrowserState.Fields.ChangeType.Remove:
self.endRemoveRows()
class VisibleFieldsModel(QtCore.QAbstractListModel):
def __init__(self,fields):
super().__init__()
self._fields = fields # an instance of BrowserState.Fields
self._fields.visible_fields_to_be_changed.connect(self.slot_visible_fields_to_be_changed)
self._fields.visible_fields_changed.connect(self.slot_visible_fields_changed)
def rowCount(self,idx):
assert not idx.isValid() # we only have top level data
return self._fields.visible_fields_count()
def data(self,index,role):
row = index.row() # only relevant thing
if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.ToolTipRole:
return self._fields.get_visible_fields()[row][1]
else:
return None
def slot_visible_fields_to_be_changed(self,change_data):
# for the interpretation of change_data see BrowserState.py
if change_data.tp == BrowserState.Fields.ChangeType.Reset:
self.beginResetModel()
elif change_data.tp == BrowserState.Fields.ChangeType.Content:
pass
elif change_data.tp == BrowserState.Fields.ChangeType.Insert:
first = change_data.info[0]
last = change_data.info[0] + change_data.info[1] - 1
self.beginInsertRows(QtCore.QModelIndex(),first,last)
elif change_data.tp == BrowserState.Fields.ChangeType.Remove:
first = change_data.info[0]
last = change_data.info[0] + change_data.info[1] - 1
self.beginRemoveRows(QtCore.QModelIndex(),first,last)
def slot_visible_fields_changed(self,new_fields,change_data):
# for the interpretation of change_data see BrowserState.py
if change_data.tp == BrowserState.Fields.ChangeType.Reset:
self.endResetModel()
elif change_data.tp == BrowserState.Fields.ChangeType.Content:
for row in change_data.info:
idx = self.index(row,0,QtCore.QModelIndex())
self.dataChanged.emit(idx,idx)
elif change_data.tp == BrowserState.Fields.ChangeType.Insert:
self.endInsertRows()
elif change_data.tp == BrowserState.Fields.ChangeType.Remove:
self.endRemoveRows()
|
[
"PyQt5.QtCore.QModelIndex"
] |
[((1833, 1853), 'PyQt5.QtCore.QModelIndex', 'QtCore.QModelIndex', ([], {}), '()\n', (1851, 1853), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2495, 2515), 'PyQt5.QtCore.QModelIndex', 'QtCore.QModelIndex', ([], {}), '()\n', (2513, 2515), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4058, 4078), 'PyQt5.QtCore.QModelIndex', 'QtCore.QModelIndex', ([], {}), '()\n', (4076, 4078), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4718, 4738), 'PyQt5.QtCore.QModelIndex', 'QtCore.QModelIndex', ([], {}), '()\n', (4736, 4738), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2074, 2094), 'PyQt5.QtCore.QModelIndex', 'QtCore.QModelIndex', ([], {}), '()\n', (2092, 2094), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4299, 4319), 'PyQt5.QtCore.QModelIndex', 'QtCore.QModelIndex', ([], {}), '()\n', (4317, 4319), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n')]
|
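For context, a minimal self-contained sketch of the Qt convention these models rely on, independent of the BrowserState classes: the begin* call is made before the underlying data changes and the matching end* call afterwards, so attached views stay consistent:

```python
from PyQt5 import QtCore

class TinyListModel(QtCore.QAbstractListModel):
    def __init__(self):
        super().__init__()
        self._items = []

    def rowCount(self, parent=QtCore.QModelIndex()):
        return len(self._items)

    def data(self, index, role):
        if role == QtCore.Qt.DisplayRole:
            return self._items[index.row()]
        return None

    def append(self, item):
        row = len(self._items)
        # Announce the insertion, mutate, then confirm it.
        self.beginInsertRows(QtCore.QModelIndex(), row, row)
        self._items.append(item)
        self.endInsertRows()
```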
import collections
import math
import numbers
import numpy as np
from .. import base
from .. import optim
from .. import utils
__all__ = [
'LinearRegression',
'LogisticRegression'
]
class GLM:
"""Generalized Linear Model.
Parameters:
optimizer (optim.Optimizer): The sequential optimizer used for updating the weights. Note
that the intercept is handled separately.
loss (optim.Loss): The loss function to optimize for.
l2 (float): Amount of L2 regularization used to push weights towards 0.
intercept (float): Initial intercept value.
intercept_lr (optim.schedulers.Scheduler or float): Learning rate scheduler used for
updating the intercept. If a `float` is passed, then an instance of
`optim.schedulers.Constant` will be used. Setting this to 0 implies that the intercept
            will not be updated.
clip_gradient (float): Clips the absolute value of each gradient value.
initializer (optim.initializers.Initializer): Weights initialization scheme.
Attributes:
weights (collections.defaultdict): The current weights.
"""
def __init__(self, optimizer, loss, l2, intercept, intercept_lr, clip_gradient, initializer):
self.optimizer = optimizer
self.loss = loss
self.l2 = l2
self.intercept = intercept
self.intercept_lr = (
optim.schedulers.Constant(intercept_lr)
if isinstance(intercept_lr, numbers.Number) else
intercept_lr
)
self.clip_gradient = clip_gradient
self.weights = collections.defaultdict(initializer)
self.initializer = initializer
def _raw_dot(self, x):
return utils.math.dot(self.weights, x) + self.intercept
def _eval_gradient(self, x, y, sample_weight):
"""Returns the gradient for a given observation.
This logic is put into a separate function for testing purposes.
"""
loss_gradient = self.loss.gradient(y_true=y, y_pred=self._raw_dot(x))
# Apply the sample weight
loss_gradient *= sample_weight
# Clip the gradient to avoid numerical instability
loss_gradient = utils.math.clamp(
loss_gradient,
minimum=-self.clip_gradient,
maximum=self.clip_gradient
)
return (
{
i: (
xi * loss_gradient +
2. * self.l2 * self.weights.get(i, 0)
)
for i, xi in x.items()
},
loss_gradient
)
def fit_one(self, x, y, sample_weight=1.):
# Some optimizers need to do something before a prediction is made
self.weights = self.optimizer.update_before_pred(w=self.weights)
# Calculate the gradient
gradient, loss_gradient = self._eval_gradient(x=x, y=y, sample_weight=sample_weight)
# Update the intercept
self.intercept -= self.intercept_lr.get(self.optimizer.n_iterations) * loss_gradient
# Update the weights
self.weights = self.optimizer.update_after_pred(w=self.weights, g=gradient)
return self
class LinearRegression(GLM, base.Regressor):
"""Linear regression.
Parameters:
optimizer (optim.Optimizer): The sequential optimizer used for updating the weights. Note
that the intercept is handled separately. Defaults to ``optim.SGD(.01)``.
loss (optim.RegressionLoss): The loss function to optimize for. Defaults to
``optim.losses.SquaredLoss``.
l2 (float): Amount of L2 regularization used to push weights towards 0.
intercept (float): Initial intercept value.
intercept_lr (optim.schedulers.Scheduler or float): Learning rate scheduler used for
updating the intercept. If a `float` is passed, then an instance of
`optim.schedulers.Constant` will be used. Setting this to 0 implies that the intercept
            will not be updated.
clip_gradient (float): Clips the absolute value of each gradient value.
initializer (optim.initializers.Initializer): Weights initialization scheme.
Attributes:
weights (collections.defaultdict): The current weights.
Example:
::
>>> from creme import datasets
>>> from creme import linear_model
>>> from creme import metrics
>>> from creme import model_selection
>>> from creme import preprocessing
>>> X_y = datasets.TrumpApproval()
>>> model = (
... preprocessing.StandardScaler() |
... linear_model.LinearRegression(intercept_lr=.1)
... )
>>> metric = metrics.MAE()
>>> model_selection.progressive_val_score(X_y, model, metric)
MAE: 0.616405
>>> model['LinearRegression'].intercept
38.000439
Note:
Using a feature scaler such as `preprocessing.StandardScaler` upstream helps the optimizer
to converge.
"""
def __init__(self, optimizer=None, loss=None, l2=.0, intercept=0., intercept_lr=.01,
clip_gradient=1e12, initializer=None):
super().__init__(
optimizer=(
optim.SGD(optim.schedulers.InverseScaling(.01, .25))
if optimizer is None else
optimizer
),
loss=optim.losses.Squared() if loss is None else loss,
intercept=intercept,
intercept_lr=intercept_lr,
l2=l2,
clip_gradient=clip_gradient,
initializer=initializer if initializer else optim.initializers.Zeros()
)
def predict_one(self, x):
return self.loss.mean_func(self._raw_dot(x))
def debug_one(self, x, decimals=5, **print_params):
"""
Example:
::
>>> from creme import datasets
>>> from creme import linear_model
>>> from creme import metrics
>>> from creme import model_selection
>>> from creme import preprocessing
>>> X_y = datasets.TrumpApproval()
>>> model = (
... preprocessing.StandardScaler() |
... linear_model.LinearRegression(intercept_lr=.1)
... )
>>> for x, y in X_y:
... y_pred = model.predict_one(x)
... model = model.fit_one(x, y)
>>> model.debug_one(x)
0. Input
--------
gallup: 43.84321 (float)
ipsos: 40.57068 (float)
morning_consult: 37.81875 (float)
ordinal_date: 737389 (int)
rasmussen: 40.10469 (float)
you_gov: 41.63691 (float)
<BLANKLINE>
1. StandardScaler
-----------------
gallup: 1.18751 (float)
ipsos: -0.04683 (float)
morning_consult: -1.22583 (float)
ordinal_date: 1.72946 (float)
rasmussen: -0.23857 (float)
you_gov: 0.44131 (float)
<BLANKLINE>
2. LinearRegression
-------------------
Name Value Weight Contribution
Intercept 1.00000 38.00044 38.00044
ordinal_date 1.72946 2.23125 3.85885
gallup 1.18751 0.28647 0.34019
you_gov 0.44131 -0.01270 -0.00560
ipsos -0.04683 1.01815 -0.04768
rasmussen -0.23857 0.45099 -0.10759
morning_consult -1.22583 0.35181 -0.43126
<BLANKLINE>
Prediction: 41.60735
"""
def fmt_float(x):
return '{: ,.{prec}f}'.format(x, prec=decimals)
names = list(map(str, x.keys())) + ['Intercept']
values = list(map(fmt_float, list(x.values()) + [1]))
weights = list(map(fmt_float, [self.weights.get(i, 0) for i in x] + [self.intercept]))
contributions = [xi * self.weights.get(i, 0) for i, xi in x.items()] + [self.intercept]
order = reversed(np.argsort(contributions))
contributions = list(map(fmt_float, contributions))
table = utils.pretty.print_table(
headers=['Name', 'Value', 'Weight', 'Contribution'],
columns=[names, values, weights, contributions],
order=order
)
print(table, **print_params)
class LogisticRegression(GLM, base.BinaryClassifier):
"""Logistic regression.
Parameters:
optimizer (optim.Optimizer): The sequential optimizer used for updating the weights. Note
that the intercept is handled separately. Defaults to ``optim.SGD(.05)``.
loss (optim.BinaryLoss): The loss function to optimize for. Defaults to
``optim.losses.Log``.
l2 (float): Amount of L2 regularization used to push weights towards 0.
intercept (float): Initial intercept value.
intercept_lr (optim.schedulers.Scheduler or float): Learning rate scheduler used for
updating the intercept. If a `float` is passed, then an instance of
`optim.schedulers.Constant` will be used. Setting this to 0 implies that the intercept
            will not be updated.
clip_gradient (float): Clips the absolute value of each gradient value.
initializer (optim.initializers.Initializer): Weights initialization scheme.
Attributes:
weights (collections.defaultdict): The current weights.
Example:
::
>>> from creme import datasets
>>> from creme import linear_model
>>> from creme import metrics
>>> from creme import model_selection
>>> from creme import optim
>>> from creme import preprocessing
>>> X_y = datasets.Phishing()
>>> model = (
... preprocessing.StandardScaler() |
... linear_model.LogisticRegression(optimizer=optim.SGD(.1))
... )
>>> metric = metrics.Accuracy()
>>> model_selection.progressive_val_score(X_y, model, metric)
Accuracy: 88.96%
Note:
Using a feature scaler such as `preprocessing.StandardScaler` upstream helps the optimizer
to converge.
"""
def __init__(self, optimizer=None, loss=None, l2=.0, intercept=0., intercept_lr=.01,
clip_gradient=1e12, initializer=None):
super().__init__(
optimizer=optim.SGD(.01) if optimizer is None else optimizer,
loss=optim.losses.Log() if loss is None else loss,
intercept=intercept,
intercept_lr=intercept_lr,
l2=l2,
clip_gradient=clip_gradient,
initializer=initializer if initializer else optim.initializers.Zeros()
)
def predict_proba_one(self, x):
p = self.loss.mean_func(self._raw_dot(x)) # Convert logit to probability
return {False: 1. - p, True: p}
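# --- Editor's illustration (not part of the original creme source) ---
# For the default log loss, ``loss.mean_func`` is the logistic sigmoid, so
# predict_proba_one is simply the raw linear score squashed into (0, 1).
# A minimal standalone sketch of that link function:
def _sigmoid_sketch(raw_score: float) -> float:
    """Maps a raw dot product to the positive-class probability."""
    import math
    return 1. / (1. + math.exp(-raw_score))
# e.g. _sigmoid_sketch(0.0) == 0.5: a zero score is maximally uncertain.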
|
[
"collections.defaultdict",
"numpy.argsort"
] |
[((1625, 1661), 'collections.defaultdict', 'collections.defaultdict', (['initializer'], {}), '(initializer)\n', (1648, 1661), False, 'import collections\n'), ((8519, 8544), 'numpy.argsort', 'np.argsort', (['contributions'], {}), '(contributions)\n', (8529, 8544), True, 'import numpy as np\n')]
|
from ROAR.agent_module.agent import Agent
from ROAR.utilities_module.data_structures_models import SensorsData
from ROAR.utilities_module.vehicle_models import Vehicle, VehicleControl
from ROAR.configurations.configuration import Configuration as AgentConfig
import cv2
import numpy as np
import open3d as o3d
from ROAR.utilities_module.occupancy_map import OccupancyGridMap
from ROAR.perception_module.depth_to_pointcloud_detector import DepthToPointCloudDetector
from ROAR.perception_module.ground_plane_detector import GroundPlaneDetector
from ROAR.perception_module.lane_detector import LaneDetector
class iOSAgent(Agent):
def __init__(self, vehicle: Vehicle, agent_settings: AgentConfig, **kwargs):
super().__init__(vehicle, agent_settings, **kwargs)
# initialize occupancy grid map content
self.occu_map = OccupancyGridMap(agent=self)
self.depth_to_pcd = DepthToPointCloudDetector(agent=self)
self.ground_plane_detector = GroundPlaneDetector(agent=self)
self.lane_detector = LaneDetector(agent=self)
# initialize open3d related content
self.vis = o3d.visualization.Visualizer()
self.vis.create_window(width=500, height=500)
self.pcd = o3d.geometry.PointCloud()
self.coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame()
self.points_added = False
def run_step(self, sensors_data: SensorsData, vehicle: Vehicle) -> VehicleControl:
super(iOSAgent, self).run_step(sensors_data, vehicle)
if self.front_depth_camera.data is not None and self.front_rgb_camera.data is not None:
depth_img = self.front_depth_camera.data.copy()
lane_mask = self.lane_detector.run_in_series()
none_lane = np.where(lane_mask < 0.5)
depth_img[none_lane] = 0
pcd = self.depth_to_pcd.run_in_series(depth_image=depth_img)
points: np.ndarray = np.asarray(pcd.points)
self.occu_map.update(points)
self.occu_map.visualize()
self.non_blocking_pcd_visualization(pcd=pcd, should_center=True,
should_show_axis=True, axis_size=1)
return VehicleControl()
def non_blocking_pcd_visualization(self, pcd: o3d.geometry.PointCloud,
should_center=False,
should_show_axis=False,
axis_size: float = 0.1):
points = np.asarray(pcd.points)
colors = np.asarray(pcd.colors)
if should_center:
points = points - np.mean(points, axis=0)
if self.points_added is False:
self.pcd = o3d.geometry.PointCloud()
self.pcd.points = o3d.utility.Vector3dVector(points)
self.pcd.colors = o3d.utility.Vector3dVector(colors)
if should_show_axis:
self.coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=axis_size,
origin=np.mean(points,
axis=0))
self.vis.add_geometry(self.coordinate_frame)
self.vis.add_geometry(self.pcd)
self.points_added = True
else:
# print(np.shape(np.vstack((np.asarray(self.pcd.points), points))))
self.pcd.points = o3d.utility.Vector3dVector(points)
self.pcd.colors = o3d.utility.Vector3dVector(colors)
if should_show_axis:
self.coordinate_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=axis_size,
origin=np.mean(points,
axis=0))
self.vis.update_geometry(self.coordinate_frame)
self.vis.update_geometry(self.pcd)
self.vis.poll_events()
self.vis.update_renderer()
|
[
"ROAR.perception_module.ground_plane_detector.GroundPlaneDetector",
"open3d.visualization.Visualizer",
"ROAR.utilities_module.occupancy_map.OccupancyGridMap",
"numpy.asarray",
"open3d.geometry.PointCloud",
"open3d.geometry.TriangleMesh.create_coordinate_frame",
"ROAR.utilities_module.vehicle_models.VehicleControl",
"numpy.where",
"numpy.mean",
"ROAR.perception_module.depth_to_pointcloud_detector.DepthToPointCloudDetector",
"open3d.utility.Vector3dVector",
"ROAR.perception_module.lane_detector.LaneDetector"
] |
[((842, 870), 'ROAR.utilities_module.occupancy_map.OccupancyGridMap', 'OccupancyGridMap', ([], {'agent': 'self'}), '(agent=self)\n', (858, 870), False, 'from ROAR.utilities_module.occupancy_map import OccupancyGridMap\n'), ((899, 936), 'ROAR.perception_module.depth_to_pointcloud_detector.DepthToPointCloudDetector', 'DepthToPointCloudDetector', ([], {'agent': 'self'}), '(agent=self)\n', (924, 936), False, 'from ROAR.perception_module.depth_to_pointcloud_detector import DepthToPointCloudDetector\n'), ((974, 1005), 'ROAR.perception_module.ground_plane_detector.GroundPlaneDetector', 'GroundPlaneDetector', ([], {'agent': 'self'}), '(agent=self)\n', (993, 1005), False, 'from ROAR.perception_module.ground_plane_detector import GroundPlaneDetector\n'), ((1035, 1059), 'ROAR.perception_module.lane_detector.LaneDetector', 'LaneDetector', ([], {'agent': 'self'}), '(agent=self)\n', (1047, 1059), False, 'from ROAR.perception_module.lane_detector import LaneDetector\n'), ((1123, 1153), 'open3d.visualization.Visualizer', 'o3d.visualization.Visualizer', ([], {}), '()\n', (1151, 1153), True, 'import open3d as o3d\n'), ((1227, 1252), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (1250, 1252), True, 'import open3d as o3d\n'), ((1285, 1336), 'open3d.geometry.TriangleMesh.create_coordinate_frame', 'o3d.geometry.TriangleMesh.create_coordinate_frame', ([], {}), '()\n', (1334, 1336), True, 'import open3d as o3d\n'), ((2207, 2223), 'ROAR.utilities_module.vehicle_models.VehicleControl', 'VehicleControl', ([], {}), '()\n', (2221, 2223), False, 'from ROAR.utilities_module.vehicle_models import Vehicle, VehicleControl\n'), ((2504, 2526), 'numpy.asarray', 'np.asarray', (['pcd.points'], {}), '(pcd.points)\n', (2514, 2526), True, 'import numpy as np\n'), ((2544, 2566), 'numpy.asarray', 'np.asarray', (['pcd.colors'], {}), '(pcd.colors)\n', (2554, 2566), True, 'import numpy as np\n'), ((1760, 1785), 'numpy.where', 'np.where', (['(lane_mask < 0.5)'], {}), '(lane_mask < 0.5)\n', (1768, 1785), True, 'import numpy as np\n'), ((1929, 1951), 'numpy.asarray', 'np.asarray', (['pcd.points'], {}), '(pcd.points)\n', (1939, 1951), True, 'import numpy as np\n'), ((2710, 2735), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (2733, 2735), True, 'import open3d as o3d\n'), ((2766, 2800), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['points'], {}), '(points)\n', (2792, 2800), True, 'import open3d as o3d\n'), ((2831, 2865), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['colors'], {}), '(colors)\n', (2857, 2865), True, 'import open3d as o3d\n'), ((3499, 3533), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['points'], {}), '(points)\n', (3525, 3533), True, 'import open3d as o3d\n'), ((3564, 3598), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['colors'], {}), '(colors)\n', (3590, 3598), True, 'import open3d as o3d\n'), ((2623, 2646), 'numpy.mean', 'np.mean', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (2630, 2646), True, 'import numpy as np\n'), ((3103, 3126), 'numpy.mean', 'np.mean', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (3110, 3126), True, 'import numpy as np\n'), ((3835, 3858), 'numpy.mean', 'np.mean', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (3842, 3858), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
from PIL import Image
import glob
import os
def crear_folder():
if not os.path.exists('/opt/icons/'):
os.makedirs('/opt/icons/')
def guardar(imagen, filename):
save_path = '/opt/icons/' + filename
imagen.save(save_path, 'JPEG')
print(imagen.format, imagen.size)
def rotate_resize(imagen):
new_image = imagen.rotate(-90).resize((128,128))
return new_image
def main():
# Script on images/ dir
crear_folder()
for filename in glob.glob("ic_*"):
imagen = Image.open(filename).convert('RGB')
new_image = rotate_resize(imagen)
guardar(new_image, filename)
print("Done!")
if __name__ == "__main__":
main()
|
[
"PIL.Image.open",
"os.path.exists",
"os.makedirs",
"glob.glob"
] |
[((494, 511), 'glob.glob', 'glob.glob', (['"""ic_*"""'], {}), "('ic_*')\n", (503, 511), False, 'import glob\n'), ((99, 128), 'os.path.exists', 'os.path.exists', (['"""/opt/icons/"""'], {}), "('/opt/icons/')\n", (113, 128), False, 'import os\n'), ((138, 164), 'os.makedirs', 'os.makedirs', (['"""/opt/icons/"""'], {}), "('/opt/icons/')\n", (149, 164), False, 'import os\n'), ((530, 550), 'PIL.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (540, 550), False, 'from PIL import Image\n')]
|
#!/usr/bin/env python3
"""Scan serial ports for ping devices
Symlinks to detected devices are created under /dev/serial/ping/
This script needs root permission to create the symlinks
"""
import subprocess
import numpy as np
import rospy
from brping import PingDevice, PingParser, PingMessage
from brping.definitions import *
import serial
import socket
from collections import deque
from sensor_msgs.msg import Range, MultiEchoLaserScan, LaserEcho
class PingEnumerator:
def legacy_detect_ping1d(self, ping):
"""
Detects Ping1D devices without DEVICE_INFORMATION implemented
"""
firmware_version = ping.request(PING1D_FIRMWARE_VERSION)
if firmware_version is None:
return None
description = "/dev/serial/ping/Ping1D-id-{}-t-{}-m-{}-v-{}.{}".format (
firmware_version.src_device_id,
firmware_version.device_type,
firmware_version.device_model,
firmware_version.firmware_version_major,
firmware_version.firmware_version_minor
)
return description
def detect_device(self, dev):
"""
Attempts to detect the Ping device attached to serial port 'dev'
Returns the new path with encoded name if detected, or None if the
device was not detected
"""
print("Checking if " + dev + " is a Ping device...")
try:
ping = PingDevice()
ping.connect_serial("/dev/serial/by-id/" + dev, 115200)
except Exception as exception:
print("An exception has occurred: ", exception)
return None
if not ping.initialize():
return None
device_info = ping.request(COMMON_DEVICE_INFORMATION)
if not device_info:
return self.legacy_detect_ping1d(ping)
if device_info.device_type == 1:
description = "/dev/serial/ping/Ping1D-id-{}-r-{}-v-{}.{}.{}"
elif device_info.device_type == 2:
description = "/dev/serial/ping/Ping360-id-{}-r-{}-v-{}.{}.{}"
# Open device with 2M baud to setup Ping360
print("Setting baud to 2M...")
ser = serial.Serial("/dev/serial/by-id/" + dev, 2000000)
ser.send_break()
ser.write("UUUUUUU".encode())
ser.close()
self.set_low_latency(dev)
else:
return None
return description.format (
device_info.src_device_id,
device_info.device_revision,
device_info.firmware_version_major,
device_info.firmware_version_minor,
device_info.firmware_version_patch
)
def set_low_latency(self, dev):
"""
Receives /dev/serial/by-id/...
        maps it to ttyUSB and sets the latency_timer for the device
"""
raise NotImplementedError("This method currently not supported, requires root permissions.")
target_device = subprocess.check_output(' '.join(["readlink", "-f", "/dev/serial/by-id/%s" % dev]), shell=True)
device_name = target_device.decode().strip().split("/")[-1]
latency_file = "/sys/bus/usb-serial/devices/{0}/latency_timer".format(device_name)
with open(latency_file, 'w') as p:
p.write("1")
p.flush()
def make_symlink(self, origin, target):
"""
        follows target to the real device and links origin to it
        origin => target
        Returns True if successful
"""
raise NotImplementedError("This method currently not supported, requires root permissions.")
try:
# Follow link to actual device
target_device = subprocess.check_output(' '.join(["readlink", "-f", "/dev/serial/by-id/%s" % origin]), shell=True)
# Strip newline from output
target_device = target_device.decode().split('\n')[0]
# Create another link to it
subprocess.check_output(' '.join(["mkdir", "-p", "/dev/serial/ping"]), shell=True)
subprocess.check_output("ln -fs %s %s" % (
target_device,
target), shell=True)
print(origin, " linked to ", target)
return True
except subprocess.CalledProcessError as exception:
print(exception)
return False
def erase_old_symlinks(self):
"""
Erases all symlinks at "/dev/serial/ping/"
"""
raise NotImplementedError("This method currently not supported, requires root permissions.")
try:
subprocess.check_output(["rm", "-rf", "/dev/serial/ping"])
except subprocess.CalledProcessError as exception:
print(exception)
def list_serial_devices(self):
"""
Lists serial devices at "/dev/serial/by-id/"
"""
# Look for connected serial devices
try:
output = subprocess.check_output("ls /dev/serial/by-id", shell=True)
return output.decode().strip().split("\n")
except subprocess.CalledProcessError as exception:
print(exception)
return []
class PingDriver:
def __init__(self):
rospy.init_node("ping1d_driver_node")
self.ping_sensors = []
self.enumerator = PingEnumerator()
hz = rospy.Rate(1.0)
while not len(self.ping_sensors) and not rospy.is_shutdown():
self.ping_sensors = [f"/dev/serial/by-id/{dev}" for dev in self.enumerator.list_serial_devices()]
rospy.logerr_throttle(10.0, f"{rospy.get_name()} | Waiting for valid ping1d sensor to appear.")
hz.sleep()
## Messages that have the current distance measurement in the payload
self.distance_messages = [
PING1D_DISTANCE,
PING1D_DISTANCE_SIMPLE,
PING1D_PROFILE
]
## Parser to verify client comms
self.parser = PingParser()
self.range_publisher = rospy.Publisher("range", Range, queue_size=10)
self.profile_publisher = rospy.Publisher("profile", MultiEchoLaserScan, queue_size=10)
self.hz = rospy.Rate(15.0)
if not rospy.is_shutdown():
rospy.loginfo("Setting up serial device.")
self.device = PingDevice()
self.device.connect_serial(self.ping_sensors[0], 115200)
data = PingMessage(PING1D_CONTINUOUS_STOP)
data.pack_msg_data()
self.device.write(data.msg_data)
data = PingMessage(PING1D_SET_MODE_AUTO)
data.pack_msg_data()
self.device.write(data.msg_data)
data = PingMessage(PING1D_SET_RANGE)
data.scan_start = 200
data.scan_length = 30000
data.pack_msg_data()
self.device.write(data.msg_data)
## Digest incoming ping data
def parse(self, data: PingMessage):
range_msg = None
profile_msg = None
if data.message_id in self.distance_messages:
range_msg = Range()
range_msg.header.frame_id = "altimeter"
range_msg.header.stamp = rospy.Time.now()
range_msg.radiation_type = range_msg.ULTRASOUND
range_msg.field_of_view = 0.52
range_msg.max_range = (data.scan_start + data.scan_length) / 1000
range_msg.min_range = data.scan_start / 1000.0
if range_msg.min_range <= data.distance / 1000 <= range_msg.max_range:
range_msg.range = data.distance / 1000
if data.message_id == PING1D_PROFILE:
profile_msg = MultiEchoLaserScan()
profile_msg.header = range_msg.header
profile_msg.ranges = [LaserEcho(np.linspace(data.scan_start / 1000, data.scan_start / 1000 + data.scan_length / 1000, data.profile_data_length).tolist())]
profile_msg.range_min = data.scan_start / 1000.0
profile_msg.range_max = (data.scan_start + data.scan_length) / 1000
profile_msg.angle_increment = 0
profile_msg.angle_max = 0
profile_msg.angle_min = 0
profile_msg.intensities = [LaserEcho(np.frombuffer(data.profile_data, dtype=np.uint8).tolist())]
return range_msg, profile_msg
def send_ping1d_request(self):
data = PingMessage()
data.request_id = PING1D_DISTANCE
data.src_device_id = 0
data.pack_msg_data()
self.device.write(data.msg_data)
def run(self):
# read ping device from serial
try:
while not rospy.is_shutdown():
self.send_ping1d_request()
device_data = self.device.read()
if device_data is not None:
range_msg, profile_msg = self.parse(device_data)
if range_msg is not None:
self.range_publisher.publish(range_msg)
if profile_msg is not None:
self.profile_publisher.publish(profile_msg)
self.hz.sleep()
except rospy.ROSInterruptException:
pass
finally:
self.device.iodev.close()
class PingClient(object):
def __init__(self):
## Queued messages received from client
self.rx_msgs = deque([])
## Parser to verify client comms
self.parser = PingParser()
## Digest incoming client data
# @return None
def parse(self, data):
for b in bytearray(data):
if self.parser.parse_byte(b) == PingParser.NEW_MESSAGE:
self.rx_msgs.append(self.parser.rx_msg)
## Dequeue a message received from client
# @return None: if there are no comms in the queue
# @return PingMessage: the next ping message in the queue
def dequeue(self):
if len(self.rx_msgs) == 0:
return None
return self.rx_msgs.popleft()
class PingProxy(object):
    def __init__(self, device: serial.Serial, port: int, topic: str):
## A serial object for ping device comms
self.device = device
## UDP port number for server
self.port = port
## Publisher to send ROS range information on
self.range_msg = Range()
self.range_publisher = rospy.Publisher(topic, Range, queue_size=10)
## Connected client dictionary
self.clients = {}
## Socket to serve on
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.setblocking(False)
self.socket.bind(('0.0.0.0', self.port))
## Run proxy tasks
def run(self):
try:
data, address = self.socket.recvfrom(4096)
# new client
if address not in self.clients:
self.clients[address] = PingClient()
# digest data coming in from client
self.clients[address].parse(data)
        except BlockingIOError:  # non-blocking socket: no datagram waiting yet
pass # waiting for data
except Exception as e:
print("Error reading data", e)
# read ping device from serial
device_data = self.device.read(self.device.in_waiting)
# send ping device data to all clients via UDP
if device_data: # don't write empty data
for client in self.clients:
# print("writing to client", client)
self.socket.sendto(device_data, client)
# send all client comms to ping device
for client in self.clients:
c = self.clients[client]
msg = c.dequeue()
while msg is not None:
self.device.write(msg.msg_data)
msg = c.dequeue()
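# --- Editor's usage sketch (illustrative; not part of the original script) ---
# The module docstring above describes scanning serial ports for Ping devices;
# with the classes defined here, such a scan would look roughly like this:
if __name__ == "__main__":
    enumerator = PingEnumerator()
    for dev in enumerator.list_serial_devices():
        target = enumerator.detect_device(dev)
        if target is not None:
            print(dev, "->", target)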
|
[
"serial.Serial",
"brping.PingParser",
"rospy.Time.now",
"brping.PingDevice",
"numpy.frombuffer",
"subprocess.check_output",
"socket.socket",
"sensor_msgs.msg.MultiEchoLaserScan",
"rospy.Publisher",
"rospy.Rate",
"rospy.loginfo",
"sensor_msgs.msg.Range",
"rospy.is_shutdown",
"brping.PingMessage",
"rospy.init_node",
"numpy.linspace",
"rospy.get_name",
"collections.deque"
] |
[((5198, 5235), 'rospy.init_node', 'rospy.init_node', (['"""ping1d_driver_node"""'], {}), "('ping1d_driver_node')\n", (5213, 5235), False, 'import rospy\n'), ((5323, 5338), 'rospy.Rate', 'rospy.Rate', (['(1.0)'], {}), '(1.0)\n', (5333, 5338), False, 'import rospy\n'), ((5930, 5942), 'brping.PingParser', 'PingParser', ([], {}), '()\n', (5940, 5942), False, 'from brping import PingDevice, PingParser, PingMessage\n'), ((5975, 6021), 'rospy.Publisher', 'rospy.Publisher', (['"""range"""', 'Range'], {'queue_size': '(10)'}), "('range', Range, queue_size=10)\n", (5990, 6021), False, 'import rospy\n'), ((6055, 6116), 'rospy.Publisher', 'rospy.Publisher', (['"""profile"""', 'MultiEchoLaserScan'], {'queue_size': '(10)'}), "('profile', MultiEchoLaserScan, queue_size=10)\n", (6070, 6116), False, 'import rospy\n'), ((6135, 6151), 'rospy.Rate', 'rospy.Rate', (['(15.0)'], {}), '(15.0)\n', (6145, 6151), False, 'import rospy\n'), ((8278, 8291), 'brping.PingMessage', 'PingMessage', ([], {}), '()\n', (8289, 8291), False, 'from brping import PingDevice, PingParser, PingMessage\n'), ((9252, 9261), 'collections.deque', 'deque', (['[]'], {}), '([])\n', (9257, 9261), False, 'from collections import deque\n'), ((9326, 9338), 'brping.PingParser', 'PingParser', ([], {}), '()\n', (9336, 9338), False, 'from brping import PingDevice, PingParser, PingMessage\n'), ((10172, 10179), 'sensor_msgs.msg.Range', 'Range', ([], {}), '()\n', (10177, 10179), False, 'from sensor_msgs.msg import Range, MultiEchoLaserScan, LaserEcho\n'), ((10211, 10255), 'rospy.Publisher', 'rospy.Publisher', (['topic', 'Range'], {'queue_size': '(10)'}), '(topic, Range, queue_size=10)\n', (10226, 10255), False, 'import rospy\n'), ((10375, 10423), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (10388, 10423), False, 'import socket\n'), ((1430, 1442), 'brping.PingDevice', 'PingDevice', ([], {}), '()\n', (1440, 1442), False, 'from brping import PingDevice, PingParser, PingMessage\n'), ((4048, 4125), 'subprocess.check_output', 'subprocess.check_output', (["('ln -fs %s %s' % (target_device, target))"], {'shell': '(True)'}), "('ln -fs %s %s' % (target_device, target), shell=True)\n", (4071, 4125), False, 'import subprocess\n'), ((4582, 4640), 'subprocess.check_output', 'subprocess.check_output', (["['rm', '-rf', '/dev/serial/ping']"], {}), "(['rm', '-rf', '/dev/serial/ping'])\n", (4605, 4640), False, 'import subprocess\n'), ((4921, 4980), 'subprocess.check_output', 'subprocess.check_output', (['"""ls /dev/serial/by-id"""'], {'shell': '(True)'}), "('ls /dev/serial/by-id', shell=True)\n", (4944, 4980), False, 'import subprocess\n'), ((6168, 6187), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (6185, 6187), False, 'import rospy\n'), ((6201, 6243), 'rospy.loginfo', 'rospy.loginfo', (['"""Setting up serial device."""'], {}), "('Setting up serial device.')\n", (6214, 6243), False, 'import rospy\n'), ((6270, 6282), 'brping.PingDevice', 'PingDevice', ([], {}), '()\n', (6280, 6282), False, 'from brping import PingDevice, PingParser, PingMessage\n'), ((6371, 6406), 'brping.PingMessage', 'PingMessage', (['PING1D_CONTINUOUS_STOP'], {}), '(PING1D_CONTINUOUS_STOP)\n', (6382, 6406), False, 'from brping import PingDevice, PingParser, PingMessage\n'), ((6504, 6537), 'brping.PingMessage', 'PingMessage', (['PING1D_SET_MODE_AUTO'], {}), '(PING1D_SET_MODE_AUTO)\n', (6515, 6537), False, 'from brping import PingDevice, PingParser, PingMessage\n'), ((6635, 6664), 'brping.PingMessage', 
'PingMessage', (['PING1D_SET_RANGE'], {}), '(PING1D_SET_RANGE)\n', (6646, 6664), False, 'from brping import PingDevice, PingParser, PingMessage\n'), ((7017, 7024), 'sensor_msgs.msg.Range', 'Range', ([], {}), '()\n', (7022, 7024), False, 'from sensor_msgs.msg import Range, MultiEchoLaserScan, LaserEcho\n'), ((7114, 7130), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (7128, 7130), False, 'import rospy\n'), ((7581, 7601), 'sensor_msgs.msg.MultiEchoLaserScan', 'MultiEchoLaserScan', ([], {}), '()\n', (7599, 7601), False, 'from sensor_msgs.msg import Range, MultiEchoLaserScan, LaserEcho\n'), ((2186, 2236), 'serial.Serial', 'serial.Serial', (["('/dev/serial/by-id/' + dev)", '(2000000)'], {}), "('/dev/serial/by-id/' + dev, 2000000)\n", (2199, 2236), False, 'import serial\n'), ((5388, 5407), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (5405, 5407), False, 'import rospy\n'), ((8529, 8548), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (8546, 8548), False, 'import rospy\n'), ((5562, 5578), 'rospy.get_name', 'rospy.get_name', ([], {}), '()\n', (5576, 5578), False, 'import rospy\n'), ((7696, 7812), 'numpy.linspace', 'np.linspace', (['(data.scan_start / 1000)', '(data.scan_start / 1000 + data.scan_length / 1000)', 'data.profile_data_length'], {}), '(data.scan_start / 1000, data.scan_start / 1000 + data.\n scan_length / 1000, data.profile_data_length)\n', (7707, 7812), True, 'import numpy as np\n'), ((8129, 8177), 'numpy.frombuffer', 'np.frombuffer', (['data.profile_data'], {'dtype': 'np.uint8'}), '(data.profile_data, dtype=np.uint8)\n', (8142, 8177), True, 'import numpy as np\n')]
|
# BSD 3-Clause License
# Copyright (c) 2020, Instit<NAME>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import networkx as nx
import numpy as np
import scipy.sparse as sparse
class StateTransitionSubGraphs:
def __init__(self, A_sparse, x0):
self.subnetws = None
self.scc_submats = None
self.nonempty_subgraphs = None
self.sorted_vertices = None
self.cyclic_sorted_subgraphs = None
self.fcn_scc_subgraphs(A_sparse, x0)
def fcn_metagraph_scc(self, A_sparse_sub):
matr_size = A_sparse_sub.shape[0]
g_sub = nx.from_scipy_sparse_matrix(A_sparse_sub, create_using=nx.DiGraph())
g_sub.remove_edges_from(nx.selfloop_edges(g_sub))
# Here we reverse it only for debugging purpose
# The order shouldn't matter, but it's nice to have the same as matlab
scc_list = list(reversed(list(nx.strongly_connected_components(g_sub))))
# print("%d connected components" % len(scc_list))
num_verts_per_scc = []
scc_memb_per_vert = np.zeros((matr_size, 1))
for i, scc in enumerate(scc_list):
num_verts_per_scc.append(len(scc))
scc_memb_per_vert[list(scc),:] = i
# row, col = np.where((A_sparse_sub - np.diag(A_sparse_sub.diagonal())) > 0)
# Yet another trick to get the exact same results as matlab
# The difference is returning the list from parsing via columns or via rows, hopefully nothing critical
t_matr = (A_sparse_sub - sparse.diags(A_sparse_sub.diagonal())).transpose()
col, row, _ = sparse.find(t_matr > 0)
diff = scc_memb_per_vert[row] != scc_memb_per_vert[col]
row_sel = row[np.where(diff[:, 0])]
col_sel = col[np.where(diff[:, 0])]
A_metagraph = sparse.csr_matrix(
(np.array(A_sparse_sub[row_sel, col_sel]).flatten(),
(scc_memb_per_vert[row_sel][:, 0], scc_memb_per_vert[col_sel][:, 0])),
shape=(len(num_verts_per_scc), len(num_verts_per_scc))
)
metagraph = nx.from_scipy_sparse_matrix(A_metagraph, create_using=nx.DiGraph())
metagraph_ordering=np.array(list(nx.topological_sort(metagraph)))
terminal_scc_ind, _ = np.where(A_metagraph.sum(axis=1) == 0)
terminal_scc_pos = np.isin(metagraph_ordering, terminal_scc_ind)
nonterm_scc_num = len(num_verts_per_scc) - len(terminal_scc_ind)
scc_sup1 = [i for i, scc in enumerate(scc_list) if len(scc) > 1]
term_cycles_ind = set(scc_sup1).intersection(set(terminal_scc_ind))
where_terminal_scc_pos, = np.where(terminal_scc_pos)
if np.sum(np.logical_not(where_terminal_scc_pos>(nonterm_scc_num-1))) > 0:
nonterm_scc_inds = np.logical_not(np.isin(metagraph_ordering, terminal_scc_ind))
metagraph_ordering_terminal_bottom = np.concatenate([
metagraph_ordering[nonterm_scc_inds],
metagraph_ordering[terminal_scc_pos]
])
else:
metagraph_ordering_terminal_bottom = metagraph_ordering
if len(term_cycles_ind) > 0:
scc_cell_reordered = [scc_list[i] for i in metagraph_ordering_terminal_bottom]
# index of cells containing term cycles after reordering
term_cycles_ind, = np.where(np.isin(metagraph_ordering_terminal_bottom, np.array(list(term_cycles_ind))))
            # we need a cell of the indices of vertices within these terminal cycles
scc_cell_reordered_lengths = np.array([len(scc) for scc in scc_cell_reordered])
scc_cell_reordered_cumsum = np.cumsum(scc_cell_reordered_lengths)
cycle_first_verts = scc_cell_reordered_cumsum[term_cycles_ind] - scc_cell_reordered_lengths[term_cycles_ind];
cycle_last_verts = scc_cell_reordered_cumsum[term_cycles_ind] - 1
term_cycles_bounds = [np.concatenate([cycle_first_verts, cycle_last_verts])]
else:
term_cycles_ind = []
term_cycles_bounds = []
# reordered original vertices
vert_topol_sort = np.concatenate([list(scc_list[i]) for i in metagraph_ordering_terminal_bottom])
return vert_topol_sort, term_cycles_ind, A_metagraph, scc_list, term_cycles_bounds
def fcn_scc_subgraphs(self, A_sparse, x0):
# print("Indentifying SCCs")
B_sparse = sparse.csc_matrix(A_sparse)
B_sparse.setdiag(0)
nb_scc, labels = sparse.csgraph.connected_components(B_sparse, directed=True,connection='weak')
scc = [[] for _ in range(nb_scc)]
for i, label in enumerate(labels):
scc[label].append(i)
self.subnetws = scc
cell_subgraphs = []
self.scc_submats = []
self.nonempty_subgraphs = []
# print("Identifying SCCs in subgraphs")
for i, subnet in enumerate(self.subnetws):
cell_subgraphs.append(subnet)
# Slicing done it two steps : First the rows, which is the most efficient for csr sparse matrix
# then columns. I should probably dig deeper
t_sparse = A_sparse[subnet, :][:, subnet]
t_sparse.setdiag(0)
nb_scc, labels = sparse.csgraph.connected_components(t_sparse, directed=True,connection='strong')
scc = [[] for _ in range(nb_scc)]
for j, label in enumerate(labels):
scc[label].append(j)
self.scc_submats.append(scc)
if sum(x0[subnet]) > 0:
self.nonempty_subgraphs.append(i)
self.sorted_vertices = []
self.cyclic_sorted_subgraphs = []
counter = 0
for nonempty_subgraph in self.nonempty_subgraphs:
A_sparse_sub = A_sparse[self.subnetws[nonempty_subgraph], :][:, self.subnetws[nonempty_subgraph]]
if A_sparse_sub.shape[0] == len(self.scc_submats[nonempty_subgraph]):
t_g = nx.from_scipy_sparse_matrix(A_sparse_sub, create_using=nx.DiGraph())
t_g.remove_edges_from(nx.selfloop_edges(t_g))
self.sorted_vertices.append(list(nx.topological_sort(t_g)))
else:
# print("Cycles in STG")
# If entire graph is only one connected component, no need for re-ordering
if len(self.scc_submats[nonempty_subgraph]) == 1:
self.sorted_vertices.append(self.scc_submats[nonempty_subgraph])
else:
vert_topol_sort,term_cycles_ind,_,scc_cell,term_cycle_bounds=self.fcn_metagraph_scc(A_sparse_sub)
cycle_lengths = [len(scc) for scc in scc_cell]
a = np.zeros((max(cycle_lengths)))
for i in range(max(cycle_lengths)):
for j in cycle_lengths:
if j == i+1:
a[j-1] += 1
# print('Cycles of lenth: %s (%s times)' % (set(cycle_lengths), a[np.where(a>0)]) )
self.cyclic_sorted_subgraphs.append((vert_topol_sort, term_cycles_ind, term_cycle_bounds))
counter += 1
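# --- Editor's illustration (toy example, not part of the original module) ---
# The metagraph built by fcn_metagraph_scc is the condensation of the graph:
# each strongly connected component collapses to one node, and terminal SCCs
# (those with no outgoing edges) end up at the bottom of the topological order.
if __name__ == '__main__':
    toy = nx.DiGraph([(0, 1), (1, 2), (2, 1), (2, 3)])  # nodes 1 and 2 form a cycle
    condensed = nx.condensation(toy)                    # one node per SCC
    order = list(nx.topological_sort(condensed))
    terminal = [n for n in condensed.nodes if condensed.out_degree(n) == 0]
    print('SCC members:', nx.get_node_attributes(condensed, 'members'))
    print('topological order:', order, 'terminal SCCs:', terminal)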
|
[
"numpy.isin",
"scipy.sparse.find",
"numpy.logical_not",
"numpy.zeros",
"networkx.topological_sort",
"networkx.selfloop_edges",
"numpy.cumsum",
"scipy.sparse.csc_matrix",
"numpy.where",
"numpy.array",
"scipy.sparse.csgraph.connected_components",
"networkx.strongly_connected_components",
"networkx.DiGraph",
"numpy.concatenate"
] |
[((2565, 2589), 'numpy.zeros', 'np.zeros', (['(matr_size, 1)'], {}), '((matr_size, 1))\n', (2573, 2589), True, 'import numpy as np\n'), ((3138, 3161), 'scipy.sparse.find', 'sparse.find', (['(t_matr > 0)'], {}), '(t_matr > 0)\n', (3149, 3161), True, 'import scipy.sparse as sparse\n'), ((3860, 3905), 'numpy.isin', 'np.isin', (['metagraph_ordering', 'terminal_scc_ind'], {}), '(metagraph_ordering, terminal_scc_ind)\n', (3867, 3905), True, 'import numpy as np\n'), ((4181, 4207), 'numpy.where', 'np.where', (['terminal_scc_pos'], {}), '(terminal_scc_pos)\n', (4189, 4207), True, 'import numpy as np\n'), ((6019, 6046), 'scipy.sparse.csc_matrix', 'sparse.csc_matrix', (['A_sparse'], {}), '(A_sparse)\n', (6036, 6046), True, 'import scipy.sparse as sparse\n'), ((6100, 6179), 'scipy.sparse.csgraph.connected_components', 'sparse.csgraph.connected_components', (['B_sparse'], {'directed': '(True)', 'connection': '"""weak"""'}), "(B_sparse, directed=True, connection='weak')\n", (6135, 6179), True, 'import scipy.sparse as sparse\n'), ((2186, 2210), 'networkx.selfloop_edges', 'nx.selfloop_edges', (['g_sub'], {}), '(g_sub)\n', (2203, 2210), True, 'import networkx as nx\n'), ((3258, 3278), 'numpy.where', 'np.where', (['diff[:, 0]'], {}), '(diff[:, 0])\n', (3266, 3278), True, 'import numpy as np\n'), ((3302, 3322), 'numpy.where', 'np.where', (['diff[:, 0]'], {}), '(diff[:, 0])\n', (3310, 3322), True, 'import numpy as np\n'), ((4434, 4531), 'numpy.concatenate', 'np.concatenate', (['[metagraph_ordering[nonterm_scc_inds], metagraph_ordering[terminal_scc_pos]]'], {}), '([metagraph_ordering[nonterm_scc_inds], metagraph_ordering[\n terminal_scc_pos]])\n', (4448, 4531), True, 'import numpy as np\n'), ((5189, 5226), 'numpy.cumsum', 'np.cumsum', (['scc_cell_reordered_lengths'], {}), '(scc_cell_reordered_lengths)\n', (5198, 5226), True, 'import numpy as np\n'), ((6882, 6968), 'scipy.sparse.csgraph.connected_components', 'sparse.csgraph.connected_components', (['t_sparse'], {'directed': '(True)', 'connection': '"""strong"""'}), "(t_sparse, directed=True, connection=\n 'strong')\n", (6917, 6968), True, 'import scipy.sparse as sparse\n'), ((2140, 2152), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (2150, 2152), True, 'import networkx as nx\n'), ((3667, 3679), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (3677, 3679), True, 'import networkx as nx\n'), ((3722, 3752), 'networkx.topological_sort', 'nx.topological_sort', (['metagraph'], {}), '(metagraph)\n', (3741, 3752), True, 'import networkx as nx\n'), ((4227, 4287), 'numpy.logical_not', 'np.logical_not', (['(where_terminal_scc_pos > nonterm_scc_num - 1)'], {}), '(where_terminal_scc_pos > nonterm_scc_num - 1)\n', (4241, 4287), True, 'import numpy as np\n'), ((4338, 4383), 'numpy.isin', 'np.isin', (['metagraph_ordering', 'terminal_scc_ind'], {}), '(metagraph_ordering, terminal_scc_ind)\n', (4345, 4383), True, 'import numpy as np\n'), ((5487, 5540), 'numpy.concatenate', 'np.concatenate', (['[cycle_first_verts, cycle_last_verts]'], {}), '([cycle_first_verts, cycle_last_verts])\n', (5501, 5540), True, 'import numpy as np\n'), ((2394, 2433), 'networkx.strongly_connected_components', 'nx.strongly_connected_components', (['g_sub'], {}), '(g_sub)\n', (2426, 2433), True, 'import networkx as nx\n'), ((7758, 7780), 'networkx.selfloop_edges', 'nx.selfloop_edges', (['t_g'], {}), '(t_g)\n', (7775, 7780), True, 'import networkx as nx\n'), ((3379, 3419), 'numpy.array', 'np.array', (['A_sparse_sub[row_sel, col_sel]'], {}), '(A_sparse_sub[row_sel, col_sel])\n', (3387, 3419), True, 
'import numpy as np\n'), ((7706, 7718), 'networkx.DiGraph', 'nx.DiGraph', ([], {}), '()\n', (7716, 7718), True, 'import networkx as nx\n'), ((7831, 7855), 'networkx.topological_sort', 'nx.topological_sort', (['t_g'], {}), '(t_g)\n', (7850, 7855), True, 'import networkx as nx\n')]
|
"""Communication protocol stacks for easy abstractions of communication links."""
# Builtins
# Packages
from phyllo.protocol.application.stacks import make_preset_stack as make_preset_application
from phyllo.protocol.application.stacks import make_pubsub
from phyllo.protocol.communication import AutomaticStack, PRESET_STACK_TYPES
from phyllo.protocol.transport.stacks import make_preset_stack as make_preset_transport
from phyllo.protocol.transport.stacks import make_stack as make_transport
# Protocol stacks
def make_stack(
transport_stack=make_transport, application_stack=make_pubsub,
stack=AutomaticStack, name='Protocol'
):
"""Make a protocol stack."""
stacks = []
if transport_stack is not None:
if callable(transport_stack):
transport_stack = transport_stack()
stacks.append(transport_stack)
if application_stack is not None:
if callable(application_stack):
application_stack = application_stack()
stacks.append(application_stack)
if not stacks:
raise ValueError('Cannot make an empty protocol stack!')
return stack(*stacks, name=name)
# Preset stacks
def make_preset_stack(
transport_medium='stream', transport_logical='minimal', application='pubsub',
stack='automatic', name='Protocol'
):
"""Make a protocol stack specified by preset names."""
transport = make_preset_transport(
medium=transport_medium, logical=transport_logical, stack=stack
)
application = make_preset_application(application=application, stack=stack)
return make_stack(
transport_stack=transport, application_stack=application,
stack=PRESET_STACK_TYPES[stack], name=name
)
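# --- Editor's usage sketch (illustrative; the preset names are the defaults above) ---
# Building the default protocol stack from presets, as the docstrings describe:
if __name__ == '__main__':
    protocol = make_preset_stack(
        transport_medium='stream', transport_logical='minimal', application='pubsub'
    )
    print(protocol)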
|
[
"phyllo.protocol.transport.stacks.make_preset_stack",
"phyllo.protocol.application.stacks.make_preset_stack"
] |
[((1390, 1480), 'phyllo.protocol.transport.stacks.make_preset_stack', 'make_preset_transport', ([], {'medium': 'transport_medium', 'logical': 'transport_logical', 'stack': 'stack'}), '(medium=transport_medium, logical=transport_logical,\n stack=stack)\n', (1411, 1480), True, 'from phyllo.protocol.transport.stacks import make_preset_stack as make_preset_transport\n'), ((1509, 1570), 'phyllo.protocol.application.stacks.make_preset_stack', 'make_preset_application', ([], {'application': 'application', 'stack': 'stack'}), '(application=application, stack=stack)\n', (1532, 1570), True, 'from phyllo.protocol.application.stacks import make_preset_stack as make_preset_application\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 27 22:04:58 2020
@author: zhangjun
"""
import numpy as np
class perceptron:
def __init__(self):
self.alpha = None
self.b = None
self.w = None
def train(self, x, y, learning_rate=1):
        self.alpha = np.zeros(x.shape[0])  # dual coefficients, one per training sample
        self.b = np.zeros(1)               # bias term
        G = np.dot(x,x.T)                  # Gram matrix of the training samples
while True:
index_ms = 0
for index,x_i in enumerate(x):
index_s = y[index]*(np.sum(np.dot(self.alpha*y,G[:,index]))+ self.b)
if index_s<=0:
self.alpha[index] = self.alpha[index] + learning_rate
self.b = self.b + learning_rate*y[index]
break
index_ms = index_ms + 1
            self.w = np.dot(self.alpha.T*y,x)  # recover primal weights from the dual coefficients
print (self.alpha,self.w,self.b)
if index_ms==x.shape[0]:
break
def prediction(self,x_pred):
y_pred = np.zeros(x_pred.shape[0])
for index,x_i in enumerate(x_pred):
y_pred[index] = np.sum(self.w*x_i) + self.b
if y_pred[index]>0:
y_pred[index] = 1
else:
y_pred[index] = -1
return y_pred
if __name__ == '__main__':
x = np.array([[3,3],[4,3],[1,1]])
y = np.array([1,1,-1])
Model = perceptron()
Model.train(x,y,learning_rate=1)
y_pred = Model.prediction(x)
print ('w,b=',Model.w,Model.b)
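    # Editor's note (illustrative check, not in the original script): once the
    # perceptron has converged, every training point should have a positive
    # functional margin y_i * (w . x_i + b).
    margins = y * (np.dot(x, Model.w) + Model.b)
    print('functional margins:', margins, 'all positive:', np.all(margins > 0))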
|
[
"numpy.zeros",
"numpy.dot",
"numpy.array",
"numpy.sum"
] |
[((1480, 1514), 'numpy.array', 'np.array', (['[[3, 3], [4, 3], [1, 1]]'], {}), '([[3, 3], [4, 3], [1, 1]])\n', (1488, 1514), True, 'import numpy as np\n'), ((1519, 1539), 'numpy.array', 'np.array', (['[1, 1, -1]'], {}), '([1, 1, -1])\n', (1527, 1539), True, 'import numpy as np\n'), ((405, 425), 'numpy.zeros', 'np.zeros', (['x.shape[0]'], {}), '(x.shape[0])\n', (413, 425), True, 'import numpy as np\n'), ((444, 455), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (452, 455), True, 'import numpy as np\n'), ((469, 483), 'numpy.dot', 'np.dot', (['x', 'x.T'], {}), '(x, x.T)\n', (475, 483), True, 'import numpy as np\n'), ((1167, 1192), 'numpy.zeros', 'np.zeros', (['x_pred.shape[0]'], {}), '(x_pred.shape[0])\n', (1175, 1192), True, 'import numpy as np\n'), ((957, 984), 'numpy.dot', 'np.dot', (['(self.alpha.T * y)', 'x'], {}), '(self.alpha.T * y, x)\n', (963, 984), True, 'import numpy as np\n'), ((1267, 1287), 'numpy.sum', 'np.sum', (['(self.w * x_i)'], {}), '(self.w * x_i)\n', (1273, 1287), True, 'import numpy as np\n'), ((622, 657), 'numpy.dot', 'np.dot', (['(self.alpha * y)', 'G[:, index]'], {}), '(self.alpha * y, G[:, index])\n', (628, 657), True, 'import numpy as np\n')]
|
import pymssql
import logging
import sys
logger = logging.getLogger(__name__)
class DbaseException(Exception):
pass
class SelectorMSSQL:
def __init__(self, device, db_setting):
self.cursor = None
self.device = device
try:
self.connection = pymssql.connect(server=db_setting['server'],
port=db_setting['port'],
user=db_setting['user'],
password=db_setting['password'],
database=db_setting['database'])
except pymssql.OperationalError as err:
logger.error(f"[{device}] Не удалось подключиться к БД")
sys.exit(1)
# raise DbaseException(f'[{self.device}] Dbase server connection failed {err}')
def __exit__(self, exc_type, exc_val, exc_tb):
self.connection.close()
def raw_query(self, query):
self.cursor = self.connection.cursor()
try:
self.cursor.execute(query)
except pymssql.ProgrammingError:
raise DbaseException(
f'[{self.device}] SQL ProgrammingError at dbase.select function. Error in sql select: {query}')
return self.cursor
|
[
"sys.exit",
"logging.getLogger",
"pymssql.connect"
] |
[((51, 78), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (68, 78), False, 'import logging\n'), ((289, 459), 'pymssql.connect', 'pymssql.connect', ([], {'server': "db_setting['server']", 'port': "db_setting['port']", 'user': "db_setting['user']", 'password': "db_setting['password']", 'database': "db_setting['database']"}), "(server=db_setting['server'], port=db_setting['port'], user=\n db_setting['user'], password=db_setting['password'], database=\n db_setting['database'])\n", (304, 459), False, 'import pymssql\n'), ((763, 774), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (771, 774), False, 'import sys\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Test for ff_builder"""
from __future__ import absolute_import
from __future__ import print_function
from aiida.orm import Dict
from aiida.plugins import CalculationFactory
from aiida.engine import run_get_node
# Calculation objects
FFBuilder = CalculationFactory("lsmo.ff_builder") # pylint: disable=invalid-name
ff_parameters = Dict( # pylint: disable=invalid-name
dict={
'ff_framework': 'UFF',
'ff_molecules': {
'CO2': 'TraPPE',
'N2': 'TraPPE',
},
'shifted': False,
'tail_corrections': True,
'mixing_rule': 'Lorentz-Berthelot',
'separate_interactions': True
})
results, node = run_get_node(FFBuilder, ff_parameters) # pylint: disable=invalid-name
print("Terminated ff_builder calcfunction, pk:", node.pk)
for key, val in results.items():
#filepath = os.path.join(val._repository._get_base_folder().abspath, val.filename)
print("Output:", val.pk, key)
|
[
"aiida.orm.Dict",
"aiida.plugins.CalculationFactory",
"aiida.engine.run_get_node"
] |
[((294, 331), 'aiida.plugins.CalculationFactory', 'CalculationFactory', (['"""lsmo.ff_builder"""'], {}), "('lsmo.ff_builder')\n", (312, 331), False, 'from aiida.plugins import CalculationFactory\n'), ((381, 585), 'aiida.orm.Dict', 'Dict', ([], {'dict': "{'ff_framework': 'UFF', 'ff_molecules': {'CO2': 'TraPPE', 'N2': 'TraPPE'},\n 'shifted': False, 'tail_corrections': True, 'mixing_rule':\n 'Lorentz-Berthelot', 'separate_interactions': True}"}), "(dict={'ff_framework': 'UFF', 'ff_molecules': {'CO2': 'TraPPE', 'N2':\n 'TraPPE'}, 'shifted': False, 'tail_corrections': True, 'mixing_rule':\n 'Lorentz-Berthelot', 'separate_interactions': True})\n", (385, 585), False, 'from aiida.orm import Dict\n'), ((720, 758), 'aiida.engine.run_get_node', 'run_get_node', (['FFBuilder', 'ff_parameters'], {}), '(FFBuilder, ff_parameters)\n', (732, 758), False, 'from aiida.engine import run_get_node\n')]
|
#!/usr/bin/env python3
from aws_cdk import core
from aws_cdk.aws_s3 import Bucket
from s3_share.s3_share_stack import S3ShareStack
app = core.App()
S3ShareStack(app, "s3-share")
app.synth()
|
[
"aws_cdk.core.App",
"s3_share.s3_share_stack.S3ShareStack"
] |
[((140, 150), 'aws_cdk.core.App', 'core.App', ([], {}), '()\n', (148, 150), False, 'from aws_cdk import core\n'), ((152, 181), 's3_share.s3_share_stack.S3ShareStack', 'S3ShareStack', (['app', '"""s3-share"""'], {}), "(app, 's3-share')\n", (164, 181), False, 'from s3_share.s3_share_stack import S3ShareStack\n')]
|
# Generated by Django 2.2.6 on 2020-01-28 12:30
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('assistants', '0007_dummyassistant'),
]
operations = [
migrations.DeleteModel(
name='DummyAssistant',
),
]
|
[
"django.db.migrations.DeleteModel"
] |
[((226, 271), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""DummyAssistant"""'}), "(name='DummyAssistant')\n", (248, 271), False, 'from django.db import migrations\n')]
|
# Generated by Django 3.0.8 on 2020-07-11 10:44
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('auctions', '0002_bids_listings'),
]
operations = [
migrations.RenameModel(
old_name='Bids',
new_name='Bid',
),
migrations.RenameModel(
old_name='Listings',
new_name='Listing',
),
]
|
[
"django.db.migrations.RenameModel"
] |
[((223, 278), 'django.db.migrations.RenameModel', 'migrations.RenameModel', ([], {'old_name': '"""Bids"""', 'new_name': '"""Bid"""'}), "(old_name='Bids', new_name='Bid')\n", (245, 278), False, 'from django.db import migrations\n'), ((323, 386), 'django.db.migrations.RenameModel', 'migrations.RenameModel', ([], {'old_name': '"""Listings"""', 'new_name': '"""Listing"""'}), "(old_name='Listings', new_name='Listing')\n", (345, 386), False, 'from django.db import migrations\n')]
|
import numpy as np
import pylab as pl
from sklearn import mixture
np.random.seed(0)
#C1 = np.array([[3, -2.7], [1.5, 2.7]])
#C2 = np.array([[1, 2.0], [-1.5, 1.7]])
#
#X_train = np.r_[
# np.random.multivariate_normal((-7, -7), C1, size=7),
# np.random.multivariate_normal((7, 7), C2, size=7),
#]
X_train = np.r_[
np.array([[0,0],[0,1],[2,0],[3,2],[3,3],[2,2],[2,0]]),
np.array([[7,7],[8,6],[9,7],[8,10],[7,10],[8,9],[7,11]]),
]
print(X_train)
clf = mixture.GaussianMixture(n_components=2, covariance_type='full')
clf.weights_ = [2,1]  # note: fit() below re-estimates the mixture weights, so this assignment is overwritten
clf.fit(X_train)
#define g1(x, y) and g2(x, y)
def g1(x, y):
print("x = {},y = {} for g1".format(x,y))
return clf.predict_proba(np.column_stack((x, y)))[:, 0]
def g2(x, y):
print("x = {},y = {} for g2".format(x,y))
return clf.predict_proba(np.column_stack((x, y)))[:, 1]
X, Y = np.mgrid[-15:13:500j, -15:13:500j]
x = X.ravel()
y = Y.ravel()
p = (g1(x, y) - g2(x, y)).reshape(X.shape)  # posterior difference; zero where both components are equally likely
pl.scatter(X_train[:, 0], X_train[:, 1])
pl.contour(X, Y, p, levels=[0])  # decision boundary of the fitted mixture
pl.show()
|
[
"pylab.contour",
"pylab.show",
"numpy.random.seed",
"sklearn.mixture.GaussianMixture",
"pylab.scatter",
"numpy.array",
"numpy.column_stack"
] |
[((67, 84), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (81, 84), True, 'import numpy as np\n'), ((470, 533), 'sklearn.mixture.GaussianMixture', 'mixture.GaussianMixture', ([], {'n_components': '(2)', 'covariance_type': '"""full"""'}), "(n_components=2, covariance_type='full')\n", (493, 533), False, 'from sklearn import mixture\n'), ((961, 1001), 'pylab.scatter', 'pl.scatter', (['X_train[:, 0]', 'X_train[:, 1]'], {}), '(X_train[:, 0], X_train[:, 1])\n', (971, 1001), True, 'import pylab as pl\n'), ((1002, 1033), 'pylab.contour', 'pl.contour', (['X', 'Y', 'p'], {'levels': '[0]'}), '(X, Y, p, levels=[0])\n', (1012, 1033), True, 'import pylab as pl\n'), ((1034, 1043), 'pylab.show', 'pl.show', ([], {}), '()\n', (1041, 1043), True, 'import pylab as pl\n'), ((324, 390), 'numpy.array', 'np.array', (['[[0, 0], [0, 1], [2, 0], [3, 2], [3, 3], [2, 2], [2, 0]]'], {}), '([[0, 0], [0, 1], [2, 0], [3, 2], [3, 3], [2, 2], [2, 0]])\n', (332, 390), True, 'import numpy as np\n'), ((383, 452), 'numpy.array', 'np.array', (['[[7, 7], [8, 6], [9, 7], [8, 10], [7, 10], [8, 9], [7, 11]]'], {}), '([[7, 7], [8, 6], [9, 7], [8, 10], [7, 10], [8, 9], [7, 11]])\n', (391, 452), True, 'import numpy as np\n'), ((693, 716), 'numpy.column_stack', 'np.column_stack', (['(x, y)'], {}), '((x, y))\n', (708, 716), True, 'import numpy as np\n'), ((814, 837), 'numpy.column_stack', 'np.column_stack', (['(x, y)'], {}), '((x, y))\n', (829, 837), True, 'import numpy as np\n')]
|
"""
test_properties
~~~~~~~~~~~~~~~
This module implements tests for the properties module.
"""
import pytest
from binalyzer_core import (
ValueProperty,
ReferenceProperty,
StretchSizeProperty,
Template,
)
def test_reference_property_is_read_only():
property = ReferenceProperty(Template(), 'invalid_name')
with pytest.raises(RuntimeError):
property.value = 0
def test_value_property():
value_property0 = ValueProperty()
value_property1 = ValueProperty(42)
assert value_property0.value == 0
assert value_property1.value == 42
def test_sizing_stretch_without_predecessors():
template_a = Template(name='a')
template_c = Template(name='c', parent=template_a)
template_d = Template(name='d', parent=template_a)
template_a.size = 10
template_c.size_property = StretchSizeProperty(template_c)
template_d.size = 4
assert template_c.size == 6
def test_sizing_stretch_without_successors():
template_a = Template(name='a')
template_b = Template(name='b', parent=template_a)
template_c = Template(name='c', parent=template_a)
template_a.size = 10
template_b.size = 1
template_c.size_property = StretchSizeProperty(template_c)
assert template_c.size == 9
def test_sizing_stretch_with_siblings():
template_a = Template(name='a')
template_b = Template(name='b', parent=template_a)
template_c = Template(name='c', parent=template_a)
template_d = Template(name='d', parent=template_a)
template_a.size = 10
template_b.size = 1
template_c.size_property = StretchSizeProperty(template_c)
template_d.size = 4
assert template_c.size == 5
def test_sizing_stretch_without_siblings():
template_a = Template(name='a')
template_c = Template(name='c', parent=template_a)
template_a.size = 10
template_c.size_property = StretchSizeProperty(template_c)
assert template_c.size == 10
|
[
"binalyzer_core.ValueProperty",
"pytest.raises",
"binalyzer_core.Template",
"binalyzer_core.StretchSizeProperty"
] |
[((459, 474), 'binalyzer_core.ValueProperty', 'ValueProperty', ([], {}), '()\n', (472, 474), False, 'from binalyzer_core import ValueProperty, ReferenceProperty, StretchSizeProperty, Template\n'), ((497, 514), 'binalyzer_core.ValueProperty', 'ValueProperty', (['(42)'], {}), '(42)\n', (510, 514), False, 'from binalyzer_core import ValueProperty, ReferenceProperty, StretchSizeProperty, Template\n'), ((660, 678), 'binalyzer_core.Template', 'Template', ([], {'name': '"""a"""'}), "(name='a')\n", (668, 678), False, 'from binalyzer_core import ValueProperty, ReferenceProperty, StretchSizeProperty, Template\n'), ((696, 733), 'binalyzer_core.Template', 'Template', ([], {'name': '"""c"""', 'parent': 'template_a'}), "(name='c', parent=template_a)\n", (704, 733), False, 'from binalyzer_core import ValueProperty, ReferenceProperty, StretchSizeProperty, Template\n'), ((751, 788), 'binalyzer_core.Template', 'Template', ([], {'name': '"""d"""', 'parent': 'template_a'}), "(name='d', parent=template_a)\n", (759, 788), False, 'from binalyzer_core import ValueProperty, ReferenceProperty, StretchSizeProperty, Template\n'), ((845, 876), 'binalyzer_core.StretchSizeProperty', 'StretchSizeProperty', (['template_c'], {}), '(template_c)\n', (864, 876), False, 'from binalyzer_core import ValueProperty, ReferenceProperty, StretchSizeProperty, Template\n'), ((998, 1016), 'binalyzer_core.Template', 'Template', ([], {'name': '"""a"""'}), "(name='a')\n", (1006, 1016), False, 'from binalyzer_core import ValueProperty, ReferenceProperty, StretchSizeProperty, Template\n'), ((1034, 1071), 'binalyzer_core.Template', 'Template', ([], {'name': '"""b"""', 'parent': 'template_a'}), "(name='b', parent=template_a)\n", (1042, 1071), False, 'from binalyzer_core import ValueProperty, ReferenceProperty, StretchSizeProperty, Template\n'), ((1089, 1126), 'binalyzer_core.Template', 'Template', ([], {'name': '"""c"""', 'parent': 'template_a'}), "(name='c', parent=template_a)\n", (1097, 1126), False, 'from binalyzer_core import ValueProperty, ReferenceProperty, StretchSizeProperty, Template\n'), ((1207, 1238), 'binalyzer_core.StretchSizeProperty', 'StretchSizeProperty', (['template_c'], {}), '(template_c)\n', (1226, 1238), False, 'from binalyzer_core import ValueProperty, ReferenceProperty, StretchSizeProperty, Template\n'), ((1331, 1349), 'binalyzer_core.Template', 'Template', ([], {'name': '"""a"""'}), "(name='a')\n", (1339, 1349), False, 'from binalyzer_core import ValueProperty, ReferenceProperty, StretchSizeProperty, Template\n'), ((1367, 1404), 'binalyzer_core.Template', 'Template', ([], {'name': '"""b"""', 'parent': 'template_a'}), "(name='b', parent=template_a)\n", (1375, 1404), False, 'from binalyzer_core import ValueProperty, ReferenceProperty, StretchSizeProperty, Template\n'), ((1422, 1459), 'binalyzer_core.Template', 'Template', ([], {'name': '"""c"""', 'parent': 'template_a'}), "(name='c', parent=template_a)\n", (1430, 1459), False, 'from binalyzer_core import ValueProperty, ReferenceProperty, StretchSizeProperty, Template\n'), ((1477, 1514), 'binalyzer_core.Template', 'Template', ([], {'name': '"""d"""', 'parent': 'template_a'}), "(name='d', parent=template_a)\n", (1485, 1514), False, 'from binalyzer_core import ValueProperty, ReferenceProperty, StretchSizeProperty, Template\n'), ((1595, 1626), 'binalyzer_core.StretchSizeProperty', 'StretchSizeProperty', (['template_c'], {}), '(template_c)\n', (1614, 1626), False, 'from binalyzer_core import ValueProperty, ReferenceProperty, StretchSizeProperty, Template\n'), ((1746, 1764), 
'binalyzer_core.Template', 'Template', ([], {'name': '"""a"""'}), "(name='a')\n", (1754, 1764), False, 'from binalyzer_core import ValueProperty, ReferenceProperty, StretchSizeProperty, Template\n'), ((1782, 1819), 'binalyzer_core.Template', 'Template', ([], {'name': '"""c"""', 'parent': 'template_a'}), "(name='c', parent=template_a)\n", (1790, 1819), False, 'from binalyzer_core import ValueProperty, ReferenceProperty, StretchSizeProperty, Template\n'), ((1876, 1907), 'binalyzer_core.StretchSizeProperty', 'StretchSizeProperty', (['template_c'], {}), '(template_c)\n', (1895, 1907), False, 'from binalyzer_core import ValueProperty, ReferenceProperty, StretchSizeProperty, Template\n'), ((315, 325), 'binalyzer_core.Template', 'Template', ([], {}), '()\n', (323, 325), False, 'from binalyzer_core import ValueProperty, ReferenceProperty, StretchSizeProperty, Template\n'), ((352, 379), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (365, 379), False, 'import pytest\n')]
|
import sys, time
num = 1
try:
while num<=10:
print(num)
num += 1
time.sleep(1)
except KeyboardInterrupt:
print('exit')
sys.exit(0)
else:
print('complete')
finally:
print('Goodbye Python')
|
[
"sys.exit",
"time.sleep"
] |
[((94, 107), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (104, 107), False, 'import sys, time\n'), ((156, 167), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (164, 167), False, 'import sys, time\n')]
|
#!/usr/bin/env python3
import os
import sys
import toml
from .utils import Utils
DEFAULT_FILE_CONTENT = [
"indent_style = 'spaces'",
"indent_size = 4",
"max_len_line = 80",
"max_size_function = 20",
"max_function_in_file = 5",
"epitech_header = true",
"return_values_in_parenthese = true",
'forbidden_functions = []',
"forbidden_comments_in_functions = true",
"space_after_keyword = true",
"space_after_coma = true",
"requiere_void_when_no_args = true",
"max_parameters_to_functions = 4",
"max_variable_per_function = -1",
"brackets_style = 'end_of_line'",
"additionnal_types = []",
"excluded_files = []"
]
class Config:
def __init__(self):
self.settings = {}
self.config_file = ".normi.toml"
def parse_config(self):
content = Utils.get_file_content(self.config_file)
        if content is None:
self.settings = toml.loads("\n".join(DEFAULT_FILE_CONTENT))
print("Using default configuration")
else:
self.settings = toml.loads(content)
@classmethod
    def init_config(cls):
config_file = ".normi.toml"
if os.path.exists(config_file):
sys.exit(f"{config_file} already exists, can't init the file")
try:
f = open(config_file, 'w')
        except OSError:
sys.exit(f"Could not create file {config_file}")
for line in DEFAULT_FILE_CONTENT:
f.write(line + '\n')
f.close()
print(f"Initialized {config_file} with success")
def get(self, param):
return self.settings[param]
|
[
"toml.loads",
"os.path.exists",
"sys.exit"
] |
[((1175, 1202), 'os.path.exists', 'os.path.exists', (['config_file'], {}), '(config_file)\n', (1189, 1202), False, 'import os\n'), ((1063, 1082), 'toml.loads', 'toml.loads', (['content'], {}), '(content)\n', (1073, 1082), False, 'import toml\n'), ((1216, 1278), 'sys.exit', 'sys.exit', (['f"""{config_file} already exists, can\'t init the file"""'], {}), '(f"{config_file} already exists, can\'t init the file")\n', (1224, 1278), False, 'import sys\n'), ((1359, 1407), 'sys.exit', 'sys.exit', (['f"""Could not create file {config_file}"""'], {}), "(f'Could not create file {config_file}')\n", (1367, 1407), False, 'import sys\n')]
|
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
from .helper import exp_fit_func, inverse_exp_func, exp_func
def exp_curve_fit_(x_range, ln_y_range):
popc, pcov = curve_fit(exp_fit_func, x_range, ln_y_range)
ln_a, b = popc
a = np.exp(ln_a)
return a, b
def get_interm_zip_features_(ynew, _s4, _p4, _e1):
start_times = []
peak_times = []
end_times = []
peak_intensities = []
for i in range(len(_s4)):
if (_p4[i] - _s4[i] > 0) and (_e1[i] - _p4[i] > 0):
start_times.append(_s4[i])
peak_times.append(_p4[i])
end_times.append(_e1[i])
peak_intensities.append(ynew[_p4[i]])
return start_times, peak_times, end_times, peak_intensities
def get_interm_zip_(h1, h2, h3, h4):
_zip = pd.DataFrame(zip(h1, h2, h3, h4))
_zip.columns = ["start_time", "peak_time", "end_time", "peak_intensity"]
return _zip
def get_final_zip_features(xnew, ynew, _zip):
st = _zip["start_time"]
pt = _zip["peak_time"]
et = _zip["end_time"]
pi = _zip["peak_intensity"]
y_min = np.min(ynew)
final_st = []
final_pt = []
final_et = []
est_et = []
final_si = []
final_pi = []
final_err = []
final_bc = []
_class = []
for i in range(len(st)):
x_range = [int(xnew[j] - xnew[pt[i]]) for j in range(pt[i], et[i])]
ln_y_range = [np.log(ynew[j]) for j in range(pt[i], et[i])]
try:
popc, pcov = curve_fit(exp_fit_func, x_range, ln_y_range)
ln_a, b = popc
a = np.exp(ln_a)
# the 7th filter, can't allow increasing exponential so-called-flares!
# _calc_et is estimated end time from the analytical function fitted
if b < 0:
continue
_calc_et = inverse_exp_func(ynew[st[i]], a, b)
final_st.append(st[i])
final_pt.append(pt[i])
final_et.append(et[i])
final_pi.append(pi[i])
final_si.append(ynew[st[i]])
est_et.append(_calc_et + pt[i])
final_bc.append((ynew[st[i]] + ynew[et[i]]) / 2)
y_dash = []
y_diff = []
y_proj = []
x_proj = []
for _i, j in enumerate(x_range):
__y = exp_func(xnew[j], a, b)
y_dash.append(__y)
y_diff.append(abs(np.exp(ln_y_range[_i]) - __y))
for j in range(et[i] - pt[i], _calc_et):
if (j + pt[i]) < len(xnew):
x_proj.append(xnew[j + pt[i]])
y_proj.append(exp_func(xnew[j], a, b))
# error is sum(difference between fitted and actual) / ((peak intensity - minimum intensity) * duration from peak to actual end)
final_err.append((np.sum(y_dash)) / ((pi[i] - y_min) * (len(x_range))))
val = np.log10(pi[i] / 25)
_str = ""
_val = str(int(val * 100) / 10)[-3:]
if int(val) < 1:
_str = "A" + _val
elif int(val) == 1:
_str = "B" + _val
elif int(val) == 2:
_str = "C" + _val
elif int(val) == 3:
_str = "M" + _val
elif int(val) > 3:
_str = "X" + _val
_class.append(_str)
except:
print("Error in curve fitting")
return (
final_st,
final_pt,
final_et,
est_et,
final_si,
final_pi,
final_bc,
final_err,
_class,
)
def get_final_zip(g1, g2, g3, g4, g5, g6, g7, g8, g9):
final_zip = pd.DataFrame(zip(g1, g2, g3, g4, g5, g6, g7, g8, g9))
final_zip.columns = [
"start_time",
"peak_time",
"end_time",
"est_end_time",
"start_intensity",
"peak_intensity",
"background_counts",
"error",
"class",
]
return final_zip
|
[
"numpy.sum",
"numpy.log",
"scipy.optimize.curve_fit",
"numpy.min",
"numpy.exp",
"numpy.log10"
] |
[((197, 241), 'scipy.optimize.curve_fit', 'curve_fit', (['exp_fit_func', 'x_range', 'ln_y_range'], {}), '(exp_fit_func, x_range, ln_y_range)\n', (206, 241), False, 'from scipy.optimize import curve_fit\n'), ((269, 281), 'numpy.exp', 'np.exp', (['ln_a'], {}), '(ln_a)\n', (275, 281), True, 'import numpy as np\n'), ((1105, 1117), 'numpy.min', 'np.min', (['ynew'], {}), '(ynew)\n', (1111, 1117), True, 'import numpy as np\n'), ((1404, 1419), 'numpy.log', 'np.log', (['ynew[j]'], {}), '(ynew[j])\n', (1410, 1419), True, 'import numpy as np\n'), ((1488, 1532), 'scipy.optimize.curve_fit', 'curve_fit', (['exp_fit_func', 'x_range', 'ln_y_range'], {}), '(exp_fit_func, x_range, ln_y_range)\n', (1497, 1532), False, 'from scipy.optimize import curve_fit\n'), ((1576, 1588), 'numpy.exp', 'np.exp', (['ln_a'], {}), '(ln_a)\n', (1582, 1588), True, 'import numpy as np\n'), ((2882, 2902), 'numpy.log10', 'np.log10', (['(pi[i] / 25)'], {}), '(pi[i] / 25)\n', (2890, 2902), True, 'import numpy as np\n'), ((2810, 2824), 'numpy.sum', 'np.sum', (['y_dash'], {}), '(y_dash)\n', (2816, 2824), True, 'import numpy as np\n'), ((2401, 2423), 'numpy.exp', 'np.exp', (['ln_y_range[_i]'], {}), '(ln_y_range[_i])\n', (2407, 2423), True, 'import numpy as np\n')]
|
import numpy as np
from scipy.spatial import cKDTree
from scipy.spatial.distance import pdist, squareform
from scipy.sparse import coo_matrix
import pylab as plt
def squared_exponential(x2,D=3):
#x = np.reshape(x,(-1,D))
return np.exp(-x2/2.)
def matern52(x2):
x = np.sqrt(x2)
res = x2
res *= 5./3.
res += np.sqrt(5) * x
res += 1
res *= np.exp((-np.sqrt(5))*x)
return res
def sparse_covariance(cfun, points, sigma, corr,tol=0.1,upper_tri=True):
N,D = points.shape
if not isinstance(corr,np.ndarray):
corr = np.ones(D)*corr
#Get support
if tol == 0.:
isot = np.inf
else:
isot = 0.
for dim in range(D):
direction = (np.arange(D)==dim).astype(float)
t = 0
c0 = cfun(0)
c = c0
while c/c0 > tol:
t += 0.1
c = cfun(np.sum((t*direction/corr)**2))
isot = max(isot,t/corr[dim])
#print("isotropic support: {}".format(isot))
kd = cKDTree(points/corr)
if upper_tri:
pairs = kd.query_pairs(isot,p=2,output_type='ndarray')
pairs = np.concatenate([np.array([np.arange(N)]*2).T,pairs])
x1 = points[pairs[:,0],:]
x2 = points[pairs[:,1],:]
dx = x1-x2
dx /= corr
dx *= dx
dx = np.sum(dx,axis=1)
cval = cfun(dx)
csparse = coo_matrix((cval,(pairs[:,0],pairs[:,1])), shape=(N,N))
else:
X = kd.sparse_distance_matrix(kd,isot,output_type='coo_matrix')
cval = cfun(X.data**2)
csparse = coo_matrix((cval,(X.col,X.row)), shape=(N,N))
return (sigma**2)*csparse
def dense_covariance(cfun, points, sigma, corr):
N,D = points.shape
if not isinstance(corr,np.ndarray):
corr = np.ones(D)*corr
points = points / corr
X = squareform(pdist(points,metric='sqeuclidean'))
return (sigma**2)*cfun(X)
def test_sparse_covariance():
corr = np.array([0.2,0.5,0.1])
xvec = np.linspace(0,1,50)
yvec = np.linspace(0,1,10)
zvec = np.linspace(0,1,10)
X,Y,Z = np.meshgrid(xvec,yvec,zvec,indexing='ij')
points = np.array([X.flatten(),Y.flatten(), Z.flatten()]).T
#%timeit -n 2 cdense = dense_covariance(squared_exponential, points, None, corr)
cdense = dense_covariance(matern52, points, 1., corr)
# #print(cdense)
# plt.imshow(cdense)
# plt.colorbar()
# plt.show()
#%timeit -n 2 csparse = sparse_covariance(squared_exponential,points,None,corr,tol=0.1)
csparse = sparse_covariance(matern52,points,1.,corr,tol=0,upper_tri=False)
assert np.all(np.isclose(csparse.toarray(), cdense))
# #print(csparse.toarray())
# plt.imshow(csparse.toarray())
# plt.colorbar()
# plt.show()
# plt.imshow(csparse.toarray() - cdense)
# plt.colorbar()
# plt.show()
csparse = sparse_covariance(matern52,points,1.,corr,tol=0.1,upper_tri=True)
print("upper triangle tol=0.1 -> saving: {}%".format(1-csparse.nonzero()[0].size/cdense.size))
csparse = sparse_covariance(matern52,points,1.,corr,tol=0.01,upper_tri=True)
print("upper triangle tol=0.01 -> saving: {}%".format(1-csparse.nonzero()[0].size/cdense.size))
def test_sparse_covariance_performance():
corr = np.array([5.,5.,1.])
xvec = np.linspace(-80,80,150)
yvec = np.linspace(-80,80,150)
zvec = np.linspace(0,1000,20)
X,Y,Z = np.meshgrid(xvec,yvec,zvec,indexing='ij')
points = np.array([X.flatten(),Y.flatten(), Z.flatten()]).T
csparse = sparse_covariance(matern52,points,1.,corr,tol=0.1,upper_tri=True)
print("upper triangle tol=0.1 -> saving: {}%".format(1-csparse.nonzero()[0].size/points.size**2))
if __name__=='__main__':
test_sparse_covariance_performance()
|
[
"numpy.meshgrid",
"numpy.sum",
"numpy.ones",
"scipy.sparse.coo_matrix",
"numpy.array",
"numpy.exp",
"numpy.linspace",
"scipy.spatial.cKDTree",
"scipy.spatial.distance.pdist",
"numpy.arange",
"numpy.sqrt"
] |
[((237, 254), 'numpy.exp', 'np.exp', (['(-x2 / 2.0)'], {}), '(-x2 / 2.0)\n', (243, 254), True, 'import numpy as np\n'), ((279, 290), 'numpy.sqrt', 'np.sqrt', (['x2'], {}), '(x2)\n', (286, 290), True, 'import numpy as np\n'), ((1032, 1054), 'scipy.spatial.cKDTree', 'cKDTree', (['(points / corr)'], {}), '(points / corr)\n', (1039, 1054), False, 'from scipy.spatial import cKDTree\n'), ((1972, 1997), 'numpy.array', 'np.array', (['[0.2, 0.5, 0.1]'], {}), '([0.2, 0.5, 0.1])\n', (1980, 1997), True, 'import numpy as np\n'), ((2008, 2029), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(50)'], {}), '(0, 1, 50)\n', (2019, 2029), True, 'import numpy as np\n'), ((2039, 2060), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(10)'], {}), '(0, 1, 10)\n', (2050, 2060), True, 'import numpy as np\n'), ((2070, 2091), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(10)'], {}), '(0, 1, 10)\n', (2081, 2091), True, 'import numpy as np\n'), ((2103, 2147), 'numpy.meshgrid', 'np.meshgrid', (['xvec', 'yvec', 'zvec'], {'indexing': '"""ij"""'}), "(xvec, yvec, zvec, indexing='ij')\n", (2114, 2147), True, 'import numpy as np\n'), ((3261, 3286), 'numpy.array', 'np.array', (['[5.0, 5.0, 1.0]'], {}), '([5.0, 5.0, 1.0])\n', (3269, 3286), True, 'import numpy as np\n'), ((3294, 3319), 'numpy.linspace', 'np.linspace', (['(-80)', '(80)', '(150)'], {}), '(-80, 80, 150)\n', (3305, 3319), True, 'import numpy as np\n'), ((3329, 3354), 'numpy.linspace', 'np.linspace', (['(-80)', '(80)', '(150)'], {}), '(-80, 80, 150)\n', (3340, 3354), True, 'import numpy as np\n'), ((3364, 3388), 'numpy.linspace', 'np.linspace', (['(0)', '(1000)', '(20)'], {}), '(0, 1000, 20)\n', (3375, 3388), True, 'import numpy as np\n'), ((3400, 3444), 'numpy.meshgrid', 'np.meshgrid', (['xvec', 'yvec', 'zvec'], {'indexing': '"""ij"""'}), "(xvec, yvec, zvec, indexing='ij')\n", (3411, 3444), True, 'import numpy as np\n'), ((332, 342), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (339, 342), True, 'import numpy as np\n'), ((1341, 1359), 'numpy.sum', 'np.sum', (['dx'], {'axis': '(1)'}), '(dx, axis=1)\n', (1347, 1359), True, 'import numpy as np\n'), ((1402, 1462), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(cval, (pairs[:, 0], pairs[:, 1]))'], {'shape': '(N, N)'}), '((cval, (pairs[:, 0], pairs[:, 1])), shape=(N, N))\n', (1412, 1462), False, 'from scipy.sparse import coo_matrix\n'), ((1589, 1637), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(cval, (X.col, X.row))'], {'shape': '(N, N)'}), '((cval, (X.col, X.row)), shape=(N, N))\n', (1599, 1637), False, 'from scipy.sparse import coo_matrix\n'), ((1864, 1899), 'scipy.spatial.distance.pdist', 'pdist', (['points'], {'metric': '"""sqeuclidean"""'}), "(points, metric='sqeuclidean')\n", (1869, 1899), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((563, 573), 'numpy.ones', 'np.ones', (['D'], {}), '(D)\n', (570, 573), True, 'import numpy as np\n'), ((1802, 1812), 'numpy.ones', 'np.ones', (['D'], {}), '(D)\n', (1809, 1812), True, 'import numpy as np\n'), ((380, 390), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (387, 390), True, 'import numpy as np\n'), ((902, 937), 'numpy.sum', 'np.sum', (['((t * direction / corr) ** 2)'], {}), '((t * direction / corr) ** 2)\n', (908, 937), True, 'import numpy as np\n'), ((727, 739), 'numpy.arange', 'np.arange', (['D'], {}), '(D)\n', (736, 739), True, 'import numpy as np\n'), ((1177, 1189), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (1186, 1189), True, 'import numpy as np\n')]
|
from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.psvm import H2OSupportVectorMachineEstimator
def svm_svmguide3():
svmguide3 = h2o.import_file(pyunit_utils.locate("smalldata/svm_test/svmguide3scale.svm"))
svmguide3_test = h2o.import_file(pyunit_utils.locate("smalldata/svm_test/svmguide3scale_test.svm"))
# parameters taken from libsvm guide
svm_tuned = H2OSupportVectorMachineEstimator(hyper_param=128, gamma=0.125, disable_training_metrics=False)
svm_tuned.train(y="C1", training_frame=svmguide3, validation_frame=svmguide3_test)
accuracy = svm_tuned.model_performance(valid=True).accuracy()[0][1]
assert accuracy >= 0.80 # guide has 87% - this just shows it is not completely off
if __name__ == "__main__":
pyunit_utils.standalone_test(svm_svmguide3)
else:
svm_svmguide3()
|
[
"h2o.estimators.psvm.H2OSupportVectorMachineEstimator",
"tests.pyunit_utils.standalone_test",
"sys.path.insert",
"tests.pyunit_utils.locate"
] |
[((49, 80), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../../../"""'], {}), "(1, '../../../')\n", (64, 80), False, 'import sys\n'), ((466, 564), 'h2o.estimators.psvm.H2OSupportVectorMachineEstimator', 'H2OSupportVectorMachineEstimator', ([], {'hyper_param': '(128)', 'gamma': '(0.125)', 'disable_training_metrics': '(False)'}), '(hyper_param=128, gamma=0.125,\n disable_training_metrics=False)\n', (498, 564), False, 'from h2o.estimators.psvm import H2OSupportVectorMachineEstimator\n'), ((845, 888), 'tests.pyunit_utils.standalone_test', 'pyunit_utils.standalone_test', (['svm_svmguide3'], {}), '(svm_svmguide3)\n', (873, 888), False, 'from tests import pyunit_utils\n'), ((242, 302), 'tests.pyunit_utils.locate', 'pyunit_utils.locate', (['"""smalldata/svm_test/svmguide3scale.svm"""'], {}), "('smalldata/svm_test/svmguide3scale.svm')\n", (261, 302), False, 'from tests import pyunit_utils\n'), ((341, 406), 'tests.pyunit_utils.locate', 'pyunit_utils.locate', (['"""smalldata/svm_test/svmguide3scale_test.svm"""'], {}), "('smalldata/svm_test/svmguide3scale_test.svm')\n", (360, 406), False, 'from tests import pyunit_utils\n')]
|
# Translation in python of the Matlab implementation of <NAME> and
# <NAME>, of the algorithm described in
# "Mixtures of Probabilistic Principal Component Analysers",
# <NAME> and <NAME>, Neural Computation 11(2),
# pp 443–482, MIT Press, 1999
import numpy as np
def initialization_kmeans(X, p, q, variance_level=None):
"""
X : dataset
p : number of clusters
q : dimension of the latent space
variance_level
pi : proportions of clusters
mu : centers of the clusters in the observation space
W : latent to observation matricies
sigma2 : noise
"""
N, d = X.shape
# initialization
init_centers = np.random.randint(0, N, p)
while (len(np.unique(init_centers)) != p):
init_centers = np.random.randint(0, N, p)
mu = X[init_centers, :]
distance_square = np.zeros((N, p))
clusters = np.zeros(N, dtype=np.int32)
D_old = -2
D = -1
while(D_old != D):
D_old = D
# assign clusters
for c in range(p):
distance_square[:, c] = np.power(X - mu[c, :], 2).sum(1)
clusters = np.argmin(distance_square, axis=1)
# compute distortion
distmin = distance_square[range(N), clusters]
D = distmin.sum()
# compute new centers
for c in range(p):
mu[c, :] = X[clusters == c, :].mean(0)
#for c in range(p):
# plt.scatter(X[clusters == c, 0], X[clusters == c, 1], c=np.random.rand(3,1))
# parameter initialization
pi = np.zeros(p)
W = np.zeros((p, d, q))
sigma2 = np.zeros(p)
for c in range(p):
if variance_level:
W[c, :, :] = variance_level * np.random.randn(d, q)
else:
W[c, :, :] = np.random.randn(d, q)
pi[c] = (clusters == c).sum() / N
if variance_level:
sigma2[c] = np.abs((variance_level/10) * np.random.randn())
else:
sigma2[c] = (distmin[clusters == c]).mean() / d
return pi, mu, W, sigma2, clusters
def mppca_gem(X, pi, mu, W, sigma2, niter):
N, d = X.shape
p = len(sigma2)
_, q = W[0].shape
sigma2hist = np.zeros((p, niter))
M = np.zeros((p, q, q))
Minv = np.zeros((p, q, q))
Cinv = np.zeros((p, d, d))
logR = np.zeros((N, p))
R = np.zeros((N, p))
M[:] = 0.
Minv[:] = 0.
Cinv[:] = 0.
L = np.zeros(niter)
for i in range(niter):
print('.', end='')
for c in range(p):
sigma2hist[c, i] = sigma2[c]
# M
M[c, :, :] = sigma2[c]*np.eye(q) + np.dot(W[c, :, :].T, W[c, :, :])
Minv[c, :, :] = np.linalg.inv(M[c, :, :])
# Cinv
Cinv[c, :, :] = (np.eye(d)
- np.dot(np.dot(W[c, :, :], Minv[c, :, :]), W[c, :, :].T)
) / sigma2[c]
# R_ni
deviation_from_center = X - mu[c, :]
logR[:, c] = ( np.log(pi[c])
+ 0.5*np.log(
np.linalg.det(
np.eye(d) - np.dot(np.dot(W[c, :, :], Minv[c, :, :]), W[c, :, :].T)
)
)
- 0.5*d*np.log(sigma2[c])
- 0.5*(deviation_from_center * np.dot(deviation_from_center, Cinv[c, :, :].T)).sum(1)
)
myMax = logR.max(axis=1).reshape((N, 1))
L[i] = (
(myMax.ravel() + np.log(np.exp(logR - myMax).sum(axis=1))).sum(axis=0)
- N*d*np.log(2*3.141593)/2.
)
logR = logR - myMax - np.reshape(np.log(np.exp(logR - myMax).sum(axis=1)), (N, 1))
myMax = logR.max(axis=0)
logpi = myMax + np.log(np.exp(logR - myMax).sum(axis=0)) - np.log(N)
logpi = logpi.T
pi = np.exp(logpi)
R = np.exp(logR)
for c in range(p):
mu[c, :] = (R[:, c].reshape((N, 1)) * X).sum(axis=0) / R[:, c].sum()
deviation_from_center = X - mu[c, :].reshape((1, d))
SW = ( (1/(pi[c]*N))
* np.dot((R[:, c].reshape((N, 1)) * deviation_from_center).T,
np.dot(deviation_from_center, W[c, :, :]))
)
Wnew = np.dot(SW, np.linalg.inv(sigma2[c]*np.eye(q) + np.dot(np.dot(Minv[c, :, :], W[c, :, :].T), SW)))
sigma2[c] = (1/d) * (
(R[:, c].reshape(N, 1) * np.power(deviation_from_center, 2)).sum()
/
(N*pi[c])
-
np.trace(np.dot(np.dot(SW, Minv[c, :, :]), Wnew.T))
)
W[c, :, :] = Wnew
return pi, mu, W, sigma2, R, L, sigma2hist
def mppca_predict(X, pi, mu, W, sigma2):
N, d = X.shape
p = len(sigma2)
_, q = W[0].shape
M = np.zeros((p, q, q))
Minv = np.zeros((p, q, q))
Cinv = np.zeros((p, d, d))
logR = np.zeros((N, p))
R = np.zeros((N, p))
for c in range(p):
# M
M[c, :, :] = sigma2[c] * np.eye(q) + np.dot(W[c, :, :].T, W[c, :, :])
Minv[c, :, :] = np.linalg.inv(M[c, :, :])
# Cinv
Cinv[c, :, :] = (np.eye(d)
- np.dot(np.dot(W[c, :, :], Minv[c, :, :]), W[c, :, :].T)
) / sigma2[c]
# R_ni
deviation_from_center = X - mu[c, :]
logR[:, c] = ( np.log(pi[c])
+ 0.5*np.log(
np.linalg.det(
np.eye(d) - np.dot(np.dot(W[c, :, :], Minv[c, :, :]), W[c, :, :].T)
)
)
- 0.5*d*np.log(sigma2[c])
- 0.5*(deviation_from_center * np.dot(deviation_from_center, Cinv[c, :, :].T)).sum(1)
)
myMax = logR.max(axis=1).reshape((N, 1))
logR = logR - myMax - np.reshape(np.log(np.exp(logR - myMax).sum(axis=1)), (N, 1))
R = np.exp(logR)
return R
|
[
"numpy.log",
"numpy.eye",
"numpy.random.randn",
"numpy.power",
"numpy.zeros",
"numpy.argmin",
"numpy.random.randint",
"numpy.linalg.inv",
"numpy.exp",
"numpy.dot",
"numpy.unique"
] |
[((653, 679), 'numpy.random.randint', 'np.random.randint', (['(0)', 'N', 'p'], {}), '(0, N, p)\n', (670, 679), True, 'import numpy as np\n'), ((828, 844), 'numpy.zeros', 'np.zeros', (['(N, p)'], {}), '((N, p))\n', (836, 844), True, 'import numpy as np\n'), ((860, 887), 'numpy.zeros', 'np.zeros', (['N'], {'dtype': 'np.int32'}), '(N, dtype=np.int32)\n', (868, 887), True, 'import numpy as np\n'), ((1505, 1516), 'numpy.zeros', 'np.zeros', (['p'], {}), '(p)\n', (1513, 1516), True, 'import numpy as np\n'), ((1525, 1544), 'numpy.zeros', 'np.zeros', (['(p, d, q)'], {}), '((p, d, q))\n', (1533, 1544), True, 'import numpy as np\n'), ((1558, 1569), 'numpy.zeros', 'np.zeros', (['p'], {}), '(p)\n', (1566, 1569), True, 'import numpy as np\n'), ((2126, 2146), 'numpy.zeros', 'np.zeros', (['(p, niter)'], {}), '((p, niter))\n', (2134, 2146), True, 'import numpy as np\n'), ((2155, 2174), 'numpy.zeros', 'np.zeros', (['(p, q, q)'], {}), '((p, q, q))\n', (2163, 2174), True, 'import numpy as np\n'), ((2186, 2205), 'numpy.zeros', 'np.zeros', (['(p, q, q)'], {}), '((p, q, q))\n', (2194, 2205), True, 'import numpy as np\n'), ((2217, 2236), 'numpy.zeros', 'np.zeros', (['(p, d, d)'], {}), '((p, d, d))\n', (2225, 2236), True, 'import numpy as np\n'), ((2248, 2264), 'numpy.zeros', 'np.zeros', (['(N, p)'], {}), '((N, p))\n', (2256, 2264), True, 'import numpy as np\n'), ((2273, 2289), 'numpy.zeros', 'np.zeros', (['(N, p)'], {}), '((N, p))\n', (2281, 2289), True, 'import numpy as np\n'), ((2347, 2362), 'numpy.zeros', 'np.zeros', (['niter'], {}), '(niter)\n', (2355, 2362), True, 'import numpy as np\n'), ((4692, 4711), 'numpy.zeros', 'np.zeros', (['(p, q, q)'], {}), '((p, q, q))\n', (4700, 4711), True, 'import numpy as np\n'), ((4723, 4742), 'numpy.zeros', 'np.zeros', (['(p, q, q)'], {}), '((p, q, q))\n', (4731, 4742), True, 'import numpy as np\n'), ((4754, 4773), 'numpy.zeros', 'np.zeros', (['(p, d, d)'], {}), '((p, d, d))\n', (4762, 4773), True, 'import numpy as np\n'), ((4785, 4801), 'numpy.zeros', 'np.zeros', (['(N, p)'], {}), '((N, p))\n', (4793, 4801), True, 'import numpy as np\n'), ((4810, 4826), 'numpy.zeros', 'np.zeros', (['(N, p)'], {}), '((N, p))\n', (4818, 4826), True, 'import numpy as np\n'), ((5704, 5716), 'numpy.exp', 'np.exp', (['logR'], {}), '(logR)\n', (5710, 5716), True, 'import numpy as np\n'), ((750, 776), 'numpy.random.randint', 'np.random.randint', (['(0)', 'N', 'p'], {}), '(0, N, p)\n', (767, 776), True, 'import numpy as np\n'), ((1099, 1133), 'numpy.argmin', 'np.argmin', (['distance_square'], {'axis': '(1)'}), '(distance_square, axis=1)\n', (1108, 1133), True, 'import numpy as np\n'), ((3712, 3725), 'numpy.exp', 'np.exp', (['logpi'], {}), '(logpi)\n', (3718, 3725), True, 'import numpy as np\n'), ((3738, 3750), 'numpy.exp', 'np.exp', (['logR'], {}), '(logR)\n', (3744, 3750), True, 'import numpy as np\n'), ((4965, 4990), 'numpy.linalg.inv', 'np.linalg.inv', (['M[c, :, :]'], {}), '(M[c, :, :])\n', (4978, 4990), True, 'import numpy as np\n'), ((695, 718), 'numpy.unique', 'np.unique', (['init_centers'], {}), '(init_centers)\n', (704, 718), True, 'import numpy as np\n'), ((1723, 1744), 'numpy.random.randn', 'np.random.randn', (['d', 'q'], {}), '(d, q)\n', (1738, 1744), True, 'import numpy as np\n'), ((2610, 2635), 'numpy.linalg.inv', 'np.linalg.inv', (['M[c, :, :]'], {}), '(M[c, :, :])\n', (2623, 2635), True, 'import numpy as np\n'), ((3665, 3674), 'numpy.log', 'np.log', (['N'], {}), '(N)\n', (3671, 3674), True, 'import numpy as np\n'), ((4908, 4940), 'numpy.dot', 'np.dot', (['W[c, :, :].T', 'W[c, :, :]'], {}), '(W[c, :, :].T, W[c, :, :])\n', (4914, 4940), True, 'import numpy as np\n'), ((1662, 1683), 'numpy.random.randn', 'np.random.randn', (['d', 'q'], {}), '(d, q)\n', (1677, 1683), True, 'import numpy as np\n'), ((2549, 2581), 'numpy.dot', 'np.dot', (['W[c, :, :].T', 'W[c, :, :]'], {}), '(W[c, :, :].T, W[c, :, :])\n', (2555, 2581), True, 'import numpy as np\n'), ((4896, 4905), 'numpy.eye', 'np.eye', (['q'], {}), '(q)\n', (4902, 4905), True, 'import numpy as np\n'), ((5032, 5041), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (5038, 5041), True, 'import numpy as np\n'), ((1047, 1072), 'numpy.power', 'np.power', (['(X - mu[c, :])', '(2)'], {}), '(X - mu[c, :], 2)\n', (1055, 1072), True, 'import numpy as np\n'), ((1868, 1885), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (1883, 1885), True, 'import numpy as np\n'), ((2537, 2546), 'numpy.eye', 'np.eye', (['q'], {}), '(q)\n', (2543, 2546), True, 'import numpy as np\n'), ((2685, 2694), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (2691, 2694), True, 'import numpy as np\n'), ((3436, 3456), 'numpy.log', 'np.log', (['(2 * 3.141593)'], {}), '(2 * 3.141593)\n', (3442, 3456), True, 'import numpy as np\n'), ((4056, 4097), 'numpy.dot', 'np.dot', (['deviation_from_center', 'W[c, :, :]'], {}), '(deviation_from_center, W[c, :, :])\n', (4062, 4097), True, 'import numpy as np\n'), ((5063, 5096), 'numpy.dot', 'np.dot', (['W[c, :, :]', 'Minv[c, :, :]'], {}), '(W[c, :, :], Minv[c, :, :])\n', (5069, 5096), True, 'import numpy as np\n'), ((5222, 5235), 'numpy.log', 'np.log', (['pi[c]'], {}), '(pi[c])\n', (5228, 5235), True, 'import numpy as np\n'), ((5433, 5450), 'numpy.log', 'np.log', (['sigma2[c]'], {}), '(sigma2[c])\n', (5439, 5450), True, 'import numpy as np\n'), ((2720, 2753), 'numpy.dot', 'np.dot', (['W[c, :, :]', 'Minv[c, :, :]'], {}), '(W[c, :, :], Minv[c, :, :])\n', (2726, 2753), True, 'import numpy as np\n'), ((2895, 2908), 'numpy.log', 'np.log', (['pi[c]'], {}), '(pi[c])\n', (2901, 2908), True, 'import numpy as np\n'), ((3130, 3147), 'numpy.log', 'np.log', (['sigma2[c]'], {}), '(sigma2[c])\n', (3136, 3147), True, 'import numpy as np\n'), ((5653, 5673), 'numpy.exp', 'np.exp', (['(logR - myMax)'], {}), '(logR - myMax)\n', (5659, 5673), True, 'import numpy as np\n'), ((3521, 3541), 'numpy.exp', 'np.exp', (['(logR - myMax)'], {}), '(logR - myMax)\n', (3527, 3541), True, 'import numpy as np\n'), ((3629, 3649), 'numpy.exp', 'np.exp', (['(logR - myMax)'], {}), '(logR - myMax)\n', (3635, 3649), True, 'import numpy as np\n'), ((4172, 4181), 'numpy.eye', 'np.eye', (['q'], {}), '(q)\n', (4178, 4181), True, 'import numpy as np\n'), ((4191, 4226), 'numpy.dot', 'np.dot', (['Minv[c, :, :]', 'W[c, :, :].T'], {}), '(Minv[c, :, :], W[c, :, :].T)\n', (4197, 4226), True, 'import numpy as np\n'), ((4446, 4471), 'numpy.dot', 'np.dot', (['SW', 'Minv[c, :, :]'], {}), '(SW, Minv[c, :, :])\n', (4452, 4471), True, 'import numpy as np\n'), ((5494, 5540), 'numpy.dot', 'np.dot', (['deviation_from_center', 'Cinv[c, :, :].T'], {}), '(deviation_from_center, Cinv[c, :, :].T)\n', (5500, 5540), True, 'import numpy as np\n'), ((3195, 3241), 'numpy.dot', 'np.dot', (['deviation_from_center', 'Cinv[c, :, :].T'], {}), '(deviation_from_center, Cinv[c, :, :].T)\n', (3201, 3241), True, 'import numpy as np\n'), ((3371, 3391), 'numpy.exp', 'np.exp', (['(logR - myMax)'], {}), '(logR - myMax)\n', (3377, 3391), True, 'import numpy as np\n'), ((4310, 4344), 'numpy.power', 'np.power', (['deviation_from_center', '(2)'], {}), '(deviation_from_center, 2)\n', (4318, 4344), True, 'import numpy as np\n'), ((5313, 5322), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (5319, 5322), True, 'import numpy as np\n'), ((2998, 3007), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (3004, 3007), True, 'import numpy as np\n'), ((5332, 5365), 'numpy.dot', 'np.dot', (['W[c, :, :]', 'Minv[c, :, :]'], {}), '(W[c, :, :], Minv[c, :, :])\n', (5338, 5365), True, 'import numpy as np\n'), ((3017, 3050), 'numpy.dot', 'np.dot', (['W[c, :, :]', 'Minv[c, :, :]'], {}), '(W[c, :, :], Minv[c, :, :])\n', (3023, 3050), True, 'import numpy as np\n')]
|
from flask import jsonify
from ast import literal_eval
from app import db
from app.models.office import User, Location
def validate_and_add_user(form):
status, new_user = validate_and_save_user(form, skip_location=False)
if status:
return jsonify(success=True, item=new_user.to_dict())
else:
return jsonify(success=False, message='Missing required fields!'), 400
def fetch_all_users(is_plain_dict=False, args=None):
if args :
campus_id = args.get('campusId', None)
if campus_id:
locations = Location.query.filter_by(campus_id=campus_id).all()
users = User.query.filter(User.location_id.in_([l.id for l in locations])).all()
else:
users = User.query.all()
return [user.to_plain_dict() if is_plain_dict else user.to_dict() for user in users]
def fetch_user_with(id=None):
return find_or_delete_user_with(id=id)
def find_or_delete_user_with(id=None, should_delete=False):
user = User.query.filter_by(id=id).first()
if user:
if should_delete:
db.session.delete(user)
db.session.commit()
return jsonify(item=user.to_dict(), success=True), 200
else:
return jsonify(message='Requested Record Not Available!', success=False), 404
def delete_user_with(id=None):
return find_or_delete_user_with(id=id, should_delete=True)
def validate_input_and_authenticate(form):
uname = form.get('username', None)
passwd = form.get('password', None)
if uname and passwd:
user = User.query.filter_by(username=uname, password=passwd).first()
if user:
return jsonify(success=True, item=user.to_dict())
else:
return jsonify(success=False, message='Authentication Failed!'), 403
else:
return jsonify(success=False, message='Missing required fields!'), 401
def validate_and_upload_users(ustr, reset):
users = literal_eval(ustr.decode().replace("'", '"'))
if reset :
User.query.delete()
db.session.commit()
count = 0
status = False
for user in users:
status, u = validate_and_save_user(user, True)
count += 1 if status else 0
# print(new_user)
return status, count
def validate_and_save_user(form, skip_location):
first_name = form.get("firstName", None)
last_name = form.get("lastName", None)
username = form.get("username", None)
password = form.get("password", None)
location_id = form.get("locationId", None) if "locationId" in form else None
if first_name and last_name and username and password:
if (not skip_location) and (not location_id):
return False, None
new_user = User(first_name=first_name, last_name=last_name, username=username, password=password, location_id=location_id)
new_user.save()
return True, new_user
return False, None
|
[
"app.models.office.User",
"app.models.office.User.location_id.in_",
"flask.jsonify",
"app.db.session.delete",
"app.db.session.commit",
"app.models.office.User.query.filter_by",
"app.models.office.User.query.delete",
"app.models.office.Location.query.filter_by",
"app.models.office.User.query.all"
] |
[((725, 741), 'app.models.office.User.query.all', 'User.query.all', ([], {}), '()\n', (739, 741), False, 'from app.models.office import User, Location\n'), ((1988, 2007), 'app.models.office.User.query.delete', 'User.query.delete', ([], {}), '()\n', (2005, 2007), False, 'from app.models.office import User, Location\n'), ((2016, 2035), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2033, 2035), False, 'from app import db\n'), ((2700, 2815), 'app.models.office.User', 'User', ([], {'first_name': 'first_name', 'last_name': 'last_name', 'username': 'username', 'password': 'password', 'location_id': 'location_id'}), '(first_name=first_name, last_name=last_name, username=username,\n password=password, location_id=location_id)\n', (2704, 2815), False, 'from app.models.office import User, Location\n'), ((329, 387), 'flask.jsonify', 'jsonify', ([], {'success': '(False)', 'message': '"""Missing required fields!"""'}), "(success=False, message='Missing required fields!')\n", (336, 387), False, 'from flask import jsonify\n'), ((977, 1004), 'app.models.office.User.query.filter_by', 'User.query.filter_by', ([], {'id': 'id'}), '(id=id)\n', (997, 1004), False, 'from app.models.office import User, Location\n'), ((1064, 1087), 'app.db.session.delete', 'db.session.delete', (['user'], {}), '(user)\n', (1081, 1087), False, 'from app import db\n'), ((1100, 1119), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (1117, 1119), False, 'from app import db\n'), ((1208, 1273), 'flask.jsonify', 'jsonify', ([], {'message': '"""Requested Record Not Available!"""', 'success': '(False)'}), "(message='Requested Record Not Available!', success=False)\n", (1215, 1273), False, 'from flask import jsonify\n'), ((1798, 1856), 'flask.jsonify', 'jsonify', ([], {'success': '(False)', 'message': '"""Missing required fields!"""'}), "(success=False, message='Missing required fields!')\n", (1805, 1856), False, 'from flask import jsonify\n'), ((1537, 1590), 'app.models.office.User.query.filter_by', 'User.query.filter_by', ([], {'username': 'uname', 'password': 'passwd'}), '(username=uname, password=passwd)\n', (1557, 1590), False, 'from app.models.office import User, Location\n'), ((1711, 1767), 'flask.jsonify', 'jsonify', ([], {'success': '(False)', 'message': '"""Authentication Failed!"""'}), "(success=False, message='Authentication Failed!')\n", (1718, 1767), False, 'from flask import jsonify\n'), ((554, 599), 'app.models.office.Location.query.filter_by', 'Location.query.filter_by', ([], {'campus_id': 'campus_id'}), '(campus_id=campus_id)\n', (578, 599), False, 'from app.models.office import User, Location\n'), ((644, 691), 'app.models.office.User.location_id.in_', 'User.location_id.in_', (['[l.id for l in locations]'], {}), '([l.id for l in locations])\n', (664, 691), False, 'from app.models.office import User, Location\n')]
|
from django.conf import settings
from django.db.models.signals import post_save, pre_save
from django.dispatch import receiver
from .models import KnownMember, StudentRegistration
from .tasks import notify_sds_registration, register_on_slack
from .utils import email_user_complete_registration
@receiver(pre_save, sender=StudentRegistration)
def check_known_members(instance: StudentRegistration, **kwargs):
"""
Checks the current known members for the current registering user. If the user is a known
member, their information is updated after registration.
Args:
instance (StudentRegistration): The student registration instance.
"""
known_member = KnownMember.objects.filter(email__exact=instance.user.email)
if known_member:
known_member = known_member.first()
instance.slack_registered = known_member.slack_registered
instance.sds_registered = known_member.sds_registered
instance.user.name = known_member.name
known_member.delete() # delete the known member to save space in the database
@receiver(post_save, sender=StudentRegistration)
def notify_complete_registration(instance: StudentRegistration, **kwargs):
"""
Notifies the user when their registration has been updated.
Args:
instance (StudentRegistration): The student registration instance.
"""
if settings.RUN_REGISTRATION_POST_SAVE_SIGNAL:
save_again = False # needed for if we notify admin/the user
# add users to slack automatically with Flameboi util
if (
settings.FLAMEBOI["REGISTER_SLACK_USERS_WITH_FLAMEBOI"]
and not instance.slack_registered
and not instance.slack_add_attempt
):
register_on_slack.delay(emails=[instance.user.email])
instance.slack_add_attempt = True
save_again = True
# notify managers of new users to be added to SunDevilSync
if (
settings.NOTIFY_MANAGERS_SDS_REGISTRATION
and not instance.sds_registered
and not instance.sds_notified
):
notify_sds_registration.delay(instance.user.email)
instance.sds_notified = True
save_again = True
# notify a user if their registration has been completed
if (
settings.SEND_COMPLETED_REGISTRATION_NOTIFICATION
and instance.completed_registration
and not instance.completed_registration_notification
and not instance._restart_registration
):
# TODO send the user an email saying their registration has been completed
email_user_complete_registration(email=instance.user.email)
instance.completed_registration_notification = True
save_again = True
if save_again:
instance.save()
|
[
"django.dispatch.receiver"
] |
[((298, 344), 'django.dispatch.receiver', 'receiver', (['pre_save'], {'sender': 'StudentRegistration'}), '(pre_save, sender=StudentRegistration)\n', (306, 344), False, 'from django.dispatch import receiver\n'), ((1078, 1125), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'StudentRegistration'}), '(post_save, sender=StudentRegistration)\n', (1086, 1125), False, 'from django.dispatch import receiver\n')]
|
import os
import numpy as np
folder = ""
file_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.chdir(file_path)
map_files = {
"main": "main.csv",
"landmass": "landmass.csv"
}
save_file = "map_saves"
def save(maps):
for map_name in maps:
np.savetxt(save_file + '/' + map_files[map_name], maps[map_name], delimiter = ',')
def load(maps):
for map_name in map_files:
maps[map_name] = np.loadtxt(save_file + '/' + map_files[map_name], delimiter = ',')
return maps
|
[
"numpy.loadtxt",
"os.path.abspath",
"numpy.savetxt",
"os.chdir"
] |
[((121, 140), 'os.chdir', 'os.chdir', (['file_path'], {}), '(file_path)\n', (129, 140), False, 'import os\n'), ((92, 117), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (107, 117), False, 'import os\n'), ((300, 385), 'numpy.savetxt', 'np.savetxt', (["(save_file + '/' + map_files[map_name])", 'maps[map_name]'], {'delimiter': '""","""'}), "(save_file + '/' + map_files[map_name], maps[map_name], delimiter=','\n )\n", (310, 385), True, 'import numpy as np\n'), ((464, 528), 'numpy.loadtxt', 'np.loadtxt', (["(save_file + '/' + map_files[map_name])"], {'delimiter': '""","""'}), "(save_file + '/' + map_files[map_name], delimiter=',')\n", (474, 528), True, 'import numpy as np\n')]
|
from __future__ import absolute_import, unicode_literals
from django_pg import models
class Hobbit(models.Model):
name = models.CharField(max_length=50)
favorite_foods = models.ArrayField(models.CharField(max_length=100))
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Elf(models.Model):
id = models.UUIDField(auto_add=True, primary_key=True)
name = models.CharField(max_length=50)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class AuthorField(models.CompositeField):
name = models.CharField(max_length=75)
sex = models.CharField(max_length=6, choices=(
('male', 'Male'),
('female', 'Female'),
))
birthdate = models.DateField()
class Book(models.Model):
title = models.CharField(max_length=50)
author = AuthorField()
date_published = models.DateField()
|
[
"django_pg.models.CharField",
"django_pg.models.DateField",
"django_pg.models.DateTimeField",
"django_pg.models.UUIDField"
] |
[((127, 158), 'django_pg.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (143, 158), False, 'from django_pg import models\n'), ((246, 285), 'django_pg.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (266, 285), False, 'from django_pg import models\n'), ((301, 336), 'django_pg.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (321, 336), False, 'from django_pg import models\n'), ((373, 422), 'django_pg.models.UUIDField', 'models.UUIDField', ([], {'auto_add': '(True)', 'primary_key': '(True)'}), '(auto_add=True, primary_key=True)\n', (389, 422), False, 'from django_pg import models\n'), ((434, 465), 'django_pg.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (450, 465), False, 'from django_pg import models\n'), ((480, 519), 'django_pg.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (500, 519), False, 'from django_pg import models\n'), ((535, 570), 'django_pg.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (555, 570), False, 'from django_pg import models\n'), ((626, 657), 'django_pg.models.CharField', 'models.CharField', ([], {'max_length': '(75)'}), '(max_length=75)\n', (642, 657), False, 'from django_pg import models\n'), ((668, 753), 'django_pg.models.CharField', 'models.CharField', ([], {'max_length': '(6)', 'choices': "(('male', 'Male'), ('female', 'Female'))"}), "(max_length=6, choices=(('male', 'Male'), ('female', 'Female'))\n )\n", (684, 753), False, 'from django_pg import models\n'), ((788, 806), 'django_pg.models.DateField', 'models.DateField', ([], {}), '()\n', (804, 806), False, 'from django_pg import models\n'), ((847, 878), 'django_pg.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (863, 878), False, 'from django_pg import models\n'), ((927, 945), 'django_pg.models.DateField', 'models.DateField', ([], {}), '()\n', (943, 945), False, 'from django_pg import models\n'), ((198, 230), 'django_pg.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (214, 230), False, 'from django_pg import models\n')]
|
"""Add journal content column
Revision ID: 088e13f2ae70
Revises: <KEY>
Create Date: 2017-08-21 04:34:29.541975
"""
# revision identifiers, used by Alembic.
revision = '088e13f2ae70'
down_revision = '<KEY>'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('journal', sa.Column('content', sa.Text(), nullable=True))
def downgrade():
raise Exception('Reversing this migration could delete new journal content')
|
[
"sqlalchemy.Text"
] |
[((324, 333), 'sqlalchemy.Text', 'sa.Text', ([], {}), '()\n', (331, 333), True, 'import sqlalchemy as sa\n')]
|
#!usr/bin/env python
# Program to mine data from your own facebook account
import json
import facebook
import os
import sys
import random
token = os.environ.get('FACEBOOK_TOKEN')
group_id = str(os.environ.get('THOPGANG_GROUP_ID'))
timestamp = str(sys.argv[1])
polarity_file = '../data/polarity.may2020.pos'
print("The group is ", group_id)
def get_content(polarity_file):
f = open(polarity_file)
lines = []
for line in f:
line = line.split('\n')[0].split()
content = ' '.join(k for k in line[:-1])
if len(line) > 7:
lines.append(content)
selected_content = random.choice(lines).split()
content = ' '.join(k for k in selected_content)
text = "Do you remember this from last month? + '\n' " + content
return text
def main():
graph = facebook.GraphAPI(token)
# profile = graph.get_object(
# 'me', fields='first_name,location,link,email,groups')
group = graph.get_object(id=group_id)
id = group['id']
#pic = get_pic()
#pic = pics_path + '/' + pic
#graph.put_photo(album_path=id + '/photos', image=open(pic, 'rb'), message='Look at this! Posting at ' + timestamp + ' EST')
content = get_content(polarity_file)
graph.put_object(id, 'feed', message=content)
print(group)
if __name__ == '__main__':
main()
|
[
"os.environ.get",
"random.choice",
"facebook.GraphAPI"
] |
[((148, 180), 'os.environ.get', 'os.environ.get', (['"""FACEBOOK_TOKEN"""'], {}), "('FACEBOOK_TOKEN')\n", (162, 180), False, 'import os\n'), ((196, 231), 'os.environ.get', 'os.environ.get', (['"""THOPGANG_GROUP_ID"""'], {}), "('THOPGANG_GROUP_ID')\n", (210, 231), False, 'import os\n'), ((805, 829), 'facebook.GraphAPI', 'facebook.GraphAPI', (['token'], {}), '(token)\n', (822, 829), False, 'import facebook\n'), ((612, 632), 'random.choice', 'random.choice', (['lines'], {}), '(lines)\n', (625, 632), False, 'import random\n')]
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import pytest
from twitter.common.contextutil import temporary_dir
from pex.common import safe_copy
from pex.fetcher import Fetcher
from pex.package import EggPackage, SourcePackage
from pex.resolvable import ResolvableRequirement
from pex.resolver import Unsatisfiable, _ResolvableSet, resolve
from pex.resolver_options import ResolverOptionsBuilder
from pex.testing import make_sdist
def test_empty_resolve():
empty_resolve = resolve([])
assert empty_resolve == []
with temporary_dir() as td:
empty_resolve = resolve([], cache=td)
assert empty_resolve == []
def test_simple_local_resolve():
project_sdist = make_sdist(name='project')
with temporary_dir() as td:
safe_copy(project_sdist, os.path.join(td, os.path.basename(project_sdist)))
fetchers = [Fetcher([td])]
dists = resolve(['project'], fetchers=fetchers)
assert len(dists) == 1
def test_diamond_local_resolve_cached():
# This exercises the issue described here: https://github.com/pantsbuild/pex/issues/120
project1_sdist = make_sdist(name='project1', install_reqs=['project2<1.0.0'])
project2_sdist = make_sdist(name='project2')
with temporary_dir() as dd:
for sdist in (project1_sdist, project2_sdist):
safe_copy(sdist, os.path.join(dd, os.path.basename(sdist)))
fetchers = [Fetcher([dd])]
with temporary_dir() as cd:
dists = resolve(['project1', 'project2'], fetchers=fetchers, cache=cd, cache_ttl=1000)
assert len(dists) == 2
def test_resolvable_set():
builder = ResolverOptionsBuilder()
rs = _ResolvableSet()
rq = ResolvableRequirement.from_string('foo[ext]', builder)
source_pkg = SourcePackage.from_href('foo-2.3.4.tar.gz')
binary_pkg = EggPackage.from_href('Foo-2.3.4-py3.4.egg')
rs.merge(rq, [source_pkg, binary_pkg])
assert rs.get(source_pkg.name) == set([source_pkg, binary_pkg])
assert rs.get(binary_pkg.name) == set([source_pkg, binary_pkg])
assert rs.packages() == [(rq, set([source_pkg, binary_pkg]), None)]
# test methods
assert rs.extras('foo') == set(['ext'])
assert rs.extras('Foo') == set(['ext'])
# test filtering
rs.merge(rq, [source_pkg])
assert rs.get('foo') == set([source_pkg])
assert rs.get('Foo') == set([source_pkg])
with pytest.raises(Unsatisfiable):
rs.merge(rq, [binary_pkg])
def test_resolvable_set_built():
builder = ResolverOptionsBuilder()
rs = _ResolvableSet()
rq = ResolvableRequirement.from_string('foo', builder)
source_pkg = SourcePackage.from_href('foo-2.3.4.tar.gz')
binary_pkg = EggPackage.from_href('foo-2.3.4-py3.4.egg')
rs.merge(rq, [source_pkg])
assert rs.get('foo') == set([source_pkg])
assert rs.packages() == [(rq, set([source_pkg]), None)]
with pytest.raises(Unsatisfiable):
rs.merge(rq, [binary_pkg])
updated_rs = rs.replace_built({source_pkg: binary_pkg})
updated_rs.merge(rq, [binary_pkg])
assert updated_rs.get('foo') == set([binary_pkg])
assert updated_rs.packages() == [(rq, set([binary_pkg]), None)]
|
[
"pex.fetcher.Fetcher",
"pex.package.EggPackage.from_href",
"pex.testing.make_sdist",
"os.path.basename",
"twitter.common.contextutil.temporary_dir",
"pex.resolver_options.ResolverOptionsBuilder",
"pytest.raises",
"pex.resolver._ResolvableSet",
"pex.resolvable.ResolvableRequirement.from_string",
"pex.resolver.resolve",
"pex.package.SourcePackage.from_href"
] |
[((576, 587), 'pex.resolver.resolve', 'resolve', (['[]'], {}), '([])\n', (583, 587), False, 'from pex.resolver import Unsatisfiable, _ResolvableSet, resolve\n'), ((774, 800), 'pex.testing.make_sdist', 'make_sdist', ([], {'name': '"""project"""'}), "(name='project')\n", (784, 800), False, 'from pex.testing import make_sdist\n'), ((1174, 1234), 'pex.testing.make_sdist', 'make_sdist', ([], {'name': '"""project1"""', 'install_reqs': "['project2<1.0.0']"}), "(name='project1', install_reqs=['project2<1.0.0'])\n", (1184, 1234), False, 'from pex.testing import make_sdist\n'), ((1254, 1281), 'pex.testing.make_sdist', 'make_sdist', ([], {'name': '"""project2"""'}), "(name='project2')\n", (1264, 1281), False, 'from pex.testing import make_sdist\n'), ((1656, 1680), 'pex.resolver_options.ResolverOptionsBuilder', 'ResolverOptionsBuilder', ([], {}), '()\n', (1678, 1680), False, 'from pex.resolver_options import ResolverOptionsBuilder\n'), ((1688, 1704), 'pex.resolver._ResolvableSet', '_ResolvableSet', ([], {}), '()\n', (1702, 1704), False, 'from pex.resolver import Unsatisfiable, _ResolvableSet, resolve\n'), ((1712, 1766), 'pex.resolvable.ResolvableRequirement.from_string', 'ResolvableRequirement.from_string', (['"""foo[ext]"""', 'builder'], {}), "('foo[ext]', builder)\n", (1745, 1766), False, 'from pex.resolvable import ResolvableRequirement\n'), ((1782, 1825), 'pex.package.SourcePackage.from_href', 'SourcePackage.from_href', (['"""foo-2.3.4.tar.gz"""'], {}), "('foo-2.3.4.tar.gz')\n", (1805, 1825), False, 'from pex.package import EggPackage, SourcePackage\n'), ((1841, 1884), 'pex.package.EggPackage.from_href', 'EggPackage.from_href', (['"""Foo-2.3.4-py3.4.egg"""'], {}), "('Foo-2.3.4-py3.4.egg')\n", (1861, 1884), False, 'from pex.package import EggPackage, SourcePackage\n'), ((2484, 2508), 'pex.resolver_options.ResolverOptionsBuilder', 'ResolverOptionsBuilder', ([], {}), '()\n', (2506, 2508), False, 'from pex.resolver_options import ResolverOptionsBuilder\n'), ((2516, 2532), 'pex.resolver._ResolvableSet', '_ResolvableSet', ([], {}), '()\n', (2530, 2532), False, 'from pex.resolver import Unsatisfiable, _ResolvableSet, resolve\n'), ((2540, 2589), 'pex.resolvable.ResolvableRequirement.from_string', 'ResolvableRequirement.from_string', (['"""foo"""', 'builder'], {}), "('foo', builder)\n", (2573, 2589), False, 'from pex.resolvable import ResolvableRequirement\n'), ((2605, 2648), 'pex.package.SourcePackage.from_href', 'SourcePackage.from_href', (['"""foo-2.3.4.tar.gz"""'], {}), "('foo-2.3.4.tar.gz')\n", (2628, 2648), False, 'from pex.package import EggPackage, SourcePackage\n'), ((2664, 2707), 'pex.package.EggPackage.from_href', 'EggPackage.from_href', (['"""foo-2.3.4-py3.4.egg"""'], {}), "('foo-2.3.4-py3.4.egg')\n", (2684, 2707), False, 'from pex.package import EggPackage, SourcePackage\n'), ((625, 640), 'twitter.common.contextutil.temporary_dir', 'temporary_dir', ([], {}), '()\n', (638, 640), False, 'from twitter.common.contextutil import temporary_dir\n'), ((668, 689), 'pex.resolver.resolve', 'resolve', (['[]'], {'cache': 'td'}), '([], cache=td)\n', (675, 689), False, 'from pex.resolver import Unsatisfiable, _ResolvableSet, resolve\n'), ((809, 824), 'twitter.common.contextutil.temporary_dir', 'temporary_dir', ([], {}), '()\n', (822, 824), False, 'from twitter.common.contextutil import temporary_dir\n'), ((955, 994), 'pex.resolver.resolve', 'resolve', (["['project']"], {'fetchers': 'fetchers'}), "(['project'], fetchers=fetchers)\n", (962, 994), False, 'from pex.resolver import Unsatisfiable, _ResolvableSet, resolve\n'), ((1290, 1305), 'twitter.common.contextutil.temporary_dir', 'temporary_dir', ([], {}), '()\n', (1303, 1305), False, 'from twitter.common.contextutil import temporary_dir\n'), ((2376, 2404), 'pytest.raises', 'pytest.raises', (['Unsatisfiable'], {}), '(Unsatisfiable)\n', (2389, 2404), False, 'import pytest\n'), ((2848, 2876), 'pytest.raises', 'pytest.raises', (['Unsatisfiable'], {}), '(Unsatisfiable)\n', (2861, 2876), False, 'import pytest\n'), ((928, 941), 'pex.fetcher.Fetcher', 'Fetcher', (['[td]'], {}), '([td])\n', (935, 941), False, 'from pex.fetcher import Fetcher\n'), ((1446, 1459), 'pex.fetcher.Fetcher', 'Fetcher', (['[dd]'], {}), '([dd])\n', (1453, 1459), False, 'from pex.fetcher import Fetcher\n'), ((1470, 1485), 'twitter.common.contextutil.temporary_dir', 'temporary_dir', ([], {}), '()\n', (1483, 1485), False, 'from twitter.common.contextutil import temporary_dir\n'), ((1507, 1585), 'pex.resolver.resolve', 'resolve', (["['project1', 'project2']"], {'fetchers': 'fetchers', 'cache': 'cd', 'cache_ttl': '(1000)'}), "(['project1', 'project2'], fetchers=fetchers, cache=cd, cache_ttl=1000)\n", (1514, 1585), False, 'from pex.resolver import Unsatisfiable, _ResolvableSet, resolve\n'), ((878, 909), 'os.path.basename', 'os.path.basename', (['project_sdist'], {}), '(project_sdist)\n', (894, 909), False, 'import os\n'), ((1404, 1427), 'os.path.basename', 'os.path.basename', (['sdist'], {}), '(sdist)\n', (1420, 1427), False, 'import os\n')]
|
from django.http import JsonResponse
def response_template(msg, result, code, data):
return JsonResponse({
'result': result, 'code': code,
'msg': msg, 'data': data
})
def response_success(msg="", result=1, code=0, data=None):
return response_template(msg, result, code, data)
def response_failure(msg="", result=0, code=0, data=None):
return response_template(msg, result, code, data)
def msg_template(task, msg, result, code, data):
return {
"task": task, 'result': result, 'code': code,
'msg': msg, 'data': data
}
def msg_success(task="", msg="", result=1, code=0, data=None):
return msg_template(task, msg, result, code, data)
def msg_failure(task="", msg="", result=0, code=0, data=None):
return msg_template(task, msg, result, code, data)
def user_does_not_exists(code=0):
    return response_failure(msg="No matching user", code=code)
def view_exception(code=0): # an exception occurred in the view
    return response_failure(msg="Network error", code=code)
|
[
"django.http.JsonResponse"
] |
[((98, 170), 'django.http.JsonResponse', 'JsonResponse', (["{'result': result, 'code': code, 'msg': msg, 'data': data}"], {}), "({'result': result, 'code': code, 'msg': msg, 'data': data})\n", (110, 170), False, 'from django.http import JsonResponse\n')]
|
# MIT License
# Copyright (c) 2018 the NJUNLP groups.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Author baoyu.nlp
# Time 2019-01-28 18:02
from __future__ import division
import os
from .base_evaluator import BaseEvaluator
from .bleu_scorer import BleuScoreMetric
from .evaluation import prediction as evaluate
class TranslationEvaluator(BaseEvaluator):
def __init__(self, model, eval_set, eval_lists, sort_key, eval_tgt, out_dir="./out", batch_size=20,
write_down=False, use_bpe=False, **kwargs):
super().__init__(model, eval_set, out_dir, batch_size)
self.eval_dirs = eval_lists
self.write_down = write_down
self.sort_key = sort_key
self.eval_tgt = eval_tgt
self.score_item = "BLEU"
self.use_bpe = use_bpe
def __call__(self, eval_desc="mt"):
"""
Args:
eval_desc:
Returns: eval the multi-bleu for machine translation
"""
training = self.model.training
self.model.eval()
eval_results = evaluate(
examples=self.eval_set,
model=self.model,
sort_key=self.sort_key,
batch_size=self.batch_size,
out_dir=os.path.join(self.out_dir, eval_desc) if self.write_down is not None else None)
bleu = BleuScoreMetric.evaluate_file(
pred_file=eval_results['pred_file'],
gold_files=self.eval_dirs,
)
self.model.training = training
return {
'BLEU': bleu,
'EVAL TIME': eval_results['use_time'],
"EVAL SPEED": len(self.eval_set) / eval_results['use_time']
}
|
[
"os.path.join"
] |
[((2228, 2265), 'os.path.join', 'os.path.join', (['self.out_dir', 'eval_desc'], {}), '(self.out_dir, eval_desc)\n', (2240, 2265), False, 'import os\n')]
|
"""file to deal with batch operations"""
import sys
import json
import xlwt
import extract_info
from fit_sheet_wrapper import FitSheetWrapper
from xlwt import Workbook
import random
# from desk import *
import requests
import pandas as pd
import numpy as np
import nltk
import json
import csv
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn import naive_bayes
from sklearn.metrics import roc_auc_score
import pandas
""" Gets the arguments from the command line input """
SENTENCES = sys.argv[1:]
#print(SENTENCES)
wb = Workbook()
ws = FitSheetWrapper(wb.add_sheet('Sheet 1'))
style = xlwt.XFStyle()
font = xlwt.Font()
font.bold = True
style.font = font
# df = pandas.read_csv('engine/desktop_train_health2.csv')
# stopset = set(stopwords.words('english'))
# vectorizer = TfidfVectorizer(use_idf=True,lowercase=True,strip_accents='ascii',stop_words=stopset)
# y = df.common
# x = vectorizer.fit_transform(df.action)
# x_train,x_test,y_train,y_test=train_test_split(x,y,random_state=42)
# clf=naive_bayes.MultinomialNB()
# clf.fit(x_train,y_train)
i = 1
for request in SENTENCES:
result = extract_info.main(request)
print(result)
ws.write(i, 0, "Test Case Number", style=style)
ws.write(i, 1, result['case_id'])
i += 1
ws.write(i, 0, "Test Case Type", style=style)
r = random.randint(0,1)
ws.write(i, 1, "Unique" if r==1 else "General")
i += 1
ws.write(i, 0, "Test Case Description", style=style)
ws.write(i, 1, result['action'])
i += 1
if len(result["inputs"]) > 0:
ws.write_merge(
i, i + len(result["inputs"]) - 1, 0, 0, "Expected Inputs", style=style)
for inp in result["inputs"]:
ws.write(i, 1, inp[0] + " = " + inp[1])
i += 1
else:
ws.write(i, 0, "Expected Inputs", style=style)
ws.write(i, 1, "-")
i += 1
ws.write(i, 0, "Expected Resuls", style=style)
ws.write(i, 1, result['expectation'])
i += 4
wb.save('genTestCases.xls')
print(json.dumps({"code":True}))
sys.stdout.flush()
|
[
"xlwt.Workbook",
"random.randint",
"xlwt.XFStyle",
"extract_info.main",
"json.dumps",
"sys.stdout.flush",
"xlwt.Font"
] |
[((635, 645), 'xlwt.Workbook', 'Workbook', ([], {}), '()\n', (643, 645), False, 'from xlwt import Workbook\n'), ((701, 715), 'xlwt.XFStyle', 'xlwt.XFStyle', ([], {}), '()\n', (713, 715), False, 'import xlwt\n'), ((724, 735), 'xlwt.Font', 'xlwt.Font', ([], {}), '()\n', (733, 735), False, 'import xlwt\n'), ((2133, 2151), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2149, 2151), False, 'import sys\n'), ((1215, 1241), 'extract_info.main', 'extract_info.main', (['request'], {}), '(request)\n', (1232, 1241), False, 'import extract_info\n'), ((1420, 1440), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (1434, 1440), False, 'import random\n'), ((2106, 2132), 'json.dumps', 'json.dumps', (["{'code': True}"], {}), "({'code': True})\n", (2116, 2132), False, 'import json\n')]
|
"""Tests for 'layout' filters."""
from flask_extras.filters import layout
class TestBs3Col:
"""All tests for bs3 col function."""
def test_returns_right_width(self):
"""Test the return value for a valid type."""
assert layout.bs3_cols(1) == 12
assert layout.bs3_cols(2) == 6
assert layout.bs3_cols(3) == 4
assert layout.bs3_cols(4) == 3
assert layout.bs3_cols(5) == 2
assert layout.bs3_cols(6) == 2
def test_returns_right_width_bad_data(self):
"""Test the return value for an invalid type."""
assert layout.bs3_cols(None) == 12
assert layout.bs3_cols('foo') == 12
assert layout.bs3_cols(dict()) == 12
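# Hedged reading of the assertions above (a trailing comment, not part of the original
# test file): bs3_cols appears to behave like 12 // n for column counts 1..6
# (12, 6, 4, 3, 2, 2) and to fall back to the full 12-unit Bootstrap 3 width for None,
# strings, dicts, or any other unusable input.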
|
[
"flask_extras.filters.layout.bs3_cols"
] |
[((247, 265), 'flask_extras.filters.layout.bs3_cols', 'layout.bs3_cols', (['(1)'], {}), '(1)\n', (262, 265), False, 'from flask_extras.filters import layout\n'), ((287, 305), 'flask_extras.filters.layout.bs3_cols', 'layout.bs3_cols', (['(2)'], {}), '(2)\n', (302, 305), False, 'from flask_extras.filters import layout\n'), ((326, 344), 'flask_extras.filters.layout.bs3_cols', 'layout.bs3_cols', (['(3)'], {}), '(3)\n', (341, 344), False, 'from flask_extras.filters import layout\n'), ((365, 383), 'flask_extras.filters.layout.bs3_cols', 'layout.bs3_cols', (['(4)'], {}), '(4)\n', (380, 383), False, 'from flask_extras.filters import layout\n'), ((404, 422), 'flask_extras.filters.layout.bs3_cols', 'layout.bs3_cols', (['(5)'], {}), '(5)\n', (419, 422), False, 'from flask_extras.filters import layout\n'), ((443, 461), 'flask_extras.filters.layout.bs3_cols', 'layout.bs3_cols', (['(6)'], {}), '(6)\n', (458, 461), False, 'from flask_extras.filters import layout\n'), ((589, 610), 'flask_extras.filters.layout.bs3_cols', 'layout.bs3_cols', (['None'], {}), '(None)\n', (604, 610), False, 'from flask_extras.filters import layout\n'), ((632, 654), 'flask_extras.filters.layout.bs3_cols', 'layout.bs3_cols', (['"""foo"""'], {}), "('foo')\n", (647, 654), False, 'from flask_extras.filters import layout\n')]
|
import os
import getpass
import tempfile
from ..Task import Task
from ...TaskWrapper import TaskWrapper
from ...Crawler import Crawler
from ...Crawler.Fs.FsCrawler import FsCrawler
from ...Crawler.Fs.Image import ImageCrawler
class SGPublishTask(Task):
"""
Generic Shotgun publish task for data created by the CreateVersion task.
"""
__shotgunUrl = os.environ.get('KOMBI_SHOTGUN_URL', '')
__shotgunScriptName = os.environ.get('KOMBI_SHOTGUN_SCRIPTNAME', '')
__shotgunApiKey = os.environ.get('KOMBI_SHOTGUN_APIKEY', '')
def __init__(self, *args, **kwargs):
"""
        Create an SGPublishTask object.
"""
super(SGPublishTask, self).__init__(*args, **kwargs)
# setting default options
self.setOption("url", self.__shotgunUrl)
self.setOption("scriptName", self.__shotgunScriptName)
self.setOption("apiKey", self.__shotgunApiKey)
self.__publishData = {}
def _perform(self):
"""
Perform the task.
"""
import shotgun_api3
# creating a singleton session object
sg = shotgun_api3.Shotgun(
self.option('url'),
script_name=self.option('scriptName'),
api_key=self.option('apiKey')
)
# Source crawler is a json crawler that points to published data
sourceCrawler = self.crawlers()[0]
filePath = self.target(sourceCrawler) if self.target(sourceCrawler) else sourceCrawler.var('filePath')
self.__publishData["path"] = {"local_path": filePath}
self.__publishData["description"] = self.templateOption('comment', crawler=sourceCrawler)
self.__publishData["version_number"] = sourceCrawler.var('version')
if "_sgTask" in sourceCrawler.varNames():
self.__publishData["task"] = sourceCrawler.var("_sgTask")
publishName = self.templateOption('publishName', crawler=sourceCrawler)
self.__publishData["name"] = publishName
self.__publishData["code"] = publishName
self.__linkData(sg)
self.__sgFileType(sg)
self.__sgUser(sg)
sgPublishFile = sg.create("PublishedFile", self.__publishData)
self.__makeThumbnail(sgPublishFile, sg)
self.__makeDaily(sgPublishFile, sg)
# this task does not return any crawlers as result
return []
def __linkData(self, sg):
"""
Find the data that needs to be linked to the publish in Shotgun.
"""
sourceCrawler = self.crawlers()[0]
project = sg.find_one('Project', [['name', 'is', sourceCrawler.var('job')]])
self.__publishData['project'] = project
if "shot" in sourceCrawler.varNames() or "assetName" in sourceCrawler.varNames():
varName = "shot" if "shot" in sourceCrawler.varNames() else "assetName"
varType = "Shot" if "shot" in sourceCrawler.varNames() else "Asset"
filters = [
['code', 'is', sourceCrawler.var(varName)],
['project', 'is', project]
]
entityData = sg.find(varType, filters)
if len(entityData) != 1:
raise Exception(
"[SGPublish] Cannot find unique {} {} in project {}. Skip Publish.".format(
varName,
sourceCrawler.var(varName),
sourceCrawler.var('job')
)
)
self.__publishData['entity'] = entityData[0]
else:
self.__publishData['entity'] = project
def __sgFileType(self, sg):
"""
Find the shotgun file type for the publish. Create it in Shotgun if it does not already exist.
"""
publishedFileType = self.option('publishedFileType')
sgFileType = sg.find_one('PublishedFileType', filters=[["code", "is", publishedFileType]])
if not sgFileType:
# create a published file type on the fly
sgFileType = sg.create("PublishedFileType", {"code": publishedFileType})
self.__publishData["published_file_type"] = sgFileType
def __sgUser(self, sg):
"""
Find the shotgun user information for the publish.
"""
fields = ["id", "type", "email", "login", "name", "image"]
user = os.environ.get("KOMBI_USER", getpass.getuser()),
self.__publishData["created_by"] = sg.find_one("HumanUser", filters=[["login", "is", user]], fields=fields)
def __makeThumbnail(self, sgPublishFile, sg):
"""
Create a temporary thumbnail using images found in data to load as publish thumbnail in shotgun.
"""
createThumbnail = False
sourceCrawler = self.crawlers()[0]
if "thumbnailFile" in self.optionNames():
thumbnailFilePath = self.templateOption('thumbnailFile', crawler=sourceCrawler)
else:
# Look for an image sequence to create a thumbnail. If multiple sequences found, using the first one.
createThumbnail = True
imageCrawlers = sourceCrawler.globFromParent(filterTypes=[ImageCrawler])
if not imageCrawlers:
# No images anywhere in the publish, nothing to use as a thumbnail
return
groups = Crawler.group(filter(lambda x: x.isSequence(), imageCrawlers))
if groups:
targetCrawler = groups[0][int(len(groups[0]) / 2)]
else:
targetCrawler = imageCrawlers[0]
tempFile = tempfile.NamedTemporaryFile(
delete=False,
suffix=".jpg",
mode='w'
)
tempFile.close()
thumbnailFilePath = tempFile.name
# Remove file so the thumbnail task doesn't ask to overwrite it
os.unlink(thumbnailFilePath)
thumbnailTask = Task.create('imageThumbnail')
thumbnailTask.add(targetCrawler, thumbnailFilePath)
# Using python taskWrapper because the imageThumbnail task uses OIIO
TaskWrapper.create('python').run(thumbnailTask)
if os.path.exists(thumbnailFilePath):
sg.upload_thumbnail("PublishedFile", sgPublishFile["id"], thumbnailFilePath)
if createThumbnail:
# removing the temporary file
os.unlink(thumbnailFilePath)
def __makeDaily(self, sgPublishFile, sg):
"""
Create a version in Shotgun for this path and linked to this publish.
"""
sourceCrawler = self.crawlers()[0]
if 'movieFile' not in self.optionNames():
# No movie provided, glob for a mov
movCrawlers = sourceCrawler.globFromParent(filterTypes=["mov"])
if not movCrawlers:
return
movieFilePath = movCrawlers[0].var("filePath")
else:
movieFilePath = self.templateOption('movieFile', crawler=sourceCrawler)
if not movieFilePath or not os.path.exists(movieFilePath):
raise Exception("Movie provided for daily creation does not exist: {}".format(movieFilePath))
# create a name for the version based on the file name
# grab the file name, strip off extension
name = os.path.splitext(os.path.basename(movieFilePath))[0]
# do some replacements
name = name.replace("_", " ")
# and capitalize
name = name.capitalize()
firstFrame = None
lastFrame = None
imageSeqPath = None
movCrawler = FsCrawler.createFromPath(movieFilePath)
        if 'firstFrame' in movCrawler.varNames():
firstFrame = movCrawler.var('firstFrame')
lastFrame = movCrawler.var('lastFrame')
imageCrawlers = sourceCrawler.globFromParent(filterTypes=[ImageCrawler])
groups = Crawler.group(filter(lambda x: x.isSequence(), imageCrawlers))
if groups:
seqGroup = groups[0]
imageSeqPath = os.path.join(
os.path.dirname(seqGroup[0].var("filePath")),
'{0}.%0{1}d.{2}'.format(
seqGroup[0].var('name'),
seqGroup[0].var('padding'),
seqGroup[0].var('ext')
)
)
if firstFrame is None:
firstFrame = seqGroup[0].var('frame')
lastFrame = seqGroup[-1].var('frame')
# Create the version in Shotgun
data = {
"code": name,
"sg_status_list": "rev",
"entity": self.__publishData['entity'],
"created_by": self.__publishData['created_by'],
"user": self.__publishData['created_by'],
"description": self.__publishData['description'],
"project": self.__publishData['project']
}
if firstFrame is not None and lastFrame is not None:
data["sg_first_frame"] = firstFrame
data["sg_last_frame"] = lastFrame
data["frame_count"] = (lastFrame - firstFrame + 1)
data["frame_range"] = "%s-%s" % (firstFrame, lastFrame)
if imageSeqPath:
data["sg_path_to_frames"] = imageSeqPath
data["published_files"] = [sgPublishFile]
data["sg_path_to_movie"] = movieFilePath
sgVersion = sg.create("Version", data)
# upload files
sg.upload("Version", sgVersion["id"], movieFilePath, "sg_uploaded_movie")
return sgVersion
# registering task
Task.register(
'sgPublish',
SGPublishTask
)
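# Hedged note (not part of the original module): once registered, the task can be built
# through the same factory used elsewhere in this file, e.g. Task.create('sgPublish'),
# with the Shotgun connection details coming from the KOMBI_SHOTGUN_URL / _SCRIPTNAME /
# _APIKEY environment variables unless overridden via setOption('url', ...) and friends.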
|
[
"tempfile.NamedTemporaryFile",
"getpass.getuser",
"os.unlink",
"os.path.basename",
"os.path.exists",
"os.environ.get"
] |
[((367, 406), 'os.environ.get', 'os.environ.get', (['"""KOMBI_SHOTGUN_URL"""', '""""""'], {}), "('KOMBI_SHOTGUN_URL', '')\n", (381, 406), False, 'import os\n'), ((433, 479), 'os.environ.get', 'os.environ.get', (['"""KOMBI_SHOTGUN_SCRIPTNAME"""', '""""""'], {}), "('KOMBI_SHOTGUN_SCRIPTNAME', '')\n", (447, 479), False, 'import os\n'), ((502, 544), 'os.environ.get', 'os.environ.get', (['"""KOMBI_SHOTGUN_APIKEY"""', '""""""'], {}), "('KOMBI_SHOTGUN_APIKEY', '')\n", (516, 544), False, 'import os\n'), ((6128, 6161), 'os.path.exists', 'os.path.exists', (['thumbnailFilePath'], {}), '(thumbnailFilePath)\n', (6142, 6161), False, 'import os\n'), ((5531, 5597), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)', 'suffix': '""".jpg"""', 'mode': '"""w"""'}), "(delete=False, suffix='.jpg', mode='w')\n", (5558, 5597), False, 'import tempfile\n'), ((5823, 5851), 'os.unlink', 'os.unlink', (['thumbnailFilePath'], {}), '(thumbnailFilePath)\n', (5832, 5851), False, 'import os\n'), ((6335, 6363), 'os.unlink', 'os.unlink', (['thumbnailFilePath'], {}), '(thumbnailFilePath)\n', (6344, 6363), False, 'import os\n'), ((4345, 4362), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (4360, 4362), False, 'import getpass\n'), ((7269, 7300), 'os.path.basename', 'os.path.basename', (['movieFilePath'], {}), '(movieFilePath)\n', (7285, 7300), False, 'import os\n'), ((6982, 7011), 'os.path.exists', 'os.path.exists', (['movieFilePath'], {}), '(movieFilePath)\n', (6996, 7011), False, 'import os\n')]
|
"""Verify accuracy of the unsteady Navier-Stokes-Boussinesq solver."""
import firedrake as fe
import sapphire.mms
import tests.verification.test__navier_stokes_boussinesq
from sapphire.simulations.unsteady_navier_stokes_boussinesq import Simulation
import tests.validation.helpers
diff = fe.diff
def strong_residual(sim, solution):
r_p, r_u, r_T = tests.verification.test__navier_stokes_boussinesq.\
strong_residual(sim = sim, solution = solution)
_, u, T = solution
t = sim.time
r_u += diff(u, t)
r_T += diff(T, t)
return r_p, r_u, r_T
sin, pi = fe.sin, fe.pi
def space_verification_solution(sim):
x, y = fe.SpatialCoordinate(sim.mesh)
u0 = sin(2*pi*x)*sin(pi*y)
u1 = sin(pi*x)*sin(2*pi*y)
ihat, jhat = sim.unit_vectors
u = (u0*ihat + u1*jhat)
p = -0.5*sin(pi*x)*sin(pi*y)
T = sin(2*pi*x)*sin(pi*y)
mean_pressure = fe.assemble(p*fe.dx)
p -= mean_pressure
return p, u, T
def time_verification_solution(sim):
exp = fe.exp
x, y = fe.SpatialCoordinate(sim.mesh)
t = sim.time
u0 = sin(2*pi*x)*sin(pi*y)
u1 = sin(pi*x)*sin(2*pi*y)
ihat, jhat = sim.unit_vectors
u = exp(t)*(u0*ihat + u1*jhat)
p = -0.5*sin(pi*x)*sin(pi*y)
T = exp(t)*sin(2*pi*x)*sin(pi*y)
mean_pressure = fe.assemble(p*fe.dx)
p -= mean_pressure
return p, u, T
class UnitSquareUniformMeshSimulation(Simulation):
def __init__(self, *args,
meshcell_size,
**kwargs):
n = int(round(1/meshcell_size))
kwargs["mesh"] = fe.UnitSquareMesh(n, n)
super().__init__(*args, **kwargs)
def dirichlet_boundary_conditions(sim, manufactured_solution):
"""Apply velocity and temperature Dirichlet BC's on every boundary.
Do not apply Dirichlet BC's on the pressure.
"""
_, u, T = manufactured_solution
return [
fe.DirichletBC(sim.solution_subspaces["u"], u, "on_boundary"),
fe.DirichletBC(sim.solution_subspaces["T"], T, "on_boundary")]
sim_kwargs = {
"reynolds_number": 20,
"rayleigh_number": 1.e3,
"prandtl_number": 0.71,
"quadrature_degree": 4}
def test__verify_second_order_spatial_convergence_via_mms():
sim_kwargs["taylor_hood_pressure_degree"] = 1
sim_kwargs["temperature_degree"] = 2
sim_kwargs["timestep_size"] = 1
sim_kwargs["time_stencil_size"] = 2
sapphire.mms.verify_order_of_accuracy(
discretization_parameter_name = "meshcell_size",
discretization_parameter_values = [1/n for n in (8, 16, 32)],
Simulation = UnitSquareUniformMeshSimulation,
sim_kwargs = sim_kwargs,
strong_residual = strong_residual,
manufactured_solution = space_verification_solution,
dirichlet_boundary_conditions = dirichlet_boundary_conditions,
norms = ("L2", "H1", "H1"),
expected_orders = (2, 2, 2),
decimal_places = 1,
endtime = 1)
def test__verify_first_order_temporal_convergence_via_mms():
sim_kwargs["meshcell_size"] = 1/32
sim_kwargs["taylor_hood_pressure_degree"] = 2
sim_kwargs["temperature_degree"] = 3
sapphire.mms.verify_order_of_accuracy(
discretization_parameter_name = "timestep_size",
discretization_parameter_values = (1/2, 1/4, 1/8, 1/16),
Simulation = UnitSquareUniformMeshSimulation,
sim_kwargs = sim_kwargs,
strong_residual = strong_residual,
manufactured_solution = time_verification_solution,
dirichlet_boundary_conditions = dirichlet_boundary_conditions,
endtime = 1,
norms = (None, "L2", "L2"),
expected_orders = (None, 1, 1),
decimal_places = 1)
class HeatDrivenCavitySimulation(UnitSquareUniformMeshSimulation):
def dirichlet_boundary_conditions(self):
return [
fe.DirichletBC(self.solution_subspaces["u"], (0, 0), "on_boundary"),
fe.DirichletBC(self.solution_subspaces["T"], 0.5, 1),
fe.DirichletBC(self.solution_subspaces["T"], -0.5, 2)]
def test__steady_state_heat_driven_cavity_benchmark():
""" Verify against steady state heat-driven cavity benchmark.
Comparing to data published in @cite{wang2010comprehensive}.
"""
endtime = 1.e12
Ra = 1.e6
Pr = 0.71
sim = HeatDrivenCavitySimulation(
rayleigh_number = Ra,
prandtl_number = Pr,
taylor_hood_pressure_degree = 1,
temperature_degree = 2,
meshcell_size = 1/40,
timestep_size = endtime)
sim.states = sim.run(endtime = endtime)
# Check coordinates (0.3499, 0.8499) instead of (0.35, 0.85)
# because the Function evaluation fails at the exact coordinates.
# See https://github.com/firedrakeproject/firedrake/issues/1340
tests.validation.helpers.check_scalar_solution_component(
solution = sim.solution,
component = 1,
subcomponent = 0,
coordinates = [(0.5, y)
for y in (0, 0.15, 0.34999, 0.5, 0.65, 0.84999)],
expected_values = [val*Ra**0.5/Pr
for val in (0, -0.0649, -0.0194, 0,
0.0194, 0.0649)],
absolute_tolerances = [val*Ra**0.5/Pr
for val in (1.e-12, 0.001, 0.001, 1.e-12, 0.001, 0.001)])
|
[
"firedrake.SpatialCoordinate",
"firedrake.assemble",
"firedrake.DirichletBC",
"firedrake.UnitSquareMesh"
] |
[((688, 718), 'firedrake.SpatialCoordinate', 'fe.SpatialCoordinate', (['sim.mesh'], {}), '(sim.mesh)\n', (708, 718), True, 'import firedrake as fe\n'), ((961, 983), 'firedrake.assemble', 'fe.assemble', (['(p * fe.dx)'], {}), '(p * fe.dx)\n', (972, 983), True, 'import firedrake as fe\n'), ((1115, 1145), 'firedrake.SpatialCoordinate', 'fe.SpatialCoordinate', (['sim.mesh'], {}), '(sim.mesh)\n', (1135, 1145), True, 'import firedrake as fe\n'), ((1424, 1446), 'firedrake.assemble', 'fe.assemble', (['(p * fe.dx)'], {}), '(p * fe.dx)\n', (1435, 1446), True, 'import firedrake as fe\n'), ((1726, 1749), 'firedrake.UnitSquareMesh', 'fe.UnitSquareMesh', (['n', 'n'], {}), '(n, n)\n', (1743, 1749), True, 'import firedrake as fe\n'), ((2070, 2131), 'firedrake.DirichletBC', 'fe.DirichletBC', (["sim.solution_subspaces['u']", 'u', '"""on_boundary"""'], {}), "(sim.solution_subspaces['u'], u, 'on_boundary')\n", (2084, 2131), True, 'import firedrake as fe\n'), ((2141, 2202), 'firedrake.DirichletBC', 'fe.DirichletBC', (["sim.solution_subspaces['T']", 'T', '"""on_boundary"""'], {}), "(sim.solution_subspaces['T'], T, 'on_boundary')\n", (2155, 2202), True, 'import firedrake as fe\n'), ((4071, 4138), 'firedrake.DirichletBC', 'fe.DirichletBC', (["self.solution_subspaces['u']", '(0, 0)', '"""on_boundary"""'], {}), "(self.solution_subspaces['u'], (0, 0), 'on_boundary')\n", (4085, 4138), True, 'import firedrake as fe\n'), ((4152, 4204), 'firedrake.DirichletBC', 'fe.DirichletBC', (["self.solution_subspaces['T']", '(0.5)', '(1)'], {}), "(self.solution_subspaces['T'], 0.5, 1)\n", (4166, 4204), True, 'import firedrake as fe\n'), ((4218, 4271), 'firedrake.DirichletBC', 'fe.DirichletBC', (["self.solution_subspaces['T']", '(-0.5)', '(2)'], {}), "(self.solution_subspaces['T'], -0.5, 2)\n", (4232, 4271), True, 'import firedrake as fe\n')]
|
from fromTxtToVec.corpus_build import Corpus
from fromTxtToVec.pad import Pad
from fromTxtToVec.BERT_feat import ExtractBertEmb
from fromTxtToVec.train_vector import Embedding
import numpy as np
class To_vec:
def __init__(self, mode, sent_maxlen):
self.mode = mode
self.sent_maxlen = sent_maxlen
def vector(self):
sents, labels = Corpus().read_txt()
pad_sents, pad_labels = Pad(self.sent_maxlen).pad_seq(sents, labels)
if self.mode == 'w2v':
sents_, labels_ = pad_sents, pad_labels
elif self.mode == 'bert':
            path = input('Please enter the absolute or relative path to the BERT model...')
extractor = ExtractBertEmb(bert_path=path)
            granu = input('Please enter the extraction granularity: token or cls')
if granu == 'token':
bert_sents = extractor.extract(sentences=[''.join(i) for i in sents], granularity=granu)
sents_ = []
for s in bert_sents:
if len(s) >= int(self.sent_maxlen):
matrix = s[:int(self.sent_maxlen)]
else:
matrix = np.zeros((int(self.sent_maxlen), 768))
for idx, i in enumerate(s):
matrix[idx] = i
sents_.append(matrix)
            elif granu == 'cls':
                sents_ = extractor.extract(sentences=[''.join(i) for i in sents], granularity=granu)
labels_ = pad_labels
return np.array(sents_), labels_
def w2v_matrix(self, emb_size):
sents, labels = Corpus().read_txt()
matrix = Embedding(emb_size=emb_size).w2v(corpus=sents)
return matrix
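# Hedged usage sketch (not part of the original module; sent_maxlen and emb_size values
# are illustrative):
#   sents_vec, labels = To_vec(mode='w2v', sent_maxlen=64).vector()
#   w2v = To_vec(mode='w2v', sent_maxlen=64).w2v_matrix(emb_size=128)
# In 'bert' mode, vector() prompts on stdin for the model path and granularity: 'token'
# pads or truncates every sentence to a (sent_maxlen, 768) matrix, while 'cls' hands the
# joined sentences straight to ExtractBertEmb.extract().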
|
[
"fromTxtToVec.BERT_feat.ExtractBertEmb",
"numpy.array",
"fromTxtToVec.corpus_build.Corpus",
"fromTxtToVec.train_vector.Embedding",
"fromTxtToVec.pad.Pad"
] |
[((1521, 1537), 'numpy.array', 'np.array', (['sents_'], {}), '(sents_)\n', (1529, 1537), True, 'import numpy as np\n'), ((382, 390), 'fromTxtToVec.corpus_build.Corpus', 'Corpus', ([], {}), '()\n', (388, 390), False, 'from fromTxtToVec.corpus_build import Corpus\n'), ((435, 456), 'fromTxtToVec.pad.Pad', 'Pad', (['self.sent_maxlen'], {}), '(self.sent_maxlen)\n', (438, 456), False, 'from fromTxtToVec.pad import Pad\n'), ((682, 712), 'fromTxtToVec.BERT_feat.ExtractBertEmb', 'ExtractBertEmb', ([], {'bert_path': 'path'}), '(bert_path=path)\n', (696, 712), False, 'from fromTxtToVec.BERT_feat import ExtractBertEmb\n'), ((1613, 1621), 'fromTxtToVec.corpus_build.Corpus', 'Corpus', ([], {}), '()\n', (1619, 1621), False, 'from fromTxtToVec.corpus_build import Corpus\n'), ((1651, 1679), 'fromTxtToVec.train_vector.Embedding', 'Embedding', ([], {'emb_size': 'emb_size'}), '(emb_size=emb_size)\n', (1660, 1679), False, 'from fromTxtToVec.train_vector import Embedding\n')]
|
""" Import needed modules """
"-----------------------------------------------------------------------------"
from scipy.integrate import solve_ivp
from Shared_Funcs.pemfc_transport_funcs import *
import cantera as ct
import numpy as np
import sys
""" Control options for derivative functions """
"-----------------------------------------------------------------------------"
# Toggles to turn on/off in/outer rxns, gas transports, or surface tracking:---
pt_rxn = 1
o2_rxn = 1
gas_tog = 1
gdl_tog = 1
surf_tog = 1
""" Define CL dsvdt for core-shell model """
"-----------------------------------------------------------------------------"
def dsvdt_cl_cs(t, sv, dsvdt, objs, p, iSV, gdl_BC):
""" Set up conditions at GDL/CL BC """
    # Initialize indices for looping:-----------------------------------------
cl_ymv = 0 # CL y direction mover (y: GDL -> Elyte)
# Load in BC state and flux from GDL:--------------------------------------
TDY1 = gdl_BC['TDY1']
flux_up = gdl_BC['flux_up']
i_io_up = 0 # no protons flow into the GDL
""" Begin loop - with if statements for CL/Elyte BC """
for i in range(cl['Ny']):
# Temperature at each Y node:------------------------------------------
dsvdt[iSV['T_cl'] +cl_ymv] = 0
# Gas phase species at each Y node:------------------------------------
if i == cl['Ny'] -1: # BC for CL and electrolyte interface
flux_dwn = np.zeros(gas_ca.n_species)
else:
rho_gas_k = sv[iSV['rho_gas_k'] +cl_ymv +cl['nxt_y']]
TDY2 = sv[iSV['T_cl'] +cl_ymv +cl['nxt_y']], sum(rho_gas_k), rho_gas_k
flux_dwn = fickian_adf(TDY1, TDY2, gas_ca, cl, gas_tog)
# Set the phases for O2 absorption rxn:
rho_gas_k = sv[iSV['rho_gas_k'] +cl_ymv]
rho_naf_k = sv[iSV['rho_naf_k'] +cl_ymv]
gas_ca.TDY = sv[iSV['T_cl'] +cl_ymv], sum(rho_gas_k), rho_gas_k
naf_b_ca.TDY = sv[iSV['T_cl'] +cl_ymv], sum(rho_naf_k), rho_naf_k
rho_dot_g = naf_s_ca.get_net_production_rates(gas_ca) *cl['SApv_naf']\
*cl['1/eps_g'] *gas_ca.molecular_weights *gas_tog
rho_dot_n = naf_s_ca.get_net_production_rates(naf_b_ca) *cl['SApv_naf']\
*cl['1/eps_n'] *naf_b_ca.molecular_weights
# Include rxn and flux in ODE term:
dsvdt[iSV['rho_gas_k'] +cl_ymv] = (flux_up - flux_dwn)*cl['1/eps_g']*cl['1/dy']\
+ o2_rxn *rho_dot_g
flux_up = flux_dwn
TDY1 = TDY2
# Nafion densities at each R node:-------------------------------------
        # The Nafion densities change due to reactions at the outermost and
        # innermost shells as well as fluxes between adjacent shells. The
        # radial terms are stored from the outermost shell to the innermost
        # one.
" Start by evaluating the outermost shell "
# This node contains an O2 absorption rxn with the gas phase as well as
        # a mass flux with the adjacent inner node.
rho_k1 = sv[iSV['rho_naf_k'] +cl_ymv]
rho_k2 = sv[iSV['rho_naf_k'] +cl_ymv +cl['nxt_r']]
rho_flx_inr = radial_fdiff(rho_k1, rho_k2, cl, 0, ver, 'core_shell')
# Combine absorption and flux to get overall ODE for Nafion densities:
dsvdt[iSV['rho_naf_k'] +cl_ymv] = o2_rxn *rho_dot_n *cl['1/Vf_shl'][0]\
- rho_flx_inr *cl['1/r_j'][0]**2 *cl['1/t_shl'][0]
dsvdt[iSV['rho_naf_k'][cl['iH']] +cl_ymv] = 0 # Ensure constant proton density
rho_flx_otr = rho_flx_inr
rho_k1 = rho_k2
" Evaluate the inner shell nodes "
for j in range(1, cl['Nr'] -1):
rho_k2 = sv[iSV['rho_naf_k'] +cl_ymv +(j+1)*cl['nxt_r']]
rho_flx_inr = radial_fdiff(rho_k1, rho_k2, cl, j, ver, 'core_shell')
iMid = iSV['rho_naf_k'] +cl_ymv +j*cl['nxt_r']
dsvdt[iMid] = (rho_flx_otr - rho_flx_inr) *cl['1/r_j'][j]**2 *cl['1/t_shl'][j]
rho_flx_otr = rho_flx_inr
rho_k1 = rho_k2
" Apply the Pt reaction BC at the innermost shell "
# Set the phases for the ORR at the Pt surface:
carb_ca.electric_potential = 0
pt_s_ca.electric_potential = 0
naf_b_ca.electric_potential = -sv[iSV['phi_dl'] +cl_ymv]
naf_s_ca.electric_potential = -sv[iSV['phi_dl'] +cl_ymv]
pt_s_ca.coverages = sv[iSV['theta_pt_k'] +cl_ymv]
naf_b_ca.TDY = sv[iSV['T_cl'] +cl_ymv], sum(rho_k1), rho_k1
rho_dot_n = pt_s_ca.get_net_production_rates(naf_b_ca) *cl['SApv_pt']\
*cl['1/eps_n'] *naf_b_ca.molecular_weights
# Pt surface coverages:
dsvdt[iSV['theta_pt_k'] +cl_ymv] = pt_s_ca.get_net_production_rates(pt_s_ca)\
*cl['1/gamma'] *pt_rxn *surf_tog
# Innermost Nafion node densities:
iLast = iSV['rho_naf_k'] +cl_ymv +(cl['Nr'] -1)*cl['nxt_r']
dsvdt[iLast] = pt_rxn *rho_dot_n *cl['1/Vf_shl'][-1] \
+ rho_flx_otr *cl['1/r_j'][-1]**2 *cl['1/t_shl'][-1]
# Double layer potential at each Y node:-------------------------------
# The double layer potential is only stored as a function of CL depth.
        # This means that no local potential gradients are stored in the radial
# direction throughout the Nafion shells.
# Find ionic currents and define ODE for phi_dl:
if i == cl['Ny'] -1: # BC for CL and electrolyte interface
i_io_dwn = cl['i_ext']
else:
i_io_dwn = (sv[iSV['phi_dl'] +cl_ymv] - sv[iSV['phi_dl'] +cl_ymv +cl['nxt_y']])\
*cl['sig_naf_io'] *cl['1/dy']
i_Far = pt_rxn *pt_s_ca.get_net_production_rates(carb_ca) *ct.faraday
i_dl = (i_io_up - i_io_dwn)*cl['1/dy'] - i_Far*cl['SApv_pt']
dsvdt[iSV['phi_dl'] +cl_ymv] = i_dl*cl['1/CA_dl']
i_io_up = i_io_dwn
# Update Y direction moving index:-------------------------------------
cl_ymv = cl_ymv +cl['nxt_y']
return dsvdt
""" Define CL dsvdt for flooded-agglomerate model """
"-----------------------------------------------------------------------------"
def dsvdt_cl_fa(t, sv, dsvdt, objs, p, iSV, gdl_BC):
""" Set up conditions at GDL/CL BC """
    # Initialize indices for looping:-----------------------------------------
cl_ymv = 0 # CL y direction mover (y: GDL -> Elyte)
# Load in BC state and flux from GDL:--------------------------------------
TDY1 = gdl_BC['TDY1']
flux_up = gdl_BC['flux_up']
i_io_up = 0 # no protons flow into the GDL
""" Begin loop - with if statements for CL/Elyte BC """
for i in range(cl['Ny']):
# Temperature at each Y node:------------------------------------------
dsvdt[iSV['T_cl'] +cl_ymv] = 0
# Gas phase species at each Y node:------------------------------------
if i == cl['Ny'] -1:
flux_dwn = np.zeros(gas_ca.n_species)
else:
rho_gas_k = sv[iSV['rho_gas_k'] +cl_ymv +cl['nxt_y']]
TDY2 = sv[iSV['T_cl'] +cl_ymv +cl['nxt_y']], sum(rho_gas_k), rho_gas_k
flux_dwn = fickian_adf(TDY1, TDY2, gas_ca, cl, gas_tog)
# Set the phases for O2 absorption rxn:
rho_gas_k = sv[iSV['rho_gas_k'] +cl_ymv]
rho_shl_k = sv[iSV['rho_shl_k'] +cl_ymv]
gas_ca.TDY = sv[iSV['T_cl'] +cl_ymv], sum(rho_gas_k), rho_gas_k
naf_b_ca.TDY = sv[iSV['T_cl'] +cl_ymv], sum(rho_shl_k), rho_shl_k
rho_dot_g = naf_s_ca.get_net_production_rates(gas_ca) *cl['SApv_naf']\
*cl['1/eps_g'] *gas_ca.molecular_weights *gas_tog
rho_dot_n = naf_s_ca.get_net_production_rates(naf_b_ca) *cl['SApv_naf']\
*cl['1/eps_n'] *naf_b_ca.molecular_weights
# Include rxn and flux in ODE term:
dsvdt[iSV['rho_gas_k'] +cl_ymv] = (flux_up - flux_dwn)*cl['1/eps_g']*cl['1/dy']\
+ o2_rxn *rho_dot_g
flux_up = flux_dwn
TDY1 = TDY2
# Nafion densities at each R node:-------------------------------------
# The Nafion densities change due to reactions throughout the inner
# agglomerate as well as fluxes between adjacent radial nodes. The
# direction of storage for the radial terms starts with a single node
# for the outer shell, and then continues from the outer agglomerate
# node into the center.
" Start by evaluating single-node nafion shell "
# This node contains an O2 absorption rxn with the gas phase as well as
# a mass flux with the inner agglomerate.
rho_k1 = sv[iSV['rho_shl_k'] +cl_ymv]
rho_k2 = sv[iSV['rho_naf_k'] +cl_ymv]
rho_flx_inr = radial_fdiff(rho_k1, rho_k2, cl, 0, ver, 'flooded_agg')
# Combine absorption and flux to get overall ODE:
dsvdt[iSV['rho_shl_k'] +cl_ymv] = o2_rxn *rho_dot_n - rho_flx_inr
dsvdt[iSV['rho_shl_k'][cl['iH']] +cl_ymv] = 0 # Ensure constant proton density
rho_flx_otr = rho_flx_inr
rho_k1 = rho_k2
" Evaluate the inner agglomerate nodes "
# Loop through radial nodes within agglomerate:
i_Far_r = np.zeros(cl['Nr'])
# Set the phases for ORR at the Pt surface:
carb_ca.electric_potential = 0
pt_s_ca.electric_potential = 0
naf_b_ca.electric_potential = -sv[iSV['phi_dl'] +cl_ymv]
naf_s_ca.electric_potential = -sv[iSV['phi_dl'] +cl_ymv]
for j in range(cl['Nr'] -1):
rho_k2 = sv[iSV['rho_naf_k'] +cl_ymv +(j+1)*cl['nxt_r']]
rho_flx_inr = radial_fdiff(rho_k1, rho_k2, cl, j+1, ver, 'flooded_agg')
pt_s_ca.coverages = sv[iSV['theta_pt_k'] +cl_ymv +j*cl['nxt_r']]
naf_b_ca.TDY = sv[iSV['T_cl'] +cl_ymv], sum(rho_k1), rho_k1
rho_dot_n = pt_s_ca.get_net_production_rates(naf_b_ca) *cl['SApv_pt']\
*cl['1/eps_n'] *naf_b_ca.molecular_weights *cl['Vf_ishl'][j]
i_Far_r[j] = pt_rxn *pt_s_ca.get_net_production_rates(carb_ca)\
*ct.faraday *cl['Vf_ishl'][j]
# Pt surface coverages:
iMid = iSV['theta_pt_k'] +cl_ymv +j*cl['nxt_r']
dsvdt[iMid] = pt_s_ca.get_net_production_rates(pt_s_ca) *cl['1/gamma']\
*pt_rxn *surf_tog
# Combine ORR and flux to get overall ODE for Nafion densities:
iMid = iSV['rho_naf_k'] +cl_ymv +j*cl['nxt_r']
dsvdt[iMid] = rho_flx_otr - rho_flx_inr + pt_rxn *rho_dot_n
dsvdt[iMid[cl['iH']]] = 0 # Ensure constant proton density
rho_flx_otr = rho_flx_inr
rho_k1 = rho_k2
" Apply symmetric flux BC at innermost agglomerate node "
rho_flx_inr = np.zeros(naf_b_ca.n_species)
# Set the phases for ORR at the Pt surface:
pt_s_ca.coverages = sv[iSV['theta_pt_k'] +cl_ymv +(cl['Nr'] -1)*cl['nxt_r']]
naf_b_ca.TDY = sv[iSV['T_cl'] +cl_ymv], sum(rho_k1), rho_k1
rho_dot_n = pt_s_ca.get_net_production_rates(naf_b_ca) *cl['SApv_pt']\
*cl['1/eps_n'] *naf_b_ca.molecular_weights *cl['Vf_ishl'][-1]
i_Far_r[-1] = pt_rxn *pt_s_ca.get_net_production_rates(carb_ca)\
*ct.faraday *cl['Vf_ishl'][-1]
# Pt surface coverages:
iLast = iSV['theta_pt_k'] +cl_ymv +(cl['Nr'] -1)*cl['nxt_r']
dsvdt[iLast] = pt_s_ca.get_net_production_rates(pt_s_ca) *cl['1/gamma']\
*pt_rxn *surf_tog
# Combine ORR and flux to get overall ODE:
iLast = iSV['rho_naf_k'] +cl_ymv +(cl['Nr'] -1)*cl['nxt_r']
dsvdt[iLast] = rho_flx_otr - rho_flx_inr + pt_rxn *rho_dot_n
dsvdt[iLast[cl['iH']]] = 0 # Ensure constant proton density
# Double layer potential at each Y node:-------------------------------
# The double layer potential is only stored as a function of CL depth,
# but is based on the reactions that occur throughout the radial
# direction of each agglomerate. Looping through the radial nodes of
# each agglomerate and summing over all faradaic currents is used to
# evaluate an overall double layer current.
" Simplify all radial terms into a single y-dependent double layer "
# Combine the faradaic currents to get overall i_Far:
i_Far = np.sum(i_Far_r)
# Find ionic currents and define ODE for phi_dl:
if i == cl['Ny'] -1:
i_io_dwn = cl['i_ext']
else:
i_io_dwn = (sv[iSV['phi_dl'] +cl_ymv] - sv[iSV['phi_dl'] +cl_ymv +cl['nxt_y']])\
*cl['sig_naf_io'] *cl['1/dy']
i_dl = (i_io_up - i_io_dwn)*cl['1/dy'] - i_Far*cl['SApv_pt']
dsvdt[iSV['phi_dl'] +cl_ymv] = i_dl*cl['1/CA_dl']
i_io_up = i_io_dwn
# Update Y direction moving index:-------------------------------------
cl_ymv = cl_ymv +cl['nxt_y']
return dsvdt
""" Define dsvdt for pemfc models - common for GDL and then CLs above """
"-----------------------------------------------------------------------------"
def dsvdt_func(t, sv, objs, p, iSV):
    # Initialize indices for looping:-----------------------------------------
gdl_ymv = 0 # GDL y direction mover (y: gas channel -> CL)
dsvdt = np.zeros_like(sv)
""" Bondary Condition - GDL and CL gas transport """
# Densities/Temp of GDL gas species and CL BC (top):-----------------------
gas_ca.TPY = gdl['TPY_BC']
TDY_BC = gas_ca.TDY
# If GDL diffusion is turned on, compare adjacent nodes with ADF flux to
# determine the BC composition between the GDL and CL.
rho_gdl_k = sv[iSV['rho_gdl_k']]
TDY1 = sv[iSV['T_gdl']], sum(rho_gdl_k), rho_gdl_k
flux_up = fickian_adf(TDY_BC, TDY1, gas_ca, gdl, gdl_tog)
for k in range(gdl['Ny'] -1):
rho_gdl_k = sv[iSV['rho_gdl_k'] +gdl_ymv +gdl['nxt_y']]
TDY2 = sv[iSV['T_gdl'] +gdl_ymv +gdl['nxt_y']], sum(rho_gdl_k), rho_gdl_k
flux_dwn = fickian_adf(TDY1, TDY2, gas_ca, gdl, gdl_tog)
dsvdt[iSV['rho_gdl_k'] +gdl_ymv] = (flux_up - flux_dwn)*gdl['1/eps_g']*gdl['1/dy']
flux_up = flux_dwn
TDY1 = TDY2
gdl_ymv = gdl_ymv +gdl['nxt_y']
# Use the composition and state of the last GDL node to calculate the flux
# into the first CL node.
rho_gas_k = sv[iSV['rho_gas_k']]
TDY2 = sv[iSV['T_cl']], sum(rho_gas_k), rho_gas_k
flux_dwn = fickian_adf(TDY1, TDY2, gas_ca, gdl_cl, gdl_tog)
dsvdt[iSV['rho_gdl_k'] +gdl_ymv] = (flux_up - flux_dwn)*gdl['1/eps_g']*gdl['1/dy']
flux_up = fickian_adf(TDY1, TDY2, gas_ca, gdl_cl, gas_tog)
TDY1 = TDY2
# Load BC values to pass into CL functions:
gdl_BC = {}
gdl_BC['TDY1'] = TDY1
gdl_BC['flux_up'] = flux_up
""" Generic loop for interal CL nodes in y-direction """
if model == 'core_shell':
dsvdt = dsvdt_cl_cs(t, sv, dsvdt, objs, p, iSV, gdl_BC)
elif model == 'flooded_agg':
dsvdt = dsvdt_cl_fa(t, sv, dsvdt, objs, p, iSV, gdl_BC)
# print(t)
# print(dsvdt)
#
# user_in = input('"Enter" to continue or "Ctrl+d" to cancel.')
# if user_in == KeyboardInterrupt:
# sys.exit(0)
return dsvdt
""" Use integrator to call dsvdt and solve to SS """
"-----------------------------------------------------------------------------"
# Create vectors to store outputs:
i_ext = np.hstack([i_OCV, i_ext0, i_ext1, i_ext2])
eta_ss, dphi_ss = np.zeros_like(i_ext), np.zeros_like(i_ext)
sv_save = np.zeros([len(SV_0) +1, len(i_ext)])
# Define common index for last CL node's phi_dl:
iPhi_f = int(iSV['phi_dl'] + (Ny_cl-1)*L_cl/Ny_cl)
# Update and convert i_ext: A/cm^2 -> A/m^2
cl['i_ext'] = i_ext[0] *100**2
sol = solve_ivp(lambda t, sv: dsvdt_func(t, sv, objs, p, iSV), [0, t_sim],
SV_0, method=method, atol=atol, rtol=rtol, max_step=max_t)
# Calculate extra PEM resistance terms to subtract off:
R_naf_vec = i_ext*(pem['R_naf'] + 0.5*cl['dy'] / cl['sig_naf_io'] *100**2)
# Store solution and update initial values:
SV_0, sv_save[:,0] = sol.y[:,-1], np.append(i_ext[0], sol.y[:,-1])
dphi_ss[0] = sol.y[iPhi_f, -1] - dphi_eq_an - R_naf_vec[0]
print('t_f:',sol.t[-1],'i_ext:',round(cl['i_ext']*1e-4,3), 'dPhi:',round(dphi_ss[0],3))
for i in range(len(i_ext) -1):
# Don't run the for loop if i_OCV was not set to 0...
if any([all([i == 0, i_OCV != 0]), polar == 'off']):
break
# Update and convert i_ext: A/cm^2 -> A/m^2
cl['i_ext'] = i_ext[i+1] *100**2
sol = solve_ivp(lambda t, sv: dsvdt_func(t, sv, objs, p, iSV), [0, t_sim],
SV_0, method=method, atol=atol, rtol=rtol, max_step=max_t)
# Store solution and update initial values:
SV_0, sv_save[:,i+1] = sol.y[:,-1], np.append(i_ext[i+1], sol.y[:,-1])
eta_ss[i+1] = dphi_ss[0] - sol.y[iPhi_f,-1]
dphi_ss[i+1] = sol.y[iPhi_f,-1] - dphi_eq_an - R_naf_vec[i+1]
print('t_f:',sol.t[-1], 'i_ext:',round(cl['i_ext']*1e-4,3), 'dPhi:',round(dphi_ss[i+1],3))
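# Hedged reading (a trailing comment, not part of the original script): the loop above
# sweeps the stacked current densities in i_ext, re-solving each case to steady state from
# the previous solution, so (i_ext, dphi_ss) traces the cathode polarization curve after the
# PEM/Nafion ohmic terms in R_naf_vec are subtracted, while sv_save stores the applied
# current together with the full steady-state vector of every case for post-processing.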
|
[
"numpy.zeros_like",
"numpy.sum",
"numpy.zeros",
"numpy.hstack",
"numpy.append"
] |
[((16058, 16100), 'numpy.hstack', 'np.hstack', (['[i_OCV, i_ext0, i_ext1, i_ext2]'], {}), '([i_OCV, i_ext0, i_ext1, i_ext2])\n', (16067, 16100), True, 'import numpy as np\n'), ((13776, 13793), 'numpy.zeros_like', 'np.zeros_like', (['sv'], {}), '(sv)\n', (13789, 13793), True, 'import numpy as np\n'), ((16119, 16139), 'numpy.zeros_like', 'np.zeros_like', (['i_ext'], {}), '(i_ext)\n', (16132, 16139), True, 'import numpy as np\n'), ((16141, 16161), 'numpy.zeros_like', 'np.zeros_like', (['i_ext'], {}), '(i_ext)\n', (16154, 16161), True, 'import numpy as np\n'), ((16749, 16782), 'numpy.append', 'np.append', (['i_ext[0]', 'sol.y[:, -1]'], {}), '(i_ext[0], sol.y[:, -1])\n', (16758, 16782), True, 'import numpy as np\n'), ((9532, 9550), 'numpy.zeros', 'np.zeros', (["cl['Nr']"], {}), "(cl['Nr'])\n", (9540, 9550), True, 'import numpy as np\n'), ((11169, 11197), 'numpy.zeros', 'np.zeros', (['naf_b_ca.n_species'], {}), '(naf_b_ca.n_species)\n', (11177, 11197), True, 'import numpy as np\n'), ((12814, 12829), 'numpy.sum', 'np.sum', (['i_Far_r'], {}), '(i_Far_r)\n', (12820, 12829), True, 'import numpy as np\n'), ((17451, 17488), 'numpy.append', 'np.append', (['i_ext[i + 1]', 'sol.y[:, -1]'], {}), '(i_ext[i + 1], sol.y[:, -1])\n', (17460, 17488), True, 'import numpy as np\n'), ((1460, 1486), 'numpy.zeros', 'np.zeros', (['gas_ca.n_species'], {}), '(gas_ca.n_species)\n', (1468, 1486), True, 'import numpy as np\n'), ((7242, 7268), 'numpy.zeros', 'np.zeros', (['gas_ca.n_species'], {}), '(gas_ca.n_species)\n', (7250, 7268), True, 'import numpy as np\n')]
|
# coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose Pty Ltd" file="CadOptions.py">
# Copyright (c) 2003-2021 Aspose Pty Ltd
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import pprint
import re # noqa: F401
import six
class CadOptions(object):
"""
Rendering options for CAD file formats. CAD file formats include files with extensions: .dwg, .dxf, .dgn, .ifc, .stl
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'scale_factor': 'float',
'width': 'int',
'height': 'int',
'tiles': 'list[Tile]',
'render_layouts': 'bool',
'layout_name': 'str',
'layers': 'list[str]'
}
attribute_map = {
'scale_factor': 'ScaleFactor',
'width': 'Width',
'height': 'Height',
'tiles': 'Tiles',
'render_layouts': 'RenderLayouts',
'layout_name': 'LayoutName',
'layers': 'Layers'
}
def __init__(self, scale_factor=None, width=None, height=None, tiles=None, render_layouts=None, layout_name=None, layers=None, **kwargs): # noqa: E501
"""Initializes new instance of CadOptions""" # noqa: E501
self._scale_factor = None
self._width = None
self._height = None
self._tiles = None
self._render_layouts = None
self._layout_name = None
self._layers = None
if scale_factor is not None:
self.scale_factor = scale_factor
if width is not None:
self.width = width
if height is not None:
self.height = height
if tiles is not None:
self.tiles = tiles
if render_layouts is not None:
self.render_layouts = render_layouts
if layout_name is not None:
self.layout_name = layout_name
if layers is not None:
self.layers = layers
@property
def scale_factor(self):
"""
Gets the scale_factor. # noqa: E501
Scale factor allows to change the size of the output document. Values higher than 1 will enlarge output result and values between 0 and 1 will make output result smaller. This option is ignored when either Height or Width options are set. # noqa: E501
:return: The scale_factor. # noqa: E501
:rtype: float
"""
return self._scale_factor
@scale_factor.setter
def scale_factor(self, scale_factor):
"""
Sets the scale_factor.
Scale factor allows to change the size of the output document. Values higher than 1 will enlarge output result and values between 0 and 1 will make output result smaller. This option is ignored when either Height or Width options are set. # noqa: E501
:param scale_factor: The scale_factor. # noqa: E501
:type: float
"""
if scale_factor is None:
raise ValueError("Invalid value for `scale_factor`, must not be `None`") # noqa: E501
self._scale_factor = scale_factor
@property
def width(self):
"""
Gets the width. # noqa: E501
Width of the output result in pixels # noqa: E501
:return: The width. # noqa: E501
:rtype: int
"""
return self._width
@width.setter
def width(self, width):
"""
Sets the width.
Width of the output result in pixels # noqa: E501
:param width: The width. # noqa: E501
:type: int
"""
if width is None:
raise ValueError("Invalid value for `width`, must not be `None`") # noqa: E501
self._width = width
@property
def height(self):
"""
Gets the height. # noqa: E501
Height of the output result in pixels # noqa: E501
:return: The height. # noqa: E501
:rtype: int
"""
return self._height
@height.setter
def height(self, height):
"""
Sets the height.
Height of the output result in pixels # noqa: E501
:param height: The height. # noqa: E501
:type: int
"""
if height is None:
raise ValueError("Invalid value for `height`, must not be `None`") # noqa: E501
self._height = height
@property
def tiles(self):
"""
Gets the tiles. # noqa: E501
The drawing regions to render This option supported only for DWG and DWT file types The RenderLayouts and LayoutName options are ignored when rendering by tiles # noqa: E501
:return: The tiles. # noqa: E501
:rtype: list[Tile]
"""
return self._tiles
@tiles.setter
def tiles(self, tiles):
"""
Sets the tiles.
The drawing regions to render This option supported only for DWG and DWT file types The RenderLayouts and LayoutName options are ignored when rendering by tiles # noqa: E501
:param tiles: The tiles. # noqa: E501
:type: list[Tile]
"""
self._tiles = tiles
@property
def render_layouts(self):
"""
Gets the render_layouts. # noqa: E501
Indicates whether layouts from CAD document should be rendered # noqa: E501
:return: The render_layouts. # noqa: E501
:rtype: bool
"""
return self._render_layouts
@render_layouts.setter
def render_layouts(self, render_layouts):
"""
Sets the render_layouts.
Indicates whether layouts from CAD document should be rendered # noqa: E501
:param render_layouts: The render_layouts. # noqa: E501
:type: bool
"""
if render_layouts is None:
raise ValueError("Invalid value for `render_layouts`, must not be `None`") # noqa: E501
self._render_layouts = render_layouts
@property
def layout_name(self):
"""
Gets the layout_name. # noqa: E501
The name of the specific layout to render. Layout name is case-sensitive # noqa: E501
:return: The layout_name. # noqa: E501
:rtype: str
"""
return self._layout_name
@layout_name.setter
def layout_name(self, layout_name):
"""
Sets the layout_name.
The name of the specific layout to render. Layout name is case-sensitive # noqa: E501
:param layout_name: The layout_name. # noqa: E501
:type: str
"""
self._layout_name = layout_name
@property
def layers(self):
"""
Gets the layers. # noqa: E501
The CAD drawing layers to render By default all layers are rendered; Layer names are case-sensitive # noqa: E501
:return: The layers. # noqa: E501
:rtype: list[str]
"""
return self._layers
@layers.setter
def layers(self, layers):
"""
Sets the layers.
The CAD drawing layers to render By default all layers are rendered; Layer names are case-sensitive # noqa: E501
:param layers: The layers. # noqa: E501
:type: list[str]
"""
self._layers = layers
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CadOptions):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
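# Hedged usage sketch (not part of the generated SDK file; the option values are
# illustrative):
#   opts = CadOptions(scale_factor=0.5, render_layouts=True, layers=['Walls', 'Doors'])
#   payload = opts.to_dict()
# to_dict() walks swagger_types, so nested models such as the Tile entries in 'tiles'
# are serialized recursively through their own to_dict() methods.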
|
[
"six.iteritems"
] |
[((8710, 8743), 'six.iteritems', 'six.iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (8723, 8743), False, 'import six\n')]
|
from proteus import Domain, Context
from proteus.mprans import SpatialTools as st
from proteus import Gauges as ga
from proteus import WaveTools as wt
from math import *
import numpy as np
from proteus.mprans import BodyDynamics as bd
opts=Context.Options([
# predefined test cases
("water_level", 0.325, "Height of free surface above bottom"),
# Geometry
    ('Lgen', 1.0, 'Generation zone in terms of wave lengths'),
('Labs', 1.0, 'Absorption zone in terms of wave lengths'),
('Ls', 1.0, 'Length of domain from genZone to the front toe of rubble mound in terms of wave lengths'),
('Lend', 1.0, 'Length of domain from absZone to the back toe of rubble mound in terms of wave lengths'),
# waves
('wave', True, 'Enable wave generation'),
('waveType', 'Fenton', 'Wavetype for regular waves, Linear or Fenton'),
("wave_period", 1.30, "Period of the waves"),
("wave_height", 0.167, "Height of the waves"),
('wavelength', 2.121, 'Wavelength only if Fenton is activated'),
('Ycoeff', [0.21107604, 0.07318902, 0.02782228, 0.01234846, 0.00618291, 0.00346483, 0.00227917, 0.00194241], 'Ycoeff only if Fenton is activated'),
('Bcoeff', [0.23112932, 0.03504843, 0.00431442, 0.00036993, 0.00004245, 0.00001877, 0.00000776, 0.00000196], 'Bcoeff only if Fenton is activated'),
('Nf', 8 ,'Number of frequency components for fenton waves'),
('meanVelocity', [ 0., 0., 0.],'Velocity used for currents'),
('phi0', 0.0 ,'Initial phase for waves'),
('Uwind', [0.0, 0.0, 0.0], 'Set air velocity'),
('fast', True ,'Switches ON fast cosh approximation'),
# rubble mound
('porousMedia', True, 'Enable porus media region'),
("hs", 0.175, "Height of the breakwater"),
("slope1", 1./3., "Slope1 of the breakwater"),
("slope2", 1./2., "Slope2 of the breakwater"),
('porosity', 0.4, "Porosity of the medium"),
('d50', 0.030, "Mean diameter of the medium"),
('d15', None, "15% grading curve diameter of the medium"),
('Resistance', 'Shih', 'Ergun or Engelund or Shih'),
# soil foundation
("springs", True, "Switch on/off soil module"),
("Kx", 541553.2, "Horizontal stiffness in Pa"),
("Ky", 582633.7, "Vertical stiffness in Pa"),
("Krot", 16246.6, "Rotational stiffness in N"),
("Cx", 1694.2, "Damping factor in Pa s "),
("Cy", 1757.32, "Damping factor in Pa s "),
("Crot", 69.61, "Rotational damping factor in N s "),
# caisson
("caisson2D", True, "Switch on/off caisson2D"),
('dimx', 0.300, 'X-dimension of the caisson2D'),
('dimy', 0.385, 'Y-dimension of the caisson2D'),
('width', 1.0, 'Z-dimension of the caisson2D'),
('mass', 64.8/0.4, 'Mass of the caisson2D [kg]'),
('caissonBC', 'FreeSlip', 'caisson2D boundaries: NoSlip or FreeSlip'),
("rotation", False, "Initial position for free oscillation"),
("friction", True, "Switch on/off friction module for sliding"),
("overturning", True, "Switch on/off overturning module"),
("m_static", 0.500, "Static friction factor between caisson2D and rubble mound"),
("m_dynamic", 0.500, "Dynamic friction factor between caisson2D and rubble mound"),
('scheme', 'Runge_Kutta', 'Numerical scheme applied to solve motion calculation (Runge_Kutta or Central_Difference)'),
# numerical options
("GenZone", True, 'Turn on generation zone at left side'),
("AbsZone", True, 'Turn on absorption zone at right side'),
("refinement_level", 0.0,"he=walength/refinement_level"),
("he", 0.05,"he=walength/refinement_level"),
("cfl", 0.450 ,"Target cfl"),
("duration", 20., "Durarion of the simulation"),
("freezeLevelSet", True, "No motion to the levelset"),
("useVF", 1.0, "For density and viscosity smoothing"),
('movingDomain', True, "Moving domain and mesh option"),
('conservativeFlux', True,'Fix post-processing velocity bug for porous interface'),
])
# ----- DOMAIN ----- #
domain = Domain.PlanarStraightLineGraphDomain()
# ----- WAVE CONDITIONS ----- #
period=opts.wave_period
omega=2*np.pi/opts.wave_period
waterLevel=opts.water_level
waveDir=np.array([1, 0., 0.])
mwl=waterLevel #coordinate of the initial mean level of water surface
waveHeight=opts.wave_height
inflowHeightMean=waterLevel
inflowVelocityMean =np.array([0.,0.,0.])
windVelocity = np.array([0.,0.,0.])
# ----- Physical constants ----- #
rho_0=998.2
nu_0 =1.004e-6
rho_1=1.205
nu_1 =1.500e-5
sigma_01=0.0
g =np.array([0.,-9.8,0.])
gAbs=sqrt(sum(g**2))
# ----- WAVE input ----- #
if opts.wave == True:
waveinput = wt.MonochromaticWaves(period=period,
waveHeight=waveHeight,
mwl=mwl,
depth=waterLevel,
g=g,
waveDir=waveDir,
wavelength=opts.wavelength, # used by fenton waves
waveType=opts.waveType,
Ycoeff=np.array(opts.Ycoeff), # used by fenton waves
Bcoeff=np.array(opts.Bcoeff), # used by fenton waves
Nf=opts.Nf, # used by fenton waves
meanVelocity = np.array(opts.meanVelocity),
phi0 = opts.phi0,
fast = opts.fast,
)
#---------Domain Dimension
nd = 2
wl = waveinput.wavelength
#---------MESH SIZE
if opts.he == 0.0:
he = wl/opts.refinement_level
else:
he = opts.he
####################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################
# ----- SHAPES ----- #
####################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################
if opts.caisson2D:
L_leftSpo = opts.Lgen*wl
L_rightSpo = opts.Labs*wl
hs=opts.hs
slope1=opts.slope1
slope2=opts.slope2
#-caisson2D
dimx=opts.dimx
dimy=opts.dimy
b=dimx
#-Tank
x1=L_leftSpo
x2=x1+opts.Ls*wl
x3=x2+(hs/slope1)
xc1=x3+0.20
xc2=xc1+b
yc1=yc2=hs
x4=xc2+0.20
x5=x4+(hs/slope2)
x6=x5+opts.Lend*wl
x7=x6+L_rightSpo
tank_dim = [x7, 1.0]
boundaryOrientations = {'y-': np.array([0., -1.,0.]),
'x+': np.array([+1., 0.,0.]),
'y+': np.array([0., +1.,0.]),
'x-': np.array([-1., 0.,0.]),
'sponge': None,
'porousLayer': None,
'moving_porousLayer': None,
}
boundaryTags = {'y-' : 1,
'x+' : 2,
'y+' : 3,
'x-' : 4,
'sponge' : 5,
'porousLayer' : 6,
'moving_porousLayer' : 7,
}
else:
L_leftSpo = opts.Lgen*wl
L_rightSpo = opts.Labs*wl
#-Tank
x1=L_leftSpo
x2=x1+opts.Ls*wl
x3=x2+L_rightSpo
tank_dim = [x3, 1.0]
boundaryOrientations = {'y-': np.array([0., -1.,0.]),
'x+': np.array([+1., 0.,0.]),
'y+': np.array([0., +1.,0.]),
'x-': np.array([-1., 0.,0.]),
'sponge': None,
}
boundaryTags = {'y-': 1,
'x+': 2,
'y+': 3,
'x-': 4,
'sponge': 5,
}
##############################################################################################################################################################################################################
# caisson2D
############################################################################################################################################################################################################
if opts.caisson2D:
dimx=dimx
dimy=dimy
dim=(dimx,dimy)
coords=[xc1+b/2., hs+dimy/2.] # For bodyDimensions and barycenter
VCG=dim[1]/2. # For barycenter
width=opts.width # The 3rd dimension
mass=opts.mass #kg
volume=float(dimx*dimy*width)
density=float(mass/volume) #kg/m3
I=mass*(dimx**2.+dimy**2.)/12.
# It=(dimx**2.+dimy**2.)/12.
# --- Shape properties setup
caisson = st.Rectangle(domain, dim=dim, coords=coords)
caisson.vertices[0][0]=xc1
caisson.vertices[0][1]=yc1
caisson.vertices[1][0]=xc2
caisson.vertices[1][1]=yc2
# --- Body properties setup
caisson2D = bd.CaissonBody(shape=caisson, substeps=20)
free_x=(0.0, 0.0, 0.0) # Translational DOFs
free_r=(0.0, 0.0, 0.0) # Rotational DOFs
m_static=opts.m_static # Static friction
m_dynamic=opts.m_dynamic # Dynamic friction
if opts.movingDomain==True:
free_x=(1.0, 1.0, 0.0) # Translational DOFs
if opts.overturning==True:
free_r=(0.0, 0.0, 1.0) # Rotational DOFs
caisson2D.setMass(mass)
caisson2D.setConstraints(free_x=free_x, free_r=free_r)
caisson2D.setFriction(friction=opts.friction, m_static=m_static, m_dynamic=m_dynamic,
tolerance=he/(float(10**6)), grainSize=opts.d50)
overturning=opts.overturning
caisson2D.setOverturning(overturning)
if opts.rotation==True: # Initial position for free oscillation
caisson2D.rotate(rotation)
caisson2D.It= I/caisson2D.mass/width
caisson2D.setNumericalScheme(scheme=opts.scheme)
caisson2D.setRecordValues(filename='caisson2D', all_values=True)
##############################################################################################################################################################################################################
# Tank
#########################################################################################################################################################################################################
if opts.caisson2D==False:
vertices=[[0.0, 0.0],#0
[x1, 0.0],#1
[x2, 0.0], #2
[x3, 0.0 ],#3
[x3, tank_dim[1] ],#4
[x2, tank_dim[1] ],#5
[x1, tank_dim[1] ],#6
[0.0, tank_dim[1] ],#7
]
vertexFlags=np.array([1, 1, 1, 1,
3, 3, 3, 3,
])
segments=[[0,1],
[1,2],
[2,3],
[3,4],
[4,5],
[5,6],
[6,7],
[7,0],
[1,6],
[2,5],
]
segmentFlags=np.array([1, 1, 1,
2, 3, 3, 3, 4,
5, 5,
])
regions = [ [ 0.90*x1 , 0.10*tank_dim[1] ],
[ 0.90*x2 , 0.90*tank_dim[1] ],
[ 0.95*x3 , 0.95*tank_dim[1] ] ]
regionFlags=np.array([1, 2, 3])
else:
vertices=[[0.0, 0.0],#0
[x1, 0.0],#1
[x2, 0.0], #2
[x3, hs ],#3
[x4, hs ],#4
[x5, 0.0],#5
[x6, 0.0],#6
[x7, 0.0],#7
[x7, tank_dim[1]],#8
[x6, tank_dim[1]],#9
[x1, tank_dim[1]],#10
[0.0, tank_dim[1]],#11
[xc1, yc1],#12
[xc2, yc2],#13
]
vertexFlags=np.array([1, 1, 1,
6, 6,
1, 1, 1,
3, 3, 3, 3,
7, 7,
])
segments=[[0,1],
[1,2],
[2,3],
[4,5],
[5,6],
[6,7],
[7,8],
[8,9],
[9,10],
[10,11],
[11,0],
[2,5],
[1,10],
[6,9],
[3,12],
[13,4],
]
segmentFlags=np.array([1, 1,
6, 6,
1, 1,
2, 3, 3, 3, 4,
1,
5, 5,
7, 7,
])
regions = [ [ 0.90*x1 , 0.10*tank_dim[1] ],
[ 0.90*x2 , 0.90*tank_dim[1] ],
[ xc1 , 0.50*hs ],
[ 0.95*x7 , 0.95*tank_dim[1] ] ]
regionFlags=np.array([1, 2, 3, 4])
tank = st.CustomShape(domain, vertices=vertices, vertexFlags=vertexFlags,
segments=segments, segmentFlags=segmentFlags,
regions=regions, regionFlags=regionFlags,
boundaryTags=boundaryTags, boundaryOrientations=boundaryOrientations)
##################################################################################################################################################################################################################
# POROUS MEDIA
##################################################################################################################################################################################################################
porosity=opts.porosity
voidFrac=1.0-porosity
d50=opts.d50
if d50==None:
d15=opts.d15
else:
d15=d50/1.2
#----- SHIH
if opts.Resistance=='Shih':
term1=3.12*(10**-3.)
term2=(gAbs/(nu_0**2.))**(2./3.)
term3=(d15**2.)
Alpha1=1684+term1*term2*term3 #Shih
Alpha=Alpha1*nu_0*(voidFrac**2)/((porosity**3)*(d15**2))
term1=-5.10*(10**-3.)
term2=(gAbs/(nu_0**2.))**(1./3.)
term3=(d15)
Beta1=1.72+1.57*exp(term1*term2*term3) #Shih
Beta=Beta1*voidFrac/((porosity**3)*d15)
#----- ERGUN
if opts.Resistance=='Ergun':
Alpha1=150 #Ergun
Beta1=1.75 #Ergun
Alpha=Alpha1*nu_0*(voidFrac**2)/((porosity**3)*(d15**2))
Beta=Beta1*voidFrac/((porosity**3)*d15)
#----- ENGELUND
if opts.Resistance=='Engelund':
Alpha1=360 #Ergun
Beta1=3.6 #Ergun
Alpha=Alpha1*nu_0*(voidFrac**3)/((porosity**2)*(d15**2))
Beta=Beta1*voidFrac/((porosity**3)*d15)
#Proteus scales in viscosity, so alpha and beta need to be divided by nu_0
dragAlpha=(porosity**2)*Alpha/nu_0
dragBeta=(porosity**3)*Beta/nu_0
#----- Spring setup
springs=opts.springs
Kx = opts.Kx
Ky = opts.Ky
Krot = opts.Krot
Cx = opts.Cx
Cy = opts.Cy
Crot = opts.Crot
if opts.caisson2D:
caisson2D.setSprings(springs, Kx, Ky, Krot, Cx, Cy, Crot)
#############################################################################################################################################################################################################################################################################################################################################################################################
# ----- BOUNDARY CONDITIONS ----- #
#############################################################################################################################################################################################################################################################################################################################################################################################
if opts.caisson2D:
# Caisson boundaries
for bc in caisson.BC_list:
if opts.caissonBC == 'FreeSlip':
bc.setFreeSlip()
if opts.caissonBC == 'NoSlip':
bc.setNoSlip()
# Tank Boundaries
tank.BC['y+'].setAtmosphere()
tank.BC['x-'].setUnsteadyTwoPhaseVelocityInlet(wave=waveinput, vert_axis=1, smoothing=3.0*he)
tank.BC['y-'].setFreeSlip()
tank.BC['x+'].setFreeSlip()
tank.BC['sponge'].setNonMaterial()
if opts.caisson2D:
    # Porous media boundaries
tank.BC['porousLayer'].reset()
tank.BC['moving_porousLayer'].reset()
# Moving Mesh Options
if opts.movingDomain==True:
for tb in [tank.BC['x+'], tank.BC['x-'], tank.BC['y+'], tank.BC['y-'], tank.BC['sponge'], tank.BC['porousLayer']]:
tb.hx_dirichlet.uOfXT= lambda x, t: 0.0
tb.hy_dirichlet.uOfXT= lambda x, t: 0.0
tb.hz_dirichlet.uOfXT= lambda x, t: 0.0
tb.u_stress.uOfXT=None
tb.v_stress.uOfXT=None
tb.w_stress.uOfXT=None
ms=tank.BC['moving_porousLayer']
ms.hx_dirichlet.uOfXT= None
ms.hy_dirichlet.uOfXT= None
ms.hz_dirichlet.uOfXT= lambda x, t: 0.0
ms.u_stress.uOfXT=None
ms.v_stress.uOfXT=None
ms.w_stress.uOfXT=None
########################################################################################################################################################################################################################################################################################################################################################
# ----- GENERATION ZONE & ABSORPTION ZONE ----- #
########################################################################################################################################################################################################################################################################################################################################################
# Waves and Generation zone
if opts.GenZone and opts.wave:
tank.setGenerationZones(flags=1, epsFact_solid=float(L_leftSpo/2.),
orientation=[1., 0.], center=(float(L_leftSpo/2.), 0., 0.),
waves=waveinput, smoothing=3.0*he, dragAlpha=10.*omega/nu_0)
# Only Generation zone
elif opts.GenZone:
tank.setAbsorptionZones(flags=1, epsFact_solid=float(L_leftSpo/2.),
orientation=[1., 0.], center=(float(L_leftSpo/2.), 0., 0.),
dragAlpha=10.*omega/nu_0)
# Porous zone
if opts.porousMedia:
tank.setPorousZones(flags=3,
dragAlpha=dragAlpha, dragBeta=dragBeta,
porosity=porosity,)
# Absorption zone
if opts.AbsZone:
if opts.caisson2D:
tank.setAbsorptionZones(flags=4, epsFact_solid=float(L_rightSpo/2.),
orientation=[-1., 0.], center=(float(tank_dim[0]-L_rightSpo/2.), 0., 0.),
dragAlpha=10.*omega/nu_0)
else:
tank.setAbsorptionZones(flags=3, epsFact_solid=float(L_rightSpo/2.),
orientation=[-1., 0.], center=(float(tank_dim[0]-L_rightSpo/2.), 0., 0.),
dragAlpha=10.*omega/nu_0)
############################################################################################################################################################################
# ----- Output Gauges ----- #
############################################################################################################################################################################
T = opts.duration
gauge_dx=0.25
tank_dim_x=int(tank_dim[0])
nprobes=int(tank_dim_x/gauge_dx)+1
probes=np.linspace(0., tank_dim_x, nprobes)
PG=[]
if opts.caisson2D:
zProbes=hs*0.5
else:
zProbes=opts.water_level*0.5
for i in probes:
PG.append((i, zProbes, 0.),)
if opts.caisson2D:
gauge_dy=0.01
tol=np.array([1*(10**-5),1*(10**-5),0.])
i_point_f=np.array([caisson.vertices[0][0],caisson.vertices[0][1],0.])
i_point_f += -tol #to avoid floating point error
i_point_b=np.array([caisson.vertices[1][0],caisson.vertices[1][1],0.])
i_point_b += tol #to avoid floating point error
yProbes = np.linspace(i_point_f[1],i_point_f[1]+dimy, int(dimy/gauge_dy)+1)
LG1=[]
LG2=[]
for j in yProbes:
LG1.append((i_point_f[0],j,0.),)
LG2.append((i_point_b[0],j,0.),)
#point_output=ga.PointGauges(gauges=((('p'),PG),
# ),
# activeTime = (0., T),
# sampleRate=0.,
# fileName='point_gauges.csv')
#loadingsGauges=ga.PointGauges(gauges=((('p'),LG1),
# (('p'),LG2),
# ),
# activeTime = (0., T),
# sampleRate=0.,
# fileName='loadingsGauges.csv')
levelset_output=ga.PointGauges(gauges=((('phi',),PG),
),
activeTime = (0., T),
sampleRate=0.,
fileName='levelset_gauges.csv')
######################################################################################################################################################################################################################
# Numerical Options and other parameters #
######################################################################################################################################################################################################################
he = he
domain.MeshOptions.he = he
from math import *
from proteus import MeshTools, AuxiliaryVariables
import numpy
import proteus.MeshTools
from proteus import Domain
from proteus.Profiling import logEvent
from proteus.default_n import *
from proteus.ctransportCoefficients import smoothedHeaviside
from proteus.ctransportCoefficients import smoothedHeaviside_integral
st.assembleDomain(domain)
#----------------------------------------------------
# Time stepping and velocity
#----------------------------------------------------
weak_bc_penalty_constant = 10.0/nu_0 #100
dt_fixed = 1
dt_init = min(0.1*dt_fixed,0.001)
T = T
nDTout= int(round(T/dt_fixed))
runCFL = opts.cfl
#----------------------------------------------------
# Discretization -- input options
#----------------------------------------------------
checkMass=False
applyCorrection=True
applyRedistancing=True
freezeLevelSet=opts.freezeLevelSet
useOnlyVF = False # if TRUE proteus uses only these modules --> twp_navier_stokes_p + twp_navier_stokes_n
# vof_p + vof_n
movingDomain=opts.movingDomain
useRANS = 0 # 0 -- None
# 1 -- K-Epsilon
# 2 -- K-Omega, 1998
# 3 -- K-Omega, 1988
genMesh=True
# By DEFAULT on the other files.py --> fullNewtonFlag = True
# multilevelNonlinearSolver & levelNonlinearSolver == NonlinearSolvers.Newton
useOldPETSc=False # if TRUE --> multilevelLinearSolver & levelLinearSolver == LinearSolvers.PETSc
# if FALSE --> multilevelLinearSolver & levelLinearSolver == LinearSolvers.KSP_petsc4py
useSuperlu = False #if TRUE --> multilevelLinearSolver & levelLinearSolver == LinearSolvers.LU
spaceOrder = 1
useHex = False # used for discretization, if 1.0 --> CubeGaussQuadrature
# ELSE --> SimplexGaussQuadrature
useRBLES = 0.0 # multiplied with subGridError
useMetrics = 1.0 # if 1.0 --> use of user's parameters as (ns_shockCapturingFactor, ns_lag_shockCapturing, ecc ...)
useVF = opts.useVF # used in the smoothing functions as (1.0-useVF)*smoothedHeaviside(eps_rho,phi) + useVF*fmin(1.0,fmax(0.0,vf))
# Input checks
if spaceOrder not in [1,2]:
print("INVALID: spaceOrder" + spaceOrder)
sys.exit()
if useRBLES not in [0.0, 1.0]:
print("INVALID: useRBLES" + useRBLES)
sys.exit()
if useMetrics not in [0.0, 1.0]:
print("INVALID: useMetrics")
sys.exit()
# Discretization
nd = 2
if spaceOrder == 1:
hFactor=1.0
if useHex:
basis=C0_AffineLinearOnCubeWithNodalBasis
elementQuadrature = CubeGaussQuadrature(nd,3)
elementBoundaryQuadrature = CubeGaussQuadrature(nd-1,3)
else:
basis=C0_AffineLinearOnSimplexWithNodalBasis
elementQuadrature = SimplexGaussQuadrature(nd,3)
elementBoundaryQuadrature = SimplexGaussQuadrature(nd-1,3)
#elementBoundaryQuadrature = SimplexLobattoQuadrature(nd-1,1)
elif spaceOrder == 2:
hFactor=0.5
if useHex:
basis=C0_AffineLagrangeOnCubeWithNodalBasis
elementQuadrature = CubeGaussQuadrature(nd,4)
elementBoundaryQuadrature = CubeGaussQuadrature(nd-1,4)
else:
basis=C0_AffineQuadraticOnSimplexWithNodalBasis
elementQuadrature = SimplexGaussQuadrature(nd,4)
elementBoundaryQuadrature = SimplexGaussQuadrature(nd-1,4)
# Numerical parameters
ns_forceStrongDirichlet = False
backgroundDiffusionFactor=0.01
if useMetrics:
ns_shockCapturingFactor = 0.5 # magnifies numerical viscosity in NS (smoothening velocity fields)
    ns_lag_shockCapturing = True # lagging numerical viscosity speeds up Newton but destabilizes the solution
ns_lag_subgridError = True # less nonlinear but less stable
ls_shockCapturingFactor = 0.5 # numerical diffusion of level set (smoothening phi)
ls_lag_shockCapturing = True # less nonlinear but less stable
ls_sc_uref = 1.0 # reference gradient in numerical solution (higher=more diffusion)
ls_sc_beta = 1.5 # 1 is fully nonlinear, 2 is linear
vof_shockCapturingFactor = 0.5 # numerical diffusion of level set (smoothening volume of fraction)
vof_lag_shockCapturing = True # less nonlinear but less stable
vof_sc_uref = 1.0
vof_sc_beta = 1.5
rd_shockCapturingFactor = 0.5
rd_lag_shockCapturing = False
epsFact_density = 3.0 # control width of water/air transition zone
epsFact_viscosity = epsFact_curvature = epsFact_vof = epsFact_consrv_heaviside = epsFact_consrv_dirac = ecH = epsFact_density
epsFact_redistance = 0.33
epsFact_consrv_diffusion = 1.0 # affects smoothing diffusion in mass conservation
redist_Newton = True
kappa_shockCapturingFactor = 0.5
kappa_lag_shockCapturing = True # False
kappa_sc_uref = 1.0
kappa_sc_beta = 1.5
dissipation_shockCapturingFactor = 0.5
dissipation_lag_shockCapturing = True # False
dissipation_sc_uref = 1.0
dissipation_sc_beta = 1.5
else:
ns_shockCapturingFactor = 0.9
ns_lag_shockCapturing = True
ns_lag_subgridError = True
ls_shockCapturingFactor = 0.9
ls_lag_shockCapturing = True
ls_sc_uref = 1.0
ls_sc_beta = 1.0
vof_shockCapturingFactor = 0.9
vof_lag_shockCapturing = True
vof_sc_uref = 1.0
vof_sc_beta = 1.0
rd_shockCapturingFactor = 0.9
rd_lag_shockCapturing = False
epsFact_density = 1.5
epsFact_viscosity = epsFact_curvature = epsFact_vof = epsFact_consrv_heaviside = epsFact_consrv_dirac = epsFact_density
epsFact_redistance = 0.33
epsFact_consrv_diffusion = 10.0
redist_Newton = False
kappa_shockCapturingFactor = 0.9
kappa_lag_shockCapturing = True#False
kappa_sc_uref = 1.0
kappa_sc_beta = 1.0
dissipation_shockCapturingFactor = 0.9
dissipation_lag_shockCapturing = True#False
dissipation_sc_uref = 1.0
dissipation_sc_beta = 1.0
ns_nl_atol_res = max(1.0e-12,0.001*domain.MeshOptions.he**2)
vof_nl_atol_res = max(1.0e-12,0.001*domain.MeshOptions.he**2)
ls_nl_atol_res = max(1.0e-12,0.001*domain.MeshOptions.he**2)
mcorr_nl_atol_res = max(1.0e-12,0.0001*domain.MeshOptions.he**2)
rd_nl_atol_res = max(1.0e-12,0.01*domain.MeshOptions.he)
kappa_nl_atol_res = max(1.0e-12,0.001*domain.MeshOptions.he**2)
dissipation_nl_atol_res = max(1.0e-12,0.001*domain.MeshOptions.he**2)
mesh_nl_atol_res = max(1.0e-12,0.001*domain.MeshOptions.he**2)
#turbulence
ns_closure=0 #1-classic smagorinsky, 2-dynamic smagorinsky, 3 -- k-epsilon, 4 -- k-omega
if useRANS == 1:
ns_closure = 3
elif useRANS >= 2:
    ns_closure = 4
# Initial condition
waterLine_x = 2*tank_dim[0]
waterLine_z = waterLevel
def waveHeight(x,t):
waterDepth = waveinput.eta(x, t) + waveinput.mwl
return waterDepth
def wavePhi(x,t):
    return x[nd-1] - waveHeight(x,t)
def waveVF(x,t):
return smoothedHeaviside(epsFact_consrv_heaviside*he,wavePhi(x,t))
def signedDistance(x):
phi_x = x[0]-waterLine_x
phi_z = x[nd-1]-waterLine_z
if phi_x < 0.0:
if phi_z < 0.0:
return max(phi_x,phi_z)
else:
return phi_z
else:
if phi_z < 0.0:
return phi_x
else:
return sqrt(phi_x**2 + phi_z**2)
|
[
"proteus.mprans.BodyDynamics.CaissonBody",
"proteus.Domain.PlanarStraightLineGraphDomain",
"proteus.Gauges.PointGauges",
"proteus.mprans.SpatialTools.assembleDomain",
"proteus.mprans.SpatialTools.CustomShape",
"numpy.array",
"numpy.linspace",
"proteus.mprans.SpatialTools.Rectangle",
"proteus.Context.Options"
] |
[((242, 3785), 'proteus.Context.Options', 'Context.Options', (["[('water_level', 0.325, 'Height of free surface above bottom'), ('Lgen', \n 1.0, 'Genaration zone in terms of wave lengths'), ('Labs', 1.0,\n 'Absorption zone in terms of wave lengths'), ('Ls', 1.0,\n 'Length of domain from genZone to the front toe of rubble mound in terms of wave lengths'\n ), ('Lend', 1.0,\n 'Length of domain from absZone to the back toe of rubble mound in terms of wave lengths'\n ), ('wave', True, 'Enable wave generation'), ('waveType', 'Fenton',\n 'Wavetype for regular waves, Linear or Fenton'), ('wave_period', 1.3,\n 'Period of the waves'), ('wave_height', 0.167, 'Height of the waves'),\n ('wavelength', 2.121, 'Wavelength only if Fenton is activated'), (\n 'Ycoeff', [0.21107604, 0.07318902, 0.02782228, 0.01234846, 0.00618291, \n 0.00346483, 0.00227917, 0.00194241],\n 'Ycoeff only if Fenton is activated'), ('Bcoeff', [0.23112932, \n 0.03504843, 0.00431442, 0.00036993, 4.245e-05, 1.877e-05, 7.76e-06, \n 1.96e-06], 'Bcoeff only if Fenton is activated'), ('Nf', 8,\n 'Number of frequency components for fenton waves'), ('meanVelocity', [\n 0.0, 0.0, 0.0], 'Velocity used for currents'), ('phi0', 0.0,\n 'Initial phase for waves'), ('Uwind', [0.0, 0.0, 0.0],\n 'Set air velocity'), ('fast', True,\n 'Switches ON fast cosh approximation'), ('porousMedia', True,\n 'Enable porus media region'), ('hs', 0.175, 'Height of the breakwater'),\n ('slope1', 1.0 / 3.0, 'Slope1 of the breakwater'), ('slope2', 1.0 / 2.0,\n 'Slope2 of the breakwater'), ('porosity', 0.4, 'Porosity of the medium'\n ), ('d50', 0.03, 'Mean diameter of the medium'), ('d15', None,\n '15% grading curve diameter of the medium'), ('Resistance', 'Shih',\n 'Ergun or Engelund or Shih'), ('springs', True,\n 'Switch on/off soil module'), ('Kx', 541553.2,\n 'Horizontal stiffness in Pa'), ('Ky', 582633.7,\n 'Vertical stiffness in Pa'), ('Krot', 16246.6,\n 'Rotational stiffness in N'), ('Cx', 1694.2, 'Damping factor in Pa s '),\n ('Cy', 1757.32, 'Damping factor in Pa s '), ('Crot', 69.61,\n 'Rotational damping factor in N s '), ('caisson2D', True,\n 'Switch on/off caisson2D'), ('dimx', 0.3,\n 'X-dimension of the caisson2D'), ('dimy', 0.385,\n 'Y-dimension of the caisson2D'), ('width', 1.0,\n 'Z-dimension of the caisson2D'), ('mass', 64.8 / 0.4,\n 'Mass of the caisson2D [kg]'), ('caissonBC', 'FreeSlip',\n 'caisson2D boundaries: NoSlip or FreeSlip'), ('rotation', False,\n 'Initial position for free oscillation'), ('friction', True,\n 'Switch on/off friction module for sliding'), ('overturning', True,\n 'Switch on/off overturning module'), ('m_static', 0.5,\n 'Static friction factor between caisson2D and rubble mound'), (\n 'm_dynamic', 0.5,\n 'Dynamic friction factor between caisson2D and rubble mound'), (\n 'scheme', 'Runge_Kutta',\n 'Numerical scheme applied to solve motion calculation (Runge_Kutta or Central_Difference)'\n ), ('GenZone', True, 'Turn on generation zone at left side'), (\n 'AbsZone', True, 'Turn on absorption zone at right side'), (\n 'refinement_level', 0.0, 'he=walength/refinement_level'), ('he', 0.05,\n 'he=walength/refinement_level'), ('cfl', 0.45, 'Target cfl'), (\n 'duration', 20.0, 'Durarion of the simulation'), ('freezeLevelSet', \n True, 'No motion to the levelset'), ('useVF', 1.0,\n 'For density and viscosity smoothing'), ('movingDomain', True,\n 'Moving domain and mesh option'), ('conservativeFlux', True,\n 'Fix post-processing velocity bug for porous interface')]"], {}), "([('water_level', 0.325,\n 'Height of free surface above 
bottom'), ('Lgen', 1.0,\n 'Genaration zone in terms of wave lengths'), ('Labs', 1.0,\n 'Absorption zone in terms of wave lengths'), ('Ls', 1.0,\n 'Length of domain from genZone to the front toe of rubble mound in terms of wave lengths'\n ), ('Lend', 1.0,\n 'Length of domain from absZone to the back toe of rubble mound in terms of wave lengths'\n ), ('wave', True, 'Enable wave generation'), ('waveType', 'Fenton',\n 'Wavetype for regular waves, Linear or Fenton'), ('wave_period', 1.3,\n 'Period of the waves'), ('wave_height', 0.167, 'Height of the waves'),\n ('wavelength', 2.121, 'Wavelength only if Fenton is activated'), (\n 'Ycoeff', [0.21107604, 0.07318902, 0.02782228, 0.01234846, 0.00618291, \n 0.00346483, 0.00227917, 0.00194241],\n 'Ycoeff only if Fenton is activated'), ('Bcoeff', [0.23112932, \n 0.03504843, 0.00431442, 0.00036993, 4.245e-05, 1.877e-05, 7.76e-06, \n 1.96e-06], 'Bcoeff only if Fenton is activated'), ('Nf', 8,\n 'Number of frequency components for fenton waves'), ('meanVelocity', [\n 0.0, 0.0, 0.0], 'Velocity used for currents'), ('phi0', 0.0,\n 'Initial phase for waves'), ('Uwind', [0.0, 0.0, 0.0],\n 'Set air velocity'), ('fast', True,\n 'Switches ON fast cosh approximation'), ('porousMedia', True,\n 'Enable porus media region'), ('hs', 0.175, 'Height of the breakwater'),\n ('slope1', 1.0 / 3.0, 'Slope1 of the breakwater'), ('slope2', 1.0 / 2.0,\n 'Slope2 of the breakwater'), ('porosity', 0.4, 'Porosity of the medium'\n ), ('d50', 0.03, 'Mean diameter of the medium'), ('d15', None,\n '15% grading curve diameter of the medium'), ('Resistance', 'Shih',\n 'Ergun or Engelund or Shih'), ('springs', True,\n 'Switch on/off soil module'), ('Kx', 541553.2,\n 'Horizontal stiffness in Pa'), ('Ky', 582633.7,\n 'Vertical stiffness in Pa'), ('Krot', 16246.6,\n 'Rotational stiffness in N'), ('Cx', 1694.2, 'Damping factor in Pa s '),\n ('Cy', 1757.32, 'Damping factor in Pa s '), ('Crot', 69.61,\n 'Rotational damping factor in N s '), ('caisson2D', True,\n 'Switch on/off caisson2D'), ('dimx', 0.3,\n 'X-dimension of the caisson2D'), ('dimy', 0.385,\n 'Y-dimension of the caisson2D'), ('width', 1.0,\n 'Z-dimension of the caisson2D'), ('mass', 64.8 / 0.4,\n 'Mass of the caisson2D [kg]'), ('caissonBC', 'FreeSlip',\n 'caisson2D boundaries: NoSlip or FreeSlip'), ('rotation', False,\n 'Initial position for free oscillation'), ('friction', True,\n 'Switch on/off friction module for sliding'), ('overturning', True,\n 'Switch on/off overturning module'), ('m_static', 0.5,\n 'Static friction factor between caisson2D and rubble mound'), (\n 'm_dynamic', 0.5,\n 'Dynamic friction factor between caisson2D and rubble mound'), (\n 'scheme', 'Runge_Kutta',\n 'Numerical scheme applied to solve motion calculation (Runge_Kutta or Central_Difference)'\n ), ('GenZone', True, 'Turn on generation zone at left side'), (\n 'AbsZone', True, 'Turn on absorption zone at right side'), (\n 'refinement_level', 0.0, 'he=walength/refinement_level'), ('he', 0.05,\n 'he=walength/refinement_level'), ('cfl', 0.45, 'Target cfl'), (\n 'duration', 20.0, 'Durarion of the simulation'), ('freezeLevelSet', \n True, 'No motion to the levelset'), ('useVF', 1.0,\n 'For density and viscosity smoothing'), ('movingDomain', True,\n 'Moving domain and mesh option'), ('conservativeFlux', True,\n 'Fix post-processing velocity bug for porous interface')])\n", (257, 3785), False, 'from proteus import Domain, Context\n'), ((3941, 3979), 'proteus.Domain.PlanarStraightLineGraphDomain', 'Domain.PlanarStraightLineGraphDomain', ([], {}), '()\n', 
(3977, 3979), False, 'from proteus import Domain\n'), ((4109, 4132), 'numpy.array', 'np.array', (['[1, 0.0, 0.0]'], {}), '([1, 0.0, 0.0])\n', (4117, 4132), True, 'import numpy as np\n'), ((4279, 4304), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (4287, 4304), True, 'import numpy as np\n'), ((4315, 4340), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (4323, 4340), True, 'import numpy as np\n'), ((4444, 4470), 'numpy.array', 'np.array', (['[0.0, -9.8, 0.0]'], {}), '([0.0, -9.8, 0.0])\n', (4452, 4470), True, 'import numpy as np\n'), ((13442, 13681), 'proteus.mprans.SpatialTools.CustomShape', 'st.CustomShape', (['domain'], {'vertices': 'vertices', 'vertexFlags': 'vertexFlags', 'segments': 'segments', 'segmentFlags': 'segmentFlags', 'regions': 'regions', 'regionFlags': 'regionFlags', 'boundaryTags': 'boundaryTags', 'boundaryOrientations': 'boundaryOrientations'}), '(domain, vertices=vertices, vertexFlags=vertexFlags, segments\n =segments, segmentFlags=segmentFlags, regions=regions, regionFlags=\n regionFlags, boundaryTags=boundaryTags, boundaryOrientations=\n boundaryOrientations)\n', (13456, 13681), True, 'from proteus.mprans import SpatialTools as st\n'), ((19993, 20030), 'numpy.linspace', 'np.linspace', (['(0.0)', 'tank_dim_x', 'nprobes'], {}), '(0.0, tank_dim_x, nprobes)\n', (20004, 20030), True, 'import numpy as np\n'), ((21248, 21362), 'proteus.Gauges.PointGauges', 'ga.PointGauges', ([], {'gauges': "((('phi',), PG),)", 'activeTime': '(0.0, T)', 'sampleRate': '(0.0)', 'fileName': '"""levelset_gauges.csv"""'}), "(gauges=((('phi',), PG),), activeTime=(0.0, T), sampleRate=\n 0.0, fileName='levelset_gauges.csv')\n", (21262, 21362), True, 'from proteus import Gauges as ga\n'), ((22319, 22344), 'proteus.mprans.SpatialTools.assembleDomain', 'st.assembleDomain', (['domain'], {}), '(domain)\n', (22336, 22344), True, 'from proteus.mprans import SpatialTools as st\n'), ((9337, 9381), 'proteus.mprans.SpatialTools.Rectangle', 'st.Rectangle', (['domain'], {'dim': 'dim', 'coords': 'coords'}), '(domain, dim=dim, coords=coords)\n', (9349, 9381), True, 'from proteus.mprans import SpatialTools as st\n'), ((9552, 9594), 'proteus.mprans.BodyDynamics.CaissonBody', 'bd.CaissonBody', ([], {'shape': 'caisson', 'substeps': '(20)'}), '(shape=caisson, substeps=20)\n', (9566, 9594), True, 'from proteus.mprans import BodyDynamics as bd\n'), ((11288, 11322), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 3, 3, 3, 3]'], {}), '([1, 1, 1, 1, 3, 3, 3, 3])\n', (11296, 11322), True, 'import numpy as np\n'), ((11636, 11676), 'numpy.array', 'np.array', (['[1, 1, 1, 2, 3, 3, 3, 4, 5, 5]'], {}), '([1, 1, 1, 2, 3, 3, 3, 4, 5, 5])\n', (11644, 11676), True, 'import numpy as np\n'), ((11914, 11933), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (11922, 11933), True, 'import numpy as np\n'), ((12411, 12463), 'numpy.array', 'np.array', (['[1, 1, 1, 6, 6, 1, 1, 1, 3, 3, 3, 3, 7, 7]'], {}), '([1, 1, 1, 6, 6, 1, 1, 1, 3, 3, 3, 3, 7, 7])\n', (12419, 12463), True, 'import numpy as np\n'), ((12974, 13032), 'numpy.array', 'np.array', (['[1, 1, 6, 6, 1, 1, 2, 3, 3, 3, 4, 1, 5, 5, 7, 7]'], {}), '([1, 1, 6, 6, 1, 1, 2, 3, 3, 3, 4, 1, 5, 5, 7, 7])\n', (12982, 13032), True, 'import numpy as np\n'), ((13409, 13431), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (13417, 13431), True, 'import numpy as np\n'), ((20209, 20252), 'numpy.array', 'np.array', (['[1 * 10 ** -5, 1 * 10 ** -5, 0.0]'], {}), '([1 * 10 ** -5, 1 * 10 ** -5, 0.0])\n', (20217, 
20252), True, 'import numpy as np\n'), ((20260, 20323), 'numpy.array', 'np.array', (['[caisson.vertices[0][0], caisson.vertices[0][1], 0.0]'], {}), '([caisson.vertices[0][0], caisson.vertices[0][1], 0.0])\n', (20268, 20323), True, 'import numpy as np\n'), ((20388, 20451), 'numpy.array', 'np.array', (['[caisson.vertices[1][0], caisson.vertices[1][1], 0.0]'], {}), '([caisson.vertices[1][0], caisson.vertices[1][1], 0.0])\n', (20396, 20451), True, 'import numpy as np\n'), ((7161, 7187), 'numpy.array', 'np.array', (['[0.0, -1.0, 0.0]'], {}), '([0.0, -1.0, 0.0])\n', (7169, 7187), True, 'import numpy as np\n'), ((7219, 7245), 'numpy.array', 'np.array', (['[+1.0, 0.0, 0.0]'], {}), '([+1.0, 0.0, 0.0])\n', (7227, 7245), True, 'import numpy as np\n'), ((7277, 7303), 'numpy.array', 'np.array', (['[0.0, +1.0, 0.0]'], {}), '([0.0, +1.0, 0.0])\n', (7285, 7303), True, 'import numpy as np\n'), ((7335, 7361), 'numpy.array', 'np.array', (['[-1.0, 0.0, 0.0]'], {}), '([-1.0, 0.0, 0.0])\n', (7343, 7361), True, 'import numpy as np\n'), ((8009, 8035), 'numpy.array', 'np.array', (['[0.0, -1.0, 0.0]'], {}), '([0.0, -1.0, 0.0])\n', (8017, 8035), True, 'import numpy as np\n'), ((8067, 8093), 'numpy.array', 'np.array', (['[+1.0, 0.0, 0.0]'], {}), '([+1.0, 0.0, 0.0])\n', (8075, 8093), True, 'import numpy as np\n'), ((8125, 8151), 'numpy.array', 'np.array', (['[0.0, +1.0, 0.0]'], {}), '([0.0, +1.0, 0.0])\n', (8133, 8151), True, 'import numpy as np\n'), ((8183, 8209), 'numpy.array', 'np.array', (['[-1.0, 0.0, 0.0]'], {}), '([-1.0, 0.0, 0.0])\n', (8191, 8209), True, 'import numpy as np\n'), ((5026, 5047), 'numpy.array', 'np.array', (['opts.Ycoeff'], {}), '(opts.Ycoeff)\n', (5034, 5047), True, 'import numpy as np\n'), ((5117, 5138), 'numpy.array', 'np.array', (['opts.Bcoeff'], {}), '(opts.Bcoeff)\n', (5125, 5138), True, 'import numpy as np\n'), ((5307, 5334), 'numpy.array', 'np.array', (['opts.meanVelocity'], {}), '(opts.meanVelocity)\n', (5315, 5334), True, 'import numpy as np\n')]
|
from django.db import transaction
from django.shortcuts import get_object_or_404, redirect
from django.utils.translation import ugettext as _
from django.views.generic import FormView, TemplateView
from guardian.mixins import LoginRequiredMixin
from tally_ho.apps.tally.models.audit import Audit
from tally_ho.apps.tally.models.result_form import ResultForm
from tally_ho.libs.models.enums.form_state import FormState
from tally_ho.libs.permissions import groups
from tally_ho.libs.views.session import session_matches_post_result_form
from tally_ho.libs.verify.quarantine_checks import quarantine_checks
from tally_ho.libs.views import mixins
from tally_ho.libs.views.form_state import form_in_state
def check_quarantine(result_form, user):
"""Run quarantine checks. Create an audit with links to the failed
quarantine checks if any fail.
:param result_form: The result form to run quarantine checks on.
:param user: The user to associate with an audit if any checks fail.
"""
audit = None
result_form.audit_set.update(active=False)
if not result_form.skip_quarantine_checks:
for passed_check, check in quarantine_checks():
if not passed_check(result_form):
if not audit:
audit = Audit.objects.create(
user=user,
result_form=result_form)
audit.quarantine_checks.add(check)
if audit:
result_form.audited_count += 1
result_form.save()
def states_for_form(user, result_form, states=None):
    """Get the possible states for this result_form.
    Archive supervisors can modify archived forms, check the user and see if
    this state should be added.
    :param user: The user to determine form states for.
    :param result_form: The form to check the state of.
    :param states: The initial states a form can be in.
    :returns: A list of states that a form may be in.
    """
    # Default to None to avoid appending to a shared mutable default list below.
    if states is None:
        states = [FormState.ARCHIVING]
if groups.QUALITY_CONTROL_ARCHIVE_SUPERVISOR in groups.user_groups(user)\
and result_form.form_state == FormState.ARCHIVED:
states.append(FormState.ARCHIVED)
return states
class ArchivePrintView(LoginRequiredMixin,
mixins.GroupRequiredMixin,
mixins.TallyAccessMixin,
mixins.ReverseSuccessURLMixin,
FormView):
group_required = [groups.QUALITY_CONTROL_ARCHIVE_CLERK,
groups.QUALITY_CONTROL_ARCHIVE_SUPERVISOR]
template_name = "archive/print_cover.html"
success_url = 'archive-success'
def get(self, *args, **kwargs):
tally_id = kwargs.get('tally_id')
pk = self.request.session.get('result_form')
result_form = get_object_or_404(ResultForm, pk=pk)
possible_states = states_for_form(self.request.user, result_form)
form_in_state(result_form, possible_states)
check_quarantine(result_form, self.request.user)
return self.render_to_response(
self.get_context_data(result_form=result_form))
@transaction.atomic
def post(self, *args, **kwargs):
tally_id = kwargs.get('tally_id')
post_data = self.request.POST
pk = session_matches_post_result_form(post_data, self.request)
result_form = get_object_or_404(ResultForm, pk=pk)
possible_states = states_for_form(self.request.user, result_form)
form_in_state(result_form, possible_states)
result_form.form_state = FormState.AUDIT if result_form.audit else\
FormState.ARCHIVED
result_form.save()
return redirect(self.success_url, tally_id=tally_id)
class ConfirmationView(LoginRequiredMixin,
mixins.GroupRequiredMixin,
TemplateView):
template_name = "success.html"
group_required = [groups.QUALITY_CONTROL_ARCHIVE_CLERK,
groups.QUALITY_CONTROL_ARCHIVE_SUPERVISOR]
def get(self, *args, **kwargs):
tally_id = kwargs.get('tally_id')
pk = self.request.session.get('result_form')
result_form = get_object_or_404(ResultForm, pk=pk)
next_step = _('Quarantine') if result_form.audit else _('Archive')
del self.request.session['result_form']
return self.render_to_response(self.get_context_data(
result_form=result_form, header_text=_('Quality Control & Archiving'),
next_step=next_step, start_url='quality-control',
tally_id=tally_id))
|
[
"tally_ho.libs.permissions.groups.user_groups",
"django.shortcuts.redirect",
"tally_ho.libs.views.session.session_matches_post_result_form",
"tally_ho.apps.tally.models.audit.Audit.objects.create",
"tally_ho.libs.verify.quarantine_checks.quarantine_checks",
"django.shortcuts.get_object_or_404",
"django.utils.translation.ugettext",
"tally_ho.libs.views.form_state.form_in_state"
] |
[((1150, 1169), 'tally_ho.libs.verify.quarantine_checks.quarantine_checks', 'quarantine_checks', ([], {}), '()\n', (1167, 1169), False, 'from tally_ho.libs.verify.quarantine_checks import quarantine_checks\n'), ((2775, 2811), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['ResultForm'], {'pk': 'pk'}), '(ResultForm, pk=pk)\n', (2792, 2811), False, 'from django.shortcuts import get_object_or_404, redirect\n'), ((2895, 2938), 'tally_ho.libs.views.form_state.form_in_state', 'form_in_state', (['result_form', 'possible_states'], {}), '(result_form, possible_states)\n', (2908, 2938), False, 'from tally_ho.libs.views.form_state import form_in_state\n'), ((3252, 3309), 'tally_ho.libs.views.session.session_matches_post_result_form', 'session_matches_post_result_form', (['post_data', 'self.request'], {}), '(post_data, self.request)\n', (3284, 3309), False, 'from tally_ho.libs.views.session import session_matches_post_result_form\n'), ((3332, 3368), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['ResultForm'], {'pk': 'pk'}), '(ResultForm, pk=pk)\n', (3349, 3368), False, 'from django.shortcuts import get_object_or_404, redirect\n'), ((3451, 3494), 'tally_ho.libs.views.form_state.form_in_state', 'form_in_state', (['result_form', 'possible_states'], {}), '(result_form, possible_states)\n', (3464, 3494), False, 'from tally_ho.libs.views.form_state import form_in_state\n'), ((3646, 3691), 'django.shortcuts.redirect', 'redirect', (['self.success_url'], {'tally_id': 'tally_id'}), '(self.success_url, tally_id=tally_id)\n', (3654, 3691), False, 'from django.shortcuts import get_object_or_404, redirect\n'), ((4139, 4175), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['ResultForm'], {'pk': 'pk'}), '(ResultForm, pk=pk)\n', (4156, 4175), False, 'from django.shortcuts import get_object_or_404, redirect\n'), ((2033, 2057), 'tally_ho.libs.permissions.groups.user_groups', 'groups.user_groups', (['user'], {}), '(user)\n', (2051, 2057), False, 'from tally_ho.libs.permissions import groups\n'), ((4196, 4211), 'django.utils.translation.ugettext', '_', (['"""Quarantine"""'], {}), "('Quarantine')\n", (4197, 4211), True, 'from django.utils.translation import ugettext as _\n'), ((4238, 4250), 'django.utils.translation.ugettext', '_', (['"""Archive"""'], {}), "('Archive')\n", (4239, 4250), True, 'from django.utils.translation import ugettext as _\n'), ((1275, 1331), 'tally_ho.apps.tally.models.audit.Audit.objects.create', 'Audit.objects.create', ([], {'user': 'user', 'result_form': 'result_form'}), '(user=user, result_form=result_form)\n', (1295, 1331), False, 'from tally_ho.apps.tally.models.audit import Audit\n'), ((4411, 4443), 'django.utils.translation.ugettext', '_', (['"""Quality Control & Archiving"""'], {}), "('Quality Control & Archiving')\n", (4412, 4443), True, 'from django.utils.translation import ugettext as _\n')]
|
"""
Helper functions for calculating MMD and performing MMD test
This module contains original code from: https://github.com/fengliu90/DK-for-TST
MIT License
Copyright (c) 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import torch
def get_item(x):
"""get the numpy value from a torch tensor."""
x = x.cpu().detach().numpy()
return x
def Pdist2(x, y):
"""compute the paired distance between x and y."""
x_norm = (x ** 2).sum(1).view(-1, 1)
if y is not None:
y_norm = (y ** 2).sum(1).view(1, -1)
else:
y = x
y_norm = x_norm.view(1, -1)
Pdist = x_norm + y_norm - 2.0 * torch.mm(x, torch.transpose(y, 0, 1))
Pdist[Pdist<0]=0
return Pdist
def h1_mean_var_gram(Kx, Ky, Kxy, is_var_computed, use_1sample_U=True):
"""compute value of MMD and std of MMD using kernel matrix."""
Kxxy = torch.cat((Kx,Kxy),1)
Kyxy = torch.cat((Kxy.transpose(0,1),Ky),1)
Kxyxy = torch.cat((Kxxy,Kyxy),0)
nx = Kx.shape[0]
ny = Ky.shape[0]
is_unbiased = True
if is_unbiased:
xx = torch.div((torch.sum(Kx) - torch.sum(torch.diag(Kx))), (nx * (nx - 1)))
yy = torch.div((torch.sum(Ky) - torch.sum(torch.diag(Ky))), (ny * (ny - 1)))
# one-sample U-statistic.
if use_1sample_U:
xy = torch.div((torch.sum(Kxy) - torch.sum(torch.diag(Kxy))), (nx * (ny - 1)))
else:
xy = torch.div(torch.sum(Kxy), (nx * ny))
mmd2 = xx - 2 * xy + yy
else:
xx = torch.div((torch.sum(Kx)), (nx * nx))
yy = torch.div((torch.sum(Ky)), (ny * ny))
# one-sample U-statistic.
if use_1sample_U:
xy = torch.div((torch.sum(Kxy)), (nx * ny))
else:
xy = torch.div(torch.sum(Kxy), (nx * ny))
mmd2 = xx - 2 * xy + yy
if not is_var_computed:
return mmd2, None, Kxyxy
hh = Kx+Ky-Kxy-Kxy.transpose(0,1)
V1 = torch.dot(hh.sum(1)/ny,hh.sum(1)/ny) / ny
V2 = (hh).sum() / (nx) / nx
varEst = 4*(V1 - V2**2)
return mmd2, varEst, Kxyxy
def MMDu(Fea, len_s, Fea_org, sigma, sigma0=0.1, epsilon = 10**(-10), is_smooth=True, is_var_computed=True, use_1sample_U=True):
"""compute value of deep-kernel MMD and std of deep-kernel MMD using merged data."""
X = Fea[0:len_s, :] # fetch the sample 1 (features of deep networks)
Y = Fea[len_s:, :] # fetch the sample 2 (features of deep networks)
X_org = Fea_org[0:len_s, :] # fetch the original sample 1
Y_org = Fea_org[len_s:, :] # fetch the original sample 2
L = 1 # generalized Gaussian (if L>1)
Dxx = Pdist2(X, X)
Dyy = Pdist2(Y, Y)
Dxy = Pdist2(X, Y)
Dxx_org = Pdist2(X_org, X_org)
Dyy_org = Pdist2(Y_org, Y_org)
Dxy_org = Pdist2(X_org, Y_org)
if is_smooth:
Kx = (1-epsilon) * torch.exp(-(Dxx / sigma0)**L -Dxx_org / sigma) + epsilon * torch.exp(-Dxx_org / sigma)
Ky = (1-epsilon) * torch.exp(-(Dyy / sigma0)**L -Dyy_org / sigma) + epsilon * torch.exp(-Dyy_org / sigma)
Kxy = (1-epsilon) * torch.exp(-(Dxy / sigma0)**L -Dxy_org / sigma) + epsilon * torch.exp(-Dxy_org / sigma)
else:
Kx = torch.exp(-Dxx / sigma0)
Ky = torch.exp(-Dyy / sigma0)
Kxy = torch.exp(-Dxy / sigma0)
return h1_mean_var_gram(Kx, Ky, Kxy, is_var_computed, use_1sample_U)
def TST_MMD_u(Fea, N_per, N1, Fea_org, sigma, sigma0, ep, alpha, device, dtype, is_smooth=True):
"""run two-sample test (TST) using deep kernel kernel."""
mmd_vector = np.zeros(N_per)
TEMP = MMDu(Fea, N1, Fea_org, sigma, sigma0, ep, is_smooth)
mmd_value = get_item(TEMP[0])
Kxyxy = TEMP[2]
count = 0
nxy = Fea.shape[0]
nx = N1
for r in range(N_per):
# print r
ind = np.random.choice(nxy, nxy, replace=False)
# divide into new X, Y
indx = ind[:nx]
# print(indx)
indy = ind[nx:]
Kx = Kxyxy[np.ix_(indx, indx)]
# print(Kx)
Ky = Kxyxy[np.ix_(indy, indy)]
Kxy = Kxyxy[np.ix_(indx, indy)]
TEMP = h1_mean_var_gram(Kx, Ky, Kxy, is_var_computed=False)
mmd_vector[r] = TEMP[0]
if mmd_vector[r] > mmd_value:
count = count + 1
if count > np.ceil(N_per * alpha):
h = 0
threshold = "NaN"
break
else:
h = 1
if h == 1:
S_mmd_vector = np.sort(mmd_vector)
# print(np.int(np.ceil(N_per*alpha)))
        threshold = S_mmd_vector[int(np.ceil(N_per * (1 - alpha)))]
return h, threshold, mmd_value.item()
|
[
"numpy.ceil",
"numpy.ix_",
"numpy.zeros",
"torch.cat",
"torch.diag",
"torch.exp",
"numpy.sort",
"numpy.random.choice",
"torch.sum",
"torch.transpose"
] |
[((1922, 1945), 'torch.cat', 'torch.cat', (['(Kx, Kxy)', '(1)'], {}), '((Kx, Kxy), 1)\n', (1931, 1945), False, 'import torch\n'), ((2006, 2032), 'torch.cat', 'torch.cat', (['(Kxxy, Kyxy)', '(0)'], {}), '((Kxxy, Kyxy), 0)\n', (2015, 2032), False, 'import torch\n'), ((4606, 4621), 'numpy.zeros', 'np.zeros', (['N_per'], {}), '(N_per)\n', (4614, 4621), True, 'import numpy as np\n'), ((4245, 4269), 'torch.exp', 'torch.exp', (['(-Dxx / sigma0)'], {}), '(-Dxx / sigma0)\n', (4254, 4269), False, 'import torch\n'), ((4284, 4308), 'torch.exp', 'torch.exp', (['(-Dyy / sigma0)'], {}), '(-Dyy / sigma0)\n', (4293, 4308), False, 'import torch\n'), ((4324, 4348), 'torch.exp', 'torch.exp', (['(-Dxy / sigma0)'], {}), '(-Dxy / sigma0)\n', (4333, 4348), False, 'import torch\n'), ((4857, 4898), 'numpy.random.choice', 'np.random.choice', (['nxy', 'nxy'], {'replace': '(False)'}), '(nxy, nxy, replace=False)\n', (4873, 4898), True, 'import numpy as np\n'), ((5507, 5526), 'numpy.sort', 'np.sort', (['mmd_vector'], {}), '(mmd_vector)\n', (5514, 5526), True, 'import numpy as np\n'), ((2585, 2598), 'torch.sum', 'torch.sum', (['Kx'], {}), '(Kx)\n', (2594, 2598), False, 'import torch\n'), ((2637, 2650), 'torch.sum', 'torch.sum', (['Ky'], {}), '(Ky)\n', (2646, 2650), False, 'import torch\n'), ((5024, 5042), 'numpy.ix_', 'np.ix_', (['indx', 'indx'], {}), '(indx, indx)\n', (5030, 5042), True, 'import numpy as np\n'), ((5085, 5103), 'numpy.ix_', 'np.ix_', (['indy', 'indy'], {}), '(indy, indy)\n', (5091, 5103), True, 'import numpy as np\n'), ((5126, 5144), 'numpy.ix_', 'np.ix_', (['indx', 'indy'], {}), '(indx, indy)\n', (5132, 5144), True, 'import numpy as np\n'), ((5340, 5362), 'numpy.ceil', 'np.ceil', (['(N_per * alpha)'], {}), '(N_per * alpha)\n', (5347, 5362), True, 'import numpy as np\n'), ((1701, 1725), 'torch.transpose', 'torch.transpose', (['y', '(0)', '(1)'], {}), '(y, 0, 1)\n', (1716, 1725), False, 'import torch\n'), ((2145, 2158), 'torch.sum', 'torch.sum', (['Kx'], {}), '(Kx)\n', (2154, 2158), False, 'import torch\n'), ((2231, 2244), 'torch.sum', 'torch.sum', (['Ky'], {}), '(Ky)\n', (2240, 2244), False, 'import torch\n'), ((2489, 2503), 'torch.sum', 'torch.sum', (['Kxy'], {}), '(Kxy)\n', (2498, 2503), False, 'import torch\n'), ((2755, 2769), 'torch.sum', 'torch.sum', (['Kxy'], {}), '(Kxy)\n', (2764, 2769), False, 'import torch\n'), ((2826, 2840), 'torch.sum', 'torch.sum', (['Kxy'], {}), '(Kxy)\n', (2835, 2840), False, 'import torch\n'), ((3902, 3951), 'torch.exp', 'torch.exp', (['(-(Dxx / sigma0) ** L - Dxx_org / sigma)'], {}), '(-(Dxx / sigma0) ** L - Dxx_org / sigma)\n', (3911, 3951), False, 'import torch\n'), ((3961, 3988), 'torch.exp', 'torch.exp', (['(-Dxx_org / sigma)'], {}), '(-Dxx_org / sigma)\n', (3970, 3988), False, 'import torch\n'), ((4017, 4066), 'torch.exp', 'torch.exp', (['(-(Dyy / sigma0) ** L - Dyy_org / sigma)'], {}), '(-(Dyy / sigma0) ** L - Dyy_org / sigma)\n', (4026, 4066), False, 'import torch\n'), ((4076, 4103), 'torch.exp', 'torch.exp', (['(-Dyy_org / sigma)'], {}), '(-Dyy_org / sigma)\n', (4085, 4103), False, 'import torch\n'), ((4133, 4182), 'torch.exp', 'torch.exp', (['(-(Dxy / sigma0) ** L - Dxy_org / sigma)'], {}), '(-(Dxy / sigma0) ** L - Dxy_org / sigma)\n', (4142, 4182), False, 'import torch\n'), ((4192, 4219), 'torch.exp', 'torch.exp', (['(-Dxy_org / sigma)'], {}), '(-Dxy_org / sigma)\n', (4201, 4219), False, 'import torch\n'), ((5622, 5650), 'numpy.ceil', 'np.ceil', (['(N_per * (1 - alpha))'], {}), '(N_per * (1 - alpha))\n', (5629, 5650), True, 'import numpy as np\n'), ((2171, 
2185), 'torch.diag', 'torch.diag', (['Kx'], {}), '(Kx)\n', (2181, 2185), False, 'import torch\n'), ((2257, 2271), 'torch.diag', 'torch.diag', (['Ky'], {}), '(Ky)\n', (2267, 2271), False, 'import torch\n'), ((2383, 2397), 'torch.sum', 'torch.sum', (['Kxy'], {}), '(Kxy)\n', (2392, 2397), False, 'import torch\n'), ((2410, 2425), 'torch.diag', 'torch.diag', (['Kxy'], {}), '(Kxy)\n', (2420, 2425), False, 'import torch\n')]
|
#!/usr/bin/env python
"""
My template:
"""
__date__ = "2016-07-07"
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__license__ = "GPL"
# imports
import sys
import os
import time
from argparse import ArgumentParser, RawTextHelpFormatter
import shutil
import subprocess
from subprocess import run
parser = ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument("-v",
"--verbose",
dest="verbose",
action="store_true",
default=False,
help="Be loud!")
parser.add_argument("-s",
"--srr_id",
dest="srr_id",
help="SRR number (from SRA) for the current sample")
parser.add_argument("--outdir",
dest="outdir",
help="directory to which the fastq files are written")
parser.add_argument("--paired",
dest="paired",
action="store_true",
help="indicate if two samples (paired-end sequencing) belong to that sample id")
# redefine functions for writing to stdout and stderr to save some writing
syserr = sys.stderr.write
sysout = sys.stdout.write
def main(options):
"""Download the fastq file(s) for the given id"""
# get the path to the installation of aspera
starts_with_sep = shutil.which("ascp").startswith(os.sep)
aspera_path_list = shutil.which("ascp").split(os.sep)
bin_dir = len(aspera_path_list)
for i in range(len(aspera_path_list) - 1, -1, -1):
if aspera_path_list[i] == "bin":
bin_dir = i
break
aspera_path = os.path.join( *aspera_path_list[0:bin_dir] )
# prepend a directory separator if necessary
if starts_with_sep and not aspera_path.startswith(os.sep):
aspera_path = os.sep + aspera_path
command_list = ["ascp", "-QT", "-l", "300m", "-P33001", "-d", "-i"]
command_list.append( aspera_path + "/etc/asperaweb_id_dsa.openssh")
base_address = "<EMAIL>:/vol1/fastq"
# SRR, ERR or DRR?
prefix = options.srr_id[0:3]
srr_number = options.srr_id.replace(prefix, "")
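    # The branches below rebuild the remote directory layout (mirroring ENA's fastq
    # archive): runs are grouped by the first six characters of the accession and,
    # for accessions with more than six digits, by a zero-padded sub-directory
    # formed from the trailing digits.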
if len(srr_number) == 6:
address = os.path.join(base_address,
options.srr_id[:6],
options.srr_id,
options.srr_id
)
elif len(srr_number) == 7:
address = os.path.join(base_address,
options.srr_id[:6],
"00" + str(srr_number[-1]),
options.srr_id,
options.srr_id
)
elif len(srr_number) == 8:
address = os.path.join(base_address,
options.srr_id[:6],
"0" + str(srr_number[-2:]),
options.srr_id,
options.srr_id
)
    elif len(srr_number) == 9:
address = os.path.join(base_address,
options.srr_id[:6],
str(srr_number[-3:]),
options.srr_id,
options.srr_id
)
else:
syserr("[ERROR] SRR id %s has unexpected format. Expected is the form: SRR\d+ with \d+ being 6 to 9 digits\n" % srr_number)
sys.exit(2)
if options.paired:
for read in [1,2]:
fulladdress = address + "_" + str(read) + ".fastq.gz"
command = command_list + [fulladdress, options.outdir]
# print( command )
returnObj = run(command, stderr = subprocess.DEVNULL, stdout = subprocess.DEVNULL)
# syserr("[INFO] command args: %s\n" % str(return_obj.args))
if returnObj.returncode != 0:
syserr("[ERROR] command failed\n")
syserr("[ERROR] command: %s\n" % command)
sys.exit(2)
else:
fulladdress = address + ".fastq.gz"
command = command_list + [fulladdress, options.outdir]
return_val = run(command, stderr = subprocess.DEVNULL, stdout = subprocess.DEVNULL).returncode
if return_val != 0:
syserr("[ERROR] command failed\n")
syserr("[ERROR] command: %s\n" % command)
sys.exit(2)
if __name__ == '__main__':
try:
# check if aspera's ascp is available
if not shutil.which("ascp"):
syserr("[ERROR] Could not find Aspera's ascp\n")
syserr("[ERROR] Ensure that ascp is available and rerun the script\n")
sys.exit(2)
try:
options = parser.parse_args()
except Exception:
parser.print_help()
sys.exit()
if options.verbose:
start_time = time.time()
start_date = time.strftime("%d-%m-%Y at %H:%M:%S")
syserr("############## Started script on %s ##############\n" %
start_date)
main(options)
if options.verbose:
syserr("### Successfully finished in %i seconds, on %s ###\n" %
(time.time() - start_time,
time.strftime("%d-%m-%Y at %H:%M:%S")))
except KeyboardInterrupt:
syserr("Interrupted by user after %i seconds!\n" %
(time.time() - start_time))
sys.exit(-1)
|
[
"subprocess.run",
"argparse.ArgumentParser",
"shutil.which",
"time.strftime",
"time.time",
"os.path.join",
"sys.exit"
] |
[((303, 376), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'RawTextHelpFormatter'}), '(description=__doc__, formatter_class=RawTextHelpFormatter)\n', (317, 376), False, 'from argparse import ArgumentParser, RawTextHelpFormatter\n'), ((1676, 1718), 'os.path.join', 'os.path.join', (['*aspera_path_list[0:bin_dir]'], {}), '(*aspera_path_list[0:bin_dir])\n', (1688, 1718), False, 'import os\n'), ((2220, 2298), 'os.path.join', 'os.path.join', (['base_address', 'options.srr_id[:6]', 'options.srr_id', 'options.srr_id'], {}), '(base_address, options.srr_id[:6], options.srr_id, options.srr_id)\n', (2232, 2298), False, 'import os\n'), ((1385, 1405), 'shutil.which', 'shutil.which', (['"""ascp"""'], {}), "('ascp')\n", (1397, 1405), False, 'import shutil\n'), ((1448, 1468), 'shutil.which', 'shutil.which', (['"""ascp"""'], {}), "('ascp')\n", (1460, 1468), False, 'import shutil\n'), ((3663, 3729), 'subprocess.run', 'run', (['command'], {'stderr': 'subprocess.DEVNULL', 'stdout': 'subprocess.DEVNULL'}), '(command, stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)\n', (3666, 3729), False, 'from subprocess import run\n'), ((4124, 4190), 'subprocess.run', 'run', (['command'], {'stderr': 'subprocess.DEVNULL', 'stdout': 'subprocess.DEVNULL'}), '(command, stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL)\n', (4127, 4190), False, 'from subprocess import run\n'), ((4347, 4358), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (4355, 4358), False, 'import sys\n'), ((4457, 4477), 'shutil.which', 'shutil.which', (['"""ascp"""'], {}), "('ascp')\n", (4469, 4477), False, 'import shutil\n'), ((4635, 4646), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (4643, 4646), False, 'import sys\n'), ((4836, 4847), 'time.time', 'time.time', ([], {}), '()\n', (4845, 4847), False, 'import time\n'), ((4873, 4910), 'time.strftime', 'time.strftime', (['"""%d-%m-%Y at %H:%M:%S"""'], {}), "('%d-%m-%Y at %H:%M:%S')\n", (4886, 4910), False, 'import time\n'), ((5390, 5402), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (5398, 5402), False, 'import sys\n'), ((3974, 3985), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (3982, 3985), False, 'import sys\n'), ((4772, 4782), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4780, 4782), False, 'import sys\n'), ((3412, 3423), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (3420, 3423), False, 'import sys\n'), ((5210, 5247), 'time.strftime', 'time.strftime', (['"""%d-%m-%Y at %H:%M:%S"""'], {}), "('%d-%m-%Y at %H:%M:%S')\n", (5223, 5247), False, 'import time\n'), ((5355, 5366), 'time.time', 'time.time', ([], {}), '()\n', (5364, 5366), False, 'import time\n'), ((5164, 5175), 'time.time', 'time.time', ([], {}), '()\n', (5173, 5175), False, 'import time\n')]
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from .models import Nav
class NavAdmin(admin.ModelAdmin):
list_display = ['__unicode__', 'parent', 'subnav_type', 'slug', ]
search_fields = ['name', ]
list_filter = ['parent', ]
prepopulated_fields = {'slug': ('name',)}
admin.site.register(Nav, NavAdmin)
|
[
"django.contrib.admin.site.register"
] |
[((297, 331), 'django.contrib.admin.site.register', 'admin.site.register', (['Nav', 'NavAdmin'], {}), '(Nav, NavAdmin)\n', (316, 331), False, 'from django.contrib import admin\n')]
|
from st2reactor.sensor.base import Sensor
from signalr import Connection
__all__ = [
'SignalRHubSensor'
]
class SignalRHubSensor(Sensor):
def __init__(self, sensor_service, config=None):
super(SignalRHubSensor, self).__init__(sensor_service=sensor_service,
config=config)
self._logger = self._sensor_service.get_logger(__name__)
self.url = config['hub_url']
self.hub_name = config['hub_name']
self._trigger_ref = 'signalr.message_received'
self._hub = None
self.connection = None
self.session = None
def setup(self):
self.connection = Connection(self.url, self.session)
# start a connection
self.connection.start()
# add a handler to process notifications to the connection
def _log_notifications(data):
self._logger.debug('Connection: new notification - {}'.format(data))
self.connection.handlers += _log_notifications # noqa pylint: disable=no-member
# get hub
self._hub = self.connection.hub(self.hub_name)
def message_received(self, message):
        self._logger.debug('Connection: new notification - {}'.format(message))
self._sensor_service.dispatch(trigger=self._trigger_ref,
                                      payload={'message': message})
def run(self):
self._hub.client.on('message_received',
                            self.message_received)
def cleanup(self):
# do not receive new messages
self._hub.client.off('message_received', self.message_received)
self.connection.close()
|
[
"signalr.Connection"
] |
[((670, 704), 'signalr.Connection', 'Connection', (['self.url', 'self.session'], {}), '(self.url, self.session)\n', (680, 704), False, 'from signalr import Connection\n')]
|
import dpctl
def has_gpu(backend="opencl"):
return bool(dpctl.get_num_devices(backend=backend, device_type="gpu"))
def has_cpu(backend="opencl"):
return bool(dpctl.get_num_devices(backend=backend, device_type="cpu"))
def has_sycl_platforms():
return bool(len(dpctl.get_platforms()))
|
[
"dpctl.get_platforms",
"dpctl.get_num_devices"
] |
[((62, 119), 'dpctl.get_num_devices', 'dpctl.get_num_devices', ([], {'backend': 'backend', 'device_type': '"""gpu"""'}), "(backend=backend, device_type='gpu')\n", (83, 119), False, 'import dpctl\n'), ((170, 227), 'dpctl.get_num_devices', 'dpctl.get_num_devices', ([], {'backend': 'backend', 'device_type': '"""cpu"""'}), "(backend=backend, device_type='cpu')\n", (191, 227), False, 'import dpctl\n'), ((277, 298), 'dpctl.get_platforms', 'dpctl.get_platforms', ([], {}), '()\n', (296, 298), False, 'import dpctl\n')]
|
# Copyright (c) 2020 The FedVision Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import typer
from fedvision_deploy_toolkit import __template__
app = typer.Typer(help="template tools")
@app.command()
def generate():
"""
generate template
"""
shutil.copy(os.path.join(__template__, "template.yaml"), os.getcwd())
@app.command(name="standalone")
def standalone_template():
"""
generate template for standalone deploy
"""
shutil.copy(os.path.join(__template__, "standalone_template.yaml"), os.getcwd())
|
[
"typer.Typer",
"os.path.join",
"os.getcwd"
] |
[((706, 740), 'typer.Typer', 'typer.Typer', ([], {'help': '"""template tools"""'}), "(help='template tools')\n", (717, 740), False, 'import typer\n'), ((828, 871), 'os.path.join', 'os.path.join', (['__template__', '"""template.yaml"""'], {}), "(__template__, 'template.yaml')\n", (840, 871), False, 'import os\n'), ((873, 884), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (882, 884), False, 'import os\n'), ((1023, 1077), 'os.path.join', 'os.path.join', (['__template__', '"""standalone_template.yaml"""'], {}), "(__template__, 'standalone_template.yaml')\n", (1035, 1077), False, 'import os\n'), ((1079, 1090), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1088, 1090), False, 'import os\n')]
|
import os
import random
import pandas as pd
import requests
import wget
from bs4 import BeautifulSoup
def get_poster(movie_id):
base_url = 'http://www.imdb.com/title/tt{}/'.format(movie_id)
print(base_url)
return BeautifulSoup(requests.get(base_url).content, 'lxml').find('div', {'class': 'poster'}).find('img').attrs[
'src']
df_id = pd.read_csv('ml-latest-small/links.csv', sep=',')
idx_to_movie = {}
for row in df_id.itertuples():
idx_to_movie[row[1] - 1] = row[2]
total_movies = 9000
movies = [0] * total_movies
for i in range(len(movies)):
if i in idx_to_movie.keys() and len(str(idx_to_movie[i])) == 6:
movies[i] = (idx_to_movie[i])
movies = list(filter(lambda imdb: imdb != 0, movies))
total_movies = len(movies)
URL = [0] * total_movies
IMDB = [0] * total_movies
URL_IMDB = {'url': [], 'imdb': []}
poster_path = 'posters'
random.shuffle(movies)
for movie_id in movies:
out = os.path.join(poster_path, str(movie_id) + '.jpg')
if not os.path.exists(out):
try:
target = get_poster(movie_id)
print('Download img from [{0}] to [{1}].'.format(target, out))
wget.download(url=target, out=out, bar=None)
except AttributeError:
pass # IMDB does not have picture for this movie.
else:
print('Image already exists {0}'.format(out))
|
[
"pandas.read_csv",
"random.shuffle",
"os.path.exists",
"wget.download",
"requests.get"
] |
[((359, 408), 'pandas.read_csv', 'pd.read_csv', (['"""ml-latest-small/links.csv"""'], {'sep': '""","""'}), "('ml-latest-small/links.csv', sep=',')\n", (370, 408), True, 'import pandas as pd\n'), ((876, 898), 'random.shuffle', 'random.shuffle', (['movies'], {}), '(movies)\n', (890, 898), False, 'import random\n'), ((994, 1013), 'os.path.exists', 'os.path.exists', (['out'], {}), '(out)\n', (1008, 1013), False, 'import os\n'), ((1157, 1201), 'wget.download', 'wget.download', ([], {'url': 'target', 'out': 'out', 'bar': 'None'}), '(url=target, out=out, bar=None)\n', (1170, 1201), False, 'import wget\n'), ((242, 264), 'requests.get', 'requests.get', (['base_url'], {}), '(base_url)\n', (254, 264), False, 'import requests\n')]
|
import datetime
import json
import os
import unittest
import responses
from config import TestingConfig
from response_operations_ui import create_app
from response_operations_ui.controllers import collection_exercise_controllers
from response_operations_ui.exceptions.exceptions import ApiError
ce_id = "4a084bc0-130f-4aee-ae48-1a9f9e50178f"
ce_events_by_id_url = f"{TestingConfig.COLLECTION_EXERCISE_URL}/collectionexercises/{ce_id}/events"
ce_nudge_events_by_id_url = f"{TestingConfig.COLLECTION_EXERCISE_URL}/collectionexercises/{ce_id}/events/nudge"
project_root = os.path.dirname(os.path.dirname(__file__))
with open(f"{project_root}/test_data/collection_exercise/ce_events_by_id.json") as fp:
ce_events = json.load(fp)
class TestCollectionExerciseController(unittest.TestCase):
def setUp(self):
self.app = create_app("TestingConfig")
self.client = self.app.test_client()
def test_get_ce_events_by_id_all_events(self):
with responses.RequestsMock() as rsps:
            rsps.add(rsps.GET, ce_events_by_id_url, json=ce_events, status=200, content_type="application/json")
with self.app.app_context():
collection_exercise = collection_exercise_controllers.get_collection_exercise_events_by_id(ce_id)
self.assertIn("mps", collection_exercise[0]["tag"], "MPS not in collection exercise events")
self.assertIn("go_live", collection_exercise[1]["tag"], "Go live not in collection exercise events")
self.assertIn("return_by", collection_exercise[2]["tag"], "Return by not in collection exercise events")
self.assertIn(
"exercise_end", collection_exercise[3]["tag"], "Exercise end not in collection exercise events"
)
def test_get_ce_events_by_id_no_events(self):
with responses.RequestsMock() as rsps:
            rsps.add(rsps.GET, ce_events_by_id_url, json=[], status=200, content_type="application/json")
with self.app.app_context():
collection_exercise = collection_exercise_controllers.get_collection_exercise_events_by_id(ce_id)
self.assertEqual(len(collection_exercise), 0, "Unexpected collection exercise event returned.")
def test_get_ce_events_by_id_http_error(self):
with responses.RequestsMock() as rsps:
rsps.add(rsps.GET, ce_events_by_id_url, status=400)
with self.app.app_context():
self.assertRaises(ApiError, collection_exercise_controllers.get_collection_exercise_events_by_id, ce_id)
def test_create_ce_event_success(self):
with responses.RequestsMock() as rsps:
rsps.add(rsps.POST, ce_events_by_id_url, status=200)
timestamp = datetime.datetime.strptime(
"".join("2020-01-27 07:00:00+00:00".rsplit(":", 1)), "%Y-%m-%d %H:%M:%S%z"
)
with self.app.app_context():
self.assertFalse(
collection_exercise_controllers.create_collection_exercise_event(ce_id, "mps", timestamp)
)
def test_delete_ce_event_accepted(self):
with responses.RequestsMock() as rsps:
rsps.add(rsps.POST, ce_nudge_events_by_id_url, status=200)
with self.app.app_context():
self.assertFalse(collection_exercise_controllers.delete_event(ce_id, "nudge"))
def test_create_ce_event_bad_request_return_false(self):
with responses.RequestsMock() as rsps:
rsps.add(rsps.POST, ce_events_by_id_url, body='{"error":{"message": "some message"}}', status=400)
timestamp = datetime.datetime.strptime(
"".join("2020-01-27 07:00:00+00:00".rsplit(":", 1)), "%Y-%m-%d %H:%M:%S%z"
)
with self.app.app_context():
self.assertTrue(
collection_exercise_controllers.create_collection_exercise_event(ce_id, "mps", timestamp)
)
|
[
"responses.RequestsMock",
"json.load",
"response_operations_ui.create_app",
"response_operations_ui.controllers.collection_exercise_controllers.create_collection_exercise_event",
"os.path.dirname",
"response_operations_ui.controllers.collection_exercise_controllers.get_collection_exercise_events_by_id",
"response_operations_ui.controllers.collection_exercise_controllers.delete_event"
] |
[((589, 614), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (604, 614), False, 'import os\n'), ((720, 733), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (729, 733), False, 'import json\n'), ((835, 862), 'response_operations_ui.create_app', 'create_app', (['"""TestingConfig"""'], {}), "('TestingConfig')\n", (845, 862), False, 'from response_operations_ui import create_app\n'), ((973, 997), 'responses.RequestsMock', 'responses.RequestsMock', ([], {}), '()\n', (995, 997), False, 'import responses\n'), ((1828, 1852), 'responses.RequestsMock', 'responses.RequestsMock', ([], {}), '()\n', (1850, 1852), False, 'import responses\n'), ((2297, 2321), 'responses.RequestsMock', 'responses.RequestsMock', ([], {}), '()\n', (2319, 2321), False, 'import responses\n'), ((2616, 2640), 'responses.RequestsMock', 'responses.RequestsMock', ([], {}), '()\n', (2638, 2640), False, 'import responses\n'), ((3136, 3160), 'responses.RequestsMock', 'responses.RequestsMock', ([], {}), '()\n', (3158, 3160), False, 'import responses\n'), ((3453, 3477), 'responses.RequestsMock', 'responses.RequestsMock', ([], {}), '()\n', (3475, 3477), False, 'import responses\n'), ((1199, 1274), 'response_operations_ui.controllers.collection_exercise_controllers.get_collection_exercise_events_by_id', 'collection_exercise_controllers.get_collection_exercise_events_by_id', (['ce_id'], {}), '(ce_id)\n', (1267, 1274), False, 'from response_operations_ui.controllers import collection_exercise_controllers\n'), ((2047, 2122), 'response_operations_ui.controllers.collection_exercise_controllers.get_collection_exercise_events_by_id', 'collection_exercise_controllers.get_collection_exercise_events_by_id', (['ce_id'], {}), '(ce_id)\n', (2115, 2122), False, 'from response_operations_ui.controllers import collection_exercise_controllers\n'), ((2969, 3062), 'response_operations_ui.controllers.collection_exercise_controllers.create_collection_exercise_event', 'collection_exercise_controllers.create_collection_exercise_event', (['ce_id', '"""mps"""', 'timestamp'], {}), "(ce_id,\n 'mps', timestamp)\n", (3033, 3062), False, 'from response_operations_ui.controllers import collection_exercise_controllers\n'), ((3316, 3376), 'response_operations_ui.controllers.collection_exercise_controllers.delete_event', 'collection_exercise_controllers.delete_event', (['ce_id', '"""nudge"""'], {}), "(ce_id, 'nudge')\n", (3360, 3376), False, 'from response_operations_ui.controllers import collection_exercise_controllers\n'), ((3851, 3944), 'response_operations_ui.controllers.collection_exercise_controllers.create_collection_exercise_event', 'collection_exercise_controllers.create_collection_exercise_event', (['ce_id', '"""mps"""', 'timestamp'], {}), "(ce_id,\n 'mps', timestamp)\n", (3915, 3944), False, 'from response_operations_ui.controllers import collection_exercise_controllers\n')]
|
from .images import (
extract_image_filenames, display_data_for_image, image_setup_cmd
)
from ipykernel.kernelbase import Kernel
import logging
from pexpect import replwrap, EOF
import pexpect
from subprocess import check_output
import os.path
import re
import signal
__version__ = '0.7.2'
version_pat = re.compile(r'version (\d+(\.\d+)+)')
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class IREPLWrapper(replwrap.REPLWrapper):
"""A subclass of REPLWrapper that gives incremental output
specifically for hbase_kernel.
The parameters are the same as for REPLWrapper, except for one
extra parameter:
:param line_output_callback: a callback method to receive each batch
of incremental output. It takes one string parameter.
"""
def __init__(self, cmd_or_spawn, orig_prompt, prompt_change=None,
extra_init_cmd=None, line_output_callback=None):
self.line_output_callback = line_output_callback
replwrap.REPLWrapper.__init__(self, cmd_or_spawn, orig_prompt,
prompt_change, extra_init_cmd=extra_init_cmd)
def _expect_prompt(self, timeout=None):
if timeout == None:
# "None" means we are executing code from a Jupyter cell by way of the run_command
# in the do_execute() code below, so do incremental output.
while True:
                pos = self.child.expect([r'hbase:\d+:\d+>', u'\r\n'], timeout=None)
                if pos == 1:
# End of line received
self.line_output_callback(self.child.before + '\n')
else:
if len(self.child.before) != 0:
# prompt received, but partial line precedes it
self.line_output_callback(self.child.before)
break
else:
# Otherwise, use existing non-incremental code
pos = replwrap.REPLWrapper._expect_prompt(self, timeout=timeout)
# Prompt received, so return normally
return pos
class HBaseKernel(Kernel):
implementation = 'hbase_kernel'
implementation_version = __version__
@property
def language_version(self):
m = version_pat.search(self.banner)
return m.group(1)
_banner = None
@property
def banner(self):
if self._banner is None:
self._banner = 'hbase banner'
return self._banner
language_info = {'name': 'hbase',
'codemirror_mode': 'shell',
'mimetype': 'text/x-sh',
'file_extension': '.sh'}
def __init__(self, **kwargs):
self.silent = False
logger.debug('init')
Kernel.__init__(self, **kwargs)
self._start_bash()
def _start_bash(self):
print('_start_bash')
# Signal handlers are inherited by forked processes, and we can't easily
# reset it from the subprocess. Since kernelapp ignores SIGINT except in
# message handlers, we need to temporarily reset the SIGINT handler here
# so that bash and its children are interruptible.
sig = signal.signal(signal.SIGINT, signal.SIG_DFL)
try:
# Note: the next few lines mirror functionality in the
# bash() function of pexpect/replwrap.py. Look at the
# source code there for comments and context for
# understanding the code here.
child = pexpect.spawn(
"/usr/local/hbase/bin/hbase", ['shell'], echo=False, encoding='utf-8', codec_errors='replace')
ps1 = replwrap.PEXPECT_PROMPT[:5] + \
u'\[\]' + replwrap.PEXPECT_PROMPT[5:]
ps2 = replwrap.PEXPECT_CONTINUATION_PROMPT[:5] + \
u'\[\]' + replwrap.PEXPECT_CONTINUATION_PROMPT[5:]
prompt_change = u"PS1='{0}' PS2='{1}' PROMPT_COMMAND=''".format(
ps1, ps2)
print(ps1)
print(ps2)
print(prompt_change)
# Using IREPLWrapper to get incremental output
# self.bashwrapper = IREPLWrapper(child, 'hbase:',
self.bashwrapper = IREPLWrapper(child, 'hbase:\d+:\d+>',
line_output_callback=self.process_output)
finally:
signal.signal(signal.SIGINT, sig)
# Register Bash function to write image data to temporary file
# self.bashwrapper.run_command(image_setup_cmd)
def process_output(self, output):
print('process line')
if not self.silent:
image_filenames, output = extract_image_filenames(output)
# Send standard output
stream_content = {'name': 'stdout', 'text': output}
self.send_response(self.iopub_socket, 'stream', stream_content)
# Send images, if any
for filename in image_filenames:
try:
data = display_data_for_image(filename)
except ValueError as e:
message = {'name': 'stdout', 'text': str(e)}
self.send_response(self.iopub_socket, 'stream', message)
else:
self.send_response(self.iopub_socket, 'display_data', data)
def do_execute(self, code, silent, store_history=True,
user_expressions=None, allow_stdin=False):
self.silent = silent
if not code.strip():
return {'status': 'ok', 'execution_count': self.execution_count,
'payload': [], 'user_expressions': {}}
interrupted = False
try:
# Note: timeout=None tells IREPLWrapper to do incremental
# output. Also note that the return value from
# run_command is not needed, because the output was
# already sent by IREPLWrapper.
self.bashwrapper.run_command(code.rstrip(), timeout=None)
except KeyboardInterrupt:
self.bashwrapper.child.sendintr()
interrupted = True
self.bashwrapper._expect_prompt()
output = self.bashwrapper.child.before
self.process_output(output)
except EOF:
output = self.bashwrapper.child.before + 'Restarting Bash'
self._start_bash()
self.process_output(output)
if interrupted:
return {'status': 'abort', 'execution_count': self.execution_count}
        # The hbase shell does not report a numeric exit status, so treat a run
        # that returned without raising as a success.
        exitcode = 0
if exitcode:
error_content = {
'ename': '',
'evalue': str(exitcode),
'traceback': []
}
self.send_response(self.iopub_socket, 'error', error_content)
error_content['execution_count'] = self.execution_count
error_content['status'] = 'error'
return error_content
else:
return {'status': 'ok', 'execution_count': self.execution_count,
'payload': [], 'user_expressions': {}}
|
[
"pexpect.spawn",
"ipykernel.kernelbase.Kernel.__init__",
"pexpect.replwrap.REPLWrapper._expect_prompt",
"pexpect.replwrap.REPLWrapper.__init__",
"signal.signal",
"logging.getLogger",
"re.compile"
] |
[((312, 350), 're.compile', 're.compile', (['"""version (\\\\d+(\\\\.\\\\d+)+)"""'], {}), "('version (\\\\d+(\\\\.\\\\d+)+)')\n", (322, 350), False, 'import re\n'), ((358, 385), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (375, 385), False, 'import logging\n'), ((992, 1104), 'pexpect.replwrap.REPLWrapper.__init__', 'replwrap.REPLWrapper.__init__', (['self', 'cmd_or_spawn', 'orig_prompt', 'prompt_change'], {'extra_init_cmd': 'extra_init_cmd'}), '(self, cmd_or_spawn, orig_prompt,\n prompt_change, extra_init_cmd=extra_init_cmd)\n', (1021, 1104), False, 'from pexpect import replwrap, EOF\n'), ((2742, 2773), 'ipykernel.kernelbase.Kernel.__init__', 'Kernel.__init__', (['self'], {}), '(self, **kwargs)\n', (2757, 2773), False, 'from ipykernel.kernelbase import Kernel\n'), ((3174, 3218), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'signal.SIG_DFL'], {}), '(signal.SIGINT, signal.SIG_DFL)\n', (3187, 3218), False, 'import signal\n'), ((1954, 2012), 'pexpect.replwrap.REPLWrapper._expect_prompt', 'replwrap.REPLWrapper._expect_prompt', (['self'], {'timeout': 'timeout'}), '(self, timeout=timeout)\n', (1989, 2012), False, 'from pexpect import replwrap, EOF\n'), ((3490, 3603), 'pexpect.spawn', 'pexpect.spawn', (['"""/usr/local/hbase/bin/hbase"""', "['shell']"], {'echo': '(False)', 'encoding': '"""utf-8"""', 'codec_errors': '"""replace"""'}), "('/usr/local/hbase/bin/hbase', ['shell'], echo=False, encoding\n ='utf-8', codec_errors='replace')\n", (3503, 3603), False, 'import pexpect\n'), ((4339, 4372), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'sig'], {}), '(signal.SIGINT, sig)\n', (4352, 4372), False, 'import signal\n')]
|
import numpy as np
from blind_walking.envs.env_modifiers.env_modifier import EnvModifier
from blind_walking.envs.env_modifiers.heightfield import HeightField
from blind_walking.envs.env_modifiers.stairs import Stairs, boxHalfLength, boxHalfWidth
""" Train robot to walk up stairs curriculum.
On reset the robot is placed either before the stairs or on top of them,
so it encounters both going up and going down the stairs.
"""
class TrainStairs(EnvModifier):
def __init__(self):
super().__init__()
self.step_rise_levels = [0.02, 0.05, 0.075, 0.10]
self.num_levels = len(self.step_rise_levels)
self.num_steps = 10
self.stair_gap = 1.5
self.step_run = 0.3
self.stair_length = (self.num_steps - 1) * self.step_run * 2 + boxHalfLength * 2 * 2
self._level = 0
self.stairs = []
for _ in range(self.num_levels):
self.stairs.append(Stairs())
def _generate(self, env):
start_x = self.stair_gap
for i in range(self.num_levels):
self.stairs[i]._generate(
env, start_x=start_x, num_steps=self.num_steps, step_rise=self.step_rise_levels[i], step_run=self.step_run
)
start_x += self.stair_length + self.stair_gap
def _reset(self, env):
if self._level > 0 and self.down_level(env):
# robot down-levels
self._level -= 1
print(f"DOWNGRADE TO LEVEL {self._level}")
elif self._level < self.num_levels and self.up_level(env):
# robot up-levels
self._level += 1
print(f"LEVEL UP TO LEVEL {self._level}!")
level = self._level
if level >= self.num_levels:
# Loop back to randomly selected level
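            # Level i is drawn with probability (i+1)/sum(1..num_levels),
            # so harder stair levels are revisited more often.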
level_list = np.arange(self.num_levels) + 1
level_probs = level_list / sum(level_list)
level = np.random.choice(self.num_levels, p=level_probs)
print(f"LOOP TO LEVEL {level}")
x_pos = level * (self.stair_length + self.stair_gap)
z_pos = 0
        # With probability 0.4, start on top of the stairs so the robot also encounters going down
if np.random.uniform() < 0.4:
x_pos += self.stair_gap + self.stair_length / 2 - 1
z_pos = self.step_rise_levels[level] * self.num_steps
self.adjust_position = (x_pos, 0, z_pos)
def up_level(self, env):
"""To succeed the current level, robot needs to climb over the current stair level
and reach the start of next stair level"""
base_pos = env._robot.GetBasePosition()
target_x = (self._level + 1) * (self.stair_length + self.stair_gap) + 0.5
return (
self.adjust_position[2] == 0
and base_pos[0] > target_x
and base_pos[1] > -boxHalfWidth
and base_pos[1] < boxHalfWidth
)
def down_level(self, env):
"""Downgrade to the previous level if robot was unable to travel a quarter of the stair length"""
start_pos = self.adjust_position
base_pos = env._robot.GetBasePosition()
x_dist_travelled = base_pos[0] - start_pos[0]
return x_dist_travelled < self.stair_length / 5
class TrainUneven(EnvModifier):
def __init__(self):
super().__init__()
self.hf = HeightField()
def _generate(self, env):
self.hf._generate(env, start_x=10, heightPerturbationRange=0.08)
class TrainMultiple(EnvModifier):
def __init__(self):
super().__init__()
self.hf_length = 20
self.hf_perturb = 0.08
self.hf = HeightField()
self.step_rise_levels = [0.02, 0.05]
self.num_levels = len(self.step_rise_levels)
self.num_steps = 10
self.stair_gap = 1.5
self.step_run = 0.3
self.stair_length = (self.num_steps - 1) * self.step_run * 2 + boxHalfLength * 2 * 2
self._stair_level = 0
self.stairs = []
for _ in range(self.num_levels):
self.stairs.append(Stairs())
self._reset_manual_override = None
def _generate(self, env):
self.hf._generate(env, start_x=10, heightPerturbationRange=self.hf_perturb)
start_x = self.stair_gap + self.hf_length
for i in range(self.num_levels):
self.stairs[i]._generate(
env, start_x=start_x, num_steps=self.num_steps, step_rise=self.step_rise_levels[i], step_run=self.step_run
)
start_x += self.stair_length + self.stair_gap
def _reset_to_heightfield(self):
"""Reset position to before the heightfield"""
self.adjust_position = (0, 0, 0)
def _select_stairs_level(self, env):
# Check if robot has succeeded current level
if self._stair_level < self.num_levels and self.succeed_level(env):
print(f"LEVEL {self._stair_level} PASSED!")
self._stair_level += 1
level = self._stair_level
if level >= self.num_levels:
# Loop back to randomly selected level
level_list = np.arange(self.num_levels) + 1
level_probs = level_list / sum(level_list)
level = np.random.choice(self.num_levels, p=level_probs)
print(f"LOOP TO LEVEL {level}")
elif level > 0 and np.random.uniform() < 0.2:
# Redo previous level
level -= 1
return level
def _reset_to_stairs(self, level):
"""Reset position to just before the stairs of a given level"""
x_pos = self.hf_length + level * (self.stair_length + self.stair_gap)
z_pos = 0
        # With probability 0.4, start on top of the stairs so the robot also encounters going down
if np.random.uniform() < 0.4:
x_pos += self.stair_gap + self.stair_length / 2 - 1
z_pos = self.step_rise_levels[level] * self.num_steps
self.adjust_position = (x_pos, 0, z_pos)
def _reset_randomly(self, env):
if np.random.uniform() < 0.5:
# See heightfield
self._reset_to_heightfield()
else:
# See stairs
level = self._select_stairs_level(env)
self._reset_to_stairs(level)
def _reset(self, env):
if self._reset_manual_override is not None:
self._reset_manually()
# Remove override for subsequent resets
# self._reset_manual_override = None
else:
self._reset_randomly(env)
def _reset_manually(self):
if self._reset_manual_override == "heightfield":
self._reset_to_heightfield()
elif self._reset_manual_override == "stairs_0":
self._reset_to_stairs(level=0)
elif self._reset_manual_override == "stairs_1":
self._reset_to_stairs(level=1)
else:
raise ValueError(f"Invalid override {self._reset_manual_override}")
def _override_reset(self, override: str):
"""Manually set what the next reset should be"""
assert override in ("heightfield", "stairs_0", "stairs_1")
self._reset_manual_override = override
def succeed_level(self, env):
"""To succeed the current level, robot needs to climb over the current stair level
and reach the start of next stair level"""
base_pos = env._robot.GetBasePosition()
target_x = self.hf_length + (self._stair_level + 1) * (self.stair_length + self.stair_gap) + 0.5
return (
self.adjust_position[2] == 0
and base_pos[0] > target_x
and base_pos[1] > -boxHalfWidth
and base_pos[1] < boxHalfWidth
)
|
[
"numpy.random.uniform",
"numpy.arange",
"numpy.random.choice",
"blind_walking.envs.env_modifiers.stairs.Stairs",
"blind_walking.envs.env_modifiers.heightfield.HeightField"
] |
[((3324, 3337), 'blind_walking.envs.env_modifiers.heightfield.HeightField', 'HeightField', ([], {}), '()\n', (3335, 3337), False, 'from blind_walking.envs.env_modifiers.heightfield import HeightField\n'), ((3619, 3632), 'blind_walking.envs.env_modifiers.heightfield.HeightField', 'HeightField', ([], {}), '()\n', (3630, 3632), False, 'from blind_walking.envs.env_modifiers.heightfield import HeightField\n'), ((1894, 1942), 'numpy.random.choice', 'np.random.choice', (['self.num_levels'], {'p': 'level_probs'}), '(self.num_levels, p=level_probs)\n', (1910, 1942), True, 'import numpy as np\n'), ((2154, 2173), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (2171, 2173), True, 'import numpy as np\n'), ((5215, 5263), 'numpy.random.choice', 'np.random.choice', (['self.num_levels'], {'p': 'level_probs'}), '(self.num_levels, p=level_probs)\n', (5231, 5263), True, 'import numpy as np\n'), ((5741, 5760), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (5758, 5760), True, 'import numpy as np\n'), ((6001, 6020), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (6018, 6020), True, 'import numpy as np\n'), ((897, 905), 'blind_walking.envs.env_modifiers.stairs.Stairs', 'Stairs', ([], {}), '()\n', (903, 905), False, 'from blind_walking.envs.env_modifiers.stairs import Stairs, boxHalfLength, boxHalfWidth\n'), ((1786, 1812), 'numpy.arange', 'np.arange', (['self.num_levels'], {}), '(self.num_levels)\n', (1795, 1812), True, 'import numpy as np\n'), ((4048, 4056), 'blind_walking.envs.env_modifiers.stairs.Stairs', 'Stairs', ([], {}), '()\n', (4054, 4056), False, 'from blind_walking.envs.env_modifiers.stairs import Stairs, boxHalfLength, boxHalfWidth\n'), ((5107, 5133), 'numpy.arange', 'np.arange', (['self.num_levels'], {}), '(self.num_levels)\n', (5116, 5133), True, 'import numpy as np\n'), ((5337, 5356), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (5354, 5356), True, 'import numpy as np\n')]
|
import argparse
import nltk
def process(input_file_name, output_file_name):
input_file = open(input_file_name, mode='r', encoding='utf-8')
output_file = open(output_file_name, mode='w', encoding='utf-8')
for line in input_file:
sentences = nltk.sent_tokenize(line)
output_line = " <segment> ".join(sentences)
output_file.write(output_line)
output_file.write("\n")
output_file.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Add <segment> markers between each sentence')
parser.add_argument('-input_file', type=str,
help='path of input file', default=None)
parser.add_argument('-output_file', type=str,
help='path of output file', default=None)
args = parser.parse_args()
process(args.input_file, args.output_file)
|
[
"nltk.sent_tokenize",
"argparse.ArgumentParser"
] |
[((476, 563), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Add <segment> markers between each sentence"""'}), "(description=\n 'Add <segment> markers between each sentence')\n", (499, 563), False, 'import argparse\n'), ((262, 286), 'nltk.sent_tokenize', 'nltk.sent_tokenize', (['line'], {}), '(line)\n', (280, 286), False, 'import nltk\n')]
|
# 2-electron VMC code for 2dim quantum dot with importance sampling
# Using gaussian rng for new positions and Metropolis- Hastings
# Added energy minimization
from math import exp, sqrt
from random import random, seed, normalvariate
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from scipy.optimize import minimize
import sys
# Trial wave function for the 2-electron quantum dot in two dims
def WaveFunction(r,alpha,beta):
r1 = r[0,0]**2 + r[0,1]**2
r2 = r[1,0]**2 + r[1,1]**2
r12 = sqrt((r[0,0]-r[1,0])**2 + (r[0,1]-r[1,1])**2)
deno = r12/(1+beta*r12)
return exp(-0.5*alpha*(r1+r2)+deno)
# Local energy for the 2-electron quantum dot in two dims, using analytical local energy
def LocalEnergy(r,alpha,beta):
r1 = (r[0,0]**2 + r[0,1]**2)
r2 = (r[1,0]**2 + r[1,1]**2)
r12 = sqrt((r[0,0]-r[1,0])**2 + (r[0,1]-r[1,1])**2)
deno = 1.0/(1+beta*r12)
deno2 = deno*deno
return 0.5*(1-alpha*alpha)*(r1 + r2) +2.0*alpha + 1.0/r12+deno2*(alpha*r12-deno2+2*beta*deno-1.0/r12)
# Derivative of wave function ansatz as function of variational parameters
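# Returns d(ln Psi_T)/d(alpha) and d(ln Psi_T)/d(beta) evaluated at the configuration r.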
def DerivativeWFansatz(r,alpha,beta):
WfDer = np.zeros((2), np.double)
r1 = (r[0,0]**2 + r[0,1]**2)
r2 = (r[1,0]**2 + r[1,1]**2)
r12 = sqrt((r[0,0]-r[1,0])**2 + (r[0,1]-r[1,1])**2)
deno = 1.0/(1+beta*r12)
deno2 = deno*deno
WfDer[0] = -0.5*(r1+r2)
WfDer[1] = -r12*r12*deno2
return WfDer
# Setting up the quantum force for the two-electron quantum dot, recall that it is a vector
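# The quantum force is used as the drift term of the Fokker-Planck/Langevin trial moves below.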
def QuantumForce(r,alpha,beta):
qforce = np.zeros((NumberParticles,Dimension), np.double)
r12 = sqrt((r[0,0]-r[1,0])**2 + (r[0,1]-r[1,1])**2)
deno = 1.0/(1+beta*r12)
qforce[0,:] = -2*r[0,:]*alpha*(r[0,:]-r[1,:])*deno*deno/r12
qforce[1,:] = -2*r[1,:]*alpha*(r[1,:]-r[0,:])*deno*deno/r12
return qforce
# Computing the derivative of the energy and the energy
def EnergyDerivative(x0):
# Parameters in the Fokker-Planck simulation of the quantum force
D = 0.5
TimeStep = 0.05
# positions
PositionOld = np.zeros((NumberParticles,Dimension), np.double)
PositionNew = np.zeros((NumberParticles,Dimension), np.double)
# Quantum force
QuantumForceOld = np.zeros((NumberParticles,Dimension), np.double)
QuantumForceNew = np.zeros((NumberParticles,Dimension), np.double)
energy = 0.0
DeltaE = 0.0
alpha = x0[0]
beta = x0[1]
EnergyDer = 0.0
DeltaPsi = 0.0
DerivativePsiE = 0.0
#Initial position
for i in range(NumberParticles):
for j in range(Dimension):
PositionOld[i,j] = normalvariate(0.0,1.0)*sqrt(TimeStep)
wfold = WaveFunction(PositionOld,alpha,beta)
QuantumForceOld = QuantumForce(PositionOld,alpha, beta)
#Loop over MC MCcycles
for MCcycle in range(NumberMCcycles):
#Trial position moving one particle at the time
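        # Langevin step: new = old + Gaussian noise of variance TimeStep + drift*D*TimeStep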
for i in range(NumberParticles):
for j in range(Dimension):
PositionNew[i,j] = PositionOld[i,j]+normalvariate(0.0,1.0)*sqrt(TimeStep)+\
QuantumForceOld[i,j]*TimeStep*D
wfnew = WaveFunction(PositionNew,alpha,beta)
QuantumForceNew = QuantumForce(PositionNew,alpha, beta)
GreensFunction = 0.0
for j in range(Dimension):
GreensFunction += 0.5*(QuantumForceOld[i,j]+QuantumForceNew[i,j])*\
(D*TimeStep*0.5*(QuantumForceOld[i,j]-QuantumForceNew[i,j])-\
PositionNew[i,j]+PositionOld[i,j])
GreensFunction = exp(GreensFunction)
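            # Acceptance ratio for Metropolis-Hastings with importance sampling:
            # q = [G(new->old) |Psi(new)|^2] / [G(old->new) |Psi(old)|^2]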
ProbabilityRatio = GreensFunction*wfnew**2/wfold**2
#Metropolis-Hastings test to see whether we accept the move
if random() <= ProbabilityRatio:
for j in range(Dimension):
PositionOld[i,j] = PositionNew[i,j]
QuantumForceOld[i,j] = QuantumForceNew[i,j]
wfold = wfnew
DeltaE = LocalEnergy(PositionOld,alpha,beta)
DerPsi = DerivativeWFansatz(PositionOld,alpha,beta)
DeltaPsi += DerPsi
energy += DeltaE
DerivativePsiE += DerPsi*DeltaE
# We calculate mean values
energy /= NumberMCcycles
DerivativePsiE /= NumberMCcycles
DeltaPsi /= NumberMCcycles
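    # Standard VMC gradient: dE/dp = 2*( <E_L dlnPsi/dp> - <E_L><dlnPsi/dp> )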
EnergyDer = 2*(DerivativePsiE-DeltaPsi*energy)
return EnergyDer
# Computing the expectation value of the local energy
def Energy(x0):
# Parameters in the Fokker-Planck simulation of the quantum force
D = 0.5
TimeStep = 0.05
# positions
PositionOld = np.zeros((NumberParticles,Dimension), np.double)
PositionNew = np.zeros((NumberParticles,Dimension), np.double)
# Quantum force
QuantumForceOld = np.zeros((NumberParticles,Dimension), np.double)
QuantumForceNew = np.zeros((NumberParticles,Dimension), np.double)
energy = 0.0
DeltaE = 0.0
alpha = x0[0]
beta = x0[1]
#Initial position
for i in range(NumberParticles):
for j in range(Dimension):
PositionOld[i,j] = normalvariate(0.0,1.0)*sqrt(TimeStep)
wfold = WaveFunction(PositionOld,alpha,beta)
QuantumForceOld = QuantumForce(PositionOld,alpha, beta)
#Loop over MC MCcycles
for MCcycle in range(NumberMCcycles):
#Trial position moving one particle at the time
for i in range(NumberParticles):
for j in range(Dimension):
PositionNew[i,j] = PositionOld[i,j]+normalvariate(0.0,1.0)*sqrt(TimeStep)+\
QuantumForceOld[i,j]*TimeStep*D
wfnew = WaveFunction(PositionNew,alpha,beta)
QuantumForceNew = QuantumForce(PositionNew,alpha, beta)
GreensFunction = 0.0
for j in range(Dimension):
GreensFunction += 0.5*(QuantumForceOld[i,j]+QuantumForceNew[i,j])*\
(D*TimeStep*0.5*(QuantumForceOld[i,j]-QuantumForceNew[i,j])-\
PositionNew[i,j]+PositionOld[i,j])
GreensFunction = exp(GreensFunction)
ProbabilityRatio = GreensFunction*wfnew**2/wfold**2
#Metropolis-Hastings test to see whether we accept the move
if random() <= ProbabilityRatio:
for j in range(Dimension):
PositionOld[i,j] = PositionNew[i,j]
QuantumForceOld[i,j] = QuantumForceNew[i,j]
wfold = wfnew
DeltaE = LocalEnergy(PositionOld,alpha,beta)
energy += DeltaE
if Printout:
outfile.write('%f\n' %(energy/(MCcycle+1.0)))
# We calculate mean values
energy /= NumberMCcycles
return energy
#Here starts the main program with variable declarations
NumberParticles = 2
Dimension = 2
# seed for rng generator
seed()
# Monte Carlo cycles for parameter optimization
Printout = False
NumberMCcycles= 10000
# guess for variational parameters
x0 = np.array([0.9,0.2])
# Using Broydens method to find optimal parameters
res = minimize(Energy, x0, method='BFGS', jac=EnergyDerivative, options={'gtol': 1e-4,'disp': True})
x0 = res.x
print(x0)
# Compute the energy again with the optimal parameters and increased number of Monte Cycles
NumberMCcycles= 100000
Printout = True
outfile = open("Energies.dat",'w')
print(Energy(x0))
outfile.close()
|
[
"scipy.optimize.minimize",
"math.exp",
"math.sqrt",
"random.normalvariate",
"numpy.zeros",
"random.random",
"numpy.array",
"random.seed"
] |
[((6999, 7005), 'random.seed', 'seed', ([], {}), '()\n', (7003, 7005), False, 'from random import random, seed, normalvariate\n'), ((7133, 7153), 'numpy.array', 'np.array', (['[0.9, 0.2]'], {}), '([0.9, 0.2])\n', (7141, 7153), True, 'import numpy as np\n'), ((7210, 7312), 'scipy.optimize.minimize', 'minimize', (['Energy', 'x0'], {'method': '"""BFGS"""', 'jac': 'EnergyDerivative', 'options': "{'gtol': 0.0001, 'disp': True}"}), "(Energy, x0, method='BFGS', jac=EnergyDerivative, options={'gtol': \n 0.0001, 'disp': True})\n", (7218, 7312), False, 'from scipy.optimize import minimize\n'), ((635, 692), 'math.sqrt', 'sqrt', (['((r[0, 0] - r[1, 0]) ** 2 + (r[0, 1] - r[1, 1]) ** 2)'], {}), '((r[0, 0] - r[1, 0]) ** 2 + (r[0, 1] - r[1, 1]) ** 2)\n', (639, 692), False, 'from math import exp, sqrt\n'), ((720, 756), 'math.exp', 'exp', (['(-0.5 * alpha * (r1 + r2) + deno)'], {}), '(-0.5 * alpha * (r1 + r2) + deno)\n', (723, 756), False, 'from math import exp, sqrt\n'), ((952, 1009), 'math.sqrt', 'sqrt', (['((r[0, 0] - r[1, 0]) ** 2 + (r[0, 1] - r[1, 1]) ** 2)'], {}), '((r[0, 0] - r[1, 0]) ** 2 + (r[0, 1] - r[1, 1]) ** 2)\n', (956, 1009), False, 'from math import exp, sqrt\n'), ((1284, 1306), 'numpy.zeros', 'np.zeros', (['(2)', 'np.double'], {}), '(2, np.double)\n', (1292, 1306), True, 'import numpy as np\n'), ((1385, 1442), 'math.sqrt', 'sqrt', (['((r[0, 0] - r[1, 0]) ** 2 + (r[0, 1] - r[1, 1]) ** 2)'], {}), '((r[0, 0] - r[1, 0]) ** 2 + (r[0, 1] - r[1, 1]) ** 2)\n', (1389, 1442), False, 'from math import exp, sqrt\n'), ((1696, 1745), 'numpy.zeros', 'np.zeros', (['(NumberParticles, Dimension)', 'np.double'], {}), '((NumberParticles, Dimension), np.double)\n', (1704, 1745), True, 'import numpy as np\n'), ((1755, 1812), 'math.sqrt', 'sqrt', (['((r[0, 0] - r[1, 0]) ** 2 + (r[0, 1] - r[1, 1]) ** 2)'], {}), '((r[0, 0] - r[1, 0]) ** 2 + (r[0, 1] - r[1, 1]) ** 2)\n', (1759, 1812), False, 'from math import exp, sqrt\n'), ((2206, 2255), 'numpy.zeros', 'np.zeros', (['(NumberParticles, Dimension)', 'np.double'], {}), '((NumberParticles, Dimension), np.double)\n', (2214, 2255), True, 'import numpy as np\n'), ((2273, 2322), 'numpy.zeros', 'np.zeros', (['(NumberParticles, Dimension)', 'np.double'], {}), '((NumberParticles, Dimension), np.double)\n', (2281, 2322), True, 'import numpy as np\n'), ((2364, 2413), 'numpy.zeros', 'np.zeros', (['(NumberParticles, Dimension)', 'np.double'], {}), '((NumberParticles, Dimension), np.double)\n', (2372, 2413), True, 'import numpy as np\n'), ((2435, 2484), 'numpy.zeros', 'np.zeros', (['(NumberParticles, Dimension)', 'np.double'], {}), '((NumberParticles, Dimension), np.double)\n', (2443, 2484), True, 'import numpy as np\n'), ((4765, 4814), 'numpy.zeros', 'np.zeros', (['(NumberParticles, Dimension)', 'np.double'], {}), '((NumberParticles, Dimension), np.double)\n', (4773, 4814), True, 'import numpy as np\n'), ((4832, 4881), 'numpy.zeros', 'np.zeros', (['(NumberParticles, Dimension)', 'np.double'], {}), '((NumberParticles, Dimension), np.double)\n', (4840, 4881), True, 'import numpy as np\n'), ((4923, 4972), 'numpy.zeros', 'np.zeros', (['(NumberParticles, Dimension)', 'np.double'], {}), '((NumberParticles, Dimension), np.double)\n', (4931, 4972), True, 'import numpy as np\n'), ((4994, 5043), 'numpy.zeros', 'np.zeros', (['(NumberParticles, Dimension)', 'np.double'], {}), '((NumberParticles, Dimension), np.double)\n', (5002, 5043), True, 'import numpy as np\n'), ((3743, 3762), 'math.exp', 'exp', (['GreensFunction'], {}), '(GreensFunction)\n', (3746, 3762), False, 'from math import exp, 
sqrt\n'), ((6237, 6256), 'math.exp', 'exp', (['GreensFunction'], {}), '(GreensFunction)\n', (6240, 6256), False, 'from math import exp, sqrt\n'), ((2744, 2767), 'random.normalvariate', 'normalvariate', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (2757, 2767), False, 'from random import random, seed, normalvariate\n'), ((2767, 2781), 'math.sqrt', 'sqrt', (['TimeStep'], {}), '(TimeStep)\n', (2771, 2781), False, 'from math import exp, sqrt\n'), ((3914, 3922), 'random.random', 'random', ([], {}), '()\n', (3920, 3922), False, 'from random import random, seed, normalvariate\n'), ((5238, 5261), 'random.normalvariate', 'normalvariate', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (5251, 5261), False, 'from random import random, seed, normalvariate\n'), ((5261, 5275), 'math.sqrt', 'sqrt', (['TimeStep'], {}), '(TimeStep)\n', (5265, 5275), False, 'from math import exp, sqrt\n'), ((6408, 6416), 'random.random', 'random', ([], {}), '()\n', (6414, 6416), False, 'from random import random, seed, normalvariate\n'), ((3149, 3172), 'random.normalvariate', 'normalvariate', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (3162, 3172), False, 'from random import random, seed, normalvariate\n'), ((3172, 3186), 'math.sqrt', 'sqrt', (['TimeStep'], {}), '(TimeStep)\n', (3176, 3186), False, 'from math import exp, sqrt\n'), ((5643, 5666), 'random.normalvariate', 'normalvariate', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (5656, 5666), False, 'from random import random, seed, normalvariate\n'), ((5666, 5680), 'math.sqrt', 'sqrt', (['TimeStep'], {}), '(TimeStep)\n', (5670, 5680), False, 'from math import exp, sqrt\n')]
|
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.metrics import average_precision_score as auprc
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, concatenate, Input, LSTM
from tensorflow.keras.layers import Conv1D, Reshape, Lambda
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.callbacks import Callback
from tensorflow.keras.callbacks import ModelCheckpoint
import tensorflow.keras.backend as K
from iterutils import train_generator
def data_generator(path, batchsize, seqlen, bin_size):
dat_seq = train_generator(path['seq'], batchsize, seqlen, 'seq', 'repeat')
dat_chromatin = []
for chromatin_track in path['chromatin_tracks']:
dat_chromatin.append(
train_generator(chromatin_track, batchsize, seqlen, 'chrom', 'repeat'))
y = train_generator(path['labels'], batchsize, seqlen, 'labels', 'repeat')
while True:
combined_chrom_data = []
for chromatin_track_generators in dat_chromatin:
curr_chromatin_mark = next(chromatin_track_generators)
mark_resolution = curr_chromatin_mark.shape
assert (mark_resolution == (batchsize, seqlen/bin_size)),\
"Please check binning, specified bin size=50"
combined_chrom_data.append(pd.DataFrame(curr_chromatin_mark))
chromatin_features = pd.concat(combined_chrom_data, axis=1).values
print(chromatin_features.shape)
sequence_features = next(dat_seq)
labels = next(y)
yield [sequence_features, chromatin_features], labels
def add_new_layers(base_model, seq_len, no_of_chromatin_tracks, bin_size):
"""
    Takes a pre-existing M-SEQ (Definition in README) & adds structure to
    use it as part of a bimodal DNA sequence + prior chromatin network
    Parameters:
        base_model (keras Model): A pre-trained sequence-only (M-SEQ) model
        seq_len (int): Length of the input DNA sequence window
        no_of_chromatin_tracks (int): The expected number of chromatin tracks
        bin_size (int): Bin size used for the chromatin signal (seq_len/bin_size bins per track)
Returns:
model: a Keras Model
"""
def permute(x):
return K.permute_dimensions(x, (0, 2, 1))
# Transfer from a pre-trained M-SEQ
curr_layer = base_model.get_layer(name='dense_2')
curr_tensor = curr_layer.output
xs = Dense(1, name='MSEQ-dense-new', activation='tanh')(curr_tensor)
# Defining a M-C sub-network
chrom_input = Input(shape=(no_of_chromatin_tracks * int(seq_len/bin_size),), name='chrom_input')
ci = Reshape((no_of_chromatin_tracks, int(seq_len/bin_size)),
input_shape=(no_of_chromatin_tracks * int(seq_len/bin_size),))(chrom_input)
# Permuting the input dimensions to match Keras input requirements:
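    # Conv1D expects (batch, steps, channels): reorder (batch, tracks, bins) -> (batch, bins, tracks)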
permute_func = Lambda(permute)
ci = permute_func(ci)
xc = Conv1D(15, 1, padding='valid', activation='relu', name='MC-conv1d')(ci)
xc = LSTM(5, activation='relu', name='MC-lstm')(xc)
xc = Dense(1, activation='tanh', name='MC-dense')(xc)
# Concatenating sequence (MSEQ) and chromatin (MC) networks:
merged_layer = concatenate([xs, xc])
result = Dense(1, activation='sigmoid', name='MSC-dense')(merged_layer)
model = Model(inputs=[base_model.input, chrom_input], outputs=result)
return model
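# Illustrative usage (file name and shapes are hypothetical, not part of this module):
#   base = tf.keras.models.load_model('pretrained_mseq.hdf5')
#   msc_model = add_new_layers(base, seq_len=500, no_of_chromatin_tracks=2, bin_size=50)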
class PrecisionRecall(Callback):
def __init__(self, val_data):
super().__init__()
self.validation_data = val_data
def on_train_begin(self, logs=None):
self.val_auprc = []
self.train_auprc = []
def on_epoch_end(self, epoch, logs=None):
(x_val, c_val), y_val = self.validation_data
predictions = self.model.predict([x_val, c_val])
aupr = auprc(y_val, predictions)
self.val_auprc.append(aupr)
def save_metrics(hist_object, pr_history, records_path):
loss = hist_object.history['loss']
val_loss = hist_object.history['val_loss']
val_pr = pr_history.val_auprc
# Saving the training metrics
np.savetxt(records_path + 'trainingLoss.txt', loss, fmt='%1.2f')
np.savetxt(records_path + 'valLoss.txt', val_loss, fmt='%1.2f')
np.savetxt(records_path + 'valPRC.txt', val_pr, fmt='%1.2f')
return loss, val_pr
def transfer(train_path, val_path, basemodel, model, steps_per_epoch,
batchsize, records_path, bin_size, seq_len):
"""
Trains the M-SC, transferring weights from the pre-trained M-SEQ.
The M-SEQ weights are kept fixed except for the final layer.
Parameters:
train_path (str): Path + prefix to training data
val_path (str): Path + prefix to the validation data
basemodel (Model): Pre-trained keras M-SEQ model
model (Model): Defined bimodal network
steps_per_epoch (int): Len(training_data/batchsize)
batchsize (int): Batch size used in SGD
records_path (str): Path + prefix to output directory
Returns:
loss (ndarray): An array with the validation loss at each epoch
"""
# Making the base model layers non-trainable:
for layer in basemodel.layers:
layer.trainable = False
# Training rest of the model.
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='binary_crossentropy', optimizer=sgd)
# Get train and validation data
train_data_generator = data_generator(train_path, batchsize, seqlen=seq_len, bin_size=bin_size)
val_data_generator = data_generator(val_path, 200000, seqlen=seq_len, bin_size=bin_size)
validation_data = next(val_data_generator)
precision_recall_history = PrecisionRecall(validation_data)
checkpointer = ModelCheckpoint(records_path + 'model_epoch{epoch}.hdf5',
verbose=1, save_best_only=False)
hist = model.fit_generator(epochs=15, steps_per_epoch=steps_per_epoch,
generator=train_data_generator,
validation_data=validation_data,
callbacks=[precision_recall_history,
checkpointer])
loss, val_pr = save_metrics(hist_object=hist, pr_history=precision_recall_history,
records_path=records_path)
return loss, val_pr
def transfer_and_train_msc(train_path, val_path, basemodel,
batch_size, records_path, bin_size, seq_len):
# Calculate size of the training set:
training_set_size = len(np.loadtxt(train_path['labels']))
# Calculate the steps per epoch
steps_per_epoch = training_set_size / batch_size
# Calculate number of chromatin tracks
no_of_chrom_tracks = len(train_path['chromatin_tracks'])
model = add_new_layers(basemodel, seq_len, no_of_chrom_tracks, bin_size)
loss, val_pr = transfer(train_path, val_path, basemodel, model, steps_per_epoch,
batch_size, records_path, bin_size, seq_len)
return loss, val_pr
|
[
"pandas.DataFrame",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv1D",
"numpy.savetxt",
"tensorflow.keras.optimizers.SGD",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.models.Model",
"iterutils.train_generator",
"tensorflow.keras.layers.concatenate",
"tensorflow.keras.backend.permute_dimensions",
"tensorflow.keras.layers.LSTM",
"numpy.loadtxt",
"sklearn.metrics.average_precision_score",
"pandas.concat",
"tensorflow.keras.layers.Lambda"
] |
[((589, 653), 'iterutils.train_generator', 'train_generator', (["path['seq']", 'batchsize', 'seqlen', '"""seq"""', '"""repeat"""'], {}), "(path['seq'], batchsize, seqlen, 'seq', 'repeat')\n", (604, 653), False, 'from iterutils import train_generator\n'), ((852, 922), 'iterutils.train_generator', 'train_generator', (["path['labels']", 'batchsize', 'seqlen', '"""labels"""', '"""repeat"""'], {}), "(path['labels'], batchsize, seqlen, 'labels', 'repeat')\n", (867, 922), False, 'from iterutils import train_generator\n'), ((2704, 2719), 'tensorflow.keras.layers.Lambda', 'Lambda', (['permute'], {}), '(permute)\n', (2710, 2719), False, 'from tensorflow.keras.layers import Conv1D, Reshape, Lambda\n'), ((3026, 3047), 'tensorflow.keras.layers.concatenate', 'concatenate', (['[xs, xc]'], {}), '([xs, xc])\n', (3037, 3047), False, 'from tensorflow.keras.layers import Dense, concatenate, Input, LSTM\n'), ((3136, 3197), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[base_model.input, chrom_input]', 'outputs': 'result'}), '(inputs=[base_model.input, chrom_input], outputs=result)\n', (3141, 3197), False, 'from tensorflow.keras.models import Model\n'), ((3903, 3967), 'numpy.savetxt', 'np.savetxt', (["(records_path + 'trainingLoss.txt')", 'loss'], {'fmt': '"""%1.2f"""'}), "(records_path + 'trainingLoss.txt', loss, fmt='%1.2f')\n", (3913, 3967), True, 'import numpy as np\n'), ((3972, 4035), 'numpy.savetxt', 'np.savetxt', (["(records_path + 'valLoss.txt')", 'val_loss'], {'fmt': '"""%1.2f"""'}), "(records_path + 'valLoss.txt', val_loss, fmt='%1.2f')\n", (3982, 4035), True, 'import numpy as np\n'), ((4040, 4100), 'numpy.savetxt', 'np.savetxt', (["(records_path + 'valPRC.txt')", 'val_pr'], {'fmt': '"""%1.2f"""'}), "(records_path + 'valPRC.txt', val_pr, fmt='%1.2f')\n", (4050, 4100), True, 'import numpy as np\n'), ((5063, 5117), 'tensorflow.keras.optimizers.SGD', 'SGD', ([], {'lr': '(0.01)', 'decay': '(1e-06)', 'momentum': '(0.9)', 'nesterov': '(True)'}), '(lr=0.01, decay=1e-06, momentum=0.9, nesterov=True)\n', (5066, 5117), False, 'from tensorflow.keras.optimizers import SGD\n'), ((5538, 5632), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (["(records_path + 'model_epoch{epoch}.hdf5')"], {'verbose': '(1)', 'save_best_only': '(False)'}), "(records_path + 'model_epoch{epoch}.hdf5', verbose=1,\n save_best_only=False)\n", (5553, 5632), False, 'from tensorflow.keras.callbacks import ModelCheckpoint\n'), ((2080, 2114), 'tensorflow.keras.backend.permute_dimensions', 'K.permute_dimensions', (['x', '(0, 2, 1)'], {}), '(x, (0, 2, 1))\n', (2100, 2114), True, 'import tensorflow.keras.backend as K\n'), ((2255, 2305), 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {'name': '"""MSEQ-dense-new"""', 'activation': '"""tanh"""'}), "(1, name='MSEQ-dense-new', activation='tanh')\n", (2260, 2305), False, 'from tensorflow.keras.layers import Dense, concatenate, Input, LSTM\n'), ((2755, 2822), 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['(15)', '(1)'], {'padding': '"""valid"""', 'activation': '"""relu"""', 'name': '"""MC-conv1d"""'}), "(15, 1, padding='valid', activation='relu', name='MC-conv1d')\n", (2761, 2822), False, 'from tensorflow.keras.layers import Conv1D, Reshape, Lambda\n'), ((2836, 2878), 'tensorflow.keras.layers.LSTM', 'LSTM', (['(5)'], {'activation': '"""relu"""', 'name': '"""MC-lstm"""'}), "(5, activation='relu', name='MC-lstm')\n", (2840, 2878), False, 'from tensorflow.keras.layers import Dense, concatenate, Input, LSTM\n'), ((2892, 2936), 'tensorflow.keras.layers.Dense', 'Dense', 
(['(1)'], {'activation': '"""tanh"""', 'name': '"""MC-dense"""'}), "(1, activation='tanh', name='MC-dense')\n", (2897, 2936), False, 'from tensorflow.keras.layers import Dense, concatenate, Input, LSTM\n'), ((3061, 3109), 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""', 'name': '"""MSC-dense"""'}), "(1, activation='sigmoid', name='MSC-dense')\n", (3066, 3109), False, 'from tensorflow.keras.layers import Dense, concatenate, Input, LSTM\n'), ((3624, 3649), 'sklearn.metrics.average_precision_score', 'auprc', (['y_val', 'predictions'], {}), '(y_val, predictions)\n', (3629, 3649), True, 'from sklearn.metrics import average_precision_score as auprc\n'), ((6369, 6401), 'numpy.loadtxt', 'np.loadtxt', (["train_path['labels']"], {}), "(train_path['labels'])\n", (6379, 6401), True, 'import numpy as np\n'), ((772, 842), 'iterutils.train_generator', 'train_generator', (['chromatin_track', 'batchsize', 'seqlen', '"""chrom"""', '"""repeat"""'], {}), "(chromatin_track, batchsize, seqlen, 'chrom', 'repeat')\n", (787, 842), False, 'from iterutils import train_generator\n'), ((1388, 1426), 'pandas.concat', 'pd.concat', (['combined_chrom_data'], {'axis': '(1)'}), '(combined_chrom_data, axis=1)\n', (1397, 1426), True, 'import pandas as pd\n'), ((1324, 1357), 'pandas.DataFrame', 'pd.DataFrame', (['curr_chromatin_mark'], {}), '(curr_chromatin_mark)\n', (1336, 1357), True, 'import pandas as pd\n')]
|
import logging
import azure.functions as func
from backlogapiprocessmodule import *
def main(req: func.HttpRequest) -> func.HttpResponse:
logging.info('-------Python HTTP trigger function processed a request.')
configFilePath = '/home/site/wwwroot/BacklogApiTimerTrigger/config.yml'
loggingConfigFilePath = '/home/site/wwwroot/BacklogApiTimerTrigger/logging_debug.conf'
backlogapiprocess.run(configFilePath, loggingConfigFilePath)
name = req.params.get('name')
if not name:
try:
req_body = req.get_json()
except ValueError:
pass
else:
name = req_body.get('name')
if name:
return func.HttpResponse(f"Hello {name}!")
else:
return func.HttpResponse(
"Please pass a name on the query string or in the request body",
status_code=400
)
|
[
"azure.functions.HttpResponse",
"logging.info"
] |
[((150, 222), 'logging.info', 'logging.info', (['"""-------Python HTTP trigger function processed a request."""'], {}), "('-------Python HTTP trigger function processed a request.')\n", (162, 222), False, 'import logging\n'), ((700, 735), 'azure.functions.HttpResponse', 'func.HttpResponse', (['f"""Hello {name}!"""'], {}), "(f'Hello {name}!')\n", (717, 735), True, 'import azure.functions as func\n'), ((763, 871), 'azure.functions.HttpResponse', 'func.HttpResponse', (['"""Please pass a name on the query string or in the request body"""'], {'status_code': '(400)'}), "(\n 'Please pass a name on the query string or in the request body',\n status_code=400)\n", (780, 871), True, 'import azure.functions as func\n')]
|
# Copyright 2019-2020 by <NAME>, MGLAND animation studio. All rights reserved.
# This file is part of IUTest, and is released under the "MIT License Agreement".
# Please see the LICENSE file that should have been included as part of this package.
import logging
logger = logging.getLogger(__name__)
class _ErrorDummy(object):
def __getattribute__(self, name):
return _ErrorDummy()
def __call__(self, *_, **__):
return _ErrorDummy()
def __repr__(self):
return "Error Happened."
def __iter__(self):
yield _ErrorDummy()
def __getitem__(self, index):
return _ErrorDummy()
def __bool__(self):
return False
def __nonzero__(self):
return False
class _DependencyWrapper(object):
@classmethod
def get(cls, silent=False):
"""Get an instance of the wrapper object.
Args:
silent (bool): Whether we issue errors or debug when the dependency
is not installed.
"""
if not hasattr(cls, "_instance"):
return None
if not cls._instance:
cls._instance = cls()
return cls._instance
@classmethod
def getModule(cls, silent=False):
wrapper = cls.get(silent=silent)
return wrapper._mod if wrapper._mod else _ErrorDummy()
@classmethod
def reload(cls, silent=True):
"""Try reimport the dependency module.
Args:
silent (bool): Whether we issue errors or debug when the dependency
is not installed.
"""
cls.get()._tryImport(force=True, silent=silent)
def __init__(self):
self._mod = None
self._tryImport(force=False, silent=True)
@classmethod
def _issueNotInstalledError(cls, silent=True):
if not silent:
logger.error("The package '%s' is not installed", cls.name())
else:
logger.debug("The package '%s' is not installed", cls.name())
@classmethod
def _issueNotImplementedError(cls):
err = "Please use a derived class instead of base class {}".format(cls.__name__)
raise NotImplementedError(err)
def _tryImport(self, force, silent):
self._issueNotImplementedError()
@classmethod
def name(cls):
cls._issueNotImplementedError()
def isValid(self):
return bool(self._mod)
@classmethod
def check(cls):
if not cls.get().isValid():
cls._issueNotInstalledError(silent=False)
return False
return True
class ReimportWrapper(_DependencyWrapper):
_instance = None
@classmethod
def name(cls):
return "reimport"
def _tryImport(self, force, silent):
if not force and self._mod:
return
self._mod = None
try:
import reimport
self._mod = reimport
except ImportError:
self._issueNotInstalledError(silent)
class Nose2Wrapper(_DependencyWrapper):
_instance = None
@classmethod
def name(cls):
return "nose2"
def _tryImport(self, force, silent):
if not force and self._mod:
return
self._mod = None
try:
import nose2
self._mod = nose2
except ImportError:
self._issueNotInstalledError(silent)
class PyTestWrapper(_DependencyWrapper):
_instance = None
@classmethod
def name(cls):
return "pytest"
def _tryImport(self, force, silent):
if not force and self._mod:
return
self._mod = None
try:
import pytest
self._mod = pytest
except ImportError:
self._issueNotInstalledError(silent)
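# Illustrative usage (not part of the original module):
#   if PyTestWrapper.check():
#       pytest_module = PyTestWrapper.getModule()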
|
[
"logging.getLogger"
] |
[((273, 300), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (290, 300), False, 'import logging\n')]
|
import pytest
from p2p.exceptions import PeerConnectionLost
from trinity.protocol.eth.peer import (
ETHPeerPoolEventServer,
)
from tests.core.integration_test_helpers import (
FakeAsyncChainDB,
run_peer_pool_event_server,
run_proxy_peer_pool,
run_request_server,
)
from tests.core.peer_helpers import (
get_directly_linked_peers,
MockPeerPoolWithConnectedPeers,
)
@pytest.mark.asyncio
async def test_proxy_peer_requests(request,
event_bus,
other_event_bus,
event_loop,
chaindb_fresh,
chaindb_20):
server_event_bus = event_bus
client_event_bus = other_event_bus
client_peer, server_peer = await get_directly_linked_peers(
request,
event_loop,
alice_headerdb=FakeAsyncChainDB(chaindb_fresh.db),
bob_headerdb=FakeAsyncChainDB(chaindb_20.db),
)
client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer], event_bus=client_event_bus)
server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=server_event_bus)
async with run_peer_pool_event_server(
client_event_bus, client_peer_pool, handler_type=ETHPeerPoolEventServer
), run_peer_pool_event_server(
server_event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
), run_request_server(
server_event_bus,
FakeAsyncChainDB(chaindb_20.db)
), run_proxy_peer_pool(
client_event_bus
) as client_proxy_peer_pool, run_proxy_peer_pool(
server_event_bus
):
proxy_peer = await client_proxy_peer_pool.ensure_proxy_peer(client_peer.remote)
headers = await proxy_peer.requests.get_block_headers(0, 1, 0, False)
assert len(headers) == 1
block_header = headers[0]
assert block_header.block_number == 0
receipts = await proxy_peer.requests.get_receipts(headers)
assert len(receipts) == 1
receipt = receipts[0]
assert receipt[1][0] == block_header.receipt_root
block_bundles = await proxy_peer.requests.get_block_bodies(headers)
assert len(block_bundles) == 1
first_bundle = block_bundles[0]
assert first_bundle[1][0] == block_header.transaction_root
node_data = await proxy_peer.requests.get_node_data((block_header.state_root,))
assert node_data[0][0] == block_header.state_root
@pytest.mark.asyncio
async def test_proxy_peer_requests_with_timeouts(request,
event_bus,
other_event_bus,
event_loop,
chaindb_fresh,
chaindb_20):
server_event_bus = event_bus
client_event_bus = other_event_bus
client_peer, server_peer = await get_directly_linked_peers(
request,
event_loop,
alice_headerdb=FakeAsyncChainDB(chaindb_fresh.db),
bob_headerdb=FakeAsyncChainDB(chaindb_20.db),
)
client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer], event_bus=client_event_bus)
server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=server_event_bus)
async with run_peer_pool_event_server(
client_event_bus, client_peer_pool, handler_type=ETHPeerPoolEventServer
), run_peer_pool_event_server(
server_event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
), run_proxy_peer_pool(
client_event_bus
) as client_proxy_peer_pool, run_proxy_peer_pool(
server_event_bus
):
proxy_peer = await client_proxy_peer_pool.ensure_proxy_peer(client_peer.remote)
with pytest.raises(TimeoutError):
await proxy_peer.requests.get_block_headers(0, 1, 0, False, timeout=0.01)
with pytest.raises(TimeoutError):
await proxy_peer.requests.get_receipts((), timeout=0.01)
with pytest.raises(TimeoutError):
await proxy_peer.requests.get_block_bodies((), timeout=0.01)
with pytest.raises(TimeoutError):
await proxy_peer.requests.get_node_data((), timeout=0.01)
@pytest.mark.asyncio
async def test_requests_when_peer_in_client_vanishs(request,
event_bus,
other_event_bus,
event_loop,
chaindb_fresh,
chaindb_20):
server_event_bus = event_bus
client_event_bus = other_event_bus
client_peer, server_peer = await get_directly_linked_peers(
request,
event_loop,
alice_headerdb=FakeAsyncChainDB(chaindb_fresh.db),
bob_headerdb=FakeAsyncChainDB(chaindb_20.db),
)
client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer], event_bus=client_event_bus)
server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=server_event_bus)
async with run_peer_pool_event_server(
client_event_bus, client_peer_pool, handler_type=ETHPeerPoolEventServer
), run_peer_pool_event_server(
server_event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
), run_request_server(
server_event_bus,
FakeAsyncChainDB(chaindb_20.db)
), run_proxy_peer_pool(
client_event_bus
) as client_proxy_peer_pool, run_proxy_peer_pool(
server_event_bus
):
proxy_peer = await client_proxy_peer_pool.ensure_proxy_peer(client_peer.remote)
# We remove the peer from the client and assume to see PeerConnectionLost exceptions raised
client_peer_pool.connected_nodes.pop(client_peer.remote)
with pytest.raises(PeerConnectionLost):
await proxy_peer.requests.get_block_headers(0, 1, 0, False)
with pytest.raises(PeerConnectionLost):
await proxy_peer.requests.get_receipts(())
with pytest.raises(PeerConnectionLost):
await proxy_peer.requests.get_block_bodies(())
with pytest.raises(PeerConnectionLost):
await proxy_peer.requests.get_node_data(())
|
[
"tests.core.integration_test_helpers.run_proxy_peer_pool",
"pytest.raises",
"tests.core.integration_test_helpers.FakeAsyncChainDB",
"tests.core.integration_test_helpers.run_peer_pool_event_server",
"tests.core.peer_helpers.MockPeerPoolWithConnectedPeers"
] |
[((1020, 1093), 'tests.core.peer_helpers.MockPeerPoolWithConnectedPeers', 'MockPeerPoolWithConnectedPeers', (['[client_peer]'], {'event_bus': 'client_event_bus'}), '([client_peer], event_bus=client_event_bus)\n', (1050, 1093), False, 'from tests.core.peer_helpers import get_directly_linked_peers, MockPeerPoolWithConnectedPeers\n'), ((1117, 1190), 'tests.core.peer_helpers.MockPeerPoolWithConnectedPeers', 'MockPeerPoolWithConnectedPeers', (['[server_peer]'], {'event_bus': 'server_event_bus'}), '([server_peer], event_bus=server_event_bus)\n', (1147, 1190), False, 'from tests.core.peer_helpers import get_directly_linked_peers, MockPeerPoolWithConnectedPeers\n'), ((3215, 3288), 'tests.core.peer_helpers.MockPeerPoolWithConnectedPeers', 'MockPeerPoolWithConnectedPeers', (['[client_peer]'], {'event_bus': 'client_event_bus'}), '([client_peer], event_bus=client_event_bus)\n', (3245, 3288), False, 'from tests.core.peer_helpers import get_directly_linked_peers, MockPeerPoolWithConnectedPeers\n'), ((3312, 3385), 'tests.core.peer_helpers.MockPeerPoolWithConnectedPeers', 'MockPeerPoolWithConnectedPeers', (['[server_peer]'], {'event_bus': 'server_event_bus'}), '([server_peer], event_bus=server_event_bus)\n', (3342, 3385), False, 'from tests.core.peer_helpers import get_directly_linked_peers, MockPeerPoolWithConnectedPeers\n'), ((5052, 5125), 'tests.core.peer_helpers.MockPeerPoolWithConnectedPeers', 'MockPeerPoolWithConnectedPeers', (['[client_peer]'], {'event_bus': 'client_event_bus'}), '([client_peer], event_bus=client_event_bus)\n', (5082, 5125), False, 'from tests.core.peer_helpers import get_directly_linked_peers, MockPeerPoolWithConnectedPeers\n'), ((5149, 5222), 'tests.core.peer_helpers.MockPeerPoolWithConnectedPeers', 'MockPeerPoolWithConnectedPeers', (['[server_peer]'], {'event_bus': 'server_event_bus'}), '([server_peer], event_bus=server_event_bus)\n', (5179, 5222), False, 'from tests.core.peer_helpers import get_directly_linked_peers, MockPeerPoolWithConnectedPeers\n'), ((1207, 1311), 'tests.core.integration_test_helpers.run_peer_pool_event_server', 'run_peer_pool_event_server', (['client_event_bus', 'client_peer_pool'], {'handler_type': 'ETHPeerPoolEventServer'}), '(client_event_bus, client_peer_pool, handler_type\n =ETHPeerPoolEventServer)\n', (1233, 1311), False, 'from tests.core.integration_test_helpers import FakeAsyncChainDB, run_peer_pool_event_server, run_proxy_peer_pool, run_request_server\n'), ((1322, 1426), 'tests.core.integration_test_helpers.run_peer_pool_event_server', 'run_peer_pool_event_server', (['server_event_bus', 'server_peer_pool'], {'handler_type': 'ETHPeerPoolEventServer'}), '(server_event_bus, server_peer_pool, handler_type\n =ETHPeerPoolEventServer)\n', (1348, 1426), False, 'from tests.core.integration_test_helpers import FakeAsyncChainDB, run_peer_pool_event_server, run_proxy_peer_pool, run_request_server\n'), ((1530, 1567), 'tests.core.integration_test_helpers.run_proxy_peer_pool', 'run_proxy_peer_pool', (['client_event_bus'], {}), '(client_event_bus)\n', (1549, 1567), False, 'from tests.core.integration_test_helpers import FakeAsyncChainDB, run_peer_pool_event_server, run_proxy_peer_pool, run_request_server\n'), ((1609, 1646), 'tests.core.integration_test_helpers.run_proxy_peer_pool', 'run_proxy_peer_pool', (['server_event_bus'], {}), '(server_event_bus)\n', (1628, 1646), False, 'from tests.core.integration_test_helpers import FakeAsyncChainDB, run_peer_pool_event_server, run_proxy_peer_pool, run_request_server\n'), ((3402, 3506), 
'tests.core.integration_test_helpers.run_peer_pool_event_server', 'run_peer_pool_event_server', (['client_event_bus', 'client_peer_pool'], {'handler_type': 'ETHPeerPoolEventServer'}), '(client_event_bus, client_peer_pool, handler_type\n =ETHPeerPoolEventServer)\n', (3428, 3506), False, 'from tests.core.integration_test_helpers import FakeAsyncChainDB, run_peer_pool_event_server, run_proxy_peer_pool, run_request_server\n'), ((3517, 3621), 'tests.core.integration_test_helpers.run_peer_pool_event_server', 'run_peer_pool_event_server', (['server_event_bus', 'server_peer_pool'], {'handler_type': 'ETHPeerPoolEventServer'}), '(server_event_bus, server_peer_pool, handler_type\n =ETHPeerPoolEventServer)\n', (3543, 3621), False, 'from tests.core.integration_test_helpers import FakeAsyncChainDB, run_peer_pool_event_server, run_proxy_peer_pool, run_request_server\n'), ((3632, 3669), 'tests.core.integration_test_helpers.run_proxy_peer_pool', 'run_proxy_peer_pool', (['client_event_bus'], {}), '(client_event_bus)\n', (3651, 3669), False, 'from tests.core.integration_test_helpers import FakeAsyncChainDB, run_peer_pool_event_server, run_proxy_peer_pool, run_request_server\n'), ((3711, 3748), 'tests.core.integration_test_helpers.run_proxy_peer_pool', 'run_proxy_peer_pool', (['server_event_bus'], {}), '(server_event_bus)\n', (3730, 3748), False, 'from tests.core.integration_test_helpers import FakeAsyncChainDB, run_peer_pool_event_server, run_proxy_peer_pool, run_request_server\n'), ((5239, 5343), 'tests.core.integration_test_helpers.run_peer_pool_event_server', 'run_peer_pool_event_server', (['client_event_bus', 'client_peer_pool'], {'handler_type': 'ETHPeerPoolEventServer'}), '(client_event_bus, client_peer_pool, handler_type\n =ETHPeerPoolEventServer)\n', (5265, 5343), False, 'from tests.core.integration_test_helpers import FakeAsyncChainDB, run_peer_pool_event_server, run_proxy_peer_pool, run_request_server\n'), ((5354, 5458), 'tests.core.integration_test_helpers.run_peer_pool_event_server', 'run_peer_pool_event_server', (['server_event_bus', 'server_peer_pool'], {'handler_type': 'ETHPeerPoolEventServer'}), '(server_event_bus, server_peer_pool, handler_type\n =ETHPeerPoolEventServer)\n', (5380, 5458), False, 'from tests.core.integration_test_helpers import FakeAsyncChainDB, run_peer_pool_event_server, run_proxy_peer_pool, run_request_server\n'), ((5562, 5599), 'tests.core.integration_test_helpers.run_proxy_peer_pool', 'run_proxy_peer_pool', (['client_event_bus'], {}), '(client_event_bus)\n', (5581, 5599), False, 'from tests.core.integration_test_helpers import FakeAsyncChainDB, run_peer_pool_event_server, run_proxy_peer_pool, run_request_server\n'), ((5641, 5678), 'tests.core.integration_test_helpers.run_proxy_peer_pool', 'run_proxy_peer_pool', (['server_event_bus'], {}), '(server_event_bus)\n', (5660, 5678), False, 'from tests.core.integration_test_helpers import FakeAsyncChainDB, run_peer_pool_event_server, run_proxy_peer_pool, run_request_server\n'), ((1491, 1522), 'tests.core.integration_test_helpers.FakeAsyncChainDB', 'FakeAsyncChainDB', (['chaindb_20.db'], {}), '(chaindb_20.db)\n', (1507, 1522), False, 'from tests.core.integration_test_helpers import FakeAsyncChainDB, run_peer_pool_event_server, run_proxy_peer_pool, run_request_server\n'), ((3867, 3894), 'pytest.raises', 'pytest.raises', (['TimeoutError'], {}), '(TimeoutError)\n', (3880, 3894), False, 'import pytest\n'), ((3996, 4023), 'pytest.raises', 'pytest.raises', (['TimeoutError'], {}), '(TimeoutError)\n', (4009, 4023), False, 'import 
pytest\n'), ((4108, 4135), 'pytest.raises', 'pytest.raises', (['TimeoutError'], {}), '(TimeoutError)\n', (4121, 4135), False, 'import pytest\n'), ((4224, 4251), 'pytest.raises', 'pytest.raises', (['TimeoutError'], {}), '(TimeoutError)\n', (4237, 4251), False, 'import pytest\n'), ((5523, 5554), 'tests.core.integration_test_helpers.FakeAsyncChainDB', 'FakeAsyncChainDB', (['chaindb_20.db'], {}), '(chaindb_20.db)\n', (5539, 5554), False, 'from tests.core.integration_test_helpers import FakeAsyncChainDB, run_peer_pool_event_server, run_proxy_peer_pool, run_request_server\n'), ((5963, 5996), 'pytest.raises', 'pytest.raises', (['PeerConnectionLost'], {}), '(PeerConnectionLost)\n', (5976, 5996), False, 'import pytest\n'), ((6084, 6117), 'pytest.raises', 'pytest.raises', (['PeerConnectionLost'], {}), '(PeerConnectionLost)\n', (6097, 6117), False, 'import pytest\n'), ((6188, 6221), 'pytest.raises', 'pytest.raises', (['PeerConnectionLost'], {}), '(PeerConnectionLost)\n', (6201, 6221), False, 'import pytest\n'), ((6296, 6329), 'pytest.raises', 'pytest.raises', (['PeerConnectionLost'], {}), '(PeerConnectionLost)\n', (6309, 6329), False, 'import pytest\n'), ((900, 934), 'tests.core.integration_test_helpers.FakeAsyncChainDB', 'FakeAsyncChainDB', (['chaindb_fresh.db'], {}), '(chaindb_fresh.db)\n', (916, 934), False, 'from tests.core.integration_test_helpers import FakeAsyncChainDB, run_peer_pool_event_server, run_proxy_peer_pool, run_request_server\n'), ((957, 988), 'tests.core.integration_test_helpers.FakeAsyncChainDB', 'FakeAsyncChainDB', (['chaindb_20.db'], {}), '(chaindb_20.db)\n', (973, 988), False, 'from tests.core.integration_test_helpers import FakeAsyncChainDB, run_peer_pool_event_server, run_proxy_peer_pool, run_request_server\n'), ((3095, 3129), 'tests.core.integration_test_helpers.FakeAsyncChainDB', 'FakeAsyncChainDB', (['chaindb_fresh.db'], {}), '(chaindb_fresh.db)\n', (3111, 3129), False, 'from tests.core.integration_test_helpers import FakeAsyncChainDB, run_peer_pool_event_server, run_proxy_peer_pool, run_request_server\n'), ((3152, 3183), 'tests.core.integration_test_helpers.FakeAsyncChainDB', 'FakeAsyncChainDB', (['chaindb_20.db'], {}), '(chaindb_20.db)\n', (3168, 3183), False, 'from tests.core.integration_test_helpers import FakeAsyncChainDB, run_peer_pool_event_server, run_proxy_peer_pool, run_request_server\n'), ((4932, 4966), 'tests.core.integration_test_helpers.FakeAsyncChainDB', 'FakeAsyncChainDB', (['chaindb_fresh.db'], {}), '(chaindb_fresh.db)\n', (4948, 4966), False, 'from tests.core.integration_test_helpers import FakeAsyncChainDB, run_peer_pool_event_server, run_proxy_peer_pool, run_request_server\n'), ((4989, 5020), 'tests.core.integration_test_helpers.FakeAsyncChainDB', 'FakeAsyncChainDB', (['chaindb_20.db'], {}), '(chaindb_20.db)\n', (5005, 5020), False, 'from tests.core.integration_test_helpers import FakeAsyncChainDB, run_peer_pool_event_server, run_proxy_peer_pool, run_request_server\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import os
import optparse
from repository.utils import get_version, set_logger
from repository import RepositoryManager
from repository.minisetting import Setting
def print_cmd_header():
print('GitMirror {}'.format(get_version()))
def print_cmd_result(success=True):
if success:
print("Success")
else:
print("Failed")
print("Please check log")
def usage_error(error: str):
print("Usage Error: {} {}".format(os.path.basename(__file__), error))
print("Try {} -h for more information".format(os.path.basename(__file__)))
def process(options, args):
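    # Build the runtime Setting, apply any logging overrides requested on the
    # command line, then dispatch the first matching sub-command through the
    # RepositoryManager.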
setting = Setting()
if options.logfile:
set_logger(setting, log_enable=True, log_file=options.logfile)
if options.loglevel:
set_logger(setting, log_enable=True, log_level=options.loglevel)
if options.logdir:
set_logger(setting, log_enable=True, log_dir=options.logdir)
if options.nolog:
set_logger(setting, log_enable=False)
repo_manager = RepositoryManager(setting)
if options.list:
if len(args) > 0:
usage_error("--list take no argument")
return False
services, services_possible = repo_manager.get_services_list()
print("Available Service: {}".format(services))
print("Potential Service: {}".format(services_possible))
return True
if options.parse:
if len(args) != 1:
usage_error("--parse only take 1 argument <service name>")
return False
service_name = args[0]
print_cmd_result(repo_manager.parse_service(service_name))
return True
if options.mirror:
if len(args) != 1:
usage_error("--mirror only take 1 argument <service name>")
return False
service_name = args[0]
print_cmd_result(repo_manager.mirror_service(service_name))
return True
if options.get:
if options.get not in ['configs', 'repos']:
usage_error("--get options should be choice of [configs, repos]")
return False
if len(args) not in (1, 2):
usage_error("--get can take 2 argument <service name> [output file]")
return False
service_name = args[0]
output = ''
if len(args) == 2:
output = args[1]
if options.get == 'configs':
print_cmd_result(repo_manager.get_service_config(service_name, output))
if options.get == 'repos':
print_cmd_result(repo_manager.get_service_repos(service_name, output))
return True
if options.add:
if len(args) != 1:
usage_error("--add only take 1 argument <service name>")
return False
service_name = args[0]
print_cmd_result(repo_manager.add_service(service_name))
return True
if options.remove:
if len(args) != 1:
usage_error("--remove only take 1 argument <service name>")
return False
service_name = args[0]
        print_cmd_result(repo_manager.remove_service(service_name))
        return True
if options.autoconf:
if len(args) > 0:
usage_error("--autoconf take no argument")
return False
repo_manager.autoconf()
return True
if options.batchrun:
if len(args) != 1:
usage_error("--batchrun only take 1 argument <service name>")
return False
service_name = args[0]
repo_manager.batchrun_service(service_name)
return True
if options.init:
if len(args) > 0:
usage_error("--init take no argument")
return False
repo_manager.init()
return True
def cli(argv=None):
print_cmd_header()
if argv is None:
argv = sys.argv
usage = "usage: %prog [options] [service name] [output]"
parser = optparse.OptionParser(formatter=optparse.TitledHelpFormatter(),
conflict_handler='resolve', usage=usage)
group_global = optparse.OptionGroup(parser, "Global Options")
group_global.add_option("--logdir", metavar="PATH",
help="Log directory. if omitted local log directory will be created")
group_global.add_option("--logfile", metavar="FILE",
help="log file. if omitted stderr will be used")
group_global.add_option("--loglevel", metavar="LEVEL", default=None,
help="log level (default: DEBUG)")
group_global.add_option("--nolog", action="store_true",
help="disable logging completely")
parser.add_option_group(group_global)
parser.add_option("--list", action='store_true', dest='list',
help="List all services names available")
parser.add_option("--parse", action='store_true', dest="parse",
help="Parse repositories for <service name>")
parser.add_option("--mirror", action='store_true', dest="mirror",
help="Update from remote & Push to target for <service name>")
parser.add_option("--get", metavar="CONTENT", dest="get",
help="Get content(configs/repos) from <service name> save to [output]")
parser.add_option("--add", action='store_true', dest="add",
help="Create or Update <service name>")
parser.add_option("--remove", action='store_true', dest="remove",
help="Backup and Remove <service name>")
group_devspace = optparse.OptionGroup(parser, "Devspace Options")
group_devspace.add_option("--autoconf", action='store_true', dest="autoconf",
help="Auto add service avaialbe and update crontab")
group_devspace.add_option("--batchrun", action='store_true', dest="batchrun",
help="Run parse and mirror for <service name>")
group_devspace.add_option("--init", action='store_true', dest="init",
help="For devspace init all service and first checkout")
parser.add_option_group(group_devspace)
if len(argv) == 1:
parser.print_help()
else:
options, args = parser.parse_args(args=argv[1:])
process(options, args)
if __name__ == '__main__':
cli()
|
[
"optparse.OptionGroup",
"os.path.basename",
"repository.RepositoryManager",
"repository.utils.get_version",
"optparse.TitledHelpFormatter",
"repository.minisetting.Setting",
"repository.utils.set_logger"
] |
[((670, 679), 'repository.minisetting.Setting', 'Setting', ([], {}), '()\n', (677, 679), False, 'from repository.minisetting import Setting\n'), ((1061, 1087), 'repository.RepositoryManager', 'RepositoryManager', (['setting'], {}), '(setting)\n', (1078, 1087), False, 'from repository import RepositoryManager\n'), ((4071, 4117), 'optparse.OptionGroup', 'optparse.OptionGroup', (['parser', '"""Global Options"""'], {}), "(parser, 'Global Options')\n", (4091, 4117), False, 'import optparse\n'), ((5538, 5586), 'optparse.OptionGroup', 'optparse.OptionGroup', (['parser', '"""Devspace Options"""'], {}), "(parser, 'Devspace Options')\n", (5558, 5586), False, 'import optparse\n'), ((713, 775), 'repository.utils.set_logger', 'set_logger', (['setting'], {'log_enable': '(True)', 'log_file': 'options.logfile'}), '(setting, log_enable=True, log_file=options.logfile)\n', (723, 775), False, 'from repository.utils import get_version, set_logger\n'), ((810, 874), 'repository.utils.set_logger', 'set_logger', (['setting'], {'log_enable': '(True)', 'log_level': 'options.loglevel'}), '(setting, log_enable=True, log_level=options.loglevel)\n', (820, 874), False, 'from repository.utils import get_version, set_logger\n'), ((911, 971), 'repository.utils.set_logger', 'set_logger', (['setting'], {'log_enable': '(True)', 'log_dir': 'options.logdir'}), '(setting, log_enable=True, log_dir=options.logdir)\n', (921, 971), False, 'from repository.utils import get_version, set_logger\n'), ((1003, 1040), 'repository.utils.set_logger', 'set_logger', (['setting'], {'log_enable': '(False)'}), '(setting, log_enable=False)\n', (1013, 1040), False, 'from repository.utils import get_version, set_logger\n'), ((280, 293), 'repository.utils.get_version', 'get_version', ([], {}), '()\n', (291, 293), False, 'from repository.utils import get_version, set_logger\n'), ((510, 536), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (526, 536), False, 'import os\n'), ((596, 622), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (612, 622), False, 'import os\n'), ((3944, 3974), 'optparse.TitledHelpFormatter', 'optparse.TitledHelpFormatter', ([], {}), '()\n', (3972, 3974), False, 'import optparse\n')]
|
# ------------------------------------------------------------------------------
# Copyright (c) 2010-2013, EVEthing team
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
# ------------------------------------------------------------------------------
from datetime import datetime
from django.db import models
from thing.models.character import Character
from thing.models.item import Item
from thing.models.implant import Implant
class CharacterDetails(models.Model):
"""Character details"""
character = models.OneToOneField(
Character, unique=True, primary_key=True, related_name='details'
)
wallet_balance = models.DecimalField(
max_digits=18, decimal_places=2, default=0
)
plex_balance = models.IntegerField(default=0)
cha_attribute = models.SmallIntegerField(default=20)
int_attribute = models.SmallIntegerField(default=20)
mem_attribute = models.SmallIntegerField(default=20)
per_attribute = models.SmallIntegerField(default=20)
wil_attribute = models.SmallIntegerField(default=19)
__attr_bonus = {}
def __get_attr_bonus(self, attr):
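        # Lazily aggregate the attribute bonuses granted by all of this
        # character's implants and cache the result; individual bonuses are
        # read back from the cache via the '<attribute>_modifier__sum' key.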
if not self.__attr_bonus:
self.__attr_bonus = self.implants.all().aggregate(
models.Sum('charisma_modifier'),
models.Sum('intelligence_modifier'),
models.Sum('memory_modifier'),
models.Sum('perception_modifier'),
models.Sum('willpower_modifier')
)
return self.__attr_bonus[attr + '_modifier__sum'] if \
self.__attr_bonus[attr + '_modifier__sum'] else 0
@property
def cha_bonus(self):
return self.__get_attr_bonus('charisma')
@property
def int_bonus(self):
return self.__get_attr_bonus('intelligence')
@property
def mem_bonus(self):
return self.__get_attr_bonus('memory')
@property
def per_bonus(self):
return self.__get_attr_bonus('perception')
@property
def wil_bonus(self):
return self.__get_attr_bonus('willpower')
implants = models.ManyToManyField(Implant)
security_status = models.DecimalField(
max_digits=6, decimal_places=4, default=0
)
last_known_location = models.CharField(max_length=255, default='')
ship_item = models.ForeignKey(Item, blank=True, null=True)
ship_name = models.CharField(max_length=128, default='')
# Fatigue
last_jump_date = models.DateTimeField(null=True, default=None)
fatigue_expire_date = models.DateTimeField(null=True, default=None)
class Meta:
app_label = 'thing'
def __unicode__(self):
return '%s' % self.character
def fatigue(self):
        if self.fatigue_expire_date is not None:
return self.fatigue_expire_date - datetime.utcnow()
def has_fatigue(self):
        if self.fatigue_expire_date is None:
return False
fatigue = self.fatigue_expire_date - datetime.utcnow()
return fatigue.total_seconds() > 0
|
[
"django.db.models.OneToOneField",
"django.db.models.ManyToManyField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.Sum",
"django.db.models.DecimalField",
"django.db.models.IntegerField",
"django.db.models.SmallIntegerField",
"datetime.datetime.utcnow",
"django.db.models.DateTimeField"
] |
[((1786, 1877), 'django.db.models.OneToOneField', 'models.OneToOneField', (['Character'], {'unique': '(True)', 'primary_key': '(True)', 'related_name': '"""details"""'}), "(Character, unique=True, primary_key=True, related_name\n ='details')\n", (1806, 1877), False, 'from django.db import models\n'), ((1909, 1972), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(18)', 'decimal_places': '(2)', 'default': '(0)'}), '(max_digits=18, decimal_places=2, default=0)\n', (1928, 1972), False, 'from django.db import models\n'), ((2007, 2037), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (2026, 2037), False, 'from django.db import models\n'), ((2059, 2095), 'django.db.models.SmallIntegerField', 'models.SmallIntegerField', ([], {'default': '(20)'}), '(default=20)\n', (2083, 2095), False, 'from django.db import models\n'), ((2116, 2152), 'django.db.models.SmallIntegerField', 'models.SmallIntegerField', ([], {'default': '(20)'}), '(default=20)\n', (2140, 2152), False, 'from django.db import models\n'), ((2173, 2209), 'django.db.models.SmallIntegerField', 'models.SmallIntegerField', ([], {'default': '(20)'}), '(default=20)\n', (2197, 2209), False, 'from django.db import models\n'), ((2230, 2266), 'django.db.models.SmallIntegerField', 'models.SmallIntegerField', ([], {'default': '(20)'}), '(default=20)\n', (2254, 2266), False, 'from django.db import models\n'), ((2287, 2323), 'django.db.models.SmallIntegerField', 'models.SmallIntegerField', ([], {'default': '(19)'}), '(default=19)\n', (2311, 2323), False, 'from django.db import models\n'), ((3337, 3368), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Implant'], {}), '(Implant)\n', (3359, 3368), False, 'from django.db import models\n'), ((3392, 3454), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(6)', 'decimal_places': '(4)', 'default': '(0)'}), '(max_digits=6, decimal_places=4, default=0)\n', (3411, 3454), False, 'from django.db import models\n'), ((3496, 3540), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'default': '""""""'}), "(max_length=255, default='')\n", (3512, 3540), False, 'from django.db import models\n'), ((3557, 3603), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Item'], {'blank': '(True)', 'null': '(True)'}), '(Item, blank=True, null=True)\n', (3574, 3603), False, 'from django.db import models\n'), ((3620, 3664), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(128)', 'default': '""""""'}), "(max_length=128, default='')\n", (3636, 3664), False, 'from django.db import models\n'), ((3701, 3746), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)', 'default': 'None'}), '(null=True, default=None)\n', (3721, 3746), False, 'from django.db import models\n'), ((3773, 3818), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)', 'default': 'None'}), '(null=True, default=None)\n', (3793, 3818), False, 'from django.db import models\n'), ((4207, 4224), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (4222, 4224), False, 'from datetime import datetime\n'), ((2499, 2530), 'django.db.models.Sum', 'models.Sum', (['"""charisma_modifier"""'], {}), "('charisma_modifier')\n", (2509, 2530), False, 'from django.db import models\n'), ((2548, 2583), 'django.db.models.Sum', 'models.Sum', (['"""intelligence_modifier"""'], {}), "('intelligence_modifier')\n", (2558, 2583), False, 'from django.db import 
models\n'), ((2601, 2630), 'django.db.models.Sum', 'models.Sum', (['"""memory_modifier"""'], {}), "('memory_modifier')\n", (2611, 2630), False, 'from django.db import models\n'), ((2648, 2681), 'django.db.models.Sum', 'models.Sum', (['"""perception_modifier"""'], {}), "('perception_modifier')\n", (2658, 2681), False, 'from django.db import models\n'), ((2699, 2731), 'django.db.models.Sum', 'models.Sum', (['"""willpower_modifier"""'], {}), "('willpower_modifier')\n", (2709, 2731), False, 'from django.db import models\n'), ((4045, 4062), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (4060, 4062), False, 'from datetime import datetime\n')]
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from torch.utils.data import Dataset
from .builder import DATASETS
@DATASETS.register_module()
class QuickTestImageDataset(Dataset):
"""Dataset for quickly testing the correctness.
Args:
size (tuple[int]): The size of the images. Defaults to `None`.
"""
def __init__(self, *args, size=None, **kwargs):
super().__init__()
self.size = size
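        # A single random image tensor is created once and returned for every
        # index, so data loading stays trivially cheap during quick tests.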
self.img_tensor = torch.randn(3, self.size[0], self.size[1])
def __len__(self):
return 10000
def __getitem__(self, idx):
return dict(real_img=self.img_tensor)
|
[
"torch.randn"
] |
[((470, 512), 'torch.randn', 'torch.randn', (['(3)', 'self.size[0]', 'self.size[1]'], {}), '(3, self.size[0], self.size[1])\n', (481, 512), False, 'import torch\n')]
|
# Copyright 2019 ZTE corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_serialization import jsonutils
from watcher.decision_engine import rpcapi as deapi
from watcher.tests.api import base as api_base
class TestListDataModel(api_base.FunctionalTest):
def setUp(self):
super(TestListDataModel, self).setUp()
p_dcapi = mock.patch.object(deapi, 'DecisionEngineAPI')
self.mock_dcapi = p_dcapi.start()
self.mock_dcapi().get_data_model_info.return_value = \
'fake_response_value'
self.addCleanup(p_dcapi.stop)
def test_get_all(self):
response = self.get_json(
'/data_model/?data_model_type=compute',
headers={'OpenStack-API-Version': 'infra-optim 1.3'})
self.assertEqual('fake_response_value', response)
def test_get_all_not_acceptable(self):
response = self.get_json(
'/data_model/?data_model_type=compute',
headers={'OpenStack-API-Version': 'infra-optim 1.2'},
expect_errors=True)
self.assertEqual(406, response.status_int)
class TestDataModelPolicyEnforcement(api_base.FunctionalTest):
def setUp(self):
super(TestDataModelPolicyEnforcement, self).setUp()
p_dcapi = mock.patch.object(deapi, 'DecisionEngineAPI')
self.mock_dcapi = p_dcapi.start()
self.addCleanup(p_dcapi.stop)
def _common_policy_check(self, rule, func, *arg, **kwarg):
self.policy.set_rules({
"admin_api": "(role:admin or role:administrator)",
"default": "rule:admin_api",
rule: "rule:defaut"})
response = func(*arg, **kwarg)
self.assertEqual(403, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertTrue(
"Policy doesn't allow %s to be performed." % rule,
jsonutils.loads(response.json['error_message'])['faultstring'])
def test_policy_disallow_get_all(self):
self._common_policy_check(
"data_model:get_all", self.get_json,
"/data_model/?data_model_type=compute",
headers={'OpenStack-API-Version': 'infra-optim 1.3'},
expect_errors=True)
class TestDataModelEnforcementWithAdminContext(
TestListDataModel, api_base.AdminRoleTest):
def setUp(self):
super(TestDataModelEnforcementWithAdminContext, self).setUp()
self.policy.set_rules({
"admin_api": "(role:admin or role:administrator)",
"default": "rule:admin_api",
"data_model:get_all": "rule:default"})
|
[
"mock.patch.object",
"oslo_serialization.jsonutils.loads"
] |
[((921, 966), 'mock.patch.object', 'mock.patch.object', (['deapi', '"""DecisionEngineAPI"""'], {}), "(deapi, 'DecisionEngineAPI')\n", (938, 966), False, 'import mock\n'), ((1827, 1872), 'mock.patch.object', 'mock.patch.object', (['deapi', '"""DecisionEngineAPI"""'], {}), "(deapi, 'DecisionEngineAPI')\n", (1844, 1872), False, 'import mock\n'), ((2445, 2492), 'oslo_serialization.jsonutils.loads', 'jsonutils.loads', (["response.json['error_message']"], {}), "(response.json['error_message'])\n", (2460, 2492), False, 'from oslo_serialization import jsonutils\n')]
|
from algoliasearch_django import AlgoliaIndex
from algoliasearch_django.decorators import register
from django.conf import settings
from eahub.profiles.models import Profile, ProfileTag
if settings.IS_ENABLE_ALGOLIA:
class ProfilePublicIndex(AlgoliaIndex):
index_name = settings.ALGOLIA["INDEX_NAME_PROFILES_PUBLIC"]
should_index = "is_searchable_public"
fields = [
"job_title",
["get_full_name", "name"],
["get_messaging_url_if_can_receive_message", "messaging_url"],
"summary",
["get_tags_speech_topic_formatted", "speech_topics"],
"topics_i_speak_about",
"expertise_areas_other",
"cause_areas_other",
["get_absolute_url", "url"],
["get_image_url", "image"],
"personal_website_url",
"facebook_url",
"linkedin_url",
"available_as_speaker",
"available_to_volunteer",
"open_to_job_offers",
"is_organiser",
"city_or_town",
"country",
"lon",
"lat",
["get_tags_generic_formatted", "tags"],
["get_tags_affiliation_formatted", "affiliations"],
["get_tags_cause_area_formatted", "cause_areas"],
["get_tags_cause_area_expertise_formatted", "cause_areas_expertise"],
["get_tags_expertise_formatted", "expertise"],
["get_tags_career_interest_formatted", "career_interest_areas"],
["get_tags_pledge_formatted", "giving_pledges"],
["get_tags_event_attended_formatted", "events_attended"],
[
"get_tags_organisational_affiliation_formatted",
"organisational_affiliations",
],
["get_local_groups_formatted", "local_groups"],
["get_organizer_of_local_groups_formatted", "organizer_of_local_groups"],
["offering", "offering"],
["looking_for", "looking_for"],
]
class ProfileInternalIndex(AlgoliaIndex):
index_name = settings.ALGOLIA["INDEX_NAME_PROFILES_INTERNAL"]
should_index = "is_searchable_internal"
fields = ProfilePublicIndex.fields
@register(Profile)
class ProfileMetaIndex(AlgoliaIndex):
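        # Every operation on this meta-index is fanned out to both the public
        # and the internal profile indices defined above.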
# noinspection PyShadowingNames,PyMissingConstructor
def __init__(self, model, client, settings):
self.indices = [
ProfilePublicIndex(model, client, settings),
ProfileInternalIndex(model, client, settings),
]
def raw_search(self, query="", params=None):
res = {}
for index in self.indices:
res[index.name] = index.raw_search(query, params)
return res
def update_records(self, qs, batch_size=1000, **kwargs):
for index in self.indices:
index.update_records(qs, batch_size, **kwargs)
def reindex_all(self, batch_size=1000):
for index in self.indices:
index.reindex_all(batch_size)
def set_settings(self):
for index in self.indices:
index.set_settings()
def clear_index(self):
for index in self.indices:
index.clear_index()
def save_record(self, instance, update_fields=None, **kwargs):
for index in self.indices:
index.save_record(instance, update_fields, **kwargs)
def delete_record(self, instance):
for index in self.indices:
index.delete_record(instance)
@register(ProfileTag)
class ProfileTagIndex(AlgoliaIndex):
index_name = settings.ALGOLIA["INDEX_NAME_TAGS"]
fields = [
"name",
"description",
"synonyms",
["get_types_formatted", "types"],
"created_at",
"status",
"is_featured",
"count",
]
|
[
"algoliasearch_django.decorators.register"
] |
[((2250, 2267), 'algoliasearch_django.decorators.register', 'register', (['Profile'], {}), '(Profile)\n', (2258, 2267), False, 'from algoliasearch_django.decorators import register\n'), ((3627, 3647), 'algoliasearch_django.decorators.register', 'register', (['ProfileTag'], {}), '(ProfileTag)\n', (3635, 3647), False, 'from algoliasearch_django.decorators import register\n')]
|
# Django
from django.db import models
# Models
from django.contrib.auth.models import User
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='profile')
biography = models.CharField(max_length=160, blank=True, null=True)
follows = models.ManyToManyField(User, related_name='follows_user')
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return "{} by {}".format(self.biography, self.user)
|
[
"django.db.models.CharField",
"django.db.models.OneToOneField",
"django.db.models.ManyToManyField",
"django.db.models.DateTimeField"
] |
[((135, 211), 'django.db.models.OneToOneField', 'models.OneToOneField', (['User'], {'on_delete': 'models.CASCADE', 'related_name': '"""profile"""'}), "(User, on_delete=models.CASCADE, related_name='profile')\n", (155, 211), False, 'from django.db import models\n'), ((228, 283), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(160)', 'blank': '(True)', 'null': '(True)'}), '(max_length=160, blank=True, null=True)\n', (244, 283), False, 'from django.db import models\n'), ((298, 355), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['User'], {'related_name': '"""follows_user"""'}), "(User, related_name='follows_user')\n", (320, 355), False, 'from django.db import models\n'), ((374, 413), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (394, 413), False, 'from django.db import models\n'), ((431, 466), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (451, 466), False, 'from django.db import models\n')]
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/5/11 17:52
Desc: Cryptocurrency data
https://cn.investing.com/crypto/currencies
High-frequency data:
https://bitcoincharts.com/about/markets-api/
"""
import math
import pandas as pd
import requests
from tqdm import tqdm
from akshare.datasets import get_crypto_info_csv
def crypto_name_url_table(symbol: str = "web") -> pd.DataFrame:
"""
    Cryptocurrency names, symbols and IDs; refreshing the full list from the web is slow
    https://cn.investing.com/crypto/ethereum/historical-data
    :param symbol: choice of {"web", "local"}; "web" fetches the latest list from the website, "local" uses the bundled local file
    :type symbol: str
    :return: cryptocurrency names, symbols and IDs
:rtype: pandas.DataFrame
"""
if symbol == "web":
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
url = "https://cn.investing.com/crypto/Service/LoadCryptoCurrencies"
payload = {
'draw': '14',
'columns[0][data]': 'currencies_order',
'columns[0][name]': 'currencies_order',
'columns[0][searchable]': 'true',
'columns[0][orderable]': 'true',
'columns[0][search][value]': '',
'columns[0][search][regex]': 'false',
'columns[1][data]': 'function',
'columns[1][name]': 'crypto_id',
'columns[1][searchable]': 'true',
'columns[1][orderable]': 'false',
'columns[1][search][value]': '',
'columns[1][search][regex]': 'false',
'columns[2][data]': 'function',
'columns[2][name]': 'name',
'columns[2][searchable]': 'true',
'columns[2][orderable]': 'true',
'columns[2][search][value]': '',
'columns[2][search][regex]': 'false',
'columns[3][data]': 'symbol',
'columns[3][name]': 'symbol',
'columns[3][searchable]': 'true',
'columns[3][orderable]': 'true',
'columns[3][search][value]': '',
'columns[3][search][regex]': 'false',
'columns[4][data]': 'function',
'columns[4][name]': 'price_usd',
'columns[4][searchable]': 'true',
'columns[4][orderable]': 'true',
'columns[4][search][value]': '',
'columns[4][search][regex]': 'false',
'columns[5][data]': 'market_cap_formatted',
'columns[5][name]': 'market_cap_usd',
'columns[5][searchable]': 'true',
'columns[5][orderable]': 'true',
'columns[5][search][value]': '',
'columns[5][search][regex]': 'false',
'columns[6][data]': '24h_volume_formatted',
'columns[6][name]': '24h_volume_usd',
'columns[6][searchable]': 'true',
'columns[6][orderable]': 'true',
'columns[6][search][value]': '',
'columns[6][search][regex]': 'false',
'columns[7][data]': 'total_volume',
'columns[7][name]': 'total_volume',
'columns[7][searchable]': 'true',
'columns[7][orderable]': 'true',
'columns[7][search][value]': '',
'columns[7][search][regex]': 'false',
'columns[8][data]': 'change_percent_formatted',
'columns[8][name]': 'change_percent',
'columns[8][searchable]': 'true',
'columns[8][orderable]': 'true',
'columns[8][search][value]': '',
'columns[8][search][regex]': 'false',
'columns[9][data]': 'percent_change_7d_formatted',
'columns[9][name]': 'percent_change_7d',
'columns[9][searchable]': 'true',
'columns[9][orderable]': 'true',
'columns[9][search][value]': '',
'columns[9][search][regex]': 'false',
'order[0][column]': 'currencies_order',
'order[0][dir]': 'asc',
'start': '0',
'length': '100',
'search[value]': '',
'search[regex]': 'false',
'currencyId': '12',
}
r = requests.post(url, data=payload, headers=headers)
data_json = r.json()
total_page = math.ceil(int(data_json['recordsTotal']) / 100)
big_df = pd.DataFrame()
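        # The endpoint returns at most 100 rows per request, so page through
        # it and concatenate the pieces into one DataFrame.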
for page in tqdm(range(1, total_page+1), leave=False):
payload.update({
"start": (page-1)*100,
'length': 100
})
r = requests.post(url, data=payload, headers=headers)
data_json = r.json()
temp_df = pd.DataFrame(data_json['data'])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df = big_df[[
'symbol',
'name',
'name_trans',
'sml_id',
'related_pair_ID',
]]
return big_df
else:
get_crypto_info_csv_path = get_crypto_info_csv()
name_url_df = pd.read_csv(get_crypto_info_csv_path)
return name_url_df
def crypto_hist(
symbol: str = "BTC",
period: str = "每日",
start_date: str = "20191020",
end_date: str = "20201020",
):
"""
    Historical cryptocurrency data
    https://cn.investing.com/crypto/ethereum/historical-data
    :param symbol: currency symbol
    :type symbol: str
    :param period: choice of {"每日", "每周", "每月"}, i.e. daily, weekly or monthly
    :type period: str
    :param start_date: e.g. '20151020'; note the format
    :type start_date: str
    :param end_date: e.g. '20201020'; note the format
    :type end_date: str
    :return: historical cryptocurrency data
:rtype: pandas.DataFrame
"""
import warnings
warnings.filterwarnings('ignore')
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
period_map = {"每日": "Daily", "每周": "Weekly", "每月": "Monthly"}
start_date = "/".join([start_date[:4], start_date[4:6], start_date[6:]])
end_date = "/".join([end_date[:4], end_date[4:6], end_date[6:]])
name_url_df = crypto_name_url_table(symbol='local')
curr_id = name_url_df[name_url_df["symbol"] == symbol]["related_pair_ID"].values[0]
sml_id = name_url_df[name_url_df["symbol"] == symbol]["sml_id"].values[0]
url = "https://cn.investing.com/instruments/HistoricalDataAjax"
payload = {
"curr_id": curr_id,
"smlID": sml_id,
"header": "null",
"st_date": start_date,
"end_date": end_date,
"interval_sec": period_map[period],
"sort_col": "date",
"sort_ord": "DESC",
"action": "historical_data",
}
r = requests.post(url, data=payload, headers=headers)
temp_df = pd.read_html(r.text)[0]
df_data = temp_df.copy()
if period == "每月":
df_data.index = pd.to_datetime(df_data["日期"], format="%Y年%m月")
else:
df_data.index = pd.to_datetime(df_data["日期"], format="%Y年%m月%d日")
if any(df_data["交易量"].astype(str).str.contains("-")):
df_data["交易量"][df_data["交易量"].str.contains("-")] = df_data["交易量"][
df_data["交易量"].str.contains("-")
].replace("-", 0)
if any(df_data["交易量"].astype(str).str.contains("B")):
df_data["交易量"][df_data["交易量"].str.contains("B").fillna(False)] = (
df_data["交易量"][df_data["交易量"].str.contains("B").fillna(False)]
.str.replace("B", "")
.str.replace(",", "")
.astype(float)
* 1000000000
)
if any(df_data["交易量"].astype(str).str.contains("M")):
df_data["交易量"][df_data["交易量"].str.contains("M").fillna(False)] = (
df_data["交易量"][df_data["交易量"].str.contains("M").fillna(False)]
.str.replace("M", "")
.str.replace(",", "")
.astype(float)
* 1000000
)
if any(df_data["交易量"].astype(str).str.contains("K")):
df_data["交易量"][df_data["交易量"].str.contains("K").fillna(False)] = (
df_data["交易量"][df_data["交易量"].str.contains("K").fillna(False)]
.str.replace("K", "")
.str.replace(",", "")
.astype(float)
* 1000
)
df_data["交易量"] = df_data["交易量"].astype(float)
df_data["涨跌幅"] = pd.DataFrame(
round(
df_data["涨跌幅"].str.replace(",", "").str.replace("%", "").astype(float)
/ 100,
6,
)
)
del df_data["日期"]
df_data.reset_index(inplace=True)
df_data = df_data[[
"日期",
"收盘",
"开盘",
"高",
"低",
"交易量",
"涨跌幅",
]]
df_data['日期'] = pd.to_datetime(df_data['日期']).dt.date
df_data['收盘'] = pd.to_numeric(df_data['收盘'])
df_data['开盘'] = pd.to_numeric(df_data['开盘'])
df_data['高'] = pd.to_numeric(df_data['高'])
df_data['低'] = pd.to_numeric(df_data['低'])
df_data['交易量'] = pd.to_numeric(df_data['交易量'])
df_data['涨跌幅'] = pd.to_numeric(df_data['涨跌幅'])
df_data.sort_values('日期', inplace=True)
df_data.reset_index(inplace=True, drop=True)
return df_data
if __name__ == "__main__":
crypto_name_url_table_df = crypto_name_url_table(symbol="local")
print(crypto_name_url_table_df)
crypto_hist_df = crypto_hist(
symbol="BTC", period="每日", start_date="20151020", end_date="20220511"
)
print(crypto_hist_df)
|
[
"pandas.DataFrame",
"pandas.read_html",
"warnings.filterwarnings",
"pandas.read_csv",
"pandas.to_datetime",
"akshare.datasets.get_crypto_info_csv",
"requests.post",
"pandas.concat",
"pandas.to_numeric"
] |
[((5568, 5601), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (5591, 5601), False, 'import warnings\n'), ((6620, 6669), 'requests.post', 'requests.post', (['url'], {'data': 'payload', 'headers': 'headers'}), '(url, data=payload, headers=headers)\n', (6633, 6669), False, 'import requests\n'), ((8629, 8657), 'pandas.to_numeric', 'pd.to_numeric', (["df_data['收盘']"], {}), "(df_data['收盘'])\n", (8642, 8657), True, 'import pandas as pd\n'), ((8678, 8706), 'pandas.to_numeric', 'pd.to_numeric', (["df_data['开盘']"], {}), "(df_data['开盘'])\n", (8691, 8706), True, 'import pandas as pd\n'), ((8724, 8751), 'pandas.to_numeric', 'pd.to_numeric', (["df_data['高']"], {}), "(df_data['高'])\n", (8737, 8751), True, 'import pandas as pd\n'), ((8771, 8798), 'pandas.to_numeric', 'pd.to_numeric', (["df_data['低']"], {}), "(df_data['低'])\n", (8784, 8798), True, 'import pandas as pd\n'), ((8824, 8853), 'pandas.to_numeric', 'pd.to_numeric', (["df_data['交易量']"], {}), "(df_data['交易量'])\n", (8837, 8853), True, 'import pandas as pd\n'), ((8875, 8904), 'pandas.to_numeric', 'pd.to_numeric', (["df_data['涨跌幅']"], {}), "(df_data['涨跌幅'])\n", (8888, 8904), True, 'import pandas as pd\n'), ((4108, 4157), 'requests.post', 'requests.post', (['url'], {'data': 'payload', 'headers': 'headers'}), '(url, data=payload, headers=headers)\n', (4121, 4157), False, 'import requests\n'), ((4273, 4287), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4285, 4287), True, 'import pandas as pd\n'), ((4911, 4932), 'akshare.datasets.get_crypto_info_csv', 'get_crypto_info_csv', ([], {}), '()\n', (4930, 4932), False, 'from akshare.datasets import get_crypto_info_csv\n'), ((4955, 4992), 'pandas.read_csv', 'pd.read_csv', (['get_crypto_info_csv_path'], {}), '(get_crypto_info_csv_path)\n', (4966, 4992), True, 'import pandas as pd\n'), ((6685, 6705), 'pandas.read_html', 'pd.read_html', (['r.text'], {}), '(r.text)\n', (6697, 6705), True, 'import pandas as pd\n'), ((6785, 6831), 'pandas.to_datetime', 'pd.to_datetime', (["df_data['日期']"], {'format': '"""%Y年%m月"""'}), "(df_data['日期'], format='%Y年%m月')\n", (6799, 6831), True, 'import pandas as pd\n'), ((6866, 6915), 'pandas.to_datetime', 'pd.to_datetime', (["df_data['日期']"], {'format': '"""%Y年%m月%d日"""'}), "(df_data['日期'], format='%Y年%m月%d日')\n", (6880, 6915), True, 'import pandas as pd\n'), ((4480, 4529), 'requests.post', 'requests.post', (['url'], {'data': 'payload', 'headers': 'headers'}), '(url, data=payload, headers=headers)\n', (4493, 4529), False, 'import requests\n'), ((4585, 4616), 'pandas.DataFrame', 'pd.DataFrame', (["data_json['data']"], {}), "(data_json['data'])\n", (4597, 4616), True, 'import pandas as pd\n'), ((4638, 4685), 'pandas.concat', 'pd.concat', (['[big_df, temp_df]'], {'ignore_index': '(True)'}), '([big_df, temp_df], ignore_index=True)\n', (4647, 4685), True, 'import pandas as pd\n'), ((8571, 8600), 'pandas.to_datetime', 'pd.to_datetime', (["df_data['日期']"], {}), "(df_data['日期'])\n", (8585, 8600), True, 'import pandas as pd\n')]
|
#!/usr/bin/env python
"""
Name: nexus_from_genotype_probabilities.py
Author: <NAME>
Date: 10 July 2013
Convert genotype probabilities file output by Tom White's post-UNEAK processing scripts to nexus
alignment of concatenated SNPs
Usage: python nexus_from_genotype_probabilities.py in_file out_file sample_size
Ex.: python nexus_from_genotype_probabilities.py HapMapPairedFilt_1.txt HapMapPairedFilt_1.nex 73
"""
import os
import sys
import argparse
import csv
import numpy
def get_args():
parser = argparse.ArgumentParser(
description="""Program description""")
parser.add_argument(
"in_file",
type=str,
help="""The input genotype probabilities file from Tom White's scripts"""
)
parser.add_argument(
"out_file",
type=str,
help="""The file name"""
)
parser.add_argument(
"sample_size",
type=str,
help="""The number of samples/individuals in file"""
)
return parser.parse_args()
def read_samples(infile, sample_size):
samples = list()
first_line = infile.readline()
parts = first_line.split()
for i in range(int(sample_size)):
parts2 = parts[4+i]
parts3 = parts2.split('_')
samples.append(parts3[0])
return samples
def unphase(infile, sample_size):
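    # Convert the numeric allele calls ("1"/"2") for each SNP into single
    # bases: missing calls become "N", homozygous calls map to the allele
    # itself, and heterozygous calls map to the IUPAC ambiguity code
    # (M=A/C, R=A/G, W=A/T, S=C/G, Y=C/T, K=G/T).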
array = list()
for line in infile:
parts = line.split()
alleles = str(parts[3]).split('/')
a1 = alleles[0]
a2 = alleles[1]
seq = list()
for i in range(int(sample_size)):
if parts[4+i] == "NA":
seq.append("N")
else:
alsp = parts[4+i]
als = alsp.split(',')
if a1 == "A":
if a2 == "C":
if als[0] == "1":
if als[1] == "1":
seq.append("A")
elif als[1] == "2":
seq.append("M")
elif als[0] == "2":
if als[1] == "2":
seq.append("C")
elif als[1] == "1":
seq.append("M")
if a2 == "G":
if als[0] == "1":
if als[1] == "1":
seq.append("A")
elif als[1] == "2":
seq.append("R")
elif als[0] == "2":
if als[1] == "2":
seq.append("G")
elif als[1] == "1":
seq.append("R")
if a2 == "T":
if als[0] == "1":
if als[1] == "1":
seq.append("A")
elif als[1] == "2":
seq.append("W")
elif als[0] == "2":
if als[1] == "2":
seq.append("T")
elif als[1] == "1":
seq.append("W")
if a1 == "C":
if a2 == "A":
if als[0] == "1":
if als[1] == "1":
seq.append("C")
elif als[1] == "2":
seq.append("M")
elif als[0] == "2":
if als[1] == "2":
seq.append("A")
elif als[1] == "1":
seq.append("M")
if a2 == "G":
if als[0] == "1":
if als[1] == "1":
seq.append("C")
elif als[1] == "2":
seq.append("S")
elif als[0] == "2":
if als[1] == "2":
seq.append("G")
elif als[1] == "1":
seq.append("S")
if a2 == "T":
if als[0] == "1":
if als[1] == "1":
seq.append("C")
elif als[1] == "2":
seq.append("Y")
elif als[0] == "2":
if als[1] == "2":
seq.append("T")
elif als[1] == "1":
seq.append("Y")
if a1 == "G":
if a2 == "A":
if als[0] == "1":
if als[1] == "1":
seq.append("G")
elif als[1] == "2":
seq.append("R")
elif als[0] == "2":
if als[1] == "2":
seq.append("A")
elif als[1] == "1":
seq.append("R")
if a2 == "C":
if als[0] == "1":
if als[1] == "1":
seq.append("G")
elif als[1] == "2":
seq.append("S")
elif als[0] == "2":
if als[1] == "2":
seq.append("C")
elif als[1] == "1":
seq.append("S")
if a2 == "T":
if als[0] == "1":
if als[1] == "1":
seq.append("G")
elif als[1] == "2":
seq.append("K")
elif als[0] == "2":
if als[1] == "2":
seq.append("T")
elif als[1] == "1":
seq.append("K")
if a1 == "T":
if a2 == "A":
if als[0] == "1":
if als[1] == "1":
seq.append("T")
elif als[1] == "2":
seq.append("W")
elif als[0] == "2":
if als[1] == "2":
seq.append("A")
elif als[1] == "1":
seq.append("W")
if a2 == "C":
if als[0] == "1":
if als[1] == "1":
seq.append("T")
elif als[1] == "2":
seq.append("Y")
elif als[0] == "2":
if als[1] == "2":
seq.append("C")
elif als[1] == "1":
seq.append("Y")
if a2 == "G":
if als[0] == "1":
if als[1] == "1":
seq.append("T")
elif als[1] == "2":
seq.append("K")
elif als[0] == "2":
if als[1] == "2":
seq.append("G")
elif als[1] == "1":
seq.append("K")
array.append(seq)
return array
def main():
args = get_args()
infile = open("{0}".format(args.in_file), 'r')
outfile = open("{0}".format(args.out_file), 'wb')
samples = read_samples(infile, args.sample_size)
array = unphase(infile, args.sample_size)
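    # unphase() yields one row per SNP; transposing gives one concatenated
    # sequence per sample for the nexus matrix.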
alignment = zip(*array)
outfile.write("#NEXUS\n")
outfile.write("begin data;\n")
outfile.write("\tdimensions ntax={0} nchar={1};\n".format(args.sample_size, len(alignment[0])))
outfile.write("\tformat datatype=dna missing=? gap=-;\n")
outfile.write("\tmatrix\n")
i = 0
for sample in samples:
outfile.write("{0}\t{1}\n".format(sample, ''.join(alignment[i])))
i += 1
outfile.write(";\n")
outfile.write("end;\n")
infile.close()
outfile.close()
if __name__ == '__main__':
main()
|
[
"argparse.ArgumentParser"
] |
[((514, 572), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Program description"""'}), "(description='Program description')\n", (537, 572), False, 'import argparse\n')]
|
import re
from sent2vec.vectorizer import Vectorizer
from scipy import spatial
class KG(object):
def __init__(self, mode='graph_overlap', n_gram=2):
self.mode = mode
self.graph = {}
self.set = set()
self.instr_node = None
self.instr_CC_size = 0
self.vectorizer = Vectorizer()
if self.mode == 'graph_overlap':
self.related_nodes_fn = self.get_all_related_nodes
elif self.mode == 'graph_cosine':
self.related_nodes_fn = self.get_all_related_nodes_cosine
self.n_gram = n_gram
def update(self, node):
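        # Insert `node` into the structure. In the graph modes it is linked to
        # every existing node it relates to, and `increased` reports whether
        # the connected component containing the instruction node grew; in
        # 'set' mode the node is simply recorded. Returns (increased, max_overlap).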
node1 = tuple(node)
increased = False
if self.mode == 'graph_overlap' or self.mode == 'graph_cosine':
max_overlap = 0
if node1 not in self.graph:
related_nodes, max_overlap = self.related_nodes_fn(node1)
self.graph[node1] = []
for n in related_nodes:
self.graph[n].append(node1)
self.graph[node1].append(n)
increased = False
if len(self.getCC()) > self.instr_CC_size:
self.instr_CC_size += 1
increased = True
elif self.mode == 'set':
max_overlap = 0
if node1 not in self.set:
self.set.add(node1)
increased = True
max_overlap = 1
return increased, max_overlap
def is_related(self, node1, node2):
# simple heuristic, need update
for token in node1:
if token in node2:
return True
return False
def pre_process_n_gram(self, node1, n_gram):
if n_gram <= 1:
return node1
p_node1 = []
for i in range(len(node1) - n_gram + 1):
n_gram_phrase = node1[i]
for k in range(1, n_gram):
if i + k >= len(node1):
break
n_gram_phrase += " " + node1[i + k]
p_node1.append(n_gram_phrase)
return p_node1
def n_overlap(self, node1, node2, n_gram=2):
p_node1 = self.pre_process_n_gram(node1, n_gram)
p_node2 = self.pre_process_n_gram(node2, n_gram)
n = 0
for token in p_node1:
if token in p_node2:
n += 1
return n
def get_all_related_nodes(self, node1):
related_nodes = []
max_overlap = 0
for node2 in self.graph:
if node2 == node1: continue
n_overlap = self.n_overlap(node1, node2, n_gram=self.n_gram)
if n_overlap > 0:
related_nodes.append(node2)
max_overlap = max(max_overlap, n_overlap)
return related_nodes, max_overlap
def get_all_related_nodes_cosine(self, node1):
related_nodes = []
max_overlap = 0
n1 = node1[0]
for s in node1[1:]:
n1 = n1 + " " + s
for node2 in self.graph:
if node2 == node1: continue
n2 = node2[0]
for s in node2[1:]:
n2 = n2 + " " + s
self.vectorizer.bert([n1, n2])
vectors = self.vectorizer.vectors
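            # scipy's cosine() returns a distance (1 - cosine similarity), so
            # larger values mean the sentences are less alike; pairs further
            # apart than 0.01 are linked below.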
n_overlap = spatial.distance.cosine(vectors[0], vectors[1])
if n_overlap > 0.01:
related_nodes.append(node2)
max_overlap = max(max_overlap, n_overlap)
return related_nodes, max_overlap
def getCC(self):
"""
:return: list of tuples which represents the connected component that contains the instr node.
Ex. [('go', 'to', 'jack', 'favorite toy'), ('blue ball', 'room0')]
"""
if self.mode == 'graph_overlap' or self.mode == 'graph_cosine':
def bfs(node, graph):
res = []
visited = set()
q = [node]
visited.add(node)
while q:
v = q.pop(0)
res.append(v)
for n in graph[v]:
if n not in visited:
q.append(n)
visited.add(n)
return res
return bfs(self.instr_node, self.graph)
else:
return list(self.set)
def reset(self, node):
        # seed the graph with the instruction node and an empty adjacency list
        # so that queries with no adjacent nodes still work
# TODO: ugly, may need refactor
self.instr_node = tuple(node)
self.graph = {self.instr_node: []}
self.set = set()
self.set.add(self.instr_node)
self.instr_CC_size = len(self.graph[self.instr_node]) + 1
def __repr__(self):
if self.mode == 'graph_overlap' or self.mode == 'graph_cosine':
ret = ""
for k, v in self.graph.items():
ret += str(k) + ": " + str(v) + "\n"
return ret
else:
return str(self.set)
|
[
"sent2vec.vectorizer.Vectorizer",
"scipy.spatial.distance.cosine"
] |
[((317, 329), 'sent2vec.vectorizer.Vectorizer', 'Vectorizer', ([], {}), '()\n', (327, 329), False, 'from sent2vec.vectorizer import Vectorizer\n'), ((3218, 3265), 'scipy.spatial.distance.cosine', 'spatial.distance.cosine', (['vectors[0]', 'vectors[1]'], {}), '(vectors[0], vectors[1])\n', (3241, 3265), False, 'from scipy import spatial\n')]
|
# Copyright 2017-2020 object_database Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ProxyServer
Models a Server that sits on top of another Server, and acts to pool subscription
requests. This makes it possible for several connections that all tend to
subscribe to the same schemas or types (say, the list of hosts) to share
connect to the same proxy, and only place the load of one connection on the
core server.
Usually, we'll put one ProxyServer per physical host, at least when
we are using larger hosts. This way, the centeral server doesn't get
overloaded trying to write connection data out to all the connections.
"""
import logging
import threading
import uuid
from typed_python import (
OneOf,
NamedTuple,
Dict,
Set,
Tuple,
ConstDict,
makeNamedTuple,
TupleOf,
ListOf,
deserialize,
)
from .channel import ServerToClientChannel, ClientToServerChannel
from .messages import ServerToClient, ClientToServer
from .server import ObjectBase
from .schema import (
IndexValue,
FieldId,
FieldDefinition,
ObjectId,
ObjectFieldId,
SchemaDefinition,
TypeDefinition,
IndexId,
)
SubscriptionKey = NamedTuple(
schema=str,
typename=str,
fieldname_and_value=OneOf(None, Tuple(str, IndexValue)),
isLazy=bool,
)
# recall these definitions, included here for reference
# ObjectId = int
# FieldId = int
# ObjectFieldId = NamedTuple(objId=int, fieldId=int, isIndexValue=bool)
# IndexValue = bytes
# IndexId = NamedTuple(fieldId=int, indexValue=IndexValue)
# TypeDefinition = NamedTuple(fields=TupleOf(str), indices=TupleOf(str))
# SchemaDefinition = ConstDict(str, TypeDefinition)
# FieldDefinition = NamedTuple(schema=str, typename=str, fieldname=str)
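# Merge helpers: combine two TypeDefinitions / SchemaDefinitions, keeping the
# ordering of the first argument and appending anything that only appears in
# the second.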
def mergeTypeDefinition(typedef1, typedef2):
return TypeDefinition(
fields=typedef1.fields + [x for x in typedef2.fields if x not in typedef1.fields],
indices=typedef1.indices + [x for x in typedef2.indices if x not in typedef1.indices],
)
def mergeSchemaDefinitions(schemaDef1, schemaDef2):
out = dict(schemaDef1)
for typename, typedef in schemaDef2.items():
if typename not in out:
out[typename] = typedef
else:
out[typename] = mergeTypeDefinition(out[typename], typedef)
return SchemaDefinition(out)
class FieldIdToDefMapping:
def __init__(self):
self.fieldIdToDef = {}
self.fieldDefToId = {}
def addFieldMapping(self, fieldId: FieldId, fieldDef: FieldDefinition):
self.fieldIdToDef[fieldId] = fieldDef
self.fieldDefToId[fieldDef] = fieldId
class SubscriptionState:
def __init__(self):
# for each fieldId, the set of channels subscribed to it and vice versa
# this is what we use to determine which channels are subscribed
self.fieldIdToSubscribedChannels = Dict(FieldId, Set(ServerToClientChannel))()
self.channelToSubscribedFieldIds = Dict(ServerToClientChannel, Set(FieldId))()
self.indexIdToSubscribedChannels = Dict(IndexId, Set(ServerToClientChannel))()
self.channelToSubscribedIndexIds = Dict(ServerToClientChannel, Set(IndexId))()
self.channelToSubscribedOids = Dict(ServerToClientChannel, Set(ObjectId))()
self.oidToSubscribedChannels = Dict(ObjectId, Set(ServerToClientChannel))()
self.channelToLazilySubscribedFieldIds = Dict(ServerToClientChannel, Set(FieldId))()
self.channelToLazilySubscribedIndexIds = Dict(ServerToClientChannel, Set(IndexId))()
# the definition of each schema as we know it
self.schemaDefs = Dict(str, ConstDict(FieldDefinition, FieldId))()
# the schemas we've actually defined on the server
# map from name to SchemaDefinition
self._definedSchemas = Dict(str, SchemaDefinition)()
# map from schema -> typename -> fieldname -> fieldId
self.schemaTypeAndNameToFieldId = Dict(str, Dict(str, Dict(str, int)))()
self.fieldIdToDef = Dict(int, FieldDefinition)()
# mapping between a channel and its subscriptions
self.channelSubscriptions = Dict(ServerToClientChannel, Set(SubscriptionKey))()
# subscriptions pending a schema/typname being fully subscribed
self.channelToPendingSubscriptions = Dict(
ServerToClientChannel, Set(SubscriptionKey)
)()
self.channelToPendingTransactions = Dict(
ServerToClientChannel, ListOf(ClientToServer)
)()
self.subscriptionsPendingSubscriptionOnServer = Dict(
# schema and typename
Tuple(str, str),
Set(Tuple(ServerToClientChannel, SubscriptionKey)),
)()
# the current top transaction we've ever seen.
self.transactionId = -1
# set of schema/typename for which we have complete subscriptions
self.completedTypes = Set(NamedTuple(schema=str, typename=str))()
# the state of our subscriptions
self.objectValues = Dict(FieldId, Dict(ObjectId, bytes))()
self.indexValues = Dict(FieldId, Dict(ObjectId, IndexValue))()
self.reverseIndexValues = Dict(FieldId, Dict(IndexValue, Set(ObjectId)))()
def dropConnection(self, channel: ServerToClientChannel):
if channel in self.channelToSubscribedFieldIds:
for fieldId in self.channelToSubscribedFieldIds[channel]:
self.fieldIdToSubscribedChannels[fieldId].discard(channel)
self.channelToSubscribedFieldIds.pop(channel)
if channel in self.channelToSubscribedIndexIds:
for fieldAndIv in self.channelToSubscribedIndexIds[channel]:
self.indexIdToSubscribedChannels[fieldAndIv].discard(channel)
self.channelToSubscribedIndexIds.pop(channel)
if channel in self.channelToSubscribedOids:
for oid in self.channelToSubscribedOids[channel]:
self.oidToSubscribedChannels[oid].discard(channel)
if not self.oidToSubscribedChannels[oid]:
self.oidToSubscribedChannels.pop(oid)
self.channelToSubscribedOids.pop(channel)
if channel in self.channelSubscriptions:
self.channelSubscriptions.pop(channel)
if channel in self.channelToPendingTransactions:
self.channelToPendingTransactions.pop(channel)
if channel in self.channelToPendingSubscriptions:
for subsKey in self.channelToPendingSubscriptions[channel]:
self.subscriptionsPendingSubscriptionOnServer[
subsKey.schema, subsKey.typename
].pop((channel, subsKey))
self.channelToPendingSubscriptions.pop(channel)
if channel in self.channelToLazilySubscribedFieldIds:
self.channelToLazilySubscribedFieldIds.pop(channel)
if channel in self.channelToLazilySubscribedIndexIds:
self.channelToLazilySubscribedIndexIds.pop(channel)
def addSubscription(self, channel, subscriptionKey: SubscriptionKey):
self.channelSubscriptions.setdefault(channel).add(subscriptionKey)
if (
makeNamedTuple(schema=subscriptionKey.schema, typename=subscriptionKey.typename)
in self.completedTypes
):
self.sendDataForSubscription(channel, subscriptionKey)
else:
self.subscriptionsPendingSubscriptionOnServer.setdefault(
(subscriptionKey.schema, subscriptionKey.typename)
).add((channel, subscriptionKey))
self.channelToPendingSubscriptions.setdefault(channel).add(subscriptionKey)
def sendDataForSubscription(self, channel, key: SubscriptionKey):
# get the set of affected objects
oids = self.objectIndentitiesForSubscriptionKey(key)
if key.fieldname_and_value is not None:
fieldname, indexValue = key.fieldname_and_value
if fieldname != "_identity":
fieldId = self.schemaTypeAndNameToFieldId[key.schema][key.typename][fieldname]
self.indexIdToSubscribedChannels.setdefault((fieldId, indexValue)).add(channel)
self.channelToSubscribedIndexIds.setdefault(channel).add((fieldId, indexValue))
if key.isLazy:
self.channelToLazilySubscribedIndexIds.setdefault(channel).add(
IndexId(fieldId=fieldId, indexValue=indexValue)
)
# and also mark the specific values its subscribed to
self.channelToSubscribedOids[channel] = oids
for oid in oids:
self.oidToSubscribedChannels.setdefault(oid).add(channel)
else:
# subscribe this channel to all the values in this type
for fieldId in self.schemaTypeAndNameToFieldId[key.schema][key.typename].values():
self.fieldIdToSubscribedChannels.setdefault(fieldId).add(channel)
self.channelToSubscribedFieldIds.setdefault(channel).add(fieldId)
if key.isLazy:
self.channelToLazilySubscribedFieldIds.setdefault(channel).add(fieldId)
if key.isLazy:
channel.sendMessage(
ServerToClient.LazySubscriptionData(
schema=key.schema,
typename=key.typename,
fieldname_and_value=key.fieldname_and_value,
identities=oids,
index_values=self.indexValuesForOids(key.schema, key.typename, oids),
)
)
else:
channel.sendMessage(
ServerToClient.SubscriptionData(
schema=key.schema,
typename=key.typename,
fieldname_and_value=key.fieldname_and_value,
values=self.objectValuesForOids(key.schema, key.typename, oids),
index_values=self.indexValuesForOids(key.schema, key.typename, oids),
identities=None if key.fieldname_and_value is None else oids,
)
)
channel.sendMessage(
ServerToClient.SubscriptionComplete(
schema=key.schema,
typename=key.typename,
fieldname_and_value=key.fieldname_and_value,
tid=self.transactionId,
)
)
def objectIndentitiesForSubscriptionKey(self, key: SubscriptionKey) -> Set(ObjectId):
oids = Set(ObjectId)()
if key.fieldname_and_value is not None:
if key.fieldname_and_value[0] == "_identity":
# this is an 'identity' subscription, which subscribes to a single object
oids.add(deserialize(ObjectBase, key.fieldname_and_value[1])._identity)
return oids
if key.schema in self.schemaTypeAndNameToFieldId:
typenameToFieldMap = self.schemaTypeAndNameToFieldId[key.schema]
if key.typename in typenameToFieldMap:
if key.fieldname_and_value is None:
for fieldId in typenameToFieldMap[key.typename].values():
if fieldId in self.objectValues:
oids.update(self.objectValues[fieldId])
else:
fieldname, indexValue = key.fieldname_and_value
if fieldname in typenameToFieldMap[key.typename]:
fieldId = typenameToFieldMap[key.typename][fieldname]
if (
fieldId in self.reverseIndexValues
and indexValue in self.reverseIndexValues[fieldId]
):
oids.update(self.reverseIndexValues[fieldId][indexValue])
return oids
def objectValuesForOids(self, schema, typename, oids):
res = Dict(ObjectFieldId, OneOf(None, bytes))()
if schema in self.schemaTypeAndNameToFieldId:
typenameToFieldMap = self.schemaTypeAndNameToFieldId[schema]
if typename in typenameToFieldMap:
for fieldId in typenameToFieldMap[typename].values():
if fieldId in self.objectValues:
oidToVal = self.objectValues[fieldId]
for oid in oids:
if oid in oidToVal:
res[
ObjectFieldId(
objId=oid, fieldId=fieldId, isIndexValue=False
)
] = oidToVal[oid]
return ConstDict(ObjectFieldId, OneOf(None, bytes))(res)
def indexValuesForOids(self, schema, typename, oids):
res = Dict(ObjectFieldId, OneOf(None, IndexValue))()
if schema in self.schemaTypeAndNameToFieldId:
typenameToFieldMap = self.schemaTypeAndNameToFieldId[schema]
if typename in typenameToFieldMap:
for fieldId in typenameToFieldMap[typename].values():
if fieldId in self.indexValues:
oidToVal = self.indexValues[fieldId]
for oid in oids:
if oid in oidToVal:
res[
ObjectFieldId(
objId=oid, fieldId=fieldId, isIndexValue=True
)
] = oidToVal[oid]
return ConstDict(ObjectFieldId, OneOf(None, IndexValue))(res)
def objectValuesForSubscriptionKey(self, subscriptionKey):
res = Dict(ObjectFieldId, OneOf(None, bytes))()
if subscriptionKey.schema in self.schemaTypeAndNameToFieldId:
typenameToFieldMap = self.schemaTypeAndNameToFieldId[subscriptionKey.schema]
if subscriptionKey.typename in typenameToFieldMap:
for fieldId in typenameToFieldMap[subscriptionKey.typename].values():
for objectId, value in self.objectValues.setdefault(fieldId).items():
res[
ObjectFieldId(objId=objectId, fieldId=fieldId, isIndexValue=False)
] = value
return ConstDict(ObjectFieldId, OneOf(None, bytes))(res)
def indexValuesForSubscriptionKey(self, subscriptionKey):
res = Dict(ObjectFieldId, OneOf(None, bytes))()
if subscriptionKey.schema in self.schemaTypeAndNameToFieldId:
typenameToFieldMap = self.schemaTypeAndNameToFieldId[subscriptionKey.schema]
if subscriptionKey.typename in typenameToFieldMap:
for fieldId in typenameToFieldMap[subscriptionKey.typename].values():
for objectId, value in self.indexValues.setdefault(fieldId).items():
res[
ObjectFieldId(objId=objectId, fieldId=fieldId, isIndexValue=True)
] = value
return ConstDict(ObjectFieldId, OneOf(None, bytes))(res)
def mapSchema(self, schemaName, schemaDef: ConstDict(FieldDefinition, FieldId)):
self.schemaDefs[schemaName] = schemaDef
for fieldDef, fieldId in schemaDef.items():
self.schemaTypeAndNameToFieldId.setdefault(fieldDef.schema).setdefault(
fieldDef.typename
)[fieldDef.fieldname] = fieldId
self.fieldIdToDef[fieldId] = fieldDef
def handleSubscriptionData(
self, schema, typename, fieldnameAndValue, values, indexValues, identities
):
def update(dictlike, key, valueOrNone):
if valueOrNone is None:
if key in dictlike:
del dictlike[key]
else:
dictlike[key] = valueOrNone
# this will always be for an entire schema
for key, valueData in values.items():
assert not key.isIndexValue
update(self.objectValues.setdefault(key.fieldId), key.objId, valueData)
for key, indexData in indexValues.items():
assert key.isIndexValue
update(self.indexValues.setdefault(key.fieldId), key.objId, indexData)
indexValueToObjects = self.reverseIndexValues.setdefault(key.fieldId)
if indexData is not None:
indexValueToObjects.setdefault(indexData).add(key.objId)
def getChannelsForSchemaAndTypename(self, schema, typename):
channels = set()
if schema not in self.schemaTypeAndNameToFieldId:
return channels
if typename not in self.schemaTypeAndNameToFieldId[schema]:
return channels
for fieldId in self.schemaTypeAndNameToFieldId[schema][typename].values():
if fieldId in self.fieldIdToSubscribedChannels:
channels.update(self.fieldIdToSubscribedChannels[fieldId])
return channels
def handleSubscriptionComplete(self, schema, typename, fieldnameAndValue, tid):
if tid > self.transactionId:
self.transactionId = tid
channelsToMessageToSend = Dict(ServerToClientChannel, ListOf(ServerToClient))()
# this will always be for an entire schema
self.completedTypes.add(makeNamedTuple(schema=schema, typename=typename))
if (schema, typename) in self.subscriptionsPendingSubscriptionOnServer:
for channel, subscriptionKey in self.subscriptionsPendingSubscriptionOnServer.pop(
(schema, typename)
):
self.channelToPendingSubscriptions[channel].discard(subscriptionKey)
self.sendDataForSubscription(channel, subscriptionKey)
if not self.channelToPendingSubscriptions[channel]:
self.channelToPendingSubscriptions.pop(channel)
if channel in self.channelToPendingTransactions:
channelsToMessageToSend[
channel
] = self.channelToPendingTransactions.pop(channel)
return channelsToMessageToSend
def _increaseBroadcastTransactionToInclude(
self, indexId, writes, set_adds, set_removes, newOids
):
"""Update the transaction data in 'writes', 'set_adds', 'set_removes' to contain
all the definitions of the objects contained in newOids.
"""
# figure out what kind of objects these are. They all came from
# the same index id
indexFieldDef = self.fieldIdToDef[indexId.fieldId]
fieldnameToFieldId = self.schemaTypeAndNameToFieldId[indexFieldDef.schema][
indexFieldDef.typename
]
typeDefinition = self._definedSchemas[indexFieldDef.schema][indexFieldDef.typename]
for fieldname in typeDefinition.fields:
fieldId = fieldnameToFieldId[fieldname]
if fieldId in self.objectValues:
for oid in newOids:
if oid in self.objectValues[fieldId]:
writes[ObjectFieldId(objId=oid, fieldId=fieldId)] = self.objectValues[
fieldId
][oid]
for indexname in typeDefinition.indices:
fieldId = fieldnameToFieldId[indexname]
if fieldId in self.objectValues:
for oid in newOids:
if oid in self.objectValues[fieldId]:
fieldVal = self.objectValues[fieldId][oid]
set_adds.setdefault(IndexId(fieldId=fieldId, indexValue=fieldVal)).add(
oid
)
def handleTransaction(self, writes, set_adds, set_removes, transaction_id):
# we may have to modify the transaction values
writes = Dict(ObjectFieldId, OneOf(None, bytes))(writes)
priorValues = Dict(ObjectFieldId, OneOf(None, bytes))()
for ofi, value in writes.items():
priorValues[ofi] = self.objectValues.setdefault(ofi.fieldId).get(ofi.objId)
set_adds = Dict(IndexId, Set(ObjectId))(
{k: Set(ObjectId)(v) for k, v in set_adds.items()}
)
set_removes = Dict(IndexId, Set(ObjectId))(
{k: Set(ObjectId)(v) for k, v in set_removes.items()}
)
fieldIds = Set(FieldId)()
oidsMentioned = Set(ObjectId)()
# all channels that need to get the prior values of each thing
# being written before they receive the transaction (because of
# laziness)
channelsTriggeredForPriors = Set(ServerToClientChannel)()
for objectFieldId, val in writes.items():
oidsMentioned.add(objectFieldId.objId)
fieldIds.add(objectFieldId.fieldId)
oidMap = self.objectValues.setdefault(objectFieldId.fieldId)
if val is None:
oidMap.pop(objectFieldId.objId, b"")
else:
oidMap[objectFieldId.objId] = val
for indexId, oids in set_removes.items():
vals = self.indexValues.setdefault(indexId.fieldId)
for oid in oids:
oidsMentioned.add(oid)
if oid in vals:
vals.pop(oid)
objectsWithThisIndexVal = self.reverseIndexValues.setdefault(
indexId.fieldId
).setdefault(indexId.indexValue)
for oid in oids:
objectsWithThisIndexVal.discard(oid)
if not objectsWithThisIndexVal:
self.reverseIndexValues[indexId.fieldId].pop(indexId.indexValue)
idsToAddToTransaction = Dict(IndexId, Set(ObjectId))()
for indexId, oids in set_adds.items():
vals = self.indexValues.setdefault(indexId.fieldId)
# each channel subscribed to this indexid may need a 'SubscriptionIncrease'
# message.
if indexId in self.indexIdToSubscribedChannels:
for channel in self.indexIdToSubscribedChannels[indexId]:
# if this channel is lazily subscribed to this index then we need to send
# priors for every value we're updating. We're not being careful about
# tracking this on a per-object basis, so in theory we could do better
if indexId in self.channelToLazilySubscribedIndexIds.setdefault(channel):
channelsTriggeredForPriors.add(channel)
if channel not in self.channelToSubscribedOids:
newOids = oids
else:
existingSet = self.channelToSubscribedOids[channel]
newOids = [o for o in oids if o not in existingSet]
if newOids:
self.channelToSubscribedOids.setdefault(channel).update(newOids)
for n in newOids:
self.oidToSubscribedChannels.setdefault(n).add(channel)
fieldDef = self.fieldIdToDef[indexId.fieldId]
channel.sendMessage(
ServerToClient.SubscriptionIncrease(
schema=fieldDef.schema,
typename=fieldDef.typename,
fieldname_and_value=(fieldDef.fieldname, indexId.indexValue),
identities=newOids,
transaction_id=transaction_id,
)
)
idsToAddToTransaction.setdefault(indexId).update(newOids)
objectsWithThisIndexVal = self.reverseIndexValues.setdefault(
indexId.fieldId
).setdefault(indexId.indexValue)
for oid in oids:
oidsMentioned.add(oid)
vals[oid] = indexId.indexValue
objectsWithThisIndexVal.add(oid)
for indexId, oids in idsToAddToTransaction.items():
self._increaseBroadcastTransactionToInclude(
indexId, writes, set_adds, set_removes, oids
)
for indexId in set_adds:
fieldIds.add(indexId.fieldId)
for indexId in set_removes:
fieldIds.add(indexId.fieldId)
# determine which channels are affected
channels = set()
for f in fieldIds:
if f in self.fieldIdToSubscribedChannels:
channels.update(self.fieldIdToSubscribedChannels[f])
for c in self.fieldIdToSubscribedChannels[f]:
if f in self.channelToLazilySubscribedFieldIds.setdefault(c):
channelsTriggeredForPriors.add(c)
for oid in oidsMentioned:
if oid in self.oidToSubscribedChannels:
channels.update(self.oidToSubscribedChannels[oid])
if transaction_id > self.transactionId:
self.transactionId = transaction_id
for channel in channelsTriggeredForPriors:
channel.sendMessage(ServerToClient.LazyTransactionPriors(writes=priorValues))
if channels:
msg = ServerToClient.Transaction(
writes=writes,
set_adds=ConstDict(IndexId, TupleOf(ObjectId))(
{k: TupleOf(ObjectId)(v) for k, v in set_adds.items()}
),
set_removes=ConstDict(IndexId, TupleOf(ObjectId))(
{k: TupleOf(ObjectId)(v) for k, v in set_removes.items()}
),
transaction_id=transaction_id,
)
for c in channels:
c.sendMessage(msg)
def increaseSubscriptionIfNecessary(self, channel, set_adds, transaction_id):
"""Mark any new objects we need to track based on contents of 'set_adds'.
When a client creates new objects, it needs to track them regardless of
whether it's explicitly subscribed to the object.
So we check whether any new objects are being created (set_adds with field ' exists')
and if we're not subscribed the type we increase the subscription.
"""
for indexId, oids in set_adds.items():
fieldDef = self.fieldIdToDef[indexId.fieldId]
if fieldDef.fieldname == " exists" and (
channel not in self.fieldIdToSubscribedChannels.setdefault(indexId.fieldId)
):
newIds = [
x
for x in oids
if x not in self.channelToSubscribedOids.setdefault(channel)
]
if newIds:
self.channelToSubscribedOids[channel].update(newIds)
for oid in newIds:
self.oidToSubscribedChannels.setdefault(oid).add(channel)
channel.sendMessage(
ServerToClient.SubscriptionIncrease(
schema=fieldDef.schema,
typename=fieldDef.typename,
fieldname_and_value=(fieldDef.fieldname, indexId.indexValue),
identities=newIds,
transaction_id=transaction_id,
)
)
def lazyLoadObject(self, channel, schema, typename, identity):
channel.write(
ServerToClient.LazyLoadResponse(
identity=identity,
values=self.objectValuesForOids(schema, typename, [identity]),
)
)
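# --- Illustrative sketch (editorial addition, not part of the original module) ---
# The fan-out decision in SubscriptionState.handleTransaction() above reduces to
# two reverse indices: fieldId -> subscribed channels and objectId -> subscribed
# channels. A minimal, self-contained model of that lookup using plain dicts and
# sets (the helper name below is made up for illustration):
def _channels_for_transaction(field_ids, oids, field_to_channels, oid_to_channels):
    """Return every channel that must receive a transaction touching the given
    fieldIds and objectIds (mirrors the loops in handleTransaction)."""
    channels = set()
    for f in field_ids:
        channels.update(field_to_channels.get(f, ()))
    for o in oids:
        channels.update(oid_to_channels.get(o, ()))
    return channels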
class ProxyServer:
def __init__(self, upstreamChannel: ClientToServerChannel, authToken):
self._channelToMainServer = upstreamChannel
self._authToken = authToken
self._logger = logging.getLogger(__name__)
self._downstreamChannels = set()
self._authenticatedDownstreamChannels = set()
self._connectionIdentity = None
self._identityRoot = None
self._transactionNum = None
self._lock = threading.RLock()
self._deferredMessagesAndEndpoints = []
self._channelToMainServer.setServerToClientHandler(self.handleServerToClientMessage)
self._guidToChannelRequestingIdentity = {}
self._channelToMissedHeartbeatCount = Dict(ServerToClientChannel, int)()
self._channelToConnectionId = Dict(ServerToClientChannel, ObjectId)()
# dictionary from (channel, schemaName) -> SchemaDefinition
self._channelSchemas = Dict(Tuple(ServerToClientChannel, str), SchemaDefinition)()
# map from schema name to iteration number to ConstDict(FieldDefinition, int)
self._mappedSchemas = Dict(str, Dict(int, ConstDict(FieldDefinition, FieldId)))()
self._requestedSchemaIteration = Dict(str, int)()
self._receivedSchemaIteration = Dict(str, int)()
# for each requested (schema, iteration), the set of channels waiting for it
self._unmappedSchemasToChannels = {}
self._fieldIdToDefMapping = FieldIdToDefMapping()
self._subscriptionState = SubscriptionState()
# right now, we only subscribe to entire types
self._subscribedTypes = Set(NamedTuple(schema=str, typename=str))()
# state machine for tracking the flush guids we're getting
# from each channel
self._flushGuidIx = 0
self._outgoingFlushGuidToChannelAndFlushGuid = Dict(
int, Tuple(ServerToClientChannel, int)
)()
# state machine for managing the transactions we have pending
# on each channel
self._transactionGuidIx = 0
self._channelAndTransactionGuidToOutgoingTransactionGuid = Dict(
Tuple(ServerToClientChannel, int), int
)()
self._outgoingTransactionGuidToChannelAndTransactionGuid = Dict(
int, Tuple(ServerToClientChannel, int)
)()
@property
def authToken(self):
return self._authToken
def authenticate(self):
self._channelToMainServer.sendMessage(
ClientToServer.Authenticate(token=self.authToken)
)
def addConnection(self, channel: ServerToClientChannel):
"""An incoming connection is being made."""
with self._lock:
self._downstreamChannels.add(channel)
self._channelToMissedHeartbeatCount[channel] = 0
channel.setClientToServerHandler(
lambda msg: self.handleClientToServerMessage(channel, msg)
)
def dropConnection(self, channel: ServerToClientChannel):
"""An incoming connection has dropped."""
with self._lock:
if channel not in self._downstreamChannels:
return
self._subscriptionState.dropConnection(channel)
self._downstreamChannels.discard(channel)
del self._channelToMissedHeartbeatCount[channel]
self._authenticatedDownstreamChannels.discard(channel)
if channel in self._channelToConnectionId:
connId = self._channelToConnectionId.pop(channel)
self._channelToMainServer.sendMessage(
ClientToServer.DropDependentConnectionId(connIdentity=connId)
)
channel.close()
def handleClientToServerMessage(self, channel, msg: ClientToServer):
with self._lock:
self._handleClientToServerMessage(channel, msg)
def checkForDeadConnections(self):
with self._lock:
for c in list(self._channelToMissedHeartbeatCount):
self._channelToMissedHeartbeatCount[c] += 1
if self._channelToMissedHeartbeatCount[c] >= 4:
logging.info(
"Connection %s has not heartbeat in a long time. Killing it.",
self._channelToConnectionId.get(c),
)
c.close()
self.dropConnection(c)
def _handleClientToServerMessage(self, channel, msg: ClientToServer):
if channel not in self._downstreamChannels:
# this channel disconnected
return
if self._connectionIdentity is None:
# we are not authenticated yet.
self._deferredMessagesAndEndpoints.append((channel, msg))
return
if msg.matches.Authenticate:
if channel in self._authenticatedDownstreamChannels:
# the channel is already authenticated
self._logger.warn("Channel attempted to re-authenticate")
self.dropConnection(channel)
return
if msg.token != self._authToken:
self._logger.warn("Channel attempted to authenticate with invalid token.")
self.dropConnection(channel)
return
self._authenticatedDownstreamChannels.add(channel)
# we can request a new connection ID for this worker
guid = str(uuid.uuid4())
self._guidToChannelRequestingIdentity[guid] = channel
self._channelToMainServer.sendMessage(
ClientToServer.RequestDependentConnectionId(
parentId=self._connectionIdentity, guid=guid
)
)
return
# ensure that we're connected
if channel not in self._authenticatedDownstreamChannels:
# don't worry about heartbeats
if msg.matches.Heartbeat:
return
self._logger.warn(
"Channel attempted to communicate without authenticating: %s", type(msg)
)
self.dropConnection(channel)
return
self._handleAuthenticatedMessage(channel, msg)
def _handleAuthenticatedMessage(self, channel, msg: ClientToServer):
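        # A downstream client is defining a schema: keep a single merged
        # definition upstream and hand back the field-id mapping once the main
        # server has acknowledged that iteration of the schema.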
if msg.matches.DefineSchema:
self._channelSchemas[channel, msg.name] = msg.definition
if msg.name not in self._subscriptionState._definedSchemas:
self._requestedSchemaIteration[msg.name] = 0
self._channelToMainServer.sendMessage(
ClientToServer.DefineSchema(name=msg.name, definition=msg.definition)
)
self._subscriptionState._definedSchemas[msg.name] = msg.definition
else:
if msg.definition != self._subscriptionState._definedSchemas[msg.name]:
biggerSchema = mergeSchemaDefinitions(
self._subscriptionState._definedSchemas[msg.name], msg.definition
)
# if the schema contains new fields we need to send this message and
# enlarge the schema definition
if biggerSchema != self._subscriptionState._definedSchemas[msg.name]:
self._requestedSchemaIteration[msg.name] += 1
self._channelToMainServer.sendMessage(
ClientToServer.DefineSchema(name=msg.name, definition=biggerSchema)
)
self._subscriptionState._definedSchemas[msg.name] = biggerSchema
schemaIteration = self._requestedSchemaIteration[msg.name]
if (
msg.name in self._mappedSchemas
and schemaIteration in self._mappedSchemas[msg.name]
):
channel.sendMessage(
ServerToClient.SchemaMapping(
schema=msg.name, mapping=self._mappedSchemas[msg.name][schemaIteration]
)
)
else:
self._unmappedSchemasToChannels.setdefault(
(msg.name, schemaIteration), set()
).add(channel)
return
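        # SubscribeNone only guarantees that the whole type is subscribed
        # upstream; it does not register a per-channel subscription for the
        # requesting client.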
if msg.matches.SubscribeNone:
schemaAndTypename = makeNamedTuple(schema=msg.schema, typename=msg.typename)
if schemaAndTypename not in self._subscribedTypes:
self._channelToMainServer.sendMessage(
ClientToServer.Subscribe(
schema=msg.schema,
typename=msg.typename,
fieldname_and_value=None,
isLazy=False,
)
)
self._subscribedTypes.add(schemaAndTypename)
return
if msg.matches.Subscribe:
schemaAndTypename = makeNamedTuple(schema=msg.schema, typename=msg.typename)
if (channel, msg.schema) not in self._channelSchemas:
raise Exception(
f"Can't subscribe to schema {msg.schema} that we don't have "
f"a definition for."
)
subscription = SubscriptionKey(
schema=msg.schema,
typename=msg.typename,
fieldname_and_value=msg.fieldname_and_value,
isLazy=msg.isLazy,
)
if schemaAndTypename not in self._subscribedTypes:
self._channelToMainServer.sendMessage(
ClientToServer.Subscribe(
schema=subscription.schema,
typename=subscription.typename,
fieldname_and_value=None,
isLazy=False,
)
)
self._subscribedTypes.add(schemaAndTypename)
self._subscriptionState.addSubscription(channel, subscription)
return
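        # Flush guids are local to each downstream client, so allocate a
        # proxy-wide guid and remember which (channel, guid) pair to answer
        # when the FlushResponse comes back from the main server.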
if msg.matches.Flush:
self._flushGuidIx += 1
guid = self._flushGuidIx
self._outgoingFlushGuidToChannelAndFlushGuid[guid] = (channel, msg.guid)
self._channelToMainServer.sendMessage(ClientToServer.Flush(guid=guid))
return
if msg.matches.LoadLazyObject:
if (
makeNamedTuple(schema=msg.schema, typename=msg.typename)
not in self._subscriptionState.completedTypes
):
logging.error("Client tried to lazy load for a type we're not subscribed to")
self.dropConnection(channel)
return
self._subscriptionState.lazyLoadObject(
channel, msg.schema, msg.typename, msg.identity
)
return
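        # Transaction writes from a downstream client: buffer them while the
        # channel still has pending subscriptions; otherwise remap the client's
        # transaction_guid to a proxy-wide guid and forward the data upstream.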
if msg.matches.TransactionData:
if channel in self._subscriptionState.channelToPendingSubscriptions:
assert self._subscriptionState.channelToPendingSubscriptions[channel]
self._subscriptionState.channelToPendingTransactions.setdefault(
channel
).append(msg)
return
if (
channel,
msg.transaction_guid,
) in self._channelAndTransactionGuidToOutgoingTransactionGuid:
guid = self._channelAndTransactionGuidToOutgoingTransactionGuid[
channel, msg.transaction_guid
]
else:
self._transactionGuidIx += 1
guid = self._transactionGuidIx
self._outgoingTransactionGuidToChannelAndTransactionGuid[guid] = (
channel,
msg.transaction_guid,
)
self._channelAndTransactionGuidToOutgoingTransactionGuid[
channel, msg.transaction_guid
] = guid
self._subscriptionState.increaseSubscriptionIfNecessary(
channel, msg.set_adds, self._transactionNum
)
self._channelToMainServer.sendMessage(
ClientToServer.TransactionData(
writes=msg.writes,
set_adds=msg.set_adds,
set_removes=msg.set_removes,
key_versions=msg.key_versions,
index_versions=msg.index_versions,
transaction_guid=guid,
)
)
return
if msg.matches.CompleteTransaction:
if channel in self._subscriptionState.channelToPendingSubscriptions:
assert self._subscriptionState.channelToPendingSubscriptions[channel]
self._subscriptionState.channelToPendingTransactions.setdefault(
channel
).append(msg)
return
if (
channel,
msg.transaction_guid,
) not in self._channelAndTransactionGuidToOutgoingTransactionGuid:
logging.error(
"Received unexpected CompleteTransaction message: %s", msg.transaction_guid
)
return
guid = self._channelAndTransactionGuidToOutgoingTransactionGuid.pop(
(channel, msg.transaction_guid)
)
self._channelToMainServer.sendMessage(
ClientToServer.CompleteTransaction(
as_of_version=msg.as_of_version, transaction_guid=guid
)
)
return
if msg.matches.Heartbeat:
if channel in self._downstreamChannels:
self._channelToMissedHeartbeatCount[channel] = 0
return
raise Exception("Don't know how to handle ", msg)
def handleServerToClientMessage(self, msg: ServerToClient):
with self._lock:
if msg.matches.Initialize:
self._connectionIdentity = msg.connIdentity
self._identityRoot = msg.identity_root
self._transactionNum = msg.transaction_num
# process any messages we received while we were not yet
# authenticated.
for channel, msg in self._deferredMessagesAndEndpoints:
self._handleClientToServerMessage(channel, msg)
self._deferredMessagesAndEndpoints.clear()
return
if msg.matches.DependentConnectionId:
guid = msg.guid
channel = self._guidToChannelRequestingIdentity.pop(guid, None)
if channel is None or channel not in self._downstreamChannels:
# the channel was disconnected before we processed the message.
# just send the drop back.
self._channelToMainServer.sendMessage(
ClientToServer.DropDependentConnectionId(connIdentity=msg.connIdentity)
)
return None
self._channelToConnectionId[channel] = msg.connIdentity
channel.sendMessage(
ServerToClient.Initialize(
transaction_num=self._transactionNum,
connIdentity=msg.connIdentity,
identity_root=msg.identity_root,
)
)
return
if msg.matches.SchemaMapping:
assert msg.schema in self._requestedSchemaIteration
schemaIteration = self._receivedSchemaIteration.get(msg.schema, -1) + 1
self._receivedSchemaIteration[msg.schema] = schemaIteration
self._subscriptionState.mapSchema(msg.schema, msg.mapping)
self._mappedSchemas.setdefault(msg.schema)[schemaIteration] = msg.mapping
# forward the mapping to any of our channels who need it
for channel in self._unmappedSchemasToChannels.pop(
(msg.schema, schemaIteration), set()
):
channel.sendMessage(
ServerToClient.SchemaMapping(schema=msg.schema, mapping=msg.mapping)
)
return
if msg.matches.SubscriptionData:
self._subscriptionState.handleSubscriptionData(
msg.schema,
msg.typename,
msg.fieldname_and_value,
msg.values,
msg.index_values,
msg.identities,
)
return
if msg.matches.SubscriptionComplete:
channelsToMessageToSend = self._subscriptionState.handleSubscriptionComplete(
msg.schema, msg.typename, msg.fieldname_and_value, msg.tid
)
for channel, messages in channelsToMessageToSend.items():
for msg in messages:
self.handleClientToServerMessage(channel, msg)
return
if msg.matches.Transaction:
self._subscriptionState.handleTransaction(
msg.writes, msg.set_adds, msg.set_removes, msg.transaction_id
)
return
if msg.matches.FlushResponse:
if msg.guid not in self._outgoingFlushGuidToChannelAndFlushGuid:
logging.error("Received unexpected flush guid: %s", msg.guid)
return
channel, guid = self._outgoingFlushGuidToChannelAndFlushGuid.pop(msg.guid)
channel.sendMessage(ServerToClient.FlushResponse(guid=guid))
return
if msg.matches.TransactionResult:
if (
msg.transaction_guid
not in self._outgoingTransactionGuidToChannelAndTransactionGuid
):
logging.error(
"Received unexpected TransactionResult message: %s",
msg.transaction_guid,
)
return
channel, guid = self._outgoingTransactionGuidToChannelAndTransactionGuid.pop(
msg.transaction_guid
)
channel.sendMessage(
ServerToClient.TransactionResult(
transaction_guid=guid, success=msg.success, badKey=msg.badKey
)
)
return
raise Exception("Don't know how to handle ", msg)
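# --- Protocol sketch (editorial addition, not an authoritative spec) ---
# Rough message order for one downstream client, as handled by the methods
# above (the message type names are the ones used in this file; the ordering
# is an editorial summary):
#
#   client -> proxy : Authenticate(token)
#   proxy  -> server: RequestDependentConnectionId(parentId, guid)
#   server -> proxy : DependentConnectionId       -> proxy sends Initialize to client
#   client -> proxy : DefineSchema(name, definition)    (merged and forwarded once)
#   server -> proxy : SchemaMapping                      (relayed to waiting clients)
#   client -> proxy : Subscribe(schema, typename, ...)   (whole type subscribed upstream)
#   server -> proxy : SubscriptionData / SubscriptionComplete
#   proxy  -> client: SubscriptionData / SubscriptionComplete (per SubscriptionKey)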
|
[
"typed_python.Set",
"typed_python.ConstDict",
"uuid.uuid4",
"logging.error",
"threading.RLock",
"typed_python.Tuple",
"typed_python.makeNamedTuple",
"typed_python.NamedTuple",
"typed_python.deserialize",
"typed_python.TupleOf",
"typed_python.ListOf",
"typed_python.OneOf",
"typed_python.Dict",
"logging.getLogger"
] |
[((10939, 10952), 'typed_python.Set', 'Set', (['ObjectId'], {}), '(ObjectId)\n', (10942, 10952), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((28223, 28250), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (28240, 28250), False, 'import logging\n'), ((28480, 28497), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (28495, 28497), False, 'import threading\n'), ((1784, 1806), 'typed_python.Tuple', 'Tuple', (['str', 'IndexValue'], {}), '(str, IndexValue)\n', (1789, 1806), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((4312, 4339), 'typed_python.Dict', 'Dict', (['str', 'SchemaDefinition'], {}), '(str, SchemaDefinition)\n', (4316, 4339), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((4514, 4540), 'typed_python.Dict', 'Dict', (['int', 'FieldDefinition'], {}), '(int, FieldDefinition)\n', (4518, 4540), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((7643, 7728), 'typed_python.makeNamedTuple', 'makeNamedTuple', ([], {'schema': 'subscriptionKey.schema', 'typename': 'subscriptionKey.typename'}), '(schema=subscriptionKey.schema, typename=subscriptionKey.typename\n )\n', (7657, 7728), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((10969, 10982), 'typed_python.Set', 'Set', (['ObjectId'], {}), '(ObjectId)\n', (10972, 10982), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((15617, 15652), 'typed_python.ConstDict', 'ConstDict', (['FieldDefinition', 'FieldId'], {}), '(FieldDefinition, FieldId)\n', (15626, 15652), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((17750, 17798), 'typed_python.makeNamedTuple', 'makeNamedTuple', ([], {'schema': 'schema', 'typename': 'typename'}), '(schema=schema, typename=typename)\n', (17764, 17798), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((20788, 20800), 'typed_python.Set', 'Set', (['FieldId'], {}), '(FieldId)\n', (20791, 20800), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((20828, 20841), 'typed_python.Set', 'Set', (['ObjectId'], {}), '(ObjectId)\n', (20831, 20841), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((21045, 21071), 'typed_python.Set', 'Set', (['ServerToClientChannel'], {}), '(ServerToClientChannel)\n', (21048, 21071), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((28740, 28772), 'typed_python.Dict', 'Dict', (['ServerToClientChannel', 'int'], {}), '(ServerToClientChannel, int)\n', (28744, 28772), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((28814, 28851), 'typed_python.Dict', 'Dict', (['ServerToClientChannel', 'ObjectId'], {}), '(ServerToClientChannel, ObjectId)\n', (28818, 28851), False, 'from typed_python 
import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((29232, 29246), 'typed_python.Dict', 'Dict', (['str', 'int'], {}), '(str, int)\n', (29236, 29246), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((29289, 29303), 'typed_python.Dict', 'Dict', (['str', 'int'], {}), '(str, int)\n', (29293, 29303), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((36302, 36358), 'typed_python.makeNamedTuple', 'makeNamedTuple', ([], {'schema': 'msg.schema', 'typename': 'msg.typename'}), '(schema=msg.schema, typename=msg.typename)\n', (36316, 36358), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((36890, 36946), 'typed_python.makeNamedTuple', 'makeNamedTuple', ([], {'schema': 'msg.schema', 'typename': 'msg.typename'}), '(schema=msg.schema, typename=msg.typename)\n', (36904, 36946), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((3399, 3425), 'typed_python.Set', 'Set', (['ServerToClientChannel'], {}), '(ServerToClientChannel)\n', (3402, 3425), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((3500, 3512), 'typed_python.Set', 'Set', (['FieldId'], {}), '(FieldId)\n', (3503, 3512), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((3574, 3600), 'typed_python.Set', 'Set', (['ServerToClientChannel'], {}), '(ServerToClientChannel)\n', (3577, 3600), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((3675, 3687), 'typed_python.Set', 'Set', (['IndexId'], {}), '(IndexId)\n', (3678, 3687), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((3759, 3772), 'typed_python.Set', 'Set', (['ObjectId'], {}), '(ObjectId)\n', (3762, 3772), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((3830, 3856), 'typed_python.Set', 'Set', (['ServerToClientChannel'], {}), '(ServerToClientChannel)\n', (3833, 3856), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((3938, 3950), 'typed_python.Set', 'Set', (['FieldId'], {}), '(FieldId)\n', (3941, 3950), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((4031, 4043), 'typed_python.Set', 'Set', (['IndexId'], {}), '(IndexId)\n', (4034, 4043), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((4138, 4173), 'typed_python.ConstDict', 'ConstDict', (['FieldDefinition', 'FieldId'], {}), '(FieldDefinition, FieldId)\n', (4147, 4173), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((4666, 4686), 'typed_python.Set', 'Set', (['SubscriptionKey'], {}), '(SubscriptionKey)\n', (4669, 4686), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, 
deserialize\n'), ((4849, 4869), 'typed_python.Set', 'Set', (['SubscriptionKey'], {}), '(SubscriptionKey)\n', (4852, 4869), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((4967, 4989), 'typed_python.ListOf', 'ListOf', (['ClientToServer'], {}), '(ClientToServer)\n', (4973, 4989), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((5110, 5125), 'typed_python.Tuple', 'Tuple', (['str', 'str'], {}), '(str, str)\n', (5115, 5125), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((5400, 5436), 'typed_python.NamedTuple', 'NamedTuple', ([], {'schema': 'str', 'typename': 'str'}), '(schema=str, typename=str)\n', (5410, 5436), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((5524, 5545), 'typed_python.Dict', 'Dict', (['ObjectId', 'bytes'], {}), '(ObjectId, bytes)\n', (5528, 5545), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((5590, 5616), 'typed_python.Dict', 'Dict', (['ObjectId', 'IndexValue'], {}), '(ObjectId, IndexValue)\n', (5594, 5616), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((12380, 12398), 'typed_python.OneOf', 'OneOf', (['None', 'bytes'], {}), '(None, bytes)\n', (12385, 12398), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((13157, 13175), 'typed_python.OneOf', 'OneOf', (['None', 'bytes'], {}), '(None, bytes)\n', (13162, 13175), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((13275, 13298), 'typed_python.OneOf', 'OneOf', (['None', 'IndexValue'], {}), '(None, IndexValue)\n', (13280, 13298), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((14054, 14077), 'typed_python.OneOf', 'OneOf', (['None', 'IndexValue'], {}), '(None, IndexValue)\n', (14059, 14077), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((14182, 14200), 'typed_python.OneOf', 'OneOf', (['None', 'bytes'], {}), '(None, bytes)\n', (14187, 14200), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((14803, 14821), 'typed_python.OneOf', 'OneOf', (['None', 'bytes'], {}), '(None, bytes)\n', (14808, 14821), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((14925, 14943), 'typed_python.OneOf', 'OneOf', (['None', 'bytes'], {}), '(None, bytes)\n', (14930, 14943), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((15544, 15562), 'typed_python.OneOf', 'OneOf', (['None', 'bytes'], {}), '(None, bytes)\n', (15549, 15562), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((17640, 17662), 'typed_python.ListOf', 'ListOf', (['ServerToClient'], {}), '(ServerToClient)\n', (17646, 17662), False, 'from 
typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((20294, 20312), 'typed_python.OneOf', 'OneOf', (['None', 'bytes'], {}), '(None, bytes)\n', (20299, 20312), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((20364, 20382), 'typed_python.OneOf', 'OneOf', (['None', 'bytes'], {}), '(None, bytes)\n', (20369, 20382), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((20551, 20564), 'typed_python.Set', 'Set', (['ObjectId'], {}), '(ObjectId)\n', (20554, 20564), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((20676, 20689), 'typed_python.Set', 'Set', (['ObjectId'], {}), '(ObjectId)\n', (20679, 20689), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((22106, 22119), 'typed_python.Set', 'Set', (['ObjectId'], {}), '(ObjectId)\n', (22109, 22119), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((28959, 28992), 'typed_python.Tuple', 'Tuple', (['ServerToClientChannel', 'str'], {}), '(ServerToClientChannel, str)\n', (28964, 28992), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((29643, 29679), 'typed_python.NamedTuple', 'NamedTuple', ([], {'schema': 'str', 'typename': 'str'}), '(schema=str, typename=str)\n', (29653, 29679), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((29887, 29920), 'typed_python.Tuple', 'Tuple', (['ServerToClientChannel', 'int'], {}), '(ServerToClientChannel, int)\n', (29892, 29920), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((30151, 30184), 'typed_python.Tuple', 'Tuple', (['ServerToClientChannel', 'int'], {}), '(ServerToClientChannel, int)\n', (30156, 30184), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((30292, 30325), 'typed_python.Tuple', 'Tuple', (['ServerToClientChannel', 'int'], {}), '(ServerToClientChannel, int)\n', (30297, 30325), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((33420, 33432), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (33430, 33432), False, 'import uuid\n'), ((38340, 38396), 'typed_python.makeNamedTuple', 'makeNamedTuple', ([], {'schema': 'msg.schema', 'typename': 'msg.typename'}), '(schema=msg.schema, typename=msg.typename)\n', (38354, 38396), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((38490, 38567), 'logging.error', 'logging.error', (['"""Client tried to lazy load for a type we\'re not subscribed to"""'], {}), '("Client tried to lazy load for a type we\'re not subscribed to")\n', (38503, 38567), False, 'import logging\n'), ((41018, 41113), 'logging.error', 'logging.error', (['"""Received unexpected CompleteTransaction message: %s"""', 'msg.transaction_guid'], {}), "('Received unexpected CompleteTransaction message: %s', msg.\n transaction_guid)\n", (41031, 41113), 
False, 'import logging\n'), ((4467, 4481), 'typed_python.Dict', 'Dict', (['str', 'int'], {}), '(str, int)\n', (4471, 4481), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((5143, 5188), 'typed_python.Tuple', 'Tuple', (['ServerToClientChannel', 'SubscriptionKey'], {}), '(ServerToClientChannel, SubscriptionKey)\n', (5148, 5188), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((5685, 5698), 'typed_python.Set', 'Set', (['ObjectId'], {}), '(ObjectId)\n', (5688, 5698), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((20583, 20596), 'typed_python.Set', 'Set', (['ObjectId'], {}), '(ObjectId)\n', (20586, 20596), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((20708, 20721), 'typed_python.Set', 'Set', (['ObjectId'], {}), '(ObjectId)\n', (20711, 20721), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((29151, 29186), 'typed_python.ConstDict', 'ConstDict', (['FieldDefinition', 'FieldId'], {}), '(FieldDefinition, FieldId)\n', (29160, 29186), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((45410, 45471), 'logging.error', 'logging.error', (['"""Received unexpected flush guid: %s"""', 'msg.guid'], {}), "('Received unexpected flush guid: %s', msg.guid)\n", (45423, 45471), False, 'import logging\n'), ((45924, 46017), 'logging.error', 'logging.error', (['"""Received unexpected TransactionResult message: %s"""', 'msg.transaction_guid'], {}), "('Received unexpected TransactionResult message: %s', msg.\n transaction_guid)\n", (45937, 46017), False, 'import logging\n'), ((11207, 11258), 'typed_python.deserialize', 'deserialize', (['ObjectBase', 'key.fieldname_and_value[1]'], {}), '(ObjectBase, key.fieldname_and_value[1])\n', (11218, 11258), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((25724, 25741), 'typed_python.TupleOf', 'TupleOf', (['ObjectId'], {}), '(ObjectId)\n', (25731, 25741), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((25885, 25902), 'typed_python.TupleOf', 'TupleOf', (['ObjectId'], {}), '(ObjectId)\n', (25892, 25902), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((25768, 25785), 'typed_python.TupleOf', 'TupleOf', (['ObjectId'], {}), '(ObjectId)\n', (25775, 25785), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n'), ((25929, 25946), 'typed_python.TupleOf', 'TupleOf', (['ObjectId'], {}), '(ObjectId)\n', (25936, 25946), False, 'from typed_python import OneOf, NamedTuple, Dict, Set, Tuple, ConstDict, makeNamedTuple, TupleOf, ListOf, deserialize\n')]
|
import tempfile
import os
# import pygame
import config
from playsound import playsound
from gtts import gTTS
# from pygame import mixer
class TTS:
# def __init__(self):
# mixer.init()
def playMP3(self, fileName, filePath = config.SOUNDS_DIR, blocking = False):
playsound(os.path.join(filePath, fileName))
# if ".mp3" in fileName:
# mixer.music.load(os.path.join(filePath, fileName))
# mixer.music.play
# else:
# sound = pygame.mixer.Sound(os.path.join(filePath, fileName))
# chan = pygame.mixer.find_channel()
# chan.queue(sound)
# if blocking:
# while mixer.music.get_busy():
# pygame.time.delay(100)
def speak(self, text, showText = True):
if showText:
print(text)
try:
            tts = gTTS(text=text)
            # write the synthesized speech to a named temp file, then close it
            # so the full MP3 is flushed to disk before it is played
            with tempfile.NamedTemporaryFile(mode='wb', suffix='.mp3',
                delete=False) as f:
                (tempPath, tempName) = os.path.split(f.name)
                tts.write_to_fp(f)
            self.playMP3(tempName, tempPath)
            os.remove(os.path.join(tempPath, tempName))
except Exception as e:
print('Unknown Google TTS issue: ' + str(e))
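# --- Usage sketch (editorial addition, not part of the original module) ---
# Assumes network access for gTTS and a working audio output for playsound;
# "chime.mp3" below is a hypothetical file under config.SOUNDS_DIR.
if __name__ == "__main__":
    tts = TTS()
    tts.speak("Text to speech is working")
    # tts.playMP3("chime.mp3")  # uncomment if such a file exists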
|
[
"gtts.gTTS",
"tempfile.NamedTemporaryFile",
"os.path.split",
"os.path.join"
] |
[((340, 372), 'os.path.join', 'os.path.join', (['filePath', 'fileName'], {}), '(filePath, fileName)\n', (352, 372), False, 'import os\n'), ((962, 977), 'gtts.gTTS', 'gTTS', ([], {'text': 'text'}), '(text=text)\n', (966, 977), False, 'from gtts import gTTS\n'), ((998, 1065), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""wb"""', 'suffix': '""".mp3"""', 'delete': '(False)'}), "(mode='wb', suffix='.mp3', delete=False)\n", (1025, 1065), False, 'import tempfile\n'), ((1155, 1176), 'os.path.split', 'os.path.split', (['f.name'], {}), '(f.name)\n', (1168, 1176), False, 'import os\n'), ((1280, 1312), 'os.path.join', 'os.path.join', (['tempPath', 'tempName'], {}), '(tempPath, tempName)\n', (1292, 1312), False, 'import os\n')]
|
import os
import numpy as np
from netCDF4 import Dataset
from compliance_checker.ioos import (
IOOS0_1Check,
IOOS1_1Check,
IOOS1_2_PlatformIDValidator,
IOOS1_2Check,
NamingAuthorityValidator,
)
from compliance_checker.tests import BaseTestCase
from compliance_checker.tests.helpers import MockTimeSeries, MockVariable
from compliance_checker.tests.resources import STATIC_FILES
from compliance_checker.tests.test_cf import get_results
class TestIOOS0_1(BaseTestCase):
"""
Tests for the IOOS Inventory Metadata v0.1
"""
def setUp(self):
# Use the NCEI Gold Standard Point dataset for IOOS checks
self.ds = self.load_dataset(STATIC_FILES["ncei_gold_point_1"])
self.ioos = IOOS0_1Check()
def test_cc_meta(self):
assert self.ioos._cc_spec == "ioos"
assert self.ioos._cc_spec_version == "0.1"
def test_global_attributes(self):
"""
Tests that all global attributes checks are working
"""
        # Create an empty dataset that writes to /dev/null. This acts as a
# temporary netCDF file in-memory that never gets written to disk.
nc_obj = Dataset(os.devnull, "w", diskless=True)
self.addCleanup(nc_obj.close)
results = self.ioos.check_global_attributes(nc_obj)
for result in results:
self.assert_result_is_bad(result)
attrs = [
"acknowledgement",
"publisher_email",
"institution",
"publisher_name",
"Conventions",
]
for attr in attrs:
setattr(nc_obj, attr, "test")
results = self.ioos.check_global_attributes(nc_obj)
for result in results:
self.assert_result_is_good(result)
def test_variable_attributes(self):
"""
Tests that the platform variable attributes check is working
"""
        # Create an empty dataset that writes to /dev/null. This acts as a
# temporary netCDF file in-memory that never gets written to disk.
nc_obj = Dataset(os.devnull, "w", diskless=True)
self.addCleanup(nc_obj.close)
# The dataset needs at least one variable to check that it's missing
# all the required attributes.
nc_obj.createDimension("time", 1)
nc_obj.createVariable("platform", "S1", ())
platform = nc_obj.variables["platform"]
results = self.ioos.check_variable_attributes(nc_obj)
for result in results:
self.assert_result_is_bad(result)
platform.long_name = "platform"
platform.short_name = "platform"
platform.source = "glider"
platform.ioos_name = "urn:ioos:station:glos:leorgn"
platform.wmo_id = "1234"
platform.comment = "test"
results = self.ioos.check_variable_attributes(nc_obj)
for result in results:
self.assert_result_is_good(result)
def test_variable_units(self):
"""
Tests that the variable units test is working
"""
# this check tests that units attribute is present on EVERY variable
        # Create an empty dataset that writes to /dev/null. This acts as a
# temporary netCDF file in-memory that never gets written to disk.
nc_obj = Dataset(os.devnull, "w", diskless=True)
self.addCleanup(nc_obj.close)
# The dataset needs at least one variable to check that it's missing
# all the required attributes.
nc_obj.createDimension("time", 1)
nc_obj.createVariable("sample_var", "d", ("time",))
sample_var = nc_obj.variables["sample_var"]
results = self.ioos.check_variable_units(nc_obj)
self.assert_result_is_bad(results)
sample_var.units = "m"
sample_var.short_name = "sample_var"
results = self.ioos.check_variable_units(nc_obj)
self.assert_result_is_good(results)
def test_altitude_units(self):
"""
Tests that the altitude variable units test is working
"""
results = self.ioos.check_altitude_units(self.ds)
self.assert_result_is_good(results)
# Now test an nc file with a 'z' variable without units
        # Create an empty dataset that writes to /dev/null. This acts as a
# temporary netCDF file in-memory that never gets written to disk.
nc_obj = Dataset(os.devnull, "w", diskless=True)
self.addCleanup(nc_obj.close)
# The dataset needs at least one variable to check that it's missing
# all the required attributes.
nc_obj.createDimension("time", 1)
nc_obj.createVariable("z", "d", ("time",))
z = nc_obj.variables["z"]
z.short_name = "sample_var"
results = self.ioos.check_variable_units(nc_obj)
self.assert_result_is_bad(results)
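# --- Illustrative helper (editorial addition, not part of the original suite) ---
# The tests above repeatedly build a throwaway, purely in-memory netCDF file by
# opening os.devnull with diskless=True. A minimal standalone version of that
# pattern (the helper name and variable are made up for illustration):
def _make_diskless_dataset():
    nc = Dataset(os.devnull, "w", diskless=True)
    nc.createDimension("time", 1)
    sample_var = nc.createVariable("sample_var", "d", ("time",))
    sample_var.units = "m"
    return nc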
class TestIOOS1_1(BaseTestCase):
"""
Tests for the compliance checker implementation of IOOS Metadata Profile
for NetCDF, Version 1.1
"""
def setUp(self):
# Use the IOOS 1_1 dataset for testing
self.ds = self.load_dataset(STATIC_FILES["ioos_gold_1_1"])
self.ioos = IOOS1_1Check()
def test_cc_meta(self):
assert self.ioos._cc_spec == "ioos"
assert self.ioos._cc_spec_version == "1.1"
def test_required_attributes(self):
"""
        Tests that the required attributes test is working properly
"""
results = self.ioos.check_high(self.ds)
for result in results:
self.assert_result_is_good(result)
    def test_recommended_attributes(self):
        """
        Tests that the recommended attributes test is working properly
"""
results = self.ioos.check_recommended(self.ds)
for result in results:
self.assert_result_is_good(result)
def test_bad_platform_variables(self):
"""
Tests that the platform variable attributes check is working
"""
        # Create an empty dataset that writes to /dev/null. This acts as a
# temporary netCDF file in-memory that never gets written to disk.
nc_obj = Dataset(os.devnull, "w", diskless=True)
self.addCleanup(nc_obj.close)
# The dataset needs at least one variable to check that it's missing
# all the required attributes.
nc_obj.createDimension("time", 1)
nc_obj.platform = "platform"
# global attribute 'platform' points to variable that does not exist in dataset
results = self.ioos.check_platform_variables(nc_obj)
for result in results:
self.assert_result_is_bad(result)
def test_good_platform_variables(self):
"""
Tests that the platform variable attributes check is working
"""
results = self.ioos.check_platform_variables(self.ds)
for result in results:
self.assert_result_is_good(result)
def test_bad_geophysical_vars_fill_value(self):
"""
Tests that the geophysical variable _FillValue check is working
"""
        # Create an empty dataset that writes to /dev/null. This acts as a
# temporary netCDF file in-memory that never gets written to disk.
nc_obj = Dataset(os.devnull, "w", diskless=True)
self.addCleanup(nc_obj.close)
# The dataset needs at least one variable to check that it's missing
# all the required attributes.
nc_obj.createDimension("time", 1)
nc_obj.createVariable("sample_var", "d", ("time",))
# Define some variable attributes but don't specify _FillValue
sample_var = nc_obj.variables["sample_var"]
sample_var.units = "m"
sample_var.short_name = "temp"
        # _FillValue is missing, so the check should flag this variable
results = self.ioos.check_geophysical_vars_fill_value(nc_obj)
for result in results:
self.assert_result_is_bad(result)
def test_good_geophysical_vars_fill_value(self):
"""
Tests that the geophysical variable _FillValue check is working
"""
results = self.ioos.check_geophysical_vars_fill_value(self.ds)
for result in results:
self.assert_result_is_good(result)
def test_bad_geophysical_vars_standard_name(self):
"""
        Tests that the geophysical variable standard_name check is working
        """
        # Create an empty dataset that writes to /dev/null. This acts as a
# temporary netCDF file in-memory that never gets written to disk.
nc_obj = Dataset(os.devnull, "w", diskless=True)
self.addCleanup(nc_obj.close)
# The dataset needs at least one variable to check that it's missing
# all the required attributes.
nc_obj.createDimension("time", 1)
nc_obj.createVariable("sample_var", "d", ("time",))
        # Define some variable attributes but don't specify standard_name
sample_var = nc_obj.variables["sample_var"]
sample_var.units = "m"
sample_var.short_name = "temp"
        # standard_name is missing, so the check should flag this variable
results = self.ioos.check_geophysical_vars_standard_name(nc_obj)
for result in results:
self.assert_result_is_bad(result)
def test_good_geophysical_vars_standard_name(self):
"""
        Tests that the geophysical variable standard_name check is working
"""
results = self.ioos.check_geophysical_vars_standard_name(self.ds)
for result in results:
self.assert_result_is_good(result)
def test_bad_units(self):
"""
Tests that the valid units check is working
"""
        # Create an empty dataset that writes to /dev/null. This acts as a
# temporary netCDF file in-memory that never gets written to disk.
nc_obj = Dataset(os.devnull, "w", diskless=True)
self.addCleanup(nc_obj.close)
# The dataset needs at least one variable to check that it's missing
# all the required attributes.
nc_obj.createDimension("time", 1)
nc_obj.createVariable("temperature", "d", ("time",))
        # Define some variable attributes with units the check should reject
sample_var = nc_obj.variables["temperature"]
sample_var.units = "degC" # Not valid units
sample_var.short_name = "temp"
        # the check should fail for this variable
results = self.ioos.check_geophysical_vars_standard_name(nc_obj)
for result in results:
self.assert_result_is_bad(result)
def test_good_units(self):
"""
Tests that the valid units check is working
"""
results = self.ioos.check_units(self.ds)
for result in results:
self.assert_result_is_good(result)
class TestIOOS1_2(BaseTestCase):
"""
Tests for the compliance checker implementation of IOOS Metadata Profile
    for NetCDF, Version 1.2
"""
def setUp(self):
self.ioos = IOOS1_2Check()
def test_check_geophysical_vars_have_attrs(self):
# create geophysical variable
ds = MockTimeSeries() # time, lat, lon, depth
temp = ds.createVariable("temp", np.float64, dimensions=("time",))
# should fail here
results = self.ioos.check_geophysical_vars_have_attrs(ds)
scored, out_of, messages = get_results(results)
self.assertLess(scored, out_of)
# set the necessary attributes
ds = MockTimeSeries(default_fill_value=9999999999.0) # time, lat, lon, depth
temp = ds.createVariable(
"temp", np.float64, fill_value=9999999999.0
) # _FillValue
temp.setncattr("missing_value", 9999999999.0)
temp.setncattr("standard_name", "sea_surface_temperature")
temp.setncattr(
"standard_name_url",
"http://cfconventions.org/Data/cf-standard-names/64/build/cf-standard-name-table.html",
)
temp.setncattr("units", "degree_C")
temp.setncattr("platform", "myPlatform")
results = self.ioos.check_geophysical_vars_have_attrs(ds)
scored, out_of, messages = get_results(results)
self.assertEqual(scored, out_of)
def test_check_geospatial_vars_have_attrs(self):
# create geophysical variable
ds = MockTimeSeries() # time, lat, lon, depth
temp = ds.createVariable("temp", np.float64, dimensions=("time",))
# should fail here
results = self.ioos.check_geospatial_vars_have_attrs(ds)
scored, out_of, messages = get_results(results)
self.assertLess(scored, out_of)
# should pass - default_fill_value sets _FillValue attr
ds = MockTimeSeries(default_fill_value=9999999999.0) # time, lat, lon, depth
ds.variables["time"].setncattr("standard_name", "time")
ds.variables["time"].setncattr(
"standard_name_url",
"http://cfconventions.org/Data/cf-standard-names/64/build/cf-standard-name-table.html",
)
ds.variables["time"].setncattr("units", "hours since 1970-01-01T00:00:00")
ds.variables["time"].setncattr("missing_value", 9999999999.0)
results = self.ioos.check_geospatial_vars_have_attrs(ds)
scored, out_of, messages = get_results(results)
self.assertEqual(scored, out_of)
def test_check_contributor_role_and_vocabulary(self):
ds = MockTimeSeries() # time, lat, lon, depth
# no contributor_role or vocab, fail both
results = self.ioos.check_contributor_role_and_vocabulary(ds)
self.assertFalse(all(r.value for r in results))
# bad contributor_role and vocab
ds.setncattr("contributor_role", "bad")
ds.setncattr("contributor_role_vocabulary", "bad")
results = self.ioos.check_contributor_role_and_vocabulary(ds)
self.assertFalse(all(r.value for r in results))
# good role, bad vocab
ds.setncattr("contributor_role", "contributor")
results = self.ioos.check_contributor_role_and_vocabulary(ds)
self.assertTrue(results[0].value)
self.assertEqual(results[0].msgs, [])
self.assertFalse(results[1].value)
# bad role, good vocab
ds.setncattr("contributor_role", "bad")
ds.setncattr(
"contributor_role_vocabulary",
"http://vocab.nerc.ac.uk/collection/G04/current/",
)
results = self.ioos.check_contributor_role_and_vocabulary(ds)
self.assertFalse(results[0].value)
self.assertTrue(results[1].value)
self.assertEqual(results[1].msgs, [])
# good role, good vocab
ds.setncattr("contributor_role", "contributor")
ds.setncattr(
"contributor_role_vocabulary",
"http://vocab.nerc.ac.uk/collection/G04/current/",
)
results = self.ioos.check_contributor_role_and_vocabulary(ds)
self.assertTrue(results[0].value)
self.assertEqual(results[0].msgs, [])
self.assertTrue(results[1].value)
self.assertEqual(results[1].msgs, [])
ds.setncattr("contributor_role", "resourceProvider")
ds.setncattr(
"contributor_role_vocabulary",
"https://www.ngdc.noaa.gov/wiki/index.php?title=ISO_19115_and_19115-2_CodeList_Dictionaries#CI_RoleCode",
)
results = self.ioos.check_contributor_role_and_vocabulary(ds)
self.assertTrue(results[0].value)
self.assertEqual(results[0].msgs, [])
self.assertTrue(results[1].value)
self.assertEqual(results[1].msgs, [])
def test_check_creator_and_publisher_type(self):
"""
Checks the creator_type and publisher_type global attributes with
the following values:
Empty: Valid, defaults to "person" when not specified, which is
contained in the list of valid values.
Bad values: Invalid, not contained in list of valid values.
Good values: Valid, contained in list.
"""
ds = MockTimeSeries()
# values which are not set/specified default to person, which is valid
result_list = self.ioos.check_creator_and_publisher_type(ds)
self.assertTrue(all(res.value for res in result_list))
# create invalid values for attribute
ds.setncattr("creator_type", "PI")
ds.setncattr("publisher_type", "Funder")
result_list = self.ioos.check_creator_and_publisher_type(ds)
err_regex = (
r"^If specified, \w+_type must be in value list "
r"\(\['group', 'institution', 'person', 'position'\]\)$"
)
for res in result_list:
self.assertFalse(res.value)
self.assertRegex(res.msgs[0], err_regex)
# good values
ds.setncattr("creator_type", "person")
ds.setncattr("publisher_type", "institution")
result_list = self.ioos.check_creator_and_publisher_type(ds)
self.assertTrue(all(res.value for res in result_list))
def test_check_gts_ingest_global(self):
ds = MockTimeSeries() # time, lat, lon, depth
# no gts_ingest_requirements, should pass
result = self.ioos.check_gts_ingest_global(ds)
self.assertTrue(result.value)
self.assertEqual(result.msgs, [])
# passing value
ds.setncattr("gts_ingest", "true")
result = self.ioos.check_gts_ingest_global(ds)
self.assertTrue(result.value)
self.assertEqual(result.msgs, [])
ds.setncattr("gts_ingest", "false")
result = self.ioos.check_gts_ingest_global(ds)
self.assertTrue(result.value)
ds.setncattr("gts_ingest", "notgood")
result = self.ioos.check_gts_ingest_global(ds)
self.assertFalse(result.value)
def test_check_gts_ingest_requirements(self):
ds = MockTimeSeries() # time, lat, lon, depth
# NOTE: this check will always have a "failing" result; see
# https://github.com/ioos/compliance-checker/issues/759#issuecomment-625356938
# and subsequent discussion
# no gts_ingest_requirements, should pass
result = self.ioos.check_gts_ingest_requirements(ds)
self.assertFalse(result.value)
# flag for ingest, no variables flagged - default pass
ds.setncattr("gts_ingest", "true")
result = self.ioos.check_gts_ingest_requirements(ds)
self.assertFalse(result.value)
# give one variable the gts_ingest attribute
# no standard_name or ancillary vars, should fail
ds.variables["time"].setncattr("gts_ingest", "true")
result = self.ioos.check_gts_ingest_requirements(ds)
self.assertFalse(result.value)
# no ancillary vars, should fail
ds.variables["time"].setncattr("gts_ingest", "true")
ds.variables["time"].setncattr("standard_name", "time")
result = self.ioos.check_gts_ingest_requirements(ds)
self.assertFalse(result.value)
self.assertIn(
"The following variables did not qualify for NDBC/GTS Ingest: time\n",
result.msgs,
)
# set ancillary var with bad standard name
tmp = ds.createVariable("tmp", np.byte, ("time",))
tmp.setncattr("standard_name", "bad")
ds.variables["time"].setncattr("ancillary_variables", "tmp")
result = self.ioos.check_gts_ingest_requirements(ds)
self.assertFalse(result.value)
self.assertIn(
"The following variables did not qualify for NDBC/GTS Ingest: time\n",
result.msgs,
)
# good ancillary var standard name, time units are bad
tmp.setncattr("standard_name", "aggregate_quality_flag")
ds.variables["time"].setncattr("units", "bad since bad")
result = self.ioos.check_gts_ingest_requirements(ds)
self.assertFalse(result.value)
self.assertIn(
"The following variables did not qualify for NDBC/GTS Ingest: time\n",
result.msgs,
)
# good ancillary var stdname, good units, pass
tmp.setncattr("standard_name", "aggregate_quality_flag")
ds.variables["time"].setncattr("units", "seconds since 1970-01-01T00:00:00Z")
result = self.ioos.check_gts_ingest_requirements(ds)
self.assertFalse(result.value)
self.assertIn(
"The following variables qualified for NDBC/GTS Ingest: time\n", result.msgs
)
def test_check_instrument_variables(self):
ds = MockTimeSeries() # time, lat, lon, depth
# no instrument variable, should pass
results = self.ioos.check_instrument_variables(ds)
scored, out_of, messages = get_results(results)
self.assertEqual(scored, out_of)
temp = ds.createVariable("temp", np.float64, dimensions=("time",))
temp.setncattr("cf_role", "timeseries")
temp.setncattr("standard_name", "sea_surface_temperature")
temp.setncattr("units", "degree_C")
temp.setncattr("axis", "Y")
temp.setncattr("instrument", "myInstrument")
temp[:] = 45.0
instr = ds.createVariable("myInstrument", np.float64, dimensions=("time",))
# give instrument variable with component
instr.setncattr("component", "someComponent")
results = self.ioos.check_instrument_variables(ds)
scored, out_of, messages = get_results(results)
self.assertEqual(scored, out_of)
# give discriminant
instr.setncattr("discriminant", "someDiscriminant")
results = self.ioos.check_instrument_variables(ds)
scored, out_of, messages = get_results(results)
self.assertEqual(scored, out_of)
# bad component
instr.setncattr("component", 45)
results = self.ioos.check_instrument_variables(ds)
scored, out_of, messages = get_results(results)
self.assertLess(scored, out_of)
def test_check_wmo_platform_code(self):
ds = MockTimeSeries() # time, lat, lon, depth
# no wmo_platform_code, pass
result = self.ioos.check_wmo_platform_code(ds)
self.assertTrue(result.value)
self.assertEqual(result.msgs, [])
# valid code
ds.setncattr("wmo_platform_code", "12345")
result = self.ioos.check_wmo_platform_code(ds)
self.assertTrue(result.value)
# valid code
ds.setncattr("wmo_platform_code", "7654321")
result = self.ioos.check_wmo_platform_code(ds)
self.assertTrue(result.value)
# alphanumeric, valid
ds.setncattr("wmo_platform_code", "abcd1")
result = self.ioos.check_wmo_platform_code(ds)
self.assertTrue(result.value)
# invalid length, fail
ds.setncattr("wmo_platform_code", "123")
result = self.ioos.check_wmo_platform_code(ds)
self.assertFalse(result.value)
# alphanumeric len 7, fail
ds.setncattr("wmo_platform_code", "1a2b3c7")
result = self.ioos.check_wmo_platform_code(ds)
self.assertFalse(result.value)
def test_check_standard_name(self):
ds = MockTimeSeries() # time, lat, lon, depth
# no standard names
results = self.ioos.check_standard_name(ds)
scored, out_of, messages = get_results(results)
self.assertLess(scored, out_of)
# give standard names to all variables
ds.variables["time"].setncattr("standard_name", "time")
ds.variables["lon"].setncattr("standard_name", "longitude")
ds.variables["lat"].setncattr("standard_name", "latitude")
ds.variables["depth"].setncattr("standard_name", "depth")
results = self.ioos.check_standard_name(ds)
scored, out_of, messages = get_results(results)
self.assertEqual(scored, out_of)
# add a QARTOD variable, no standard name - should fail
qr = ds.createVariable("depth_qc", np.byte)
qr.setncattr("flag_meanings", "blah")
results = self.ioos.check_standard_name(ds)
scored, out_of, messages = get_results(results)
self.assertLess(scored, out_of)
# bad standard name
qr.setncattr("standard_name", "blah")
results = self.ioos.check_standard_name(ds)
scored, out_of, messages = get_results(results)
self.assertLess(scored, out_of)
# good standard name
qr.setncattr("standard_name", "spike_test_quality_flag")
results = self.ioos.check_standard_name(ds)
scored, out_of, messages = get_results(results)
self.assertEqual(scored, out_of)
def test_naming_authority_validation(self):
test_attr_name = "naming_authority"
validator = NamingAuthorityValidator()
# check URL - should pass
self.assertTrue(validator.validate(test_attr_name, "https://ioos.us")[0])
# check reverse DNS - should pass
self.assertTrue(validator.validate(test_attr_name, "edu.ucar.unidata")[0])
# email address is neither of the above, so should fail
bad_result = validator.validate(test_attr_name, "<EMAIL>")
self.assertFalse(bad_result[0])
self.assertEqual(
bad_result[1],
[
"naming_authority should either be a URL or a "
'reversed DNS name (e.g "edu.ucar.unidata")'
],
)
def test_platform_id_validation(self):
attn = "platform_id"
attv = "alphaNum3R1C"
v = IOOS1_2_PlatformIDValidator()
self.assertTrue(v.validate(attn, attv)[0])
attv = "alpha"
v = IOOS1_2_PlatformIDValidator()
self.assertTrue(v.validate(attn, attv)[0])
attv = "311123331112"
v = IOOS1_2_PlatformIDValidator()
self.assertTrue(v.validate(attn, attv)[0])
attv = "---fail---"
v = IOOS1_2_PlatformIDValidator()
self.assertFalse(v.validate(attn, attv)[0])
def test_check_platform_cf_role(self):
"""
Check that cf_role inside platform variables only allows certain
values, namely "profile_id", "timeseries_id", or "trajectory_id"
"""
ds = MockTimeSeries()
plat_var = ds.createVariable("platform", np.int8, ())
ds.variables["depth"].platform = "platform"
self.ioos.setup(ds)
results = self.ioos.check_platform_variable_cf_role(ds)
# don't set attribute, should raise error about attribute not
# existing
self.assertEqual(len(results), 1)
score, out_of = results[0].value
self.assertLess(score, out_of)
# set to invalid value
plat_var.setncattr("cf_role", "bad_value")
results = self.ioos.check_platform_variable_cf_role(ds)
        score, out_of = results[0].value
        self.assertLess(score, out_of)
expected_vals = {"profile_id", "timeseries_id", "trajectory_id"}
expect_msg = (
'Platform variable "platform" must have a cf_role attribute '
"with one of the values {}".format(sorted(expected_vals))
)
self.assertEqual(results[0].msgs, [expect_msg])
# set to valid value
plat_var.setncattr("cf_role", "timeseries_id")
results = self.ioos.check_platform_variable_cf_role(ds)
score, out_of = results[0].value
self.assertEqual(score, out_of)
def test_check_platform_global(self):
ds = MockTimeSeries() # time, lat, lon, depth
# no global attr, fail
self.assertFalse(self.ioos.check_platform_global(ds).value)
# bad global attr, fail
ds.setncattr("platform", "bad value")
self.assertFalse(self.ioos.check_platform_global(ds).value)
# another bad value
ds.setncattr("platform", " bad")
self.assertFalse(self.ioos.check_platform_global(ds).value)
# good value
ds.setncattr("platform", "single_string")
res = self.ioos.check_platform_global(ds)
self.assertTrue(res.value)
self.assertEqual(res.msgs, [])
def test_check_single_platform(self):
ds = MockTimeSeries() # time, lat, lon, depth
# no global attr but also no platform variables, should pass
result = self.ioos.check_single_platform(ds)
self.assertTrue(result.value)
self.assertEqual(result.msgs, [])
# give platform global, no variables, fail
ds.setncattr("platform", "buoy")
result = self.ioos.check_single_platform(ds)
self.assertFalse(result.value)
# global platform, one platform variable, pass
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
result = self.ioos.check_single_platform(ds)
self.assertTrue(result.value)
self.assertEqual(result.msgs, [])
# two platform variables, fail
temp2 = ds.createVariable("temp2", "d", ("time"))
temp2.setncattr("platform", "platform_var2")
plat = ds.createVariable("platform_var2", np.byte)
result = self.ioos.check_single_platform(ds)
self.assertFalse(result.value)
# no global attr, one variable, fail
ds = MockTimeSeries() # time, lat, lon, depth
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
result = self.ioos.check_single_platform(ds)
self.assertFalse(result.value)
def test_check_cf_dsg(self):
ds = MockTimeSeries() # time, lat, lon, depth
ds.setncattr("platform", "single_string")
# correct cf_role & featureType, pass
ds.setncattr("featureType", "profile")
ds.createDimension("profile", 1)
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
cf_role_var = ds.createVariable("cf_role_var", np.byte, ("profile",))
cf_role_var.setncattr("cf_role", "timeseries_id")
results = self.ioos.check_cf_dsg(ds)
self.assertTrue(all(r.value for r in results))
self.assertTrue(all(r.msgs == [] for r in results))
# correct featureType, incorrect cf_role var dimension
ds = MockTimeSeries() # time, lat, lon, depth
ds.setncattr("featureType", "trajectoryprofile")
ds.createDimension("trajectory", 2) # should only be 1
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
cf_role_var = ds.createVariable("cf_role_var", np.byte, ("trajectory",))
cf_role_var.setncattr("cf_role", "trajectory_id")
results = self.ioos.check_cf_dsg(ds)
self.assertFalse(results[0].value)
# featureType==timeSeries, cf_role=timeseries_id
ds = MockTimeSeries()
ds.setncattr("featureType", "timeSeries")
ds.createDimension("station", 1)
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
cf_role_var = ds.createVariable("cf_role_var", np.byte, ("station",))
cf_role_var.setncattr("cf_role", "timeseries_id")
results = self.ioos.check_cf_dsg(ds)
# check should pass with no results
self.assertEqual(results, [])
# featureType==timeSeriesProfile, cf_role==timeseries_id, dim 1, pass
ds = MockTimeSeries()
ds.setncattr("featureType", "timeSeriesProfile")
ds.createDimension("station", 1)
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
cf_role_var = ds.createVariable("cf_role_var", np.byte, ("station",))
cf_role_var.setncattr("cf_role", "timeseries_id")
results = self.ioos.check_cf_dsg(ds)
self.assertEqual(results, [])
# featureType==timeSeriesProfile, cf_role==timeseries_id, dim 2, fail
ds = MockTimeSeries()
ds.setncattr("platform", "platform")
ds.setncattr("featureType", "timeSeriesProfile")
ds.createDimension("station", 2)
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
cf_role_var = ds.createVariable("cf_role_var", np.byte, ("station",))
cf_role_var.setncattr("cf_role", "timeseries_id")
results = self.ioos.check_cf_dsg(ds)
self.assertFalse(results[0].value)
# featureType==trajectory, cf_role==trajectory_id, dim 1, pass
ds = MockTimeSeries()
ds.setncattr("featureType", "trajectory")
ds.createDimension("trajectory", 1)
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
cf_role_var = ds.createVariable("cf_role_var", np.byte, ("trajectory",))
cf_role_var.setncattr("cf_role", "trajectory_id")
results = self.ioos.check_cf_dsg(ds)
self.assertEqual(results, [])
# featureType==trajectory, cf_role==trajectory, dim 2, fail
ds = MockTimeSeries()
ds.setncattr("featureType", "trajectory")
ds.createDimension("trajectory", 2)
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
cf_role_var = ds.createVariable("cf_role_var", np.byte, ("trajectory",))
cf_role_var.setncattr("cf_role", "trajectory_id")
results = self.ioos.check_cf_dsg(ds)
self.assertFalse(results[0].value)
# featureType==trajectoryProfile, cf_role==trajectory_id, dim 1, pass
ds = MockTimeSeries()
ds.setncattr("featureType", "trajectoryProfile")
ds.createDimension("trajectoryprof", 1)
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
cf_role_var = ds.createVariable("cf_role_var", np.byte, ("trajectoryprof",))
cf_role_var.setncattr("cf_role", "trajectory_id")
results = self.ioos.check_cf_dsg(ds)
self.assertEqual(results, [])
# featureType==trajectoryProfile, cf_role==trajectory_id, dim 2, fail
ds = MockTimeSeries()
ds.setncattr("featureType", "trajectoryProfile")
ds.createDimension("trajectoryprof", 2)
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
cf_role_var = ds.createVariable("cf_role_var", np.byte, ("trajectoryprof",))
cf_role_var.setncattr("cf_role", "trajectory_id")
results = self.ioos.check_cf_dsg(ds)
self.assertFalse(results[0].value)
# featureType==profile, cf_role==profile_id, dim 1, pass
ds = MockTimeSeries()
ds.setncattr("featureType", "profile")
ds.createDimension("prof", 1)
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
cf_role_var = ds.createVariable("cf_role_var", np.byte, ("prof",))
cf_role_var.setncattr("cf_role", "profile_id")
results = self.ioos.check_cf_dsg(ds)
self.assertEqual(results, [])
# featureType==profile, cf_role==profile_id, dim 2, fail
ds = MockTimeSeries()
ds.setncattr("featureType", "profile")
ds.createDimension("prof", 2)
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
cf_role_var = ds.createVariable("cf_role_var", np.byte, ("prof",))
cf_role_var.setncattr("cf_role", "profile_id")
results = self.ioos.check_cf_dsg(ds)
self.assertFalse(results[0].value)
# featureType==point -- do nothing
ds = MockTimeSeries()
ds.setncattr("featureType", "point")
ds.createDimension("blah", 2)
temp = ds.createVariable("temp", "d", ("time"))
temp.setncattr("platform", "platform_var")
plat = ds.createVariable("platform_var", np.byte)
cf_role_var = ds.createVariable("cf_role_var", np.byte, ("blah",))
cf_role_var.setncattr("cf_role", "profile_id")
results = self.ioos.check_cf_dsg(ds)
self.assertEqual(results, [])
def test_check_platform_vocabulary(self):
ds = MockTimeSeries() # time, lat, lon, depth
ds.setncattr("platform_vocabulary", "http://google.com")
result = self.ioos.check_platform_vocabulary(ds)
self.assertTrue(result.value)
self.assertEqual(result.msgs, [])
ds.setncattr("platform_vocabulary", "bad")
self.assertFalse(self.ioos.check_platform_vocabulary(ds).value)
def test_check_qartod_variables_flags(self):
ds = MockTimeSeries() # time, lat, lon, depth
# no QARTOD variables
results = self.ioos.check_qartod_variables_flags(ds)
scored, out_of, messages = get_results(results)
self.assertEqual(scored, out_of)
# QARTOD variable without flag_values, flag_meanings (fail)
qr = ds.createVariable("depth_qc", np.byte)
qr.setncattr("standard_name", "spike_test_quality_flag")
results = self.ioos.check_qartod_variables_flags(ds)
self.assertTrue(not any(r.value for r in results)) # all False
        # QARTOD variable with flag_values, without flag_meanings
qr.setncattr("flag_values", np.array([0, 1, 2], dtype=np.byte))
results = self.ioos.check_qartod_variables_flags(ds)
self.assertEqual(results[0].value[0], results[0].value[1]) # should pass
self.assertFalse(results[1].value) # still fail
# QARTOD variable with flag meanings, flag_values
qr.setncattr("flag_meanings", "x y z") # alphanumeric, space-separated
results = self.ioos.check_qartod_variables_flags(ds)
self.assertEqual(results[0].value[0], results[0].value[1]) # pass
self.assertEqual(results[1].value[0], results[1].value[1]) # pass
# flag_values array not equal to length of flag_meanings
qr.setncattr("flag_values", np.array([0, 1], dtype=np.byte))
results = self.ioos.check_qartod_variables_flags(ds)
self.assertLess(results[0].value[0], results[0].value[1]) # should fail
self.assertEqual(results[1].value[0], results[1].value[1]) # pass
# flag_values right length, wrong type
qr.setncattr("flag_values", np.array([0, 1, 2], dtype=np.float64))
results = self.ioos.check_qartod_variables_flags(ds)
self.assertLess(results[0].value[0], results[0].value[1]) # should fail
self.assertEqual(results[1].value[0], results[1].value[1]) # pass
def test_check_qartod_variables_references(self):
ds = MockTimeSeries() # time, lat, lon, depth
# no QARTOD variables
results = self.ioos.check_qartod_variables_references(ds)
scored, out_of, messages = get_results(results)
self.assertEqual(scored, out_of)
# QARTOD variable without references (fail)
qr = ds.createVariable("depth_qc", np.byte)
qr.setncattr("flag_meanings", "blah")
qr.setncattr("standard_name", "spike_test_quality_flag")
results = self.ioos.check_qartod_variables_references(ds)
self.assertFalse(all(r.value for r in results))
# QARTOD variable with references (pass)
qr.setncattr("references", "http://services.cormp.org/quality.php")
results = self.ioos.check_qartod_variables_references(ds)
self.assertTrue(all(r.value for r in results))
self.assertEqual(results[0].msgs, []) # only one Result to test
# QARTOD variable with bad references (fail)
qr.setncattr(
"references", r"p9q384ht09q38@@####???????////??//\/\/\/\//\/\74ht"
)
results = self.ioos.check_qartod_variables_references(ds)
self.assertFalse(all(r.value for r in results))
def test_check_ioos_ingest(self):
ds = MockTimeSeries()
# no value, pass
res = self.ioos.check_ioos_ingest(ds)
self.assertTrue(res.value)
self.assertEqual(res.msgs, [])
# value false
ds.setncattr("ioos_ingest", "false")
self.assertTrue(self.ioos.check_ioos_ingest(ds).value)
# value true
ds.setncattr("ioos_ingest", "true")
self.assertTrue(self.ioos.check_ioos_ingest(ds).value)
# case insensitive
ds.setncattr("ioos_ingest", "True")
self.assertTrue(self.ioos.check_ioos_ingest(ds).value)
ds.setncattr("ioos_ingest", "False")
self.assertTrue(self.ioos.check_ioos_ingest(ds).value)
# anything else fails
ds.setncattr("ioos_ingest", "badval")
self.assertFalse(self.ioos.check_ioos_ingest(ds).value)
ds.setncattr("ioos_ingest", 0)
self.assertFalse(self.ioos.check_ioos_ingest(ds).value)
def test_vertical_dimension(self):
# MockTimeSeries has a depth variable, with axis of 'Z', units of 'm',
# and positive = 'down'
nc_obj = MockTimeSeries()
result = self.ioos.check_vertical_coordinates(nc_obj)[0]
self.assertEqual(*result.value)
nc_obj.variables["depth"].positive = "upwards"
result = self.ioos.check_vertical_coordinates(nc_obj)[0]
self.assertNotEqual(*result.value)
nc_obj.variables["depth"].positive = "up"
result = self.ioos.check_vertical_coordinates(nc_obj)[0]
self.assertEqual(*result.value)
# test units
nc_obj.variables["depth"].units = "furlong"
result = self.ioos.check_vertical_coordinates(nc_obj)[0]
expected_msg = (
"depth's units attribute furlong is not equivalent to "
"one of ('meter', 'inch', 'foot', 'yard', "
"'US_survey_foot', 'mile', 'fathom')"
)
self.assertEqual(result.msgs[0], expected_msg)
self.assertNotEqual(*result.value)
accepted_units = (
"meter",
"meters",
"inch",
"foot",
"yard",
"mile",
"miles",
"US_survey_foot",
"US_survey_feet",
"fathom",
"fathoms",
"international_inch",
"international_inches",
"international_foot",
"international_feet",
"international_yard",
"international_yards",
"international_mile",
"international_miles",
"inches",
"in",
"feet",
"ft",
"yd",
"mi",
)
for units in accepted_units:
nc_obj.variables["depth"].units = units
result = self.ioos.check_vertical_coordinates(nc_obj)[0]
self.assertEqual(*result.value)
|
[
"netCDF4.Dataset",
"compliance_checker.ioos.IOOS1_1Check",
"compliance_checker.tests.test_cf.get_results",
"compliance_checker.ioos.IOOS0_1Check",
"compliance_checker.ioos.IOOS1_2_PlatformIDValidator",
"compliance_checker.ioos.IOOS1_2Check",
"compliance_checker.ioos.NamingAuthorityValidator",
"numpy.array",
"compliance_checker.tests.helpers.MockTimeSeries"
] |
[((737, 751), 'compliance_checker.ioos.IOOS0_1Check', 'IOOS0_1Check', ([], {}), '()\n', (749, 751), False, 'from compliance_checker.ioos import IOOS0_1Check, IOOS1_1Check, IOOS1_2_PlatformIDValidator, IOOS1_2Check, NamingAuthorityValidator\n'), ((1166, 1205), 'netCDF4.Dataset', 'Dataset', (['os.devnull', '"""w"""'], {'diskless': '(True)'}), "(os.devnull, 'w', diskless=True)\n", (1173, 1205), False, 'from netCDF4 import Dataset\n'), ((2066, 2105), 'netCDF4.Dataset', 'Dataset', (['os.devnull', '"""w"""'], {'diskless': '(True)'}), "(os.devnull, 'w', diskless=True)\n", (2073, 2105), False, 'from netCDF4 import Dataset\n'), ((3288, 3327), 'netCDF4.Dataset', 'Dataset', (['os.devnull', '"""w"""'], {'diskless': '(True)'}), "(os.devnull, 'w', diskless=True)\n", (3295, 3327), False, 'from netCDF4 import Dataset\n'), ((4375, 4414), 'netCDF4.Dataset', 'Dataset', (['os.devnull', '"""w"""'], {'diskless': '(True)'}), "(os.devnull, 'w', diskless=True)\n", (4382, 4414), False, 'from netCDF4 import Dataset\n'), ((5147, 5161), 'compliance_checker.ioos.IOOS1_1Check', 'IOOS1_1Check', ([], {}), '()\n', (5159, 5161), False, 'from compliance_checker.ioos import IOOS0_1Check, IOOS1_1Check, IOOS1_2_PlatformIDValidator, IOOS1_2Check, NamingAuthorityValidator\n'), ((6114, 6153), 'netCDF4.Dataset', 'Dataset', (['os.devnull', '"""w"""'], {'diskless': '(True)'}), "(os.devnull, 'w', diskless=True)\n", (6121, 6153), False, 'from netCDF4 import Dataset\n'), ((7210, 7249), 'netCDF4.Dataset', 'Dataset', (['os.devnull', '"""w"""'], {'diskless': '(True)'}), "(os.devnull, 'w', diskless=True)\n", (7217, 7249), False, 'from netCDF4 import Dataset\n'), ((8551, 8590), 'netCDF4.Dataset', 'Dataset', (['os.devnull', '"""w"""'], {'diskless': '(True)'}), "(os.devnull, 'w', diskless=True)\n", (8558, 8590), False, 'from netCDF4 import Dataset\n'), ((9859, 9898), 'netCDF4.Dataset', 'Dataset', (['os.devnull', '"""w"""'], {'diskless': '(True)'}), "(os.devnull, 'w', diskless=True)\n", (9866, 9898), False, 'from netCDF4 import Dataset\n'), ((11045, 11059), 'compliance_checker.ioos.IOOS1_2Check', 'IOOS1_2Check', ([], {}), '()\n', (11057, 11059), False, 'from compliance_checker.ioos import IOOS0_1Check, IOOS1_1Check, IOOS1_2_PlatformIDValidator, IOOS1_2Check, NamingAuthorityValidator\n'), ((11167, 11183), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (11181, 11183), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((11413, 11433), 'compliance_checker.tests.test_cf.get_results', 'get_results', (['results'], {}), '(results)\n', (11424, 11433), False, 'from compliance_checker.tests.test_cf import get_results\n'), ((11527, 11574), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {'default_fill_value': '(9999999999.0)'}), '(default_fill_value=9999999999.0)\n', (11541, 11574), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((12197, 12217), 'compliance_checker.tests.test_cf.get_results', 'get_results', (['results'], {}), '(results)\n', (12208, 12217), False, 'from compliance_checker.tests.test_cf import get_results\n'), ((12365, 12381), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (12379, 12381), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((12610, 12630), 'compliance_checker.tests.test_cf.get_results', 'get_results', (['results'], {}), '(results)\n', (12621, 12630), False, 'from compliance_checker.tests.test_cf import 
get_results\n'), ((12749, 12796), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {'default_fill_value': '(9999999999.0)'}), '(default_fill_value=9999999999.0)\n', (12763, 12796), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((13324, 13344), 'compliance_checker.tests.test_cf.get_results', 'get_results', (['results'], {}), '(results)\n', (13335, 13344), False, 'from compliance_checker.tests.test_cf import get_results\n'), ((13458, 13474), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (13472, 13474), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((16070, 16086), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (16084, 16086), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((17106, 17122), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (17120, 17122), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((17880, 17896), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (17894, 17896), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((20546, 20562), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (20560, 20562), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((20729, 20749), 'compliance_checker.tests.test_cf.get_results', 'get_results', (['results'], {}), '(results)\n', (20740, 20749), False, 'from compliance_checker.tests.test_cf import get_results\n'), ((21421, 21441), 'compliance_checker.tests.test_cf.get_results', 'get_results', (['results'], {}), '(results)\n', (21432, 21441), False, 'from compliance_checker.tests.test_cf import get_results\n'), ((21666, 21686), 'compliance_checker.tests.test_cf.get_results', 'get_results', (['results'], {}), '(results)\n', (21677, 21686), False, 'from compliance_checker.tests.test_cf import get_results\n'), ((21888, 21908), 'compliance_checker.tests.test_cf.get_results', 'get_results', (['results'], {}), '(results)\n', (21899, 21908), False, 'from compliance_checker.tests.test_cf import get_results\n'), ((22007, 22023), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (22021, 22023), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((23143, 23159), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (23157, 23159), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((23301, 23321), 'compliance_checker.tests.test_cf.get_results', 'get_results', (['results'], {}), '(results)\n', (23312, 23321), False, 'from compliance_checker.tests.test_cf import get_results\n'), ((23762, 23782), 'compliance_checker.tests.test_cf.get_results', 'get_results', (['results'], {}), '(results)\n', (23773, 23782), False, 'from compliance_checker.tests.test_cf import get_results\n'), ((24074, 24094), 'compliance_checker.tests.test_cf.get_results', 'get_results', (['results'], {}), '(results)\n', (24085, 24094), False, 'from compliance_checker.tests.test_cf import get_results\n'), ((24297, 24317), 'compliance_checker.tests.test_cf.get_results', 'get_results', (['results'], {}), '(results)\n', (24308, 24317), False, 'from compliance_checker.tests.test_cf import get_results\n'), ((24540, 
24560), 'compliance_checker.tests.test_cf.get_results', 'get_results', (['results'], {}), '(results)\n', (24551, 24560), False, 'from compliance_checker.tests.test_cf import get_results\n'), ((24715, 24741), 'compliance_checker.ioos.NamingAuthorityValidator', 'NamingAuthorityValidator', ([], {}), '()\n', (24739, 24741), False, 'from compliance_checker.ioos import IOOS0_1Check, IOOS1_1Check, IOOS1_2_PlatformIDValidator, IOOS1_2Check, NamingAuthorityValidator\n'), ((25486, 25515), 'compliance_checker.ioos.IOOS1_2_PlatformIDValidator', 'IOOS1_2_PlatformIDValidator', ([], {}), '()\n', (25513, 25515), False, 'from compliance_checker.ioos import IOOS0_1Check, IOOS1_1Check, IOOS1_2_PlatformIDValidator, IOOS1_2Check, NamingAuthorityValidator\n'), ((25603, 25632), 'compliance_checker.ioos.IOOS1_2_PlatformIDValidator', 'IOOS1_2_PlatformIDValidator', ([], {}), '()\n', (25630, 25632), False, 'from compliance_checker.ioos import IOOS0_1Check, IOOS1_1Check, IOOS1_2_PlatformIDValidator, IOOS1_2Check, NamingAuthorityValidator\n'), ((25727, 25756), 'compliance_checker.ioos.IOOS1_2_PlatformIDValidator', 'IOOS1_2_PlatformIDValidator', ([], {}), '()\n', (25754, 25756), False, 'from compliance_checker.ioos import IOOS0_1Check, IOOS1_1Check, IOOS1_2_PlatformIDValidator, IOOS1_2Check, NamingAuthorityValidator\n'), ((25849, 25878), 'compliance_checker.ioos.IOOS1_2_PlatformIDValidator', 'IOOS1_2_PlatformIDValidator', ([], {}), '()\n', (25876, 25878), False, 'from compliance_checker.ioos import IOOS0_1Check, IOOS1_1Check, IOOS1_2_PlatformIDValidator, IOOS1_2Check, NamingAuthorityValidator\n'), ((26158, 26174), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (26172, 26174), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((27368, 27384), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (27382, 27384), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((28048, 28064), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (28062, 28064), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((29193, 29209), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (29207, 29209), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((29540, 29556), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (29554, 29556), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((30305, 30321), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (30319, 30321), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((30931, 30947), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (30945, 30947), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((31560, 31576), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (31574, 31576), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((32151, 32167), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (32165, 32167), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((32785, 32801), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), 
'()\n', (32799, 32801), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((33365, 33381), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (33379, 33381), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((33960, 33976), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (33974, 33976), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((34565, 34581), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (34579, 34581), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((35162, 35178), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (35176, 35178), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((35721, 35737), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (35735, 35737), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((36263, 36279), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (36277, 36279), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((36801, 36817), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (36815, 36817), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((37232, 37248), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (37246, 37248), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((37401, 37421), 'compliance_checker.tests.test_cf.get_results', 'get_results', (['results'], {}), '(results)\n', (37412, 37421), False, 'from compliance_checker.tests.test_cf import get_results\n'), ((39233, 39249), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (39247, 39249), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((39407, 39427), 'compliance_checker.tests.test_cf.get_results', 'get_results', (['results'], {}), '(results)\n', (39418, 39427), False, 'from compliance_checker.tests.test_cf import get_results\n'), ((40467, 40483), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (40481, 40483), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((41545, 41561), 'compliance_checker.tests.helpers.MockTimeSeries', 'MockTimeSeries', ([], {}), '()\n', (41559, 41561), False, 'from compliance_checker.tests.helpers import MockTimeSeries, MockVariable\n'), ((37887, 37921), 'numpy.array', 'np.array', (['[0, 1, 2]'], {'dtype': 'np.byte'}), '([0, 1, 2], dtype=np.byte)\n', (37895, 37921), True, 'import numpy as np\n'), ((38575, 38606), 'numpy.array', 'np.array', (['[0, 1]'], {'dtype': 'np.byte'}), '([0, 1], dtype=np.byte)\n', (38583, 38606), True, 'import numpy as np\n'), ((38909, 38946), 'numpy.array', 'np.array', (['[0, 1, 2]'], {'dtype': 'np.float64'}), '([0, 1, 2], dtype=np.float64)\n', (38917, 38946), True, 'import numpy as np\n')]
|
import argparse
import os.path
# https://stackoverflow.com/a/19476216/598057
import sys
main_parser = argparse.ArgumentParser()
main_parser.add_argument(
"input_path", type=str, help="One or more folders with *.sdoc files"
)
main_parser.add_argument(
"--file",
action="store_true",
default=False,
help="Enforce checking that input_path is a file",
)
main_parser.add_argument(
"--dir",
action="store_true",
default=False,
help="Enforce checking that input_path is a directory",
)
main_parser.add_argument(
"--invert",
action="store_true",
default=False,
help="Enforce checking that input_path is a file",
)
args = main_parser.parse_args()
invert: bool = args.invert
if not invert:
if not os.path.exists(args.input_path):
print(
"error: path does not exist: {}".format(args.input_path),
file=sys.stderr,
)
exit(1)
if args.file and not os.path.isfile(args.input_path):
print(
"error: path is not a file: {}".format(args.input_path),
file=sys.stderr,
)
exit(1)
if args.dir and not os.path.isdir(args.input_path):
print(
"error: path is not a directory: {}".format(args.input_path),
file=sys.stderr,
)
exit(1)
else:
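    # --invert mode: the path is required NOT to exist (and, with --file or
    # --dir, not to be a file or a directory respectively)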
if os.path.exists(args.input_path):
print(
"error: expected path to not exist, but it does: {}".format(
args.input_path
),
file=sys.stderr,
)
exit(1)
if args.file and os.path.isfile(args.input_path):
print(
"error: expected path to not exist, but is a file: {}".format(
args.input_path
),
file=sys.stderr,
)
exit(1)
if args.dir and os.path.isdir(args.input_path):
print(
"error: expected path to not exist, but is a directory: {}".format(
args.input_path
),
file=sys.stderr,
)
exit(1)
exit(0)
|
[
"argparse.ArgumentParser"
] |
[((104, 129), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (127, 129), False, 'import argparse\n')]
|
import weakref
class FlyweightMeta(type):
def __new__(mcs, name, parents, dct):
"""
Set up object pool
:param name: class name
:param parents: class parents
:param dct: dict: includes class attributes, class methods,
static methods, etc
:return: new class
"""
dct["pool"] = weakref.WeakValueDictionary()
return super().__new__(mcs, name, parents, dct)
@staticmethod
def _serialize_params(cls, *args, **kwargs):
"""
Serialize input parameters to a key.
Simple implementation is just to serialize it as a string
"""
args_list = list(map(str, args))
args_list.extend([str(kwargs), cls.__name__])
key = "".join(args_list)
return key
def __call__(cls, *args, **kwargs):
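        # Reuse an existing instance keyed by the serialized constructor
        # arguments; the WeakValueDictionary pool lets unused instances be
        # garbage-collected once all outside references are gone.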
key = FlyweightMeta._serialize_params(cls, *args, **kwargs)
pool = getattr(cls, "pool", {})
instance = pool.get(key)
if instance is None:
instance = super().__call__(*args, **kwargs)
pool[key] = instance
return instance
class Card2(metaclass=FlyweightMeta):
def __init__(self, *args, **kwargs):
# print('Init {}: {}'.format(self.__class__, (args, kwargs)))
pass
if __name__ == "__main__":
instances_pool = getattr(Card2, "pool")
cm1 = Card2("10", "h", a=1)
cm2 = Card2("10", "h", a=1)
cm3 = Card2("10", "h", a=2)
assert (cm1 == cm2) and (cm1 != cm3)
assert (cm1 is cm2) and (cm1 is not cm3)
assert len(instances_pool) == 2
del cm1
assert len(instances_pool) == 2
del cm2
assert len(instances_pool) == 1
del cm3
assert len(instances_pool) == 0
|
[
"weakref.WeakValueDictionary"
] |
[((353, 382), 'weakref.WeakValueDictionary', 'weakref.WeakValueDictionary', ([], {}), '()\n', (380, 382), False, 'import weakref\n')]
|
import flatbuffers
import sys
from websocket import create_connection
sys.path.append("../../.") #Append project root directory so importing from schema works
import schema.GetHardwarePool as GetHardwarePool
import schema.GetResult as GetResult
import schema.Message as Message
import schema.Task as Task
import schema.Stage as Stage
binFile = "dataToSend.bin"
ImgFile = "hellgo.png"
targetAgentId = 1
headers = {
"Content-Type": "application/octet-stream",
}
# Builds a two-stage Task message with FlatBuffers and sends it to the hub over a WebSocket
def createAndSendBuffer():
stage_amount = 2
cmd_amount = 1
cmdo = "I'm the first stage"
builder = flatbuffers.Builder(1024)
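    # FlatBuffers are built bottom-up: strings and vectors must be created
    # before the tables (Stage, Task, Message) that reference them.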
cmd = builder.CreateString(cmdo)
Stage.StartCmdListVector(builder, cmd_amount)
builder.PrependUOffsetTRelative(cmd)
cmdVector = builder.EndVector()
Stage.Start(builder)
Stage.AddCmdList(builder, cmdVector)
stage = Stage.End(builder)
cmdo = "I'm the second stage"
cmd = builder.CreateString(cmdo)
Stage.StartCmdListVector(builder, cmd_amount)
builder.PrependUOffsetTRelative(cmd)
cmdVector = builder.EndVector()
Stage.Start(builder)
Stage.AddCmdList(builder, cmdVector)
stage2 = Stage.End(builder)
Task.StartStagesVector(builder, stage_amount)
builder.PrependUOffsetTRelative(stage2)
builder.PrependUOffsetTRelative(stage)
stages = builder.EndVector()
Task.Start(builder)
Task.AddStages(builder, stages)
task = Task.End(builder)
Message.Start(builder)
Message.AddType(builder, 1)
Message.AddTask(builder, task)
message = Message.End(builder)
builder.Finish(message)
buf = builder.Output()
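    # Ship the finished byte buffer to the hub over a raw WebSocket.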
ws = create_connection("ws://localhost:3001")
ws.send_binary(buf)
return 0
# def build_binary_message(_agentId, _cmd, _srcFile):
# fbb = flatbuffers.Builder(1024)
# # create cmd string
# cmd = fbb.CreateString(_cmd)
# # create srcfile byte arr
# with open(_srcFile, "rb") as bin:
# readBytes = bin.read()
# byteVector = fbb.CreateByteVector(readBytes)
# HWFMessage.MessageStart(fbb)
# # agent id is temporary since server doesn't assign tasks yet
# HWFMessage.MessageAddAgentId(fbb, _agentId)
# HWFMessage.MessageAddCmd(fbb, cmd)
# HWFMessage.MessageAddData(fbb, byteVector)
# readyMsg = HWFMessage.MessageEnd(fbb)
# fbb.Finish(readyMsg)
# return fbb.Output()
"""""
# Creates a bin file containing a target agent id and a string.
def CreateBinary(destFile):
fbb = flatbuffers.Builder(1024)
cmd = fbb.CreateString("find / -name secretpasswordsdontlook.txt")
HWFMessage.Start(fbb)
HWFMessage.AddAgentId(fbb, targetAgentId)
HWFMessage.AddCmd(fbb, cmd)
readyMsg = HWFMessage.End(fbb)
fbb.Finish(readyMsg)
buf = fbb.Output()
with open(destFile, "wb") as bin:
bin.write(buf)
#Reads a file, saves its bytes in the vector "Data", then sends them to the hub together with an agent id
def SendBinaryFromSourceFile(srcFile):
with open(srcFile, "rb") as bin:
readBytes = bin.read()
fbb = flatbuffers.Builder(1024)
byteVector = fbb.CreateByteVector(readBytes)
HWFMessage.Start(fbb)
HWFMessage.AddAgentId(fbb, targetAgentId)
HWFMessage.AddData(fbb, byteVector)
readyMsg = HWFMessage.End(fbb)
fbb.Finish(readyMsg)
buf = fbb.Output()
ws = create_connection("ws://localhost:3001")
ws.send_binary(buf)
#Sends a binary file to the hub
def SendBinary(srcFile):
with open(srcFile, "rb") as bin:
buf = bin.read()
ws = create_connection("ws://localhost:3001")
ws.send_binary(buf)
"""
# def send_request(cmd, filename):
# temp_agent_id = 1
# buf = createBuffer()
# #buf = build_binary_message(temp_agent_id, cmd, filename)
# # create bin file from message
# global binFile
# with open(binFile, "wb") as bin:
# bin.write(buf)
# ws = create_connection("ws://localhost:3001")
# ws.send_binary(buf)
if __name__ == "__main__":
createAndSendBuffer()
# CreateBinary(binFile)
# SendBinary(binFile)
# SendBinaryFromSourceFile(ImgFile)
# what cmd command to run?
# what file to send?
#send_request("echo hello world", "hellgo.png")
|
[
"sys.path.append",
"schema.Message.Start",
"schema.Message.AddTask",
"flatbuffers.Builder",
"schema.Stage.End",
"schema.Task.End",
"schema.Task.StartStagesVector",
"schema.Stage.Start",
"schema.Task.Start",
"schema.Message.AddType",
"websocket.create_connection",
"schema.Stage.StartCmdListVector",
"schema.Task.AddStages",
"schema.Message.End",
"schema.Stage.AddCmdList"
] |
[((71, 97), 'sys.path.append', 'sys.path.append', (['"""../../."""'], {}), "('../../.')\n", (86, 97), False, 'import sys\n'), ((661, 686), 'flatbuffers.Builder', 'flatbuffers.Builder', (['(1024)'], {}), '(1024)\n', (680, 686), False, 'import flatbuffers\n'), ((734, 779), 'schema.Stage.StartCmdListVector', 'Stage.StartCmdListVector', (['builder', 'cmd_amount'], {}), '(builder, cmd_amount)\n', (758, 779), True, 'import schema.Stage as Stage\n'), ((861, 881), 'schema.Stage.Start', 'Stage.Start', (['builder'], {}), '(builder)\n', (872, 881), True, 'import schema.Stage as Stage\n'), ((886, 922), 'schema.Stage.AddCmdList', 'Stage.AddCmdList', (['builder', 'cmdVector'], {}), '(builder, cmdVector)\n', (902, 922), True, 'import schema.Stage as Stage\n'), ((935, 953), 'schema.Stage.End', 'Stage.End', (['builder'], {}), '(builder)\n', (944, 953), True, 'import schema.Stage as Stage\n'), ((1033, 1078), 'schema.Stage.StartCmdListVector', 'Stage.StartCmdListVector', (['builder', 'cmd_amount'], {}), '(builder, cmd_amount)\n', (1057, 1078), True, 'import schema.Stage as Stage\n'), ((1160, 1180), 'schema.Stage.Start', 'Stage.Start', (['builder'], {}), '(builder)\n', (1171, 1180), True, 'import schema.Stage as Stage\n'), ((1185, 1221), 'schema.Stage.AddCmdList', 'Stage.AddCmdList', (['builder', 'cmdVector'], {}), '(builder, cmdVector)\n', (1201, 1221), True, 'import schema.Stage as Stage\n'), ((1235, 1253), 'schema.Stage.End', 'Stage.End', (['builder'], {}), '(builder)\n', (1244, 1253), True, 'import schema.Stage as Stage\n'), ((1264, 1309), 'schema.Task.StartStagesVector', 'Task.StartStagesVector', (['builder', 'stage_amount'], {}), '(builder, stage_amount)\n', (1286, 1309), True, 'import schema.Task as Task\n'), ((1435, 1454), 'schema.Task.Start', 'Task.Start', (['builder'], {}), '(builder)\n', (1445, 1454), True, 'import schema.Task as Task\n'), ((1459, 1490), 'schema.Task.AddStages', 'Task.AddStages', (['builder', 'stages'], {}), '(builder, stages)\n', (1473, 1490), True, 'import schema.Task as Task\n'), ((1502, 1519), 'schema.Task.End', 'Task.End', (['builder'], {}), '(builder)\n', (1510, 1519), True, 'import schema.Task as Task\n'), ((1526, 1548), 'schema.Message.Start', 'Message.Start', (['builder'], {}), '(builder)\n', (1539, 1548), True, 'import schema.Message as Message\n'), ((1553, 1580), 'schema.Message.AddType', 'Message.AddType', (['builder', '(1)'], {}), '(builder, 1)\n', (1568, 1580), True, 'import schema.Message as Message\n'), ((1585, 1615), 'schema.Message.AddTask', 'Message.AddTask', (['builder', 'task'], {}), '(builder, task)\n', (1600, 1615), True, 'import schema.Message as Message\n'), ((1630, 1650), 'schema.Message.End', 'Message.End', (['builder'], {}), '(builder)\n', (1641, 1650), True, 'import schema.Message as Message\n'), ((1717, 1757), 'websocket.create_connection', 'create_connection', (['"""ws://localhost:3001"""'], {}), "('ws://localhost:3001')\n", (1734, 1757), False, 'from websocket import create_connection\n')]
|
import multiprocessing as mp
from multiprocessing.sharedctypes import RawArray
from ctypes import c_bool, c_double
import numpy as np
import pandas as pd
def standardize(X):
"""
Standardize each row in X to mean = 0 and SD = 1.
"""
X_m = np.ma.masked_invalid(X)
return ((X.T - X_m.mean(axis=1)) / X_m.std(axis=1)).T.data
mask = None
X_s = None
X = None
k = None
def knn_init(k_, mask_, X_, X_s_):
global k, mask, X_s, X
mask = from_shared(mask_)
X_s = from_shared(X_s_)
X = from_shared(X_)
k = k_
def knn_work(i):
print(i)
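    # Similarity of row i to every row: dot products of the standardized
    # rows, normalized by how many columns are observed in both rows.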
dx = X_s.dot(X_s[i,:]) / ((~ mask) & (~ mask[i,:])).sum(axis=1)
ix = (-dx).argsort()
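    # Impute each missing entry in row i with the mean of that column taken
    # from the k most similar rows that have an observed value there.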
for j in np.isnan(X[i,:]).nonzero()[0]:
v = X[ix,j]
v = v[np.invert(np.isnan(v))]
X[i,j] = v[:k].mean()
def ctype_to_dtype(ctype):
if ctype == c_double:
return np.float64
elif ctype == c_bool:
        return np.bool_
else:
raise Exception
def to_shared(arr, type=c_double):
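    # Copy the array into a shared-memory RawArray (plus dtype and shape) so
    # worker processes started by mp.Pool can rebuild a view of it.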
shared = RawArray(type, arr.flat)
return (shared, ctype_to_dtype(type), arr.shape)
def from_shared(args):
arr, dtype, shape = args
return np.frombuffer(arr, dtype=dtype).reshape(shape)
class KNNImputer(object):
def __init__(self, k=50):
self._k = k
def fit_transform(self, X, axis=0):
assert(axis in (0,1))
if isinstance(X, pd.DataFrame):
X = X.dropna(axis=0, how="all").dropna(axis=1, thresh=self._k)
return pd.DataFrame(
                self.fit_transform(X.values, axis=axis),
index=X.index,
columns=X.columns)
if axis==0:
return self.fit_transform(X.T, axis=1).T
X_s = standardize(X)
mask = np.ma.masked_invalid(X_s).mask
X_s[np.isnan(X_s)] = 0
mask_shared = to_shared(mask, c_bool)
X_shared = to_shared(X)
X_s_shared = to_shared(X_s)
pool = mp.Pool(initializer=knn_init,
initargs=(self._k, mask_shared, X_shared, X_s_shared))
pool.map(knn_work, range(X.shape[0]))
return from_shared(X_shared)
|
[
"numpy.frombuffer",
"multiprocessing.sharedctypes.RawArray",
"numpy.ma.masked_invalid",
"numpy.isnan",
"multiprocessing.Pool"
] |
[((255, 278), 'numpy.ma.masked_invalid', 'np.ma.masked_invalid', (['X'], {}), '(X)\n', (275, 278), True, 'import numpy as np\n'), ((1008, 1032), 'multiprocessing.sharedctypes.RawArray', 'RawArray', (['type', 'arr.flat'], {}), '(type, arr.flat)\n', (1016, 1032), False, 'from multiprocessing.sharedctypes import RawArray\n'), ((1948, 2036), 'multiprocessing.Pool', 'mp.Pool', ([], {'initializer': 'knn_init', 'initargs': '(self._k, mask_shared, X_shared, X_s_shared)'}), '(initializer=knn_init, initargs=(self._k, mask_shared, X_shared,\n X_s_shared))\n', (1955, 2036), True, 'import multiprocessing as mp\n'), ((1150, 1181), 'numpy.frombuffer', 'np.frombuffer', (['arr'], {'dtype': 'dtype'}), '(arr, dtype=dtype)\n', (1163, 1181), True, 'import numpy as np\n'), ((1739, 1764), 'numpy.ma.masked_invalid', 'np.ma.masked_invalid', (['X_s'], {}), '(X_s)\n', (1759, 1764), True, 'import numpy as np\n'), ((1782, 1795), 'numpy.isnan', 'np.isnan', (['X_s'], {}), '(X_s)\n', (1790, 1795), True, 'import numpy as np\n'), ((677, 694), 'numpy.isnan', 'np.isnan', (['X[i, :]'], {}), '(X[i, :])\n', (685, 694), True, 'import numpy as np\n'), ((752, 763), 'numpy.isnan', 'np.isnan', (['v'], {}), '(v)\n', (760, 763), True, 'import numpy as np\n')]
|
import typing
from flask_httpauth import HTTPTokenAuth, HTTPBasicAuth, MultiAuth
class Auth:
def __init__(self):
"""Creates access control class for authentication and authorization."""
self._basic_auth = HTTPBasicAuth()
self._token_auth = HTTPTokenAuth()
self._auth = MultiAuth(self._basic_auth, self._token_auth)
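        # MultiAuth dispatches to Basic or token auth based on the scheme of
        # the request's Authorization header, defaulting to Basic.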
self._resources = {}
def error_handler(self, f: typing.Callable) -> typing.NoReturn:
"""Set error handler for Authentication Errors.
:param f: error handler.
:return: NoReturn
"""
self._token_auth.error_handler(f)
self._basic_auth.error_handler(f)
def verify_password(self, f: typing.Callable) -> typing.Any:
""" Verifies basic password.
:param f: function defining verification process.
:return: Any
"""
return self._basic_auth.verify_password(f)
def verify_token(self, f: typing.Callable) -> typing.Any:
""" Verifies token.
:param f: function defining verification process.
:return: Any
"""
return self._token_auth.verify_token(f)
def login_required(self, f: typing.Callable = None, role: typing.Text = None) -> typing.Any:
""" Identifies as login required for provided function {f}.
:param f: input function.
:param role: user role
:return: func
"""
return self._auth.login_required(f, role)
|
[
"flask_httpauth.HTTPTokenAuth",
"flask_httpauth.HTTPBasicAuth",
"flask_httpauth.MultiAuth"
] |
[((228, 243), 'flask_httpauth.HTTPBasicAuth', 'HTTPBasicAuth', ([], {}), '()\n', (241, 243), False, 'from flask_httpauth import HTTPTokenAuth, HTTPBasicAuth, MultiAuth\n'), ((271, 286), 'flask_httpauth.HTTPTokenAuth', 'HTTPTokenAuth', ([], {}), '()\n', (284, 286), False, 'from flask_httpauth import HTTPTokenAuth, HTTPBasicAuth, MultiAuth\n'), ((308, 353), 'flask_httpauth.MultiAuth', 'MultiAuth', (['self._basic_auth', 'self._token_auth'], {}), '(self._basic_auth, self._token_auth)\n', (317, 353), False, 'from flask_httpauth import HTTPTokenAuth, HTTPBasicAuth, MultiAuth\n')]
|
import subprocess
import sys
def glslify(shader_path, *transforms):
args = ["glslify", shader_path]
if transforms:
args.append("-t")
args.extend(transforms)
return subprocess.check_output(args, encoding=sys.getdefaultencoding())
if __name__ == "__main__":
shader = glslify(sys.argv[1], *sys.argv[2:])
print(shader)
|
[
"sys.getdefaultencoding"
] |
[((233, 257), 'sys.getdefaultencoding', 'sys.getdefaultencoding', ([], {}), '()\n', (255, 257), False, 'import sys\n')]
|
"""Platform for sensor integration."""
from __future__ import annotations
from collections.abc import Callable
from dataclasses import dataclass
from typing import cast
from geocachingapi.models import GeocachingStatus
from homeassistant.components.sensor import SensorEntity, SensorEntityDescription
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.device_registry import DeviceEntryType
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN
from .coordinator import GeocachingDataUpdateCoordinator
@dataclass
class GeocachingRequiredKeysMixin:
"""Mixin for required keys."""
value_fn: Callable[[GeocachingStatus], str | int | None]
@dataclass
class GeocachingSensorEntityDescription(
SensorEntityDescription, GeocachingRequiredKeysMixin
):
"""Define Sensor entity description class."""
SENSORS: tuple[GeocachingSensorEntityDescription, ...] = (
GeocachingSensorEntityDescription(
key="find_count",
name="Total finds",
icon="mdi:notebook-edit-outline",
native_unit_of_measurement="caches",
value_fn=lambda status: status.user.find_count,
),
GeocachingSensorEntityDescription(
key="hide_count",
name="Total hides",
icon="mdi:eye-off-outline",
native_unit_of_measurement="caches",
entity_registry_visible_default=False,
value_fn=lambda status: status.user.hide_count,
),
GeocachingSensorEntityDescription(
key="favorite_points",
name="Favorite points",
icon="mdi:heart-outline",
native_unit_of_measurement="points",
entity_registry_visible_default=False,
value_fn=lambda status: status.user.favorite_points,
),
GeocachingSensorEntityDescription(
key="souvenir_count",
name="Total souvenirs",
icon="mdi:license",
native_unit_of_measurement="souvenirs",
value_fn=lambda status: status.user.souvenir_count,
),
GeocachingSensorEntityDescription(
key="awarded_favorite_points",
name="Awarded favorite points",
icon="mdi:heart",
native_unit_of_measurement="points",
entity_registry_visible_default=False,
value_fn=lambda status: status.user.awarded_favorite_points,
),
)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up a Geocaching sensor entry."""
coordinator = hass.data[DOMAIN][entry.entry_id]
async_add_entities(
GeocachingSensor(coordinator, description) for description in SENSORS
)
class GeocachingSensor(
CoordinatorEntity[GeocachingDataUpdateCoordinator], SensorEntity
):
"""Representation of a Sensor."""
entity_description: GeocachingSensorEntityDescription
def __init__(
self,
coordinator: GeocachingDataUpdateCoordinator,
description: GeocachingSensorEntityDescription,
) -> None:
"""Initialize the Geocaching sensor."""
super().__init__(coordinator)
self.entity_description = description
self._attr_name = (
f"Geocaching {coordinator.data.user.username} {description.name}"
)
self._attr_unique_id = (
f"{coordinator.data.user.reference_code}_{description.key}"
)
self._attr_device_info = DeviceInfo(
name=f"Geocaching {coordinator.data.user.username}",
identifiers={(DOMAIN, cast(str, coordinator.data.user.reference_code))},
entry_type=DeviceEntryType.SERVICE,
manufacturer="Groundspeak, Inc.",
)
@property
def native_value(self) -> str | int | None:
"""Return the state of the sensor."""
return self.entity_description.value_fn(self.coordinator.data)
|
[
"typing.cast"
] |
[((3687, 3734), 'typing.cast', 'cast', (['str', 'coordinator.data.user.reference_code'], {}), '(str, coordinator.data.user.reference_code)\n', (3691, 3734), False, 'from typing import cast\n')]
|
"""
Utilities
@author: <NAME>
"""
import zlib
import sys
from sphinx.util.pycompat import sys_encoding
from bkcharts.stats import stats
def read_file(path):
"""Reads contents of file as bytes."""
with open(path, 'rb') as f:
return f.read()
def write_file(path, data):
"""Writes data bytes to file at given path."""
with open(path, 'wb') as f:
f.write(data)
def find_object(sha1_prefix):
"""
Finds object with the given SHA-1 prefix.
Returns the path to object in object store
Raises a ValueError if there are no objects or multiple objects
"""
if (len(sha1_prefix) < 2):
raise ValueError("Hash prefix must have 2 or more characters")
obj_dir = os.path.join('.git', 'objects', sha1_prefix[:2])
rmng = sha1_prefix[2:]
objects = [name for name in os.listdir(obj_dir) if name.startswith(rmng)]
if not objects:
raise ValueError("Object {} not found".format(sha1_prefix))
if len(objects) >= 2:
raise ValueError("Multiple objects ({}) with prefix {}".format(
len(objects), sha1_prefix))
return os.path.join(obj_dir, objects[0])
def read_object(sha1_prefix):
"""
Reads object with given SHA-1 prefix
    Returns a tuple (object_type, data_bytes).
    Raises ValueError if the object is not found.
"""
from builtins import int
path = find_object(sha1_prefix)
data_full = zlib.decompress(read_file(path))
idx = data_full.index(b'\x00')
header = data_full[:idx]
object_type, size_data = header.decode().split()
size = int(size_data)
data = data_full[idx + 1:]
size_recvd = len(data)
assert size == len(data), 'expected size {} but received {} bytes'.format(
size, size_recvd)
return (object_type, data)
def cat_file(mode, sha1_prefix):
"""
Writes the contents or the info about the object with given SHA-1 prefix to stdout.
Prints raw data bytes if mode is 'commit', 'tree' or 'blob'
Prints size of the object if mode is 'size'
Prints type of the object if mode is 'type'
Prints pretty version of the object if mode is 'pretty'
"""
object_type , data = read_object(sha1_prefix)
if mode in ['commit', 'tree', 'blob']:
if object_type != mode:
raise ValueError('Expected object type {} but received {}'.
format(mode, object_type))
sys.stdout.write(data)
elif mode == 'type':
print (object_type)
elif mode == 'size':
print(len(data))
elif mode == 'pretty':
if object_type in ['commit', 'blob']:
sys_encoding.stdout.write(data)
elif object_type == 'tree':
for mode, path, sha1 in read_tree(data=data):
type_string = 'tree' if stat.S_ISDIR(mode) else 'blob'
else:
assert False, 'Unhandled object type: {}'.format(object_type)
else:
raise ValueError('Unexpected mode type: {}'.format(mode))
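# Editor's note: illustrative usage only, not part of the original script.
# It assumes the interpreter is started inside a repository that has a
# .git/objects store; the hash prefix below is purely hypothetical.
#   cat_file('type', 'ab12ef')    # would print e.g. 'blob'
#   cat_file('pretty', 'ab12ef')  # would print the object's contents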
|
[
"sys.stdout.write",
"sphinx.util.pycompat.sys_encoding.stdout.write",
"builtins.int"
] |
[((1572, 1586), 'builtins.int', 'int', (['size_data'], {}), '(size_data)\n', (1575, 1586), False, 'from builtins import int\n'), ((2400, 2422), 'sys.stdout.write', 'sys.stdout.write', (['data'], {}), '(data)\n', (2416, 2422), False, 'import sys\n'), ((2611, 2642), 'sphinx.util.pycompat.sys_encoding.stdout.write', 'sys_encoding.stdout.write', (['data'], {}), '(data)\n', (2636, 2642), False, 'from sphinx.util.pycompat import sys_encoding\n')]
|
from __future__ import print_function
import torch, PIL.Image, cv2, pickle, sys, argparse
import numpy as np
import openmesh as om
from tqdm import trange
sys.path.append("../src/")
from network import shading_net
import renderer as rd
from utility import subdiv_mesh_x4
from utility import CamPara
from utility import make_trimesh
from utility import flatten_naval
from utility import smpl_detoe
from matplotlib import pyplot as plt
# parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('--num', type = int, required = True,
help = 'data_num')
parser.add_argument('--set', type = str, required = True,
help = 'recon or syn')
opt = parser.parse_args()
assert opt.set in ["recon", "syn"], \
"set must be one of [recon, syn]"
# prepare
data_num = int(opt.num)
model_file = "../demo/pretrained_model/pretrained_shading.pth"
device = torch.device("cuda:0")
net_shading = shading_net().to(device).eval()
net_shading.load_state_dict(torch.load(model_file, map_location='cuda:0'))
renderer = rd.SMPLRenderer(face_path =
"../predef/smpl_faces.npy")
cam_para = CamPara(K = np.array([[1000, 0, 224],
[0, 1000, 224],
[0, 0, 1]]))
with open ('../predef/exempt_vert_list.pkl', 'rb') as fp:
exempt_vert_list = pickle.load(fp)
tr = trange(data_num, desc='Bar desc', leave=True)
for test_num in tr:
# read mesh
mesh = om.read_trimesh("./eval_data/%s_set/pred_save/a_%03d.obj" % \
(opt.set, test_num))
proj_sil = renderer.silhouette(verts = mesh.points())
proj_sil_l = cv2.resize(proj_sil, dsize=(448, 448))
proj_sil_l[proj_sil_l<0.5] = 0
proj_sil_l[proj_sil_l>=0.5] = 1
# load data
src_img = np.array(PIL.Image.open("./eval_data/%s_set/input_img/%03d_img.png"%\
(opt.set, test_num)))
src_img_l = cv2.resize(src_img, dsize=(448, 448))
input_arr = np.rollaxis(src_img_l, 2, 0)
input_arr = np.expand_dims(input_arr, 0)
input_arr = torch.tensor(input_arr).float().to(device)
input_arr = input_arr/255.0
proj_sil_l = np.expand_dims(proj_sil_l, 0)
proj_sil_l = np.expand_dims(proj_sil_l, 0)
proj_sil_l = torch.tensor(proj_sil_l)
proj_sil_l = proj_sil_l.float().to(device)
# predict
pred = net_shading(input_arr, proj_sil_l)
pred_depth = np.array(pred.data.cpu()[0][0])
# pred_depth = np.load('/home/zhangtianyi/github/hmd/eval/eval_data/syn_set/pred_depth/' + '%03d_img.npy'%\
# (test_num))
# pred_depth = pred_depth*5.0
#show_img_arr(src_img)
mesh = flatten_naval(mesh)
# remove toes
mesh = smpl_detoe(mesh)
# subdivide the mesh to x4
subdiv_mesh = subdiv_mesh_x4(mesh)
    # generate boundary buffering mask
sil_img = rd.render_sil(subdiv_mesh)
bound_img = rd.render_bound(subdiv_mesh)
radius = 10
circ_template = np.zeros((radius*2+1, radius*2+1))
for i in range(radius):
cv2.circle(img = circ_template,
center = (radius, radius),
radius = i+2,
color = (radius-i)*0.1,
thickness = 2)
img_size = bound_img.shape
draw_img = np.zeros(img_size, dtype=np.float)
draw_img = np.pad(draw_img, radius, 'edge')
for y in range(img_size[0]):
for x in range(img_size[1]):
if bound_img[y, x] == 0:
continue
win = draw_img[y:y+2*radius+1, x:x+2*radius+1]
win[circ_template>win] = circ_template[circ_template>win]
draw_img[y:y+2*radius+1, x:x+2*radius+1] = win
final_mask = sil_img - draw_img[10:10+img_size[0], 10:10+img_size[1]]
final_mask[sil_img==0] = 0
# apply bias
d_max = np.max(pred_depth[pred_depth!=0])
d_min = np.min(pred_depth[pred_depth!=0])
bias = -(d_max - d_min)/2.
pred_depth = pred_depth + bias
# apply bright scale
weight_map = np.dot(src_img_l[...,:3], [0.299, 0.587, 0.114])
pred_depth = pred_depth * weight_map / 255.
pred_depth = pred_depth * 0.001
pred_depth = pred_depth * final_mask
# plt.imshow(pred_depth)
# plt.show()
# project mesh to depth and merge with depth difference
proj_depth, visi_map = rd.render_depth(subdiv_mesh, require_visi = True)
# get all visible vertex index
verts = subdiv_mesh.points()
faces = subdiv_mesh.face_vertex_indices()
visi_vert_inds = []
for y in range(visi_map.shape[0]):
for x in range(visi_map.shape[1]):
f_ind = visi_map[y, x]
if f_ind >= len(faces):
continue
else:
fv = faces[f_ind]
visi_vert_inds.append(fv[0])
visi_vert_inds.append(fv[1])
visi_vert_inds.append(fv[2])
visi_vert_inds = set(visi_vert_inds)
    # filter out exempt vertices
visi_vert_inds = list(set(visi_vert_inds).difference(exempt_vert_list))
visi_vert_inds_m = []
for i in visi_vert_inds:
xy = cam_para.project(verts[i])
x = int(round(xy[1]))
y = int(round(xy[0]))
if x<0 or y<0 or x>=448 or y>=448:
continue
if np.absolute(proj_depth[x, y] - verts[i,2])<0.01:
visi_vert_inds_m.append(i)
for i in visi_vert_inds_m:
xy = cam_para.project(verts[i])
x = int(round(xy[1]))
y = int(round(xy[0]))
depth = proj_depth[x, y] + pred_depth[x, y]
#print(depth, verts[i])
if depth>8.:
continue
verts[i][2] = depth
deformed_mesh = make_trimesh(verts, faces)
om.write_mesh("./eval_data/%s_set/pred_save/s_%03d.obj" % \
(opt.set, test_num), deformed_mesh)
|
[
"numpy.absolute",
"network.shading_net",
"argparse.ArgumentParser",
"pickle.load",
"utility.subdiv_mesh_x4",
"torch.device",
"sys.path.append",
"numpy.pad",
"torch.load",
"utility.flatten_naval",
"utility.smpl_detoe",
"numpy.max",
"utility.make_trimesh",
"numpy.rollaxis",
"cv2.resize",
"openmesh.write_mesh",
"renderer.SMPLRenderer",
"cv2.circle",
"tqdm.trange",
"numpy.min",
"numpy.dot",
"openmesh.read_trimesh",
"renderer.render_bound",
"renderer.render_sil",
"numpy.zeros",
"numpy.expand_dims",
"renderer.render_depth",
"numpy.array",
"torch.tensor"
] |
[((155, 181), 'sys.path.append', 'sys.path.append', (['"""../src/"""'], {}), "('../src/')\n", (170, 181), False, 'import torch, PIL.Image, cv2, pickle, sys, argparse\n'), ((463, 488), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (486, 488), False, 'import torch, PIL.Image, cv2, pickle, sys, argparse\n'), ((902, 924), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (914, 924), False, 'import torch, PIL.Image, cv2, pickle, sys, argparse\n'), ((1057, 1110), 'renderer.SMPLRenderer', 'rd.SMPLRenderer', ([], {'face_path': '"""../predef/smpl_faces.npy"""'}), "(face_path='../predef/smpl_faces.npy')\n", (1072, 1110), True, 'import renderer as rd\n'), ((1388, 1433), 'tqdm.trange', 'trange', (['data_num'], {'desc': '"""Bar desc"""', 'leave': '(True)'}), "(data_num, desc='Bar desc', leave=True)\n", (1394, 1433), False, 'from tqdm import trange\n'), ((999, 1044), 'torch.load', 'torch.load', (['model_file'], {'map_location': '"""cuda:0"""'}), "(model_file, map_location='cuda:0')\n", (1009, 1044), False, 'import torch, PIL.Image, cv2, pickle, sys, argparse\n'), ((1366, 1381), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (1377, 1381), False, 'import torch, PIL.Image, cv2, pickle, sys, argparse\n'), ((1477, 1562), 'openmesh.read_trimesh', 'om.read_trimesh', (["('./eval_data/%s_set/pred_save/a_%03d.obj' % (opt.set, test_num))"], {}), "('./eval_data/%s_set/pred_save/a_%03d.obj' % (opt.set, test_num)\n )\n", (1492, 1562), True, 'import openmesh as om\n'), ((1672, 1710), 'cv2.resize', 'cv2.resize', (['proj_sil'], {'dsize': '(448, 448)'}), '(proj_sil, dsize=(448, 448))\n', (1682, 1710), False, 'import torch, PIL.Image, cv2, pickle, sys, argparse\n'), ((1963, 2000), 'cv2.resize', 'cv2.resize', (['src_img'], {'dsize': '(448, 448)'}), '(src_img, dsize=(448, 448))\n', (1973, 2000), False, 'import torch, PIL.Image, cv2, pickle, sys, argparse\n'), ((2017, 2045), 'numpy.rollaxis', 'np.rollaxis', (['src_img_l', '(2)', '(0)'], {}), '(src_img_l, 2, 0)\n', (2028, 2045), True, 'import numpy as np\n'), ((2062, 2090), 'numpy.expand_dims', 'np.expand_dims', (['input_arr', '(0)'], {}), '(input_arr, 0)\n', (2076, 2090), True, 'import numpy as np\n'), ((2200, 2229), 'numpy.expand_dims', 'np.expand_dims', (['proj_sil_l', '(0)'], {}), '(proj_sil_l, 0)\n', (2214, 2229), True, 'import numpy as np\n'), ((2247, 2276), 'numpy.expand_dims', 'np.expand_dims', (['proj_sil_l', '(0)'], {}), '(proj_sil_l, 0)\n', (2261, 2276), True, 'import numpy as np\n'), ((2294, 2318), 'torch.tensor', 'torch.tensor', (['proj_sil_l'], {}), '(proj_sil_l)\n', (2306, 2318), False, 'import torch, PIL.Image, cv2, pickle, sys, argparse\n'), ((2699, 2718), 'utility.flatten_naval', 'flatten_naval', (['mesh'], {}), '(mesh)\n', (2712, 2718), False, 'from utility import flatten_naval\n'), ((2749, 2765), 'utility.smpl_detoe', 'smpl_detoe', (['mesh'], {}), '(mesh)\n', (2759, 2765), False, 'from utility import smpl_detoe\n'), ((2816, 2836), 'utility.subdiv_mesh_x4', 'subdiv_mesh_x4', (['mesh'], {}), '(mesh)\n', (2830, 2836), False, 'from utility import subdiv_mesh_x4\n'), ((2890, 2916), 'renderer.render_sil', 'rd.render_sil', (['subdiv_mesh'], {}), '(subdiv_mesh)\n', (2903, 2916), True, 'import renderer as rd\n'), ((2933, 2961), 'renderer.render_bound', 'rd.render_bound', (['subdiv_mesh'], {}), '(subdiv_mesh)\n', (2948, 2961), True, 'import renderer as rd\n'), ((2999, 3041), 'numpy.zeros', 'np.zeros', (['(radius * 2 + 1, radius * 2 + 1)'], {}), '((radius * 2 + 1, radius * 2 + 1))\n', (3007, 3041), True, 'import 
numpy as np\n'), ((3313, 3347), 'numpy.zeros', 'np.zeros', (['img_size'], {'dtype': 'np.float'}), '(img_size, dtype=np.float)\n', (3321, 3347), True, 'import numpy as np\n'), ((3363, 3395), 'numpy.pad', 'np.pad', (['draw_img', 'radius', '"""edge"""'], {}), "(draw_img, radius, 'edge')\n", (3369, 3395), True, 'import numpy as np\n'), ((3860, 3895), 'numpy.max', 'np.max', (['pred_depth[pred_depth != 0]'], {}), '(pred_depth[pred_depth != 0])\n', (3866, 3895), True, 'import numpy as np\n'), ((3906, 3941), 'numpy.min', 'np.min', (['pred_depth[pred_depth != 0]'], {}), '(pred_depth[pred_depth != 0])\n', (3912, 3941), True, 'import numpy as np\n'), ((4053, 4102), 'numpy.dot', 'np.dot', (['src_img_l[..., :3]', '[0.299, 0.587, 0.114]'], {}), '(src_img_l[..., :3], [0.299, 0.587, 0.114])\n', (4059, 4102), True, 'import numpy as np\n'), ((4371, 4418), 'renderer.render_depth', 'rd.render_depth', (['subdiv_mesh'], {'require_visi': '(True)'}), '(subdiv_mesh, require_visi=True)\n', (4386, 4418), True, 'import renderer as rd\n'), ((5701, 5727), 'utility.make_trimesh', 'make_trimesh', (['verts', 'faces'], {}), '(verts, faces)\n', (5713, 5727), False, 'from utility import make_trimesh\n'), ((5732, 5829), 'openmesh.write_mesh', 'om.write_mesh', (["('./eval_data/%s_set/pred_save/s_%03d.obj' % (opt.set, test_num))", 'deformed_mesh'], {}), "('./eval_data/%s_set/pred_save/s_%03d.obj' % (opt.set,\n test_num), deformed_mesh)\n", (5745, 5829), True, 'import openmesh as om\n'), ((1164, 1217), 'numpy.array', 'np.array', (['[[1000, 0, 224], [0, 1000, 224], [0, 0, 1]]'], {}), '([[1000, 0, 224], [0, 1000, 224], [0, 0, 1]])\n', (1172, 1217), True, 'import numpy as np\n'), ((3070, 3182), 'cv2.circle', 'cv2.circle', ([], {'img': 'circ_template', 'center': '(radius, radius)', 'radius': '(i + 2)', 'color': '((radius - i) * 0.1)', 'thickness': '(2)'}), '(img=circ_template, center=(radius, radius), radius=i + 2, color=\n (radius - i) * 0.1, thickness=2)\n', (3080, 3182), False, 'import torch, PIL.Image, cv2, pickle, sys, argparse\n'), ((5306, 5349), 'numpy.absolute', 'np.absolute', (['(proj_depth[x, y] - verts[i, 2])'], {}), '(proj_depth[x, y] - verts[i, 2])\n', (5317, 5349), True, 'import numpy as np\n'), ((939, 952), 'network.shading_net', 'shading_net', ([], {}), '()\n', (950, 952), False, 'from network import shading_net\n'), ((2107, 2130), 'torch.tensor', 'torch.tensor', (['input_arr'], {}), '(input_arr)\n', (2119, 2130), False, 'import torch, PIL.Image, cv2, pickle, sys, argparse\n')]
|
from discord.ext import commands
import discord
import functions as fnc
from cogs.help import get_help_skill
class Skill(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
    # Fires when the Cog has been loaded
async def on_ready(self):
print("Load Skill module...")
@commands.command()
async def skill(self, ctx, *args):
if len(args) == 0 or args[0] == "":
await ctx.send("調べたいレアスキルの名前を入力してね!")
return
search = args[0]
if args[0] == "-help":
embed = get_help_skill()
await ctx.send(embed=embed)
return
sql = "SELECT name, skill, effect FROM characters\
LEFT OUTER JOIN skills\
ON characters.skill_cd = skills.id\
WHERE skill_cd = (SELECT id FROM skills WHERE skill LIKE %s);"
result = fnc.select_sql_with_param_fetch(sql, ("%" + str(search) + "%",))
if result is None:
await ctx.send("そのレアスキルはまだ未実装か、名前が間違ってるよ!")
return
results = fnc.select_sql_with_param_fetchall(sql, ("%" + str(search) + "%",))
members = ""
for i in range(len(results)):
members += results[i][0] + " / "
embed = discord.Embed(
title=results[0][1], description=results[0][2], color=0x7289DA
)
embed.set_footer(text="スキル保有者: " + members)
await ctx.send(embed=embed)
def setup(bot):
bot.add_cog(Skill(bot))
|
[
"cogs.help.get_help_skill",
"discord.ext.commands.command",
"discord.Embed",
"discord.ext.commands.Cog.listener"
] |
[((196, 219), 'discord.ext.commands.Cog.listener', 'commands.Cog.listener', ([], {}), '()\n', (217, 219), False, 'from discord.ext import commands\n'), ((315, 333), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (331, 333), False, 'from discord.ext import commands\n'), ((1255, 1331), 'discord.Embed', 'discord.Embed', ([], {'title': 'results[0][1]', 'description': 'results[0][2]', 'color': '(7506394)'}), '(title=results[0][1], description=results[0][2], color=7506394)\n', (1268, 1331), False, 'import discord\n'), ((562, 578), 'cogs.help.get_help_skill', 'get_help_skill', ([], {}), '()\n', (576, 578), False, 'from cogs.help import get_help_skill\n')]
|
'''
Created on 17.07.2011
@author: kca
'''
from ..collections import OrderedDict
import futile
class LRUCache(OrderedDict):
max_items = 100
def __init__(self, max_items = None, threadsafe = None, *args, **kw):
super(LRUCache, self).__init__(*args, **kw)
if max_items is not None:
if max_items <= 0:
raise ValueError(max_items)
self.max_items = max_items
if threadsafe is None:
threadsafe = futile.THREADSAFE
if threadsafe:
from threading import RLock
self.__lock = RLock()
else:
self.__lock = None
self.__getitem__ = self._getitem
self.__setitem__ = self._setitem
def __getitem__(self, k):
if self.__lock is None:
return self._getitem(k)
with self.__lock:
return self._getitem(k)
def get(self, k, default = None):
try:
return self[k]
except KeyError:
return default
def _getitem(self, k):
v = super(LRUCache, self).__getitem__(k)
del self[k]
super(LRUCache, self).__setitem__(k, v)
return v
def __iter__(self):
for k in tuple(super(LRUCache, self).__iter__()):
yield k
def __setitem__(self, k, v):
if self.__lock is None:
return self._setitem(k, v)
with self.__lock:
self._setitem(k, v)
def _setitem(self, k, v):
super(LRUCache, self).__setitem__(k, v)
if len(self) > self.max_items:
self.popitem(False)
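# --- Illustrative usage sketch added by the editor; not part of the original module. ---
# It demonstrates the eviction order: after the lookup refreshes "a", inserting a
# fourth item evicts the least recently used key, which is "b".
if __name__ == "__main__":
    cache = LRUCache(max_items=3, threadsafe=False)
    cache["a"], cache["b"], cache["c"] = 1, 2, 3
    _ = cache["a"]      # touching "a" makes it the most recently used entry
    cache["d"] = 4      # exceeds max_items, so "b" is evicted
    print(list(cache))  # expected: ['c', 'a', 'd']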
|
[
"threading.RLock"
] |
[((507, 514), 'threading.RLock', 'RLock', ([], {}), '()\n', (512, 514), False, 'from threading import RLock\n')]
|
import ovito
print("Hello, this is OVITO %i.%i.%i" % ovito.version)
# Import OVITO modules.
from ovito.io import *
from ovito.modifiers import *
from ovito.data import *
from collections import Counter
# Import standard Python and NumPy modules.
import sys
import numpy
import os
from ovito.pipeline import StaticSource, Pipeline
from ovito.io.ase import ase_to_ovito
from ase.atoms import Atoms
from ase.db import connect
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
import numpy as np
import matplotlib.pyplot as plt
import itertools
##################
# run with
# conda activate ovito
# ~/apps/ovito-3.0.0-dev284-x86_64/bin/ovitos benchmarking_ovito.py
####################
def read_ase_db(db_path):
    """From the path to an ASE database file, return a list of the ASE Atoms objects contained in it.
.. codeauthor:: <NAME> <<EMAIL>>
"""
db = connect(db_path)
ase_list = []
for idx_db in range(len(db)):
atoms = db.get_atoms(selection=idx_db + 1, add_additional_information=True)
# put info from atoms.info['data'] back at their original place (atoms.info)
# this is because when an ASE atoms object is saved into the SQLite database,
# ASE does not save automatically atoms.info but instead to
# atoms.info are saved in atoms.info['data']
if 'data' in atoms.info.keys():
atoms.info = atoms.info['data']
ase_list.append(atoms)
return ase_list
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
plt.show()
#filepath = '/home/ziletti/Documents/calc_nomadml/rot_inv_3d/structures_for_paper/four_grains/four_grains_poly.xyz'
#node = import_file(filepath, columns=["Particle Type", "Position.X", "Position.Y", "Position.Z"])
ase_db_dataset_dir = '/home/ziletti/Documents/calc_nomadml/rot_inv_3d/db_ase'
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_pristine' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-0.1%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-0.2%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-0.6%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-1%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-2%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-4%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-5%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-8%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-10%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-12%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-20%' + '.db')
ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-30%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-50%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_vacancies-1%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_vacancies-2%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_vacancies-5%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_vacancies-10%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_vacancies-20%' + '.db')
# ase_db = os.path.join(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_vacancies-50%' + '.db')
ase_atoms_list = read_ase_db(db_path=ase_db)
y_pred = []
y_true = []
atom_classes_list = []
for idx, atoms in enumerate(ase_atoms_list):
if idx % 1000 == 0:
print(idx)
# if str(atoms.info['target']) == '227':
if str(atoms.info['target']) == '227' or str(atoms.info['target']) == '221':
pass
# if False:
# pass
else:
# atoms = atoms*(2, 2, 2)
data = ase_to_ovito(atoms)
node = Pipeline(source=StaticSource(data=data))
# node.modifiers.append(CommonNeighborAnalysisModifier(mode=CommonNeighborAnalysisModifier.Mode.FixedCutoff))
# node.modifiers.append(CommonNeighborAnalysisModifier(mode=CommonNeighborAnalysisModifier.Mode.AdaptiveCutoff))
node.modifiers.append(AcklandJonesModifier())
# node.modifiers.append(BondAngleAnalysisModifier())
# node.modifiers.append(PolyhedralTemplateMatchingModifier(rmsd_cutoff=0.0))
# Let OVITO's data pipeline do the heavy work.
node.compute()
# A two-dimensional array containing the three CNA indices
# computed for each bond in the system.
atom_classes = list(node.output.particle_properties['Structure Type'].array)
#AcklandJonesModifier.Type.OTHER(0)
#AcklandJonesModifier.Type.FCC(1)
#AcklandJonesModifier.Type.HCP(2)
#AcklandJonesModifier.Type.BCC(3)
#AcklandJonesModifier.Type.ICO(4)
# CommonNeighborAnalysisModifier.Type.OTHER(0)
# CommonNeighborAnalysisModifier.Type.FCC(1)
# CommonNeighborAnalysisModifier.Type.HCP(2)
# CommonNeighborAnalysisModifier.Type.BCC(3)
# CommonNeighborAnalysisModifier.Type.ICO(4)
#
classes = dict(ack_jones=['None', '225', '194', '229', 'Ic'], cna=['None', '225', '194', '229', 'Ic'],
ptm=['None', '225', '194', '229', 'Ic', '221', '227', '227'],
baa=['None', '225', '194', '229', 'Ic'])
# ovito 3.0.0
# Type.OTHER(0)
# PolyhedralTemplateMatchingModifier.Type.FCC(1)
# PolyhedralTemplateMatchingModifier.Type.HCP(2)
# PolyhedralTemplateMatchingModifier.Type.BCC(3)
# PolyhedralTemplateMatchingModifier.Type.ICO(4)
# PolyhedralTemplateMatchingModifier.Type.SC(5)
# PolyhedralTemplateMatchingModifier.Type.CUBIC_DIAMOND(6)
# PolyhedralTemplateMatchingModifier.Type.HEX_DIAMOND(7)
y_pred_i = [classes['cna'][item] for item in atom_classes]
#y_pred_acna = [acna_classes[item] for item in y_pred]
# y_pred_baa = [baa_classes[item] for item in y_pred]
#print(y_pred_this)
#atoms = atoms * (2, 2, 2)
atom_class_true = [str(atoms.info['target'])] * len(atoms)
y_true.extend(atom_class_true)
y_pred.extend(y_pred_i)
atom_classes_list.extend(atom_classes)
print(len(y_true))
print('y_true', Counter(y_true))
print('y_pred', Counter(y_pred))
#print(Counter(y_true), Counter(y_pred))
print('Accuracy: {}'.format(accuracy_score(y_true, y_pred)))
cnf_matrix = confusion_matrix(y_true, y_pred)
np.set_printoptions(precision=4)
print(cnf_matrix)
# y_pred Counter({'194': 583828, '229': 116999, '225': 115152, 'None': 968})
ack_jones_classes = ['194', '229', '225', 'None']
# plot_confusion_matrix(cnf_matrix, classes=ack_jones_classes,
# normalize=False, title='Confusion matrix, without normalization')
# Loop over particles and print their CNA indices.
#for idx_particle, particle_index in enumerate(range(node.output.number_of_particles)):
#pass
# Print particle index (1-based).
#sys.stdout.write("%i " % (particle_index + 1))
#outname = 'BondAngleAnalysis.counts.'
#print(node.output.particle_properties['Structure Type'].array[idx_particle])
# print(y_pred[idx_particle])
# Create local list with CNA indices of the bonds of the current particle.
#bond_index_list = list(bond_enumerator.bonds_of_particle(particle_index))
#local_cna_indices = cna_indices[bond_index_list]
# Count how often each type of CNA triplet occurred.
#unique_triplets, triplet_counts = row_histogram(local_cna_indices)
# Print list of triplets with their respective counts.
#for triplet, count in zip(unique_triplets, triplet_counts):
# sys.stdout.write("%s:%i " % (triplet, count))
# End of particle line
#sys.stdout.write("\n")
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.tight_layout",
"numpy.set_printoptions",
"matplotlib.pyplot.show",
"ovito.pipeline.StaticSource",
"matplotlib.pyplot.imshow",
"sklearn.metrics.accuracy_score",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.ylabel",
"collections.Counter",
"matplotlib.pyplot.colorbar",
"ovito.io.ase.ase_to_ovito",
"ase.db.connect",
"sklearn.metrics.confusion_matrix",
"matplotlib.pyplot.xticks",
"os.path.join",
"matplotlib.pyplot.xlabel"
] |
[((4035, 4120), 'os.path.join', 'os.path.join', (['ase_db_dataset_dir', "('hcp-sc-fcc-diam-bcc_displacement-30%' + '.db')"], {}), "(ase_db_dataset_dir, 'hcp-sc-fcc-diam-bcc_displacement-30%' + '.db'\n )\n", (4047, 4120), False, 'import os\n'), ((7813, 7845), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (7829, 7845), False, 'from sklearn.metrics import confusion_matrix\n'), ((7846, 7878), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(4)'}), '(precision=4)\n', (7865, 7878), True, 'import numpy as np\n'), ((904, 920), 'ase.db.connect', 'connect', (['db_path'], {}), '(db_path)\n', (911, 920), False, 'from ase.db import connect\n'), ((2018, 2068), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cm'], {'interpolation': '"""nearest"""', 'cmap': 'cmap'}), "(cm, interpolation='nearest', cmap=cmap)\n", (2028, 2068), True, 'import matplotlib.pyplot as plt\n'), ((2073, 2089), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2082, 2089), True, 'import matplotlib.pyplot as plt\n'), ((2094, 2108), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2106, 2108), True, 'import matplotlib.pyplot as plt\n'), ((2154, 2198), 'matplotlib.pyplot.xticks', 'plt.xticks', (['tick_marks', 'classes'], {'rotation': '(45)'}), '(tick_marks, classes, rotation=45)\n', (2164, 2198), True, 'import matplotlib.pyplot as plt\n'), ((2203, 2234), 'matplotlib.pyplot.yticks', 'plt.yticks', (['tick_marks', 'classes'], {}), '(tick_marks, classes)\n', (2213, 2234), True, 'import matplotlib.pyplot as plt\n'), ((2540, 2564), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True label"""'], {}), "('True label')\n", (2550, 2564), True, 'import matplotlib.pyplot as plt\n'), ((2569, 2598), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {}), "('Predicted label')\n", (2579, 2598), True, 'import matplotlib.pyplot as plt\n'), ((2603, 2621), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2619, 2621), True, 'import matplotlib.pyplot as plt\n'), ((2626, 2636), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2634, 2636), True, 'import matplotlib.pyplot as plt\n'), ((7645, 7660), 'collections.Counter', 'Counter', (['y_true'], {}), '(y_true)\n', (7652, 7660), False, 'from collections import Counter\n'), ((7678, 7693), 'collections.Counter', 'Counter', (['y_pred'], {}), '(y_pred)\n', (7685, 7693), False, 'from collections import Counter\n'), ((5154, 5173), 'ovito.io.ase.ase_to_ovito', 'ase_to_ovito', (['atoms'], {}), '(atoms)\n', (5166, 5173), False, 'from ovito.io.ase import ase_to_ovito\n'), ((7767, 7797), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (7781, 7797), False, 'from sklearn.metrics import accuracy_score\n'), ((5205, 5228), 'ovito.pipeline.StaticSource', 'StaticSource', ([], {'data': 'data'}), '(data=data)\n', (5217, 5228), False, 'from ovito.pipeline import StaticSource, Pipeline\n')]
|
from esp8266_i2c_lcd import I2cLcd
from machine import I2C
from machine import Pin
# i2c = I2C(scl=Pin(22),sda=Pin(21),freq=100000)
# lcd = I2cLcd(i2c, 0x27, 2, 16)
# lcd.clear()
# lcd.putstr('Uncle Engineer\nMicroPython')
i2c = I2C(scl=Pin(22),sda=Pin(21),freq=100000)
lcd = I2cLcd(i2c, 0x27, 2, 16)
import utime as time
text = (' '*16)+ 'Uncle Engineer MicroPython'
count = 0
counttext = len(text)
while True:
lcd.clear()
print(text[count:16+count])
lcd.putstr(text[count:16+count])
time.sleep(0.5)
count += 1
if count > counttext:
count = 0
|
[
"esp8266_i2c_lcd.I2cLcd",
"utime.sleep",
"machine.Pin"
] |
[((286, 308), 'esp8266_i2c_lcd.I2cLcd', 'I2cLcd', (['i2c', '(39)', '(2)', '(16)'], {}), '(i2c, 39, 2, 16)\n', (292, 308), False, 'from esp8266_i2c_lcd import I2cLcd\n'), ((522, 537), 'utime.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (532, 537), True, 'import utime as time\n'), ((246, 253), 'machine.Pin', 'Pin', (['(22)'], {}), '(22)\n', (249, 253), False, 'from machine import Pin\n'), ((258, 265), 'machine.Pin', 'Pin', (['(21)'], {}), '(21)\n', (261, 265), False, 'from machine import Pin\n')]
|
import unittest
from ms_deisotope.data_source import memory
from ms_deisotope.test.common import datafile
from ms_deisotope.data_source import infer_type
scan_ids = [
"controllerType=0 controllerNumber=1 scan=10014",
"controllerType=0 controllerNumber=1 scan=10015",
"controllerType=0 controllerNumber=1 scan=10016"
]
class TestMemoryScanSource(unittest.TestCase):
path = datafile("three_test_scans.mzML")
@property
def source_reader(self):
return infer_type.MSFileLoader(self.path)
@property
def prepare_source(self):
source = self.source_reader
loader = memory.MemoryScanLoader.build(source)
return loader
def test_iteration(self):
g = iter(scan_ids)
bunch = next(self.prepare_source)
assert bunch.precursor.id == next(g)
for product in bunch.products:
assert product.id == next(g)
def test_source_file_name_none(self):
source = self.prepare_source
assert source.source_file_name is None
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"ms_deisotope.data_source.memory.MemoryScanLoader.build",
"ms_deisotope.test.common.datafile",
"ms_deisotope.data_source.infer_type.MSFileLoader"
] |
[((392, 425), 'ms_deisotope.test.common.datafile', 'datafile', (['"""three_test_scans.mzML"""'], {}), "('three_test_scans.mzML')\n", (400, 425), False, 'from ms_deisotope.test.common import datafile\n'), ((1063, 1078), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1076, 1078), False, 'import unittest\n'), ((485, 519), 'ms_deisotope.data_source.infer_type.MSFileLoader', 'infer_type.MSFileLoader', (['self.path'], {}), '(self.path)\n', (508, 519), False, 'from ms_deisotope.data_source import infer_type\n'), ((618, 655), 'ms_deisotope.data_source.memory.MemoryScanLoader.build', 'memory.MemoryScanLoader.build', (['source'], {}), '(source)\n', (647, 655), False, 'from ms_deisotope.data_source import memory\n')]
|
import subprocess as sbp
import sys
import os
import numpy as np
import numpy.linalg as la
import pandas as pd
import time
import math
from ast import literal_eval
from pdb import set_trace as pst
'''
decfreq01: The original optimization, with the negative frequency as the first mode.
decfreq02: Move the atoms along the direction of the negative frequency, then obtain the normal positive frequencies.
decfreq03: Extracted from decfreq02's last optimized geometry.
All indices start with 0!
The carbon count should be even (twice an odd number); otherwise cellsnumber needs to be checked.
'''
#=========================prefix setup part==============================#
#filesname_FC2fchk = 'decfreq02'
filesname_FC3fchk = 'C14H30Freq'
filesname_FC2fchk = 'C34H70Freq'
#filesname_FC2fchk = 'C34H70HFixed'
#filesname_com = 'decfreq03cart'
filesname_FC3com = 'C14H30Freq'
filesname_FC2com ='C34H70Freq'
filesname_FC3csv = 'FC3_C14AnaHess.csv'
#++++++++++++++constant setting++++++++++++++++++++++++++++++++++++++
meconstant = 1822.888486
Ang_bohr = 1.8897259886
au_cm = 4.359743E-18/(1.660538E-27 * 0.5292E-10 * 0.5292E-10/meconstant)#hatree/(amu*bohr*bohr) it transfer to SI unit
len_a = 2.567381*Ang_bohr #The average length of cell(transfer to bohr)
massau = [12.0107*meconstant,1.00794*meconstant] #From NIST database
#XXX K depends on how many cells are used; here the FC only takes 1 neighbor cell on each side, so 3 cells in total
#klist= np.linspace(0,1,K//2+1)#XXX here still use 0-1 but later should times pi/len_a when using
#FC4klist = np.linspace(0,2,K4+1)[:K4//2+1]#XXX here still use 0-1 but later should times pi/len_a when using
#XXX: plus cellsnumber and endidx
#........................... 2nd
#XXX these global variables can be mutated from local scope, but not rebound without a global declaration.
FC2Coef_kp = {}
#............................3rd
FC2atomsname= []
#coordA = []
cal_method = '#p freq B3YLP/6-31G(d)'
#atomcharge = 0
#atommult = 1
'''
H13 H14
\/
C4-- C1 -- C2 -- C3
/\
H11 H12
'''
#===============================HARMONIC PART===============================#
def harmFreq_per_k():
for i in range(len(FC2klist)):
getCoef_w_perk(i)
print("The w (omg) in a.u is :\n")
print(w_omgkpcm[0])
#XXX the following is to check Coeficient is right
#eigvaltest = np.zeros((len(FC2klist),P),dtype = np.complex_)
#for _p in range(P):
# for kidx in range(len(FC2klist)):
# for kappa in range(P):
# atom1 = 3*(cellsnumber[0][kappa//3] - 1) + kappa%3
# for gamma in range(P):
# for midx in range(-endidx,endidx + 1):
# atom2 = 3*(cellsnumber[midx][gamma//3] - 1) + gamma%3
# eigvaltest[kidx][_p] += FC2[getidx(atom1,atom2)] * Coef_kp[kidx][kappa][_p] * Coef_kp[kidx][gamma][_p].conjugate()* math.e**(- 1j * midx * klistFC2[kidx] * math.pi) / (math.sqrt(massau[int(kappa>5)]*massau[int(gamma>5)]))
#print(w_omgkp)
#print(eigvaltest)
#For now I just calculate the nearby cells.
#Fuvk is 18*18 for the atoms in the first cell, but the force constants are stored as 96*96 in packed lower-triangular form.
#XXX: u and v are the u-th and v-th Cartesian coordinates!!!
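# Editor's note (added for clarity): in the notation used here, the loop in
# getCoef_w_perk builds the mass-weighted dynamical matrix
#     D_uv(k) = sum_m F_{u(0), v(m)} * exp(-1j * k * m * pi) / sqrt(M_u * M_v)
# and its eigenvalues are omega^2 for each phonon branch at wavevector k.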
def getCoef_w_perk(kidx,Fcc):
kk = FC2klist[kidx] + 0.1
Fuvk = np.zeros((P,P),dtype = np.complex_)
#XXX: m is just -1 0 1 for decane
for u in range(P):
atom1 = 3*(FC2cellsnumber[0][u//3] - 1) + u%3
for v in range(P):
eachterm = 0.0
for midx in range(-FC2endidx,FC2endidx+1):
# F u(0)v(m) :
# Cell[m] [v//3] give us the atoms number in FC matrix XXX:which started with 1!
# atom2 is the nth coordinates of each atoms XXX: which started with 0!
atom2 = 3*(FC2cellsnumber[midx][v//3] - 1) + v%3
# transfer to k space
eachterm += Fcc[getidx(atom1,atom2)]* math.e ** (-1j * kk * midx*math.pi)#/(math.sqrt(massau[int(u>5)]*massau[int(v>5)]))
#eachterm += Fcc[atom1,atom2]* math.e ** (-1j * kk * midx * len_a)
# mass weighted : if u and v is > 5 so it is not Carbon's coordinates
Fuvk[u][v] = eachterm /(math.sqrt(massau[int(u>5)]*massau[int(v>5)]))
    eigval, eigvector = la.eigh(Fuvk)  # Hermitian matrix, so the eigenvalues are real
#print(eigval)
for i in range(P):
w_omgkp[kidx][i] = math.sqrt(abs(eigval[i]))
w_omgkpcm[kidx][i] = math.sqrt(abs(eigval[i]*au_cm))/(2.99792458E10 * 2 * math.pi)
print(w_omgkpcm[kidx])
    FC2Coef_kp[kidx] = eigvector.conjugate()  # eigvector is a P*P matrix (P = branch count = 3 * number of atoms in the cell)
return eigvector, Fuvk ,eigval
#df = pd.DataFrame(w_omgkpcm)
#df.to_csv('./w_omgkpcmNorm.csv')
def cleanFC2():
#XXX My way
#test = []
#u = 1
#atom1 = 3*(FC2cellsnumber[0][u//3] - 1) + u%3
#v = 3
#for midx in range(-FC2endidx,FC2endidx+1):
# atom2 = 3*(FC2cellsnumber[midx][v//3] - 1) + v%3
# test.append(FC2[getidx(atom1,atom2)])
#print(test)
#print(w)
#XXX Sode way
#mass weighted first
FCinput = FC2.copy()
for u in range(P):
atom1 = 3*(FC2cellsnumber[0][u//3] - 1) + u%3
for v in range(P):
for midx in range(-FC2endidx,FC2endidx+1):
atom2 = 3*(FC2cellsnumber[midx][v//3] - 1) + v%3
FCinput[getidx(atom1,atom2)] = FC2[getidx(atom1,atom2)]/(math.sqrt(massau[int(u>5)]*massau[int(v>5)]))
L,D0,k = getCoef_w_perk(0,FCinput.copy())
print(L[:,0])
print(L[:,1])
print(L[:,2])
print(L[:,3])
#I = np.eye(P)
#L1 = np.outer(L[:,0],L[:,0])
#L2 = np.outer(L[:,1],L[:,1])
#L3 = np.outer(L[:,2],L[:,2])
#L4 = np.outer(L[:,3],L[:,3])
##Pp = (I - L1@L1)@(I - L2@L2)@(I - L3@L3)@(I - L4@L4)
#Pp = (I - L4@L4)
#corrct = (Pp@D0@Pp - D0)/(15)
##print(corrct.shape)
#FC2new = np.zeros(FC2.shape,dtype = np.complex_)
#for u in range(P):
# atom1 = 3*(FC2cellsnumber[0][u//3] - 1) + u%3
# for v in range(P):
# for midx in range(-FC2endidx,FC2endidx+1):
# atom2 = 3*(FC2cellsnumber[midx][v//3] - 1) + v%3
# FC2new[getidx(atom1,atom2)] = FCinput[getidx(atom1,atom2)] + corrct[u,v]
#FCinput = FC2new.copy()
#L,D0,k = getCoef_w_perk(1,FCinput.copy())
#return Fnew
#XXX:Works really well! Check!
#def C14harmonicFreqCheck():
# eigvaltestOriginFC3 = np.zeros((len(FC3klist),P),dtype = np.complex_)
# for kk in range(len(FC3klist)):
# Fuvk = np.zeros((P,P),dtype = np.complex_)
# #XXX: m is just -1 0 1 for decane
# #Carbon 1
# for kappa in range(P):
# atom1 = 3*(FC3cellsnumber[0][kappa//3] - 1) + kappa%3
# for gamma in range(P):
# eachterm = 0.0
# for midx in range(-FC3endidx,FC3endidx+1):
# # F u(0)v(m) :
# # Cell[m] [v//3] give us the atoms number in FC matrix XXX:which started with 1!
# # atom2 is the nth coordinates of each atoms XXX: which started with 0!
# atom2 = 3*(FC3cellsnumber[midx][gamma//3] - 1) + gamma%3
# # transfer to k space
# eachterm += FC3FC2[getidx(atom1,atom2)]* math.e ** (-1j * klistFC3[kk] * midx * math.pi)
# # mass weighted : if u and v is > 5 so it is not Carbon's coordinates
# Fuvk[kappa][gamma] = eachterm /(math.sqrt(massau[int(kappa>5)]*massau[int(gamma>5)]))
# eigval, eigvector = la.eigh(Fuvk)#hermition matrix to get real eigenvalue
# for i in range(P):
# eigvaltestOriginFC3[kk][i] = math.sqrt(abs(eigval[i]*au_cm))/(2.99792458E10 * 2 * math.pi)
# print(eigvaltestOriginFC3)
# eigvaltestFC3 = np.zeros((len(FC3klist),P),dtype = np.complex_)
# for _p in range(P):
# for kidx in range(len(FC3klist)):
# for kappa in range(P):
# atom1 = 3*(FC3cellsnumber[0][kappa//3] - 1) + kappa%3
# for gamma in range(P):
# for midx in range(-FC3endidx,FC3endidx + 1):
# atom2 = 3*(FC3cellsnumber[midx][gamma//3] - 1) + gamma%3
# eigvaltestFC3[kidx][_p] += FC3FC2[getidx(atom1,atom2)] * FC2Coef_kp[3*kidx][kappa][_p] * FC2Coef_kp[3*kidx][gamma][_p].conjugate()* math.e**(- 1j * midx * FC2klist[3* kidx] * math.pi) / (math.sqrt(massau[int(kappa>5)]*massau[int(gamma>5)]))
# eigvaltestFC3[kidx][_p] = math.sqrt(abs(eigvaltestFC3[kidx][_p] * au_cm))/(2.99792458E10 * 2 * math.pi)
# print(eigvaltestFC3)
#===============================ANHARM PART=============================#
#read in the csv file for force constant directly.
#TODO:Finish the code for polyethylene (already have FC)
#TODO:- readin FC - transfer FC to k space - diagrams - find root - last step.
"""
FC3 is stored in csv file need to read in
"""
#===============================HELPER FUNCTION========================#
"""
#helper function to read in the fchk FC2, store it in an array, and return a copy
"""
def readFC2(filename):
for fname in os.listdir('.'):
if fname == filename + '.fchk':
with open(fname) as f:
search = f.readlines()
for fcidx in range(len(search)):
eachline = search[fcidx].split()
if eachline and eachline[0] == "Cartesian" and eachline[1] == "Force":
fcnum = int(eachline[5])
break
tempFC2 = [0]*fcnum
i = 0
plus = int(fcnum%5==0)
for itr in range(fcidx+1, fcidx+int(fcnum)//5+2- plus):
for ele in search[itr].split():
tempFC2[i] = float(ele)
i+=1
return tempFC2
"""
#get idx of FCs
"""
def getidx(*args):#XXX:started with 0!
output = list(args)
if len(args)==2:
output.sort()
return int(output[1]*(output[1]+1)/2 + output[0])
elif len(output) == 3:
output.sort()
return str(output[0]) + '_' + str(output[1]) + '_' + str(output[2])
elif len(output) == 4:
output.sort()
return str(output[0]) + '_' + str(output[1]) + '_' + str(output[2]) +'_' + str(output[3])
sys.exit("wrong input for idx()")
return 0
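# Editor's note (added example): getidx packs a symmetric index pair into the
# usual lower-triangular offset; e.g. getidx(2, 5) == 5*6//2 + 2 == 17, which
# addresses element F[5][2] of the full Cartesian force-constant matrix.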
"""
#cells setting
#return a numpy array of the index of the cell atoms
"""
def cellsetting():
##totalnum = len(FC2atomsname)
##assert (totalnum-2)%3 == 0
###eg carbon_num is 10
FC2carbon_num = int((len(FC2atomsname)-2)/3)
###eg numcellused is 3
FC2numcellused = int((FC2carbon_num-4)/2)
##assert (carbon_num-4)%2 == 0
global FC2cellsnumber
FC2cellsnumber = np.zeros((FC2numcellused,6))#XXX:we use EVEN number of carbon here!!! and cut off the end 4 carbons
FC2cellsnumber = FC2cellsnumber.astype(int)
FC2cellsnumber[:2,:2] = np.array([[1,2],[3,5]])
FC2cellsnumber[FC2numcellused//2 + 1,:2] = np.array([FC2carbon_num - 4,FC2carbon_num - 6])
for i in range(FC2numcellused):
if i > 1 and i < FC2numcellused//2 + 1:
FC2cellsnumber[i,:2] = FC2cellsnumber[i-1,:2] + 4
elif i > FC2numcellused//2 + 1 :
FC2cellsnumber[i,:2] = FC2cellsnumber[i-1,:2] - 4
for j in range(1,3):
FC2cellsnumber[i,2*j] = 2*(FC2cellsnumber[i,j-1]-1) + FC2carbon_num +1
FC2cellsnumber[i,2*j+1] = 2*(FC2cellsnumber[i,j-1]-1) + FC2carbon_num +2
FC2cellused = len(FC2cellsnumber)#XXX should be odd
global FC2endidx
FC2endidx = FC2cellused//2# if cellused is 3 then endidx is 3//2 = 1 so the range is (-1, 2)
print("For FC2 number of cells used is", FC2numcellused,"and the endidx is", FC2endidx)
print(FC2cellsnumber)
'''
#get atoms name, charge, multi num, coordA(actually no use here)
'''
def init_para():
with open(filesname_FC2com + ".com") as f:
read = f.readlines()
for idx in range(len(read)):
eachline = read[idx].split()
if eachline and eachline[0] == "calculation":
break
idx += 3 #move from the title section to coordinates part
while read[idx]!= '\n':
eachline = read[idx].split()
FC2atomsname.append(eachline[0])
#for cdidx in range(1, len(eachline)):
#coordA.append(float(eachline[cdidx]))
idx+=1
print("The number of FC2 atoms is",len(FC2atomsname))
    # read in the FC2 of the object
global FC2
FC2 = np.array(readFC2(filesname_FC2fchk))
global K
K = 15
global K2
K2 = 15 # number of cells harmonic
K3 = 5 # number of cells FC3
K4 = 3 # number of cells FC4
N = 6
global P
P = 3*N #branch number of normal modes in first BZ
global FC2klist
FC2klist = np.linspace(0,2,K2+1)[:K2//2+1]#XXX here still use 0-1 but later should times pi/len_a when using
#FC3klist = np.linspace(0,2,K3+1)[:K3//2+1]#XXX here still use 0-1 but later should times pi/len_a when using
#global FC3FC2
#FC3FC2 = readFC2(filesname_FC3fchk)
global w_omgkp
global w_omgkpcm
w_omgkp = np.zeros((len(FC2klist),P))#We just store the half BZ plus zero's w_omg since they are symmetric
w_omgkpcm = np.zeros((len(FC2klist),P))
#===================================TEST PART ==============================#
t1 = time.time()
init_para()
cellsetting()
#cleanFC2()
L,D0,k = getCoef_w_perk(0,FC2)
#do mass-weighted back
#print(L.real)
for i in range(4):
temp = L[i,:].real.copy()
print(temp)
#for a in range(len(temp)):
# temp[a] *= (math.sqrt(massau[int(a>5)]*massau[int(i>5)]))
#print("cellsnumber is ,",FC2cellsnumber)#,FC3cellsnumber)
print(time.time()-t1)
#testpart(0)
|
[
"numpy.zeros",
"time.time",
"numpy.linalg.eigh",
"numpy.array",
"numpy.linspace",
"os.listdir",
"sys.exit"
] |
[((13279, 13290), 'time.time', 'time.time', ([], {}), '()\n', (13288, 13290), False, 'import time\n'), ((3268, 3303), 'numpy.zeros', 'np.zeros', (['(P, P)'], {'dtype': 'np.complex_'}), '((P, P), dtype=np.complex_)\n', (3276, 3303), True, 'import numpy as np\n'), ((4268, 4281), 'numpy.linalg.eigh', 'la.eigh', (['Fuvk'], {}), '(Fuvk)\n', (4275, 4281), True, 'import numpy.linalg as la\n'), ((9183, 9198), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (9193, 9198), False, 'import os\n'), ((10224, 10257), 'sys.exit', 'sys.exit', (['"""wrong input for idx()"""'], {}), "('wrong input for idx()')\n", (10232, 10257), False, 'import sys\n'), ((10672, 10701), 'numpy.zeros', 'np.zeros', (['(FC2numcellused, 6)'], {}), '((FC2numcellused, 6))\n', (10680, 10701), True, 'import numpy as np\n'), ((10849, 10875), 'numpy.array', 'np.array', (['[[1, 2], [3, 5]]'], {}), '([[1, 2], [3, 5]])\n', (10857, 10875), True, 'import numpy as np\n'), ((10920, 10968), 'numpy.array', 'np.array', (['[FC2carbon_num - 4, FC2carbon_num - 6]'], {}), '([FC2carbon_num - 4, FC2carbon_num - 6])\n', (10928, 10968), True, 'import numpy as np\n'), ((12725, 12750), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', '(K2 + 1)'], {}), '(0, 2, K2 + 1)\n', (12736, 12750), True, 'import numpy as np\n'), ((13629, 13640), 'time.time', 'time.time', ([], {}), '()\n', (13638, 13640), False, 'import time\n')]
|
#!/usr/bin/env python
"""This module supplies a function that can generate custom sequences of
optimization passes for arbitrary programs.
This module provides an implementation of a hill climber algorithm presented by
Kulkarni in his paper "Evaluating Heuristic Optimization Phase Order Search
Algorithms" (published 2007). The algorithm is used to generate a custom
optimization sequence for an arbitrary application. The resulting sequence
is a list of flags that can be set by the LLVM opt tool. The generated
sequence is meant to be a good flag combination that increases the amount of
code that can be detected by Polly.
"""
import random
import multiprocessing
import logging
import polyjit.experiments.sequences.polly_stats as polly_stats
__author__ = "<NAME>"
__credits__ = ["<NAME>"]
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
# Default values
DEFAULT_PASS_SPACE = ['-basicaa', '-mem2reg']
DEFAULT_SEQ_LENGTH = 10
DEFAULT_ITERATIONS = 100
print_debug = False
def create_random_sequence(pass_space, seq_length):
"""Creates a random sequence.
This methods generates a sequence by randomly picking available passes.
Args:
pass_space (list[string]): contains the available passes.
seq_length (int): the length the sequence should have.
Returns:
list: the created sequence.
"""
sequence = []
for _ in range(seq_length):
sequence.append(random.choice(pass_space))
return sequence
def calculate_fitness_value(sequence, seq_to_fitness, key, program):
"""Calculates the fitness value of the provided sequence.
This method calculates the fitness of the sequence by using the number
of regions that are no valid SCoPs if this sequence is used for
preoptimization before Polly's SCoP detection.
Args:
sequence (list[string]): the sequence for that the fitness value should
be calculated.
seq_to_fitness (dict): dictionary that stores calculated fitness
values.
key (string): the key of the provided sequence for the dictionary.
program (string): the name of the application this sequence
should be used for.
"""
if key not in seq_to_fitness:
seq_to_fitness[key] = polly_stats.get_regions_without_scops(sequence,
program)
def calculate_neighbours(sequence, seq_to_fitness, pass_space, program):
"""Calculates the neighbours of the specified sequence.
This method calculates all sequences that differ from the specified
sequence at exactly one position. Furthermore this method calculates the
fitness values of the neighbours.
A sequence is a neighbour of another sequence if they have exactly one
different pass.
E.g.: Sequences s1 = [a, a], s2 = [a, b], s3 = [b, b].
s2 is a neighbour of s1, because they differ in the second position.
s3 is not a neighbour of s1, because they differ in position one and
two.
Args:
sequence (list[string]): the specified sequence.
seq_to_fitness (dict): dictionary that contains calculated fitness
values.
pass_space (list[string]): a list of all available passes.
program (string): the name of the application the neighbour
sequences should be used for.
Returns:
list[list[string]]: all neighbours of the specified sequence are
returned as list.
"""
neighbours = []
pool = multiprocessing.Pool()
pool.apply_async(calculate_fitness_value,
args=(sequence, seq_to_fitness, str(sequence), program))
for i in range(len(sequence)):
remaining_passes = list(pass_space)
remaining_passes.remove(sequence[i])
# Create sequences with different pass at position i.
for remaining_pass in remaining_passes:
neighbour = list(sequence)
neighbour[i] = remaining_pass
pool.apply_async(calculate_fitness_value,
args=(neighbour, seq_to_fitness, str(neighbour),
program))
neighbours.append(neighbour)
pool.close()
pool.join()
return neighbours
def climb(sequence, program, pass_space, seq_to_fitness):
"""Performs the actual hill climbing.
Args:
sequence (list[string]): the sequence that should be used as base
sequence.
program (string): name of the application the sequences are applied
on.
pass_space (list[string]): a list containing all available passes.
seq_to_fitness (dict): dictionary that stores calculated fitness
values.
"""
log = logging.getLogger(__name__)
base_sequence = sequence
base_sequence_key = str(base_sequence)
log.debug("Start climbing...")
log.debug("Initial base sequence: %s", str(base_sequence))
# Take the base sequence and calculate all neighbours. Check if the best
# performing neighbour is better than the base sequence. If this is the
# case this neighbour becomes the new base sequence.
# This process is repeated until the base sequence outperforms all its
# neighbours.
climbs = 0
changed = True
while changed:
changed = False
# Calculate its neighbours.
neighbours = calculate_neighbours(base_sequence, seq_to_fitness,
pass_space, program)
# Check if there is a better performing neighbour.
for neighbour in neighbours:
if seq_to_fitness[base_sequence_key] \
> seq_to_fitness[str(neighbour)]:
base_sequence = neighbour
base_sequence_key = str(neighbour)
changed = True
climbs += 1
log.debug("\n---> Climb number %s <---", str(climbs))
log.debug("---> Base sequence: %s <---", str(base_sequence))
log.debug("---> Neighbours: <---")
if print_debug:
for neighbour in neighbours:
log.debug('Neighbour: %s; Fitness value: %s',
str(neighbour),
str(seq_to_fitness[str(neighbour)]))
log.debug("Local optimum reached!\n")
return base_sequence
def generate_custom_sequence(program, pass_space=DEFAULT_PASS_SPACE,
seq_length=DEFAULT_SEQ_LENGTH,
iterations=DEFAULT_ITERATIONS, debug=False):
"""Generates a custom optimization sequence for a provided application.
Args:
program (string): the name of the application a custom sequence should
be generated for.
pass_space (list[string], optional): list of passes that should be
taken into consideration for the generation of the custom
sequence.
seq_length(int, optional): the length of the sequence that should be
generated.
iterations (int, optional): the number of times the hill climbing
process is to be repeated.
debug (boolean, optional): true if debug information should be printed;
false otherwise.
Returns:
list[string]: the generated custom optimization sequence. Each element
of the list represents one optimization pass.
"""
global print_debug
print_debug = debug
log = logging.getLogger(__name__)
best_sequence = []
seq_to_fitness = multiprocessing.Manager().dict()
log.debug("\n Start hill climbing algorithm...")
for i in range(iterations):
log.debug("Iteration: %d", i + 1)
base_sequence = create_random_sequence(pass_space, seq_length)
base_sequence = climb(base_sequence, program, pass_space,
seq_to_fitness)
if not best_sequence or seq_to_fitness[str(best_sequence)] < \
seq_to_fitness[str(base_sequence)]:
best_sequence = base_sequence
    log.debug("Best sequence found in %d iterations:", iterations)
log.debug("Sequence: %s", best_sequence)
log.debug("Fitness value: %s", str(seq_to_fitness[str(best_sequence)]))
return best_sequence
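# --- Illustrative usage sketch added by the editor; not part of the original module. ---
# The program name and pass list below are hypothetical; a real run additionally
# needs LLVM's opt tool and the polly_stats helpers from this package.
if __name__ == "__main__":
    custom_sequence = generate_custom_sequence(
        "linpack",  # hypothetical benchmark name
        pass_space=["-basicaa", "-mem2reg", "-instcombine"],
        seq_length=4,
        iterations=5,
        debug=True)
    print(custom_sequence)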
|
[
"polyjit.experiments.sequences.polly_stats.get_regions_without_scops",
"multiprocessing.Manager",
"random.choice",
"multiprocessing.Pool",
"logging.getLogger"
] |
[((3506, 3528), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {}), '()\n', (3526, 3528), False, 'import multiprocessing\n'), ((4732, 4759), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (4749, 4759), False, 'import logging\n'), ((7414, 7441), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (7431, 7441), False, 'import logging\n'), ((2252, 2308), 'polyjit.experiments.sequences.polly_stats.get_regions_without_scops', 'polly_stats.get_regions_without_scops', (['sequence', 'program'], {}), '(sequence, program)\n', (2289, 2308), True, 'import polyjit.experiments.sequences.polly_stats as polly_stats\n'), ((1418, 1443), 'random.choice', 'random.choice', (['pass_space'], {}), '(pass_space)\n', (1431, 1443), False, 'import random\n'), ((7487, 7512), 'multiprocessing.Manager', 'multiprocessing.Manager', ([], {}), '()\n', (7510, 7512), False, 'import multiprocessing\n')]
|
from unittest import skip
from scrapy.http import Request
from scrapers.scrapers.spiders.dcwd import DcwdSpider
from scrapers.tests.base_scraper_test_setup import ScraperTestCase, html_files
from scrapers.tests.utils import make_response_object, make_fake_id
class DcwdParserTests(ScraperTestCase):
def setUp(self):
self.spider = DcwdSpider(limit=1)
def test_parse(self):
"""Tests the spider's main parse method."""
valid_results = self.get_parse_results(
response=make_response_object(html_files['dcwd_index'])
)
# Test if list is non-empty
self.assertGreater(len(valid_results), 0)
# Test each Request object in the result
for request in valid_results:
self.assertIsInstance(request, Request)
self.assertIsNotNone(request.meta)
self.assertIsNotNone(request.meta.get('urgency'))
self.assertIsNotNone(request.meta.get('title'))
self.assertIsNotNone(request.meta.get('notice_id'))
def test_parse_page(self):
"""Tests the spider's parse_page method."""
valid_results = self.get_parse_results(
parse_method_name='parse_page',
response=make_response_object(
filepath=html_files['dcwd_details'],
meta={'urgency': 'a', 'title': 'Some Title',
'notice_id': make_fake_id()}
)
)
self.assertGreater(len(valid_results), 0)
for item in valid_results:
self.assertIsNotNone(item.get('urgency'))
self.assertIsNotNone(item.get('headline'))
self.assertIsNotNone(item.get('source_url'))
self.assertIsNotNone(item.get('notice_id'))
self.assertIsNotNone(item.get('posted_on'))
self.assertIsInstance(item.get('details'), list)
self.assertIsNotNone(item.get('scraped_on'))
self.assertEqual(item.get('provider'), 'DCWD')
self.assertEqual(item.get('service'), 'Water')
def test_water_outage_details(self):
"""Tests if scraped outage details are complete."""
# Get results from parse_page
valid_results = self.get_parse_results(
parse_method_name='parse_page',
response=make_response_object(
filepath=html_files['dcwd_details'],
meta={'urgency': 'a', 'title': 'Some Title',
'notice_id': make_fake_id()}
)
)
for item in valid_results:
# Unpack list of outage details (dicts)
details_per_set = item['details']
for outage_set in details_per_set:
self.assertIsNotNone(outage_set.get('set_n'))
self.assertIsNotNone(outage_set.get('when'))
self.assertIsNotNone(outage_set.get('where'))
self.assertIsNotNone(outage_set.get('why'))
|
[
"scrapers.scrapers.spiders.dcwd.DcwdSpider",
"scrapers.tests.utils.make_response_object",
"scrapers.tests.utils.make_fake_id"
] |
[((347, 366), 'scrapers.scrapers.spiders.dcwd.DcwdSpider', 'DcwdSpider', ([], {'limit': '(1)'}), '(limit=1)\n', (357, 366), False, 'from scrapers.scrapers.spiders.dcwd import DcwdSpider\n'), ((516, 562), 'scrapers.tests.utils.make_response_object', 'make_response_object', (["html_files['dcwd_index']"], {}), "(html_files['dcwd_index'])\n", (536, 562), False, 'from scrapers.tests.utils import make_response_object, make_fake_id\n'), ((1402, 1416), 'scrapers.tests.utils.make_fake_id', 'make_fake_id', ([], {}), '()\n', (1414, 1416), False, 'from scrapers.tests.utils import make_response_object, make_fake_id\n'), ((2467, 2481), 'scrapers.tests.utils.make_fake_id', 'make_fake_id', ([], {}), '()\n', (2479, 2481), False, 'from scrapers.tests.utils import make_response_object, make_fake_id\n')]
|
from django.shortcuts import render
# Create your views here.
from .models import Comment
from blog.models import Article
from .forms import CommentForm
from django.views.generic.edit import FormView
from django.http import HttpResponseRedirect
from django.contrib.auth import get_user_model
from django import forms
class CommentPostView(FormView):
form_class = CommentForm
template_name = 'blog/article_detail.html'
def get(self, request, *args, **kwargs):
article_id = self.kwargs['article_id']
article = Article.objects.get(pk=article_id)
url = article.get_absolute_url()
return HttpResponseRedirect(url + "#comments")
def form_invalid(self, form):
article_id = self.kwargs['article_id']
article = Article.objects.get(pk=article_id)
u = self.request.user
if self.request.user.is_authenticated:
form.fields.update({
'email': forms.CharField(widget=forms.HiddenInput()),
'name': forms.CharField(widget=forms.HiddenInput()),
})
user = self.request.user
form.fields["email"].initial = user.email
form.fields["name"].initial = user.username
return self.render_to_response({
'form': form,
'article': article
})
def form_valid(self, form):
"""提交的数据验证合法后的逻辑"""
user = self.request.user
article_id = self.kwargs['article_id']
article = Article.objects.get(pk=article_id)
        if not self.request.user.is_authenticated:
email = form.cleaned_data['email']
username = form.cleaned_data['name']
user = get_user_model().objects.get_or_create(username=username, email=email)[0]
# auth.login(self.request, user)
comment = form.save(False)
comment.article = article
comment.author = user
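        # Attach the reply to its parent comment when one was specified.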
if form.cleaned_data['parent_comment_id']:
parent_comment = Comment.objects.get(pk=form.cleaned_data['parent_comment_id'])
comment.parent_comment = parent_comment
comment.save(True)
from DjangoBlog.utils import expire_view_cache, cache
from django.contrib.sites.models import Site
path = article.get_absolute_url()
site = Site.objects.get_current().domain
if site.find(':') > 0:
site = site[0:site.find(':')]
port = 80
try:
            # django 1.8 does not have this method...
port = self.request.get_port()
except:
pass
expire_view_cache(path, servername=site, serverport=port, key_prefix='blogdetail')
if cache.get('seo_processor'):
cache.delete('seo_processor')
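        # Invalidate the cached comment list for this article so the new comment shows up immediately.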
comment_cache_key = 'article_comments_{id}'.format(id=article_id)
cache.delete(comment_cache_key)
from django.core.cache.utils import make_template_fragment_key
username = self.request.user.username if self.request.user else ''
key = make_template_fragment_key('sidebar', [username])
cache.delete(key)
return HttpResponseRedirect("%s#div-comment-%d" % (article.get_absolute_url(), comment.pk))
|
[
"DjangoBlog.utils.cache.delete",
"django.http.HttpResponseRedirect",
"DjangoBlog.utils.cache.get",
"django.contrib.sites.models.Site.objects.get_current",
"django.contrib.auth.get_user_model",
"django.forms.HiddenInput",
"django.core.cache.utils.make_template_fragment_key",
"DjangoBlog.utils.expire_view_cache",
"blog.models.Article.objects.get"
] |
[((541, 575), 'blog.models.Article.objects.get', 'Article.objects.get', ([], {'pk': 'article_id'}), '(pk=article_id)\n', (560, 575), False, 'from blog.models import Article\n'), ((632, 671), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (["(url + '#comments')"], {}), "(url + '#comments')\n", (652, 671), False, 'from django.http import HttpResponseRedirect\n'), ((772, 806), 'blog.models.Article.objects.get', 'Article.objects.get', ([], {'pk': 'article_id'}), '(pk=article_id)\n', (791, 806), False, 'from blog.models import Article\n'), ((1489, 1523), 'blog.models.Article.objects.get', 'Article.objects.get', ([], {'pk': 'article_id'}), '(pk=article_id)\n', (1508, 1523), False, 'from blog.models import Article\n'), ((2564, 2651), 'DjangoBlog.utils.expire_view_cache', 'expire_view_cache', (['path'], {'servername': 'site', 'serverport': 'port', 'key_prefix': '"""blogdetail"""'}), "(path, servername=site, serverport=port, key_prefix=\n 'blogdetail')\n", (2581, 2651), False, 'from DjangoBlog.utils import expire_view_cache, cache\n'), ((2658, 2684), 'DjangoBlog.utils.cache.get', 'cache.get', (['"""seo_processor"""'], {}), "('seo_processor')\n", (2667, 2684), False, 'from DjangoBlog.utils import expire_view_cache, cache\n'), ((2810, 2841), 'DjangoBlog.utils.cache.delete', 'cache.delete', (['comment_cache_key'], {}), '(comment_cache_key)\n', (2822, 2841), False, 'from DjangoBlog.utils import expire_view_cache, cache\n'), ((3002, 3051), 'django.core.cache.utils.make_template_fragment_key', 'make_template_fragment_key', (['"""sidebar"""', '[username]'], {}), "('sidebar', [username])\n", (3028, 3051), False, 'from django.core.cache.utils import make_template_fragment_key\n'), ((3060, 3077), 'DjangoBlog.utils.cache.delete', 'cache.delete', (['key'], {}), '(key)\n', (3072, 3077), False, 'from DjangoBlog.utils import expire_view_cache, cache\n'), ((2308, 2334), 'django.contrib.sites.models.Site.objects.get_current', 'Site.objects.get_current', ([], {}), '()\n', (2332, 2334), False, 'from django.contrib.sites.models import Site\n'), ((2698, 2727), 'DjangoBlog.utils.cache.delete', 'cache.delete', (['"""seo_processor"""'], {}), "('seo_processor')\n", (2710, 2727), False, 'from DjangoBlog.utils import expire_view_cache, cache\n'), ((966, 985), 'django.forms.HiddenInput', 'forms.HiddenInput', ([], {}), '()\n', (983, 985), False, 'from django import forms\n'), ((1035, 1054), 'django.forms.HiddenInput', 'forms.HiddenInput', ([], {}), '()\n', (1052, 1054), False, 'from django import forms\n'), ((1693, 1709), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (1707, 1709), False, 'from django.contrib.auth import get_user_model\n')]
|
"""
Adapted from Res2Net (v1b) official git repo:
https://github.com/Res2Net/Res2Net-PretrainedModels/blob/master/res2net_v1b.py
"""
from os import stat
import pathlib
import math
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
from lib.nets.basemodel import BaseModel
__all__ = ['Res2Net', 'res2net50_v1b', 'res2net101_v1b']
model_urls = {
'res2net50_v1b_26w_4s': 'https://shanghuagao.oss-cn-beijing.aliyuncs.com/res2net/res2net50_v1b_26w_4s-3cf99910.pth',
'res2net101_v1b_26w_4s': 'https://shanghuagao.oss-cn-beijing.aliyuncs.com/res2net/res2net101_v1b_26w_4s-0812c246.pth',
}
# Use a pathlib.Path so the '/' joins below work.
model_params_path = pathlib.Path('/afs/crc.nd.edu/user/y/yzhang46/_DLResources/Models/')
model_params = {
'res2net50_v1b_26w_4s': str(model_params_path / 'res2net50_v1b_26w_4s.pth'),
'res2net101_v1b_26w_4s': str(model_params_path / 'res2net101_v1b_26w_4s.pth'),
}
class Bottle2neck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None,
baseWidth=26, scale=4, stype='normal'):
""" Constructor
Args:
inplanes: input channel dimensionality
planes: output channel dimensionality
stride: conv stride. Replaces pooling layer.
downsample: None when stride = 1
baseWidth: basic width of conv3x3
scale: number of scale.
            stype: 'normal': normal set. 'stage': first block of a new stage.
"""
super(Bottle2neck, self).__init__()
width = int(math.floor(planes * (baseWidth/64.0)))
self.conv1 = nn.Conv2d(inplanes, width*scale, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(width*scale)
if scale == 1:
self.nums = 1
else:
self.nums = scale -1
if stype == 'stage':
self.pool = nn.AvgPool2d(kernel_size=3, stride = stride, padding=1)
convs = []
bns = []
for i in range(self.nums):
convs.append(nn.Conv2d(width, width, kernel_size=3, stride = stride,
padding=1, bias=False))
bns.append(nn.BatchNorm2d(width))
self.convs = nn.ModuleList(convs)
self.bns = nn.ModuleList(bns)
self.conv3 = nn.Conv2d(width*scale, planes * self.expansion,
kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stype = stype
self.scale = scale
self.width = width
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
spx = torch.split(out, self.width, 1)
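        # Res2Net split processing: each width-wide split is convolved; in 'normal' blocks the previous output feeds into the next split.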
for i in range(self.nums):
if i==0 or self.stype=='stage':
sp = spx[i]
else:
sp = sp + spx[i]
sp = self.convs[i](sp)
sp = self.relu(self.bns[i](sp))
if i==0:
out = sp
else:
out = torch.cat((out, sp), 1)
if self.scale != 1 and self.stype=='normal':
out = torch.cat((out, spx[self.nums]),1)
elif self.scale != 1 and self.stype=='stage':
out = torch.cat((out, self.pool(spx[self.nums])),1)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Res2Net(BaseModel):
def __init__(self, block, layers, baseWidth=26, scale=4, num_classes=1000,
prelinear_dropout=0):
self.num_classes = num_classes
self.prelinear_dropout = prelinear_dropout
self.inplanes = 64
super(Res2Net, self).__init__()
self.baseWidth = baseWidth
self.scale = scale
self.conv1 = nn.Sequential(
nn.Conv2d(3, 32, 3, 2, 1, bias=False),
nn.BatchNorm2d(32),
nn.ReLU(inplace=True),
nn.Conv2d(32, 32, 3, 1, 1, bias=False),
nn.BatchNorm2d(32),
nn.ReLU(inplace=True),
nn.Conv2d(32, 64, 3, 1, 1, bias=False)
)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU()
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
self.classifier = self.fc # compatibility with linear eval
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out',
nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
tot_params, tot_tparams = self.param_counts
print(f'💠 Res2Netv1b model initiated with n_classes={num_classes}, \n'
f' layers={layers}, base-width={baseWidth}, scale={scale}\n'
f' params={tot_params:,}, trainable_params={tot_tparams:,}.')
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.AvgPool2d(kernel_size=stride, stride=stride,
ceil_mode=True, count_include_pad=False),
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
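        # The first block of a stage handles stride/downsampling ('stage' type); the remaining blocks are 'normal'.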
layers.append(block(self.inplanes, planes, stride,
downsample=downsample, stype='stage',
baseWidth=self.baseWidth, scale=self.scale))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, baseWidth=self.baseWidth,
scale=self.scale))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
if self.prelinear_dropout > 0:
x = F.dropout(x, p=self.prelinear_dropout, training=self.training)
x = self.fc(x)
return x
def res2net50_v1b(pretrained=False, **kwargs):
"""Constructs a Res2Net-50_v1b model.
Res2Net-50 refers to the Res2Net-50_v1b_26w_4s.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = Res2Net(Bottle2neck, [3, 4, 6, 3], baseWidth=26, scale=4, **kwargs)
if pretrained:
load_state_dict(model, 'res2net50_v1b_26w_4s')
return model
def res2net101_v1b(pretrained=False, **kwargs):
"""Constructs a Res2Net-50_v1b_26w_4s model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = Res2Net(Bottle2neck, [3, 4, 23, 3], baseWidth=26, scale=4, **kwargs)
if pretrained:
load_state_dict(model, 'res2net101_v1b_26w_4s')
return model
def res2net50_v1b_26w_4s(pretrained=False, **kwargs):
"""Constructs a Res2Net-50_v1b_26w_4s model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = Res2Net(Bottle2neck, [3, 4, 6, 3], baseWidth=26, scale=4, **kwargs)
if pretrained:
load_state_dict(model, 'res2net50_v1b_26w_4s')
return model
def res2net101_v1b_26w_4s(pretrained=False, **kwargs):
"""Constructs a Res2Net-50_v1b_26w_4s model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = Res2Net(Bottle2neck, [3, 4, 23, 3], baseWidth=26, scale=4, **kwargs)
if pretrained:
load_state_dict(model, 'res2net101_v1b_26w_4s')
return model
def res2net152_v1b_26w_4s(pretrained=False, **kwargs):
"""Constructs a Res2Net-50_v1b_26w_4s model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = Res2Net(Bottle2neck, [3, 8, 36, 3], baseWidth=26, scale=4, **kwargs)
if pretrained:
load_state_dict(model, 'res2net152_v1b_26w_4s')
return model
def load_state_dict(model, model_key):
print(f' * Res2Net1b loading pretrained ImageNet weights.')
# print(model.load_state_dict(model_zoo.load_url(model_urls[model_key])))
# My code after downloading model params
state_dict = torch.load(model_params[model_key], map_location='cpu')
if model.num_classes != 1000:
del state_dict['fc.weight']
del state_dict['fc.bias']
print(model.load_state_dict(state_dict, strict=False))
def get_model(layers, num_classes, pretrained=True, prelinear_dropout=0):
layers = int(layers)
if layers == 50:
model = res2net50_v1b(pretrained=pretrained, num_classes=num_classes,
prelinear_dropout=prelinear_dropout)
elif layers == 101:
model = res2net101_v1b(pretrained=pretrained, num_classes=num_classes,
prelinear_dropout=prelinear_dropout)
else:
raise ValueError(f'{layers} layers is not supported right now.')
return model
if __name__ == '__main__':
images = torch.rand(1, 3, 224, 224).cuda(0)
model = res2net50_v1b_26w_4s(pretrained=True)
model = model.cuda(0)
print(model(images).size())
|
[
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.ReLU",
"torch.nn.init.kaiming_normal_",
"torch.rand",
"torch.nn.ModuleList",
"torch.nn.Sequential",
"torch.load",
"torch.nn.Conv2d",
"torch.split",
"math.floor",
"torch.cat",
"torch.nn.functional.dropout",
"torch.nn.BatchNorm2d",
"torch.nn.init.constant_",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.AvgPool2d"
] |
[((9126, 9181), 'torch.load', 'torch.load', (['model_params[model_key]'], {'map_location': '"""cpu"""'}), "(model_params[model_key], map_location='cpu')\n", (9136, 9181), False, 'import torch\n'), ((1627, 1688), 'torch.nn.Conv2d', 'nn.Conv2d', (['inplanes', '(width * scale)'], {'kernel_size': '(1)', 'bias': '(False)'}), '(inplanes, width * scale, kernel_size=1, bias=False)\n', (1636, 1688), True, 'import torch.nn as nn\n'), ((1706, 1735), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(width * scale)'], {}), '(width * scale)\n', (1720, 1735), True, 'import torch.nn as nn\n'), ((2217, 2237), 'torch.nn.ModuleList', 'nn.ModuleList', (['convs'], {}), '(convs)\n', (2230, 2237), True, 'import torch.nn as nn\n'), ((2257, 2275), 'torch.nn.ModuleList', 'nn.ModuleList', (['bns'], {}), '(bns)\n', (2270, 2275), True, 'import torch.nn as nn\n'), ((2298, 2374), 'torch.nn.Conv2d', 'nn.Conv2d', (['(width * scale)', '(planes * self.expansion)'], {'kernel_size': '(1)', 'bias': '(False)'}), '(width * scale, planes * self.expansion, kernel_size=1, bias=False)\n', (2307, 2374), True, 'import torch.nn as nn\n'), ((2424, 2463), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(planes * self.expansion)'], {}), '(planes * self.expansion)\n', (2438, 2463), True, 'import torch.nn as nn\n'), ((2485, 2506), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2492, 2506), True, 'import torch.nn as nn\n'), ((2775, 2806), 'torch.split', 'torch.split', (['out', 'self.width', '(1)'], {}), '(out, self.width, 1)\n', (2786, 2806), False, 'import torch\n'), ((4299, 4317), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (4313, 4317), True, 'import torch.nn as nn\n'), ((4338, 4347), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4345, 4347), True, 'import torch.nn as nn\n'), ((4371, 4419), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)', 'padding': '(1)'}), '(kernel_size=3, stride=2, padding=1)\n', (4383, 4419), True, 'import torch.nn as nn\n'), ((4720, 4743), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (4740, 4743), True, 'import torch.nn as nn\n'), ((4762, 4807), 'torch.nn.Linear', 'nn.Linear', (['(512 * block.expansion)', 'num_classes'], {}), '(512 * block.expansion, num_classes)\n', (4771, 4807), True, 'import torch.nn as nn\n'), ((6497, 6519), 'torch.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (6510, 6519), True, 'import torch.nn as nn\n'), ((1567, 1606), 'math.floor', 'math.floor', (['(planes * (baseWidth / 64.0))'], {}), '(planes * (baseWidth / 64.0))\n', (1577, 1606), False, 'import math\n'), ((1888, 1941), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', ([], {'kernel_size': '(3)', 'stride': 'stride', 'padding': '(1)'}), '(kernel_size=3, stride=stride, padding=1)\n', (1900, 1941), True, 'import torch.nn as nn\n'), ((3195, 3230), 'torch.cat', 'torch.cat', (['(out, spx[self.nums])', '(1)'], {}), '((out, spx[self.nums]), 1)\n', (3204, 3230), False, 'import torch\n'), ((3994, 4031), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(32)', '(3)', '(2)', '(1)'], {'bias': '(False)'}), '(3, 32, 3, 2, 1, bias=False)\n', (4003, 4031), True, 'import torch.nn as nn\n'), ((4045, 4063), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (4059, 4063), True, 'import torch.nn as nn\n'), ((4077, 4098), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (4084, 4098), True, 'import torch.nn as nn\n'), ((4112, 4150), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(32)', '(3)', '(1)', '(1)'], 
{'bias': '(False)'}), '(32, 32, 3, 1, 1, bias=False)\n', (4121, 4150), True, 'import torch.nn as nn\n'), ((4164, 4182), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (4178, 4182), True, 'import torch.nn as nn\n'), ((4196, 4217), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (4203, 4217), True, 'import torch.nn as nn\n'), ((4231, 4269), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(64)', '(3)', '(1)', '(1)'], {'bias': '(False)'}), '(32, 64, 3, 1, 1, bias=False)\n', (4240, 4269), True, 'import torch.nn as nn\n'), ((6878, 6940), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': 'self.prelinear_dropout', 'training': 'self.training'}), '(x, p=self.prelinear_dropout, training=self.training)\n', (6887, 6940), True, 'import torch.nn.functional as F\n'), ((9924, 9950), 'torch.rand', 'torch.rand', (['(1)', '(3)', '(224)', '(224)'], {}), '(1, 3, 224, 224)\n', (9934, 9950), False, 'import torch\n'), ((2038, 2114), 'torch.nn.Conv2d', 'nn.Conv2d', (['width', 'width'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': '(1)', 'bias': '(False)'}), '(width, width, kernel_size=3, stride=stride, padding=1, bias=False)\n', (2047, 2114), True, 'import torch.nn as nn\n'), ((2173, 2194), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['width'], {}), '(width)\n', (2187, 2194), True, 'import torch.nn as nn\n'), ((3102, 3125), 'torch.cat', 'torch.cat', (['(out, sp)', '(1)'], {}), '((out, sp), 1)\n', (3111, 3125), False, 'import torch\n'), ((4967, 5037), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['m.weight'], {'mode': '"""fan_out"""', 'nonlinearity': '"""relu"""'}), "(m.weight, mode='fan_out', nonlinearity='relu')\n", (4990, 5037), True, 'import torch.nn as nn\n'), ((5734, 5826), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', ([], {'kernel_size': 'stride', 'stride': 'stride', 'ceil_mode': '(True)', 'count_include_pad': '(False)'}), '(kernel_size=stride, stride=stride, ceil_mode=True,\n count_include_pad=False)\n', (5746, 5826), True, 'import torch.nn as nn\n'), ((5861, 5952), 'torch.nn.Conv2d', 'nn.Conv2d', (['self.inplanes', '(planes * block.expansion)'], {'kernel_size': '(1)', 'stride': '(1)', 'bias': '(False)'}), '(self.inplanes, planes * block.expansion, kernel_size=1, stride=1,\n bias=False)\n', (5870, 5952), True, 'import torch.nn as nn\n'), ((5987, 6027), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(planes * block.expansion)'], {}), '(planes * block.expansion)\n', (6001, 6027), True, 'import torch.nn as nn\n'), ((5143, 5173), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.weight', '(1)'], {}), '(m.weight, 1)\n', (5160, 5173), True, 'import torch.nn as nn\n'), ((5190, 5218), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (5207, 5218), True, 'import torch.nn as nn\n')]
|
from itertools import permutations
import torch
from torch import nn
from scipy.optimize import linear_sum_assignment
import numpy as np
import torch.nn.functional as F
class PITLossWrapper(nn.Module):
r"""Permutation invariant loss wrapper.
Args:
loss_func: function with signature (est_targets, targets, **kwargs).
pit_from (str): Determines how PIT is applied.
* ``'pw_mtx'`` (pairwise matrix): `loss_func` computes pairwise
losses and returns a torch.Tensor of shape
:math:`(batch, n\_src, n\_src)`. Each element
:math:`(batch, i, j)` corresponds to the loss between
:math:`targets[:, i]` and :math:`est\_targets[:, j]`
* ``'pw_pt'`` (pairwise point): `loss_func` computes the loss for
a batch of single source and single estimates (tensors won't
have the source axis). Output shape : :math:`(batch)`.
See :meth:`~PITLossWrapper.get_pw_losses`.
* ``'perm_avg'`` (permutation average): `loss_func` computes the
average loss for a given permutations of the sources and
estimates. Output shape : :math:`(batch)`.
See :meth:`~PITLossWrapper.best_perm_from_perm_avg_loss`.
        In terms of efficiency, ``'perm_avg'`` is the least efficient.
perm_reduce (Callable): torch function to reduce permutation losses.
Defaults to None (equivalent to mean). Signature of the func
(pwl_set, **kwargs) : :math:`(B, n\_src!, n\_src) --> (B, n\_src!)`.
`perm_reduce` can receive **kwargs during forward using the
`reduce_kwargs` argument (dict). If those argument are static,
consider defining a small function or using `functools.partial`.
Only used in `'pw_mtx'` and `'pw_pt'` `pit_from` modes.
For each of these modes, the best permutation and reordering will be
automatically computed. When either ``'pw_mtx'`` or ``'pw_pt'`` is used,
and the number of sources is larger than three, the hungarian algorithm is
used to find the best permutation.
Examples
>>> import torch
>>> from asteroid.losses import pairwise_neg_sisdr
>>> sources = torch.randn(10, 3, 16000)
>>> est_sources = torch.randn(10, 3, 16000)
>>> # Compute PIT loss based on pairwise losses
>>> loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from='pw_mtx')
>>> loss_val = loss_func(est_sources, sources)
>>>
>>> # Using reduce
>>> def reduce(perm_loss, src):
>>> weighted = perm_loss * src.norm(dim=-1, keepdim=True)
>>> return torch.mean(weighted, dim=-1)
>>>
>>> loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from='pw_mtx',
>>> perm_reduce=reduce)
>>> reduce_kwargs = {'src': sources}
>>> loss_val = loss_func(est_sources, sources,
>>> reduce_kwargs=reduce_kwargs)
"""
def __init__(self, loss_func, pit_from="pw_mtx", perm_reduce=None):
super().__init__()
self.loss_func = loss_func
self.pit_from = pit_from
self.perm_reduce = perm_reduce
if self.pit_from not in ["pw_mtx", "pw_pt", "perm_avg"]:
raise ValueError(
"Unsupported loss function type for now. Expected"
"one of [`pw_mtx`, `pw_pt`, `perm_avg`]"
)
def forward(self, est_targets, targets, return_est=False, reduce_kwargs=None, **kwargs):
r"""Find the best permutation and return the loss.
Args:
est_targets: torch.Tensor. Expected shape $(batch, nsrc, ...)$.
The batch of target estimates.
targets: torch.Tensor. Expected shape $(batch, nsrc, ...)$.
The batch of training targets
return_est: Boolean. Whether to return the reordered targets
estimates (To compute metrics or to save example).
reduce_kwargs (dict or None): kwargs that will be passed to the
pairwise losses reduce function (`perm_reduce`).
**kwargs: additional keyword argument that will be passed to the
loss function.
Returns:
- Best permutation loss for each batch sample, average over
the batch.
- The reordered targets estimates if ``return_est`` is True.
:class:`torch.Tensor` of shape $(batch, nsrc, ...)$.
"""
n_src = targets.shape[1]
# assert n_src < 10, f"Expected source axis along dim 1, found {n_src}"
if self.pit_from == "pw_mtx":
# Loss function already returns pairwise losses
pw_losses = self.loss_func(est_targets, targets, **kwargs)
elif self.pit_from == "pw_pt":
# Compute pairwise losses with a for loop.
pw_losses = self.get_pw_losses(self.loss_func, est_targets, targets, **kwargs)
elif self.pit_from == "perm_avg":
# Cannot get pairwise losses from this type of loss.
# Find best permutation directly.
min_loss, batch_indices = self.best_perm_from_perm_avg_loss(
self.loss_func, est_targets, targets, **kwargs
)
# Take the mean over the batch
mean_loss = torch.mean(min_loss)
if not return_est:
return mean_loss
reordered = self.reorder_source(est_targets, batch_indices)
return mean_loss, reordered
else:
return
assert pw_losses.ndim == 3, (
"Something went wrong with the loss " "function, please read the docs."
)
assert pw_losses.shape[0] == targets.shape[0], "PIT loss needs same batch dim as input"
reduce_kwargs = reduce_kwargs if reduce_kwargs is not None else dict()
min_loss, batch_indices = self.find_best_perm(
pw_losses, perm_reduce=self.perm_reduce, **reduce_kwargs
)
mean_loss = torch.mean(min_loss)
if not return_est:
return mean_loss
reordered = self.reorder_source(est_targets, batch_indices)
return mean_loss, reordered
@staticmethod
def get_pw_losses(loss_func, est_targets, targets, **kwargs):
r"""Get pair-wise losses between the training targets and its estimate
for a given loss function.
Args:
loss_func: function with signature (est_targets, targets, **kwargs)
The loss function to get pair-wise losses from.
est_targets: torch.Tensor. Expected shape $(batch, nsrc, ...)$.
The batch of target estimates.
targets: torch.Tensor. Expected shape $(batch, nsrc, ...)$.
The batch of training targets.
**kwargs: additional keyword argument that will be passed to the
loss function.
Returns:
torch.Tensor or size $(batch, nsrc, nsrc)$, losses computed for
all permutations of the targets and est_targets.
This function can be called on a loss function which returns a tensor
of size :math:`(batch)`. There are more efficient ways to compute pair-wise
losses using broadcasting.
"""
batch_size, n_src, *_ = targets.shape
pair_wise_losses = targets.new_empty(batch_size, n_src, n_src)
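        # Evaluate the loss for every (estimate, target) pair to fill the pairwise loss matrix.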
for est_idx, est_src in enumerate(est_targets.transpose(0, 1)):
for target_idx, target_src in enumerate(targets.transpose(0, 1)):
pair_wise_losses[:, est_idx, target_idx] = loss_func(est_src, target_src, **kwargs)
return pair_wise_losses
@staticmethod
def best_perm_from_perm_avg_loss(loss_func, est_targets, targets, **kwargs):
r"""Find best permutation from loss function with source axis.
Args:
loss_func: function with signature $(est_targets, targets, **kwargs)$
The loss function batch losses from.
est_targets: torch.Tensor. Expected shape $(batch, nsrc, *)$.
The batch of target estimates.
targets: torch.Tensor. Expected shape $(batch, nsrc, *)$.
The batch of training targets.
**kwargs: additional keyword argument that will be passed to the
loss function.
Returns:
- :class:`torch.Tensor`:
The loss corresponding to the best permutation of size $(batch,)$.
- :class:`torch.Tensor`:
The indices of the best permutations.
"""
n_src = targets.shape[1]
perms = torch.tensor(list(permutations(range(n_src))), dtype=torch.long)
loss_set = torch.stack(
[loss_func(est_targets[:, perm], targets, **kwargs) for perm in perms], dim=1
)
# Indexes and values of min losses for each batch element
min_loss, min_loss_idx = torch.min(loss_set, dim=1)
# Permutation indices for each batch.
batch_indices = torch.stack([perms[m] for m in min_loss_idx], dim=0)
return min_loss, batch_indices
@staticmethod
def find_best_perm(pair_wise_losses, perm_reduce=None, **kwargs):
r"""Find the best permutation, given the pair-wise losses.
        Dispatches to the factorial method when the number of sources is small
        (<= 3) and to the hungarian method for more sources. If ``perm_reduce`` is not None,
the factorial method is always used.
Args:
pair_wise_losses (:class:`torch.Tensor`):
Tensor of shape :math:`(batch, n\_src, n\_src)`. Pairwise losses.
perm_reduce (Callable): torch function to reduce permutation losses.
Defaults to None (equivalent to mean). Signature of the func
(pwl_set, **kwargs) : :math:`(B, n\_src!, n\_src) -> (B, n\_src!)`
**kwargs: additional keyword argument that will be passed to the
permutation reduce function.
Returns:
- :class:`torch.Tensor`:
The loss corresponding to the best permutation of size $(batch,)$.
- :class:`torch.Tensor`:
The indices of the best permutations.
"""
n_src = pair_wise_losses.shape[-1]
if perm_reduce is not None or n_src <= 3:
min_loss, batch_indices = PITLossWrapper.find_best_perm_factorial(
pair_wise_losses, perm_reduce=perm_reduce, **kwargs
)
else:
min_loss, batch_indices = PITLossWrapper.find_best_perm_hungarian(pair_wise_losses)
return min_loss, batch_indices
@staticmethod
def reorder_source(source, batch_indices):
r"""Reorder sources according to the best permutation.
Args:
source (torch.Tensor): Tensor of shape :math:`(batch, n_src, time)`
batch_indices (torch.Tensor): Tensor of shape :math:`(batch, n_src)`.
Contains optimal permutation indices for each batch.
Returns:
:class:`torch.Tensor`: Reordered sources.
"""
reordered_sources = torch.stack(
[torch.index_select(s, 0, b) for s, b in zip(source, batch_indices)]
)
return reordered_sources
@staticmethod
def find_best_perm_factorial(pair_wise_losses, perm_reduce=None, **kwargs):
r"""Find the best permutation given the pair-wise losses by looping
through all the permutations.
Args:
pair_wise_losses (:class:`torch.Tensor`):
Tensor of shape :math:`(batch, n_src, n_src)`. Pairwise losses.
perm_reduce (Callable): torch function to reduce permutation losses.
Defaults to None (equivalent to mean). Signature of the func
(pwl_set, **kwargs) : :math:`(B, n\_src!, n\_src) -> (B, n\_src!)`
**kwargs: additional keyword argument that will be passed to the
permutation reduce function.
Returns:
- :class:`torch.Tensor`:
The loss corresponding to the best permutation of size $(batch,)$.
- :class:`torch.Tensor`:
The indices of the best permutations.
MIT Copyright (c) 2018 <NAME>.
See `Original code
<https://github.com/kaituoxu/Conv-TasNet/blob/master>`__ and `License
<https://github.com/kaituoxu/Conv-TasNet/blob/master/LICENSE>`__.
"""
n_src = pair_wise_losses.shape[-1]
# After transposition, dim 1 corresp. to sources and dim 2 to estimates
pwl = pair_wise_losses.transpose(-1, -2)
perms = pwl.new_tensor(list(permutations(range(n_src))), dtype=torch.long)
# Column permutation indices
idx = torch.unsqueeze(perms, 2)
# Loss mean of each permutation
if perm_reduce is None:
# one-hot, [n_src!, n_src, n_src]
perms_one_hot = pwl.new_zeros((*perms.size(), n_src)).scatter_(2, idx, 1)
loss_set = torch.einsum("bij,pij->bp", [pwl, perms_one_hot])
loss_set /= n_src
else:
# batch = pwl.shape[0]; n_perm = idx.shape[0]
# [batch, n_src!, n_src] : Pairwise losses for each permutation.
pwl_set = pwl[:, torch.arange(n_src), idx.squeeze(-1)]
# Apply reduce [batch, n_src!, n_src] --> [batch, n_src!]
loss_set = perm_reduce(pwl_set, **kwargs)
# Indexes and values of min losses for each batch element
min_loss, min_loss_idx = torch.min(loss_set, dim=1)
# Permutation indices for each batch.
batch_indices = torch.stack([perms[m] for m in min_loss_idx], dim=0)
return min_loss, batch_indices
@staticmethod
def find_best_perm_hungarian(pair_wise_losses: torch.Tensor):
"""
Find the best permutation given the pair-wise losses, using the Hungarian algorithm.
Returns:
- :class:`torch.Tensor`:
The loss corresponding to the best permutation of size (batch,).
- :class:`torch.Tensor`:
The indices of the best permutations.
"""
# After transposition, dim 1 corresp. to sources and dim 2 to estimates
pwl = pair_wise_losses.transpose(-1, -2)
# Just bring the numbers to cpu(), not the graph
pwl_copy = pwl.detach().cpu()
# Loop over batch + row indices are always ordered for square matrices.
batch_indices = torch.tensor([linear_sum_assignment(pwl)[1] for pwl in pwl_copy]).to(
pwl.device
)
min_loss = torch.gather(pwl, 2, batch_indices[..., None]).mean([-1, -2])
return min_loss, batch_indices
class PITReorder(PITLossWrapper):
"""Permutation invariant reorderer. Only returns the reordered estimates.
See `:py:class:asteroid.losses.PITLossWrapper`."""
def forward(self, est_targets, targets, reduce_kwargs=None, **kwargs):
_, reordered = super().forward(
est_targets=est_targets,
targets=targets,
return_est=True,
reduce_kwargs=reduce_kwargs,
**kwargs,
)
return reordered
class LambdaOverlapAdd(torch.nn.Module):
"""Overlap-add with lambda transform on segments (not scriptable).
Segment input signal, apply lambda function (a neural network for example)
and combine with OLA.
`LambdaOverlapAdd` can be used with :mod:`asteroid.separate` and the
`asteroid-infer` CLI.
Args:
nnet (callable): Function to apply to each segment.
n_src (Optional[int]): Number of sources in the output of nnet.
If None, the number of sources is determined by the network's output,
but some correctness checks cannot be performed.
window_size (int): Size of segmenting window.
        in_margin (int): Number of extra context samples used on each side of a chunk.
window (str): Name of the window (see scipy.signal.get_window) used
for the synthesis.
reorder_chunks (bool): Whether to reorder each consecutive segment.
This might be useful when `nnet` is permutation invariant, as
            source assignments might change output channel from one segment
to the next (in classic speech separation for example).
Reordering is performed based on the correlation between
the overlapped part of consecutive segment.
Examples
>>> from asteroid import ConvTasNet
>>> nnet = ConvTasNet(n_src=2)
>>> continuous_nnet = LambdaOverlapAdd(
>>> nnet=nnet,
>>> n_src=2,
>>> window_size=64000,
>>> hop_size=None, >>> window="hanning",
>>> reorder_chunks=True,
>>> enable_grad=False,
>>> )
>>> # Process wav tensor:
>>> wav = torch.randn(1, 1, 500000)
>>> out_wavs = continuous_nnet.forward(wav)
>>> # asteroid.separate.Separatable support:
>>> from asteroid.separate import file_separate
>>> file_separate(continuous_nnet, "example.wav")
"""
def __init__(
self,
nnet,
n_src,
window_size,
in_margin,
window="hanning",
reorder_chunks=True,
enable_grad=False,
):
super().__init__()
assert window_size % 2 == 0, "Window size must be even"
self.nnet = nnet
self.window_size = window_size
self.hop_size = window_size
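        # hop == window: chunks do not overlap; in_margin supplies extra context samples around each chunk.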
self.n_src = n_src
self.in_channels = getattr(nnet, "in_channels", None)
self.in_margin = in_margin
if window:
from scipy.signal import get_window # for torch.hub
window = get_window(window, self.window_size).astype("float32")
window = torch.from_numpy(window)
self.use_window = True
else:
self.use_window = False
self.register_buffer("window", window.type_as(nnet.f_helper.stft.conv_real.weight))
self.reorder_chunks = reorder_chunks
self.enable_grad = enable_grad
def ola_forward(self, x, key='wav'):
"""Heart of the class: segment signal, apply func, combine with OLA."""
"""
x: [batchsize, channels, samples]
"""
assert x.ndim == 3
batch, channels, n_frames = x.size()
# Overlap and add:
# [batch, chans, n_frames] -> [batch, chans, win_size, n_chunks]
# ================================================================================================
def calc_L(outputsize, padding, dilation, kernel_size, stride):
return int((outputsize+2*padding-dilation*(kernel_size-1)-1)/stride+1)
# Pad signal
last_frame_samples = n_frames - int(n_frames/self.window_size) * self.window_size
if(last_frame_samples != 0):
x = F.pad(x,(0,self.window_size-last_frame_samples))
unfolded = F.unfold(
x.unsqueeze(-1),
kernel_size=(self.window_size+self.in_margin, 1),
padding=(self.in_margin, 0),
stride=(self.hop_size, 1),
)
out = []
n_chunks = unfolded.shape[-1]
######################################################################
# unfolded = unfolded.view(batch, self.window_size, channels, n_chunks) # Wrong!!!
unfolded = unfolded.view(batch, channels, self.window_size+self.in_margin, n_chunks) # Split channel out !
margin = torch.zeros(size=(batch,channels,self.in_margin,n_chunks)).type_as(unfolded)
margin[...,:-1] = unfolded[...,self.in_margin:self.in_margin*2,1:]
unfolded = torch.cat([unfolded,margin],dim=2)
# unfolded = unfolded.permute(0,2,1,3) # convert to the shape of the model input
######################################################################
for frame_idx in range(n_chunks): # for loop to spare memory
# print(unfolded[..., frame_idx].size())
if(frame_idx == 0):
frame = self.nnet(unfolded[..., frame_idx][...,self.in_margin:])
frame = frame[key] # convert to what the following code needs
frame = frame[:, :, :-self.in_margin]
elif(frame_idx == n_chunks-1 and last_frame_samples != 0):
frame = self.nnet(unfolded[..., frame_idx][...,:self.in_margin+last_frame_samples])
frame = frame[key] # convert to what the following code needs
frame = frame[:, :, self.in_margin:]
frame = F.pad(frame,(0,self.window_size-last_frame_samples))
elif(frame_idx == n_chunks-1 and last_frame_samples == 0):
frame = self.nnet(unfolded[..., frame_idx][...,:-self.in_margin])
frame = frame[key] # convert to what the following code needs
frame = frame[:, :, self.in_margin:]
else:
frame = self.nnet(unfolded[..., frame_idx])
# x_out = self.nnet(x[:,:,int(frame_idx*self.window_size)-self.in_margin:int((frame_idx+1)*self.window_size)+self.in_margin])
# print("out",torch.sum(x_out['wav']-frame['wav']))
######################################################################
# frame = frame['wav'].permute(0,2,1) # convert to what the following code needs
frame = frame[key] # convert to what the following code needs
frame = frame[:,:,self.in_margin:-self.in_margin]
# print(torch.sum(unfolded[..., frame_idx]-x[:,:,int(frame_idx*self.window_size)-self.in_margin:int((frame_idx+1)*self.window_size)+self.in_margin]))
######################################################################
# user must handle multichannel by reshaping to batch
if frame_idx == 0:
assert frame.ndim == 3, "nnet should return (batch, n_src, time)"
if self.n_src is not None:
assert frame.shape[1] == self.n_src, "nnet should return (batch, n_src, time)"
n_src = frame.shape[1]
frame = frame.reshape(batch * n_src, -1)
if frame_idx != 0 and self.reorder_chunks:
# we determine best perm based on xcorr with previous sources
frame = _reorder_sources(frame, out[-1], n_src, self.window_size, self.hop_size)
if self.use_window:
frame = frame * self.window
else:
frame = frame / (self.window_size / self.hop_size)
out.append(frame)
out = torch.stack(out).reshape(n_chunks, batch * n_src, self.window_size)
out = out.permute(1, 2, 0)
L = calc_L(outputsize=out.size()[-1]*out.size()[-2],padding=0,dilation=1,kernel_size=self.window_size,stride=self.hop_size)
out = out[...,:L]
out = torch.nn.functional.fold(
out,
(out.size()[-1]*out.size()[-2], 1),
kernel_size=(self.window_size, 1),
padding=(0, 0),
stride=(self.hop_size, 1),
)
out = out.squeeze(-1).reshape(batch, n_src, -1)
out = out[...,:n_frames]
return out
def forward(self, x, type:str, key='wav'):
"""Forward module: segment signal, apply func, combine with OLA.
Args:
x (:class:`torch.Tensor`): waveform signal of shape (batch, channels, time).
Returns:
:class:`torch.Tensor`: The output of the lambda OLA.
"""
# Here we can do the reshaping
with torch.autograd.set_grad_enabled(self.enable_grad):
olad = self.ola_forward(x,key=key)
return olad
# Implement `asteroid.separate.Separatable` (separation support)
@property
def sample_rate(self):
return self.nnet.sample_rate
def _separate(self, wav, *args, **kwargs):
return self.forward(wav, *args, **kwargs)
def _reorder_sources(
current: torch.FloatTensor,
previous: torch.FloatTensor,
n_src: int,
window_size: int,
hop_size: int,
):
"""
Reorder sources in current chunk to maximize correlation with previous chunk.
Used for Continuous Source Separation. Standard dsp correlation is used
for reordering.
Args:
current (:class:`torch.Tensor`): current chunk, tensor
of shape (batch, n_src, window_size)
previous (:class:`torch.Tensor`): previous chunk, tensor
of shape (batch, n_src, window_size)
n_src (:class:`int`): number of sources.
window_size (:class:`int`): window_size, equal to last dimension of
both current and previous.
hop_size (:class:`int`): hop_size between current and previous tensors.
"""
batch, frames = current.size()
current = current.reshape(-1, n_src, frames)
previous = previous.reshape(-1, n_src, frames)
overlap_f = window_size - hop_size
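    # Permutations are scored by (negative) correlation over the region where consecutive chunks overlap.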
def reorder_func(x, y):
x = x[..., :overlap_f]
y = y[..., -overlap_f:]
# Mean normalization
x = x - x.mean(-1, keepdim=True)
y = y - y.mean(-1, keepdim=True)
# Negative mean Correlation
return -torch.sum(x.unsqueeze(1) * y.unsqueeze(2), dim=-1)
# We maximize correlation-like between previous and current.
pit = PITReorder(reorder_func)
current = pit(current, previous)
return current.reshape(batch, frames)
class DualPathProcessing(nn.Module):
"""
Perform Dual-Path processing via overlap-add as in DPRNN [1].
Args:
chunk_size (int): Size of segmenting window.
hop_size (int): segmentation hop size.
References
[1] <NAME>, <NAME> and <NAME>. "Dual-path RNN: efficient
long sequence modeling for time-domain single-channel speech separation"
    https://arxiv.org/abs/1910.06379
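    Examples
    >>> # Illustrative sketch (arbitrary sizes): split a feature tensor into chunks and fold it back.
    >>> dp = DualPathProcessing(chunk_size=100, hop_size=50)
    >>> x = torch.randn(2, 64, 10000)
    >>> chunks = dp.unfold(x)    # (batch, channels, chunk_size, n_chunks)
    >>> x_hat = dp.fold(chunks)  # (batch, channels, time)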
"""
def __init__(self, chunk_size, hop_size):
super(DualPathProcessing, self).__init__()
self.chunk_size = chunk_size
self.hop_size = hop_size
self.n_orig_frames = None
def unfold(self, x):
r"""
Unfold the feature tensor from $(batch, channels, time)$ to
$(batch, channels, chunksize, nchunks)$.
Args:
x (:class:`torch.Tensor`): feature tensor of shape $(batch, channels, time)$.
Returns:
:class:`torch.Tensor`: spliced feature tensor of shape
$(batch, channels, chunksize, nchunks)$.
"""
# x is (batch, chan, frames)
batch, chan, frames = x.size()
assert x.ndim == 3
self.n_orig_frames = x.shape[-1]
unfolded = torch.nn.functional.unfold(
x.unsqueeze(-1),
kernel_size=(self.chunk_size, 1),
padding=(self.chunk_size, 0),
stride=(self.hop_size, 1),
)
return unfolded.reshape(
batch, chan, self.chunk_size, -1
) # (batch, chan, chunk_size, n_chunks)
def fold(self, x, output_size=None):
r"""
Folds back the spliced feature tensor.
Input shape $(batch, channels, chunksize, nchunks)$ to original shape
$(batch, channels, time)$ using overlap-add.
Args:
x (:class:`torch.Tensor`): spliced feature tensor of shape
$(batch, channels, chunksize, nchunks)$.
output_size (int, optional): sequence length of original feature tensor.
If None, the original length cached by the previous call of
:meth:`unfold` will be used.
Returns:
:class:`torch.Tensor`: feature tensor of shape $(batch, channels, time)$.
.. note:: `fold` caches the original length of the input.
"""
output_size = output_size if output_size is not None else self.n_orig_frames
# x is (batch, chan, chunk_size, n_chunks)
batch, chan, chunk_size, n_chunks = x.size()
to_unfold = x.reshape(batch, chan * self.chunk_size, n_chunks)
x = torch.nn.functional.fold(
to_unfold,
(output_size, 1),
kernel_size=(self.chunk_size, 1),
padding=(self.chunk_size, 0),
stride=(self.hop_size, 1),
)
# force float div for torch jit
x /= float(self.chunk_size) / self.hop_size
return x.reshape(batch, chan, self.n_orig_frames)
@staticmethod
def intra_process(x, module):
r"""Performs intra-chunk processing.
Args:
x (:class:`torch.Tensor`): spliced feature tensor of shape
(batch, channels, chunk_size, n_chunks).
module (:class:`torch.nn.Module`): module one wish to apply to each chunk
of the spliced feature tensor.
Returns:
:class:`torch.Tensor`: processed spliced feature tensor of shape
$(batch, channels, chunksize, nchunks)$.
.. note:: the module should have the channel first convention and accept
a 3D tensor of shape $(batch, channels, time)$.
"""
# x is (batch, channels, chunk_size, n_chunks)
batch, channels, chunk_size, n_chunks = x.size()
# we reshape to batch*chunk_size, channels, n_chunks
x = x.transpose(1, -1).reshape(batch * n_chunks, chunk_size, channels).transpose(1, -1)
x = module(x)
x = x.reshape(batch, n_chunks, channels, chunk_size).transpose(1, -1).transpose(1, 2)
return x
@staticmethod
def inter_process(x, module):
r"""Performs inter-chunk processing.
Args:
x (:class:`torch.Tensor`): spliced feature tensor of shape
$(batch, channels, chunksize, nchunks)$.
module (:class:`torch.nn.Module`): module one wish to apply between
each chunk of the spliced feature tensor.
Returns:
x (:class:`torch.Tensor`): processed spliced feature tensor of shape
$(batch, channels, chunksize, nchunks)$.
.. note:: the module should have the channel first convention and accept
a 3D tensor of shape $(batch, channels, time)$.
"""
batch, channels, chunk_size, n_chunks = x.size()
x = x.transpose(1, 2).reshape(batch * chunk_size, channels, n_chunks)
x = module(x)
x = x.reshape(batch, chunk_size, channels, n_chunks).transpose(1, 2)
return x
|
[
"torch.mean",
"scipy.optimize.linear_sum_assignment",
"torch.stack",
"torch.gather",
"scipy.signal.get_window",
"torch.nn.functional.pad",
"torch.cat",
"torch.zeros",
"torch.index_select",
"torch.einsum",
"torch.autograd.set_grad_enabled",
"torch.arange",
"torch.unsqueeze",
"torch.nn.functional.fold",
"torch.min",
"torch.from_numpy"
] |
[((6089, 6109), 'torch.mean', 'torch.mean', (['min_loss'], {}), '(min_loss)\n', (6099, 6109), False, 'import torch\n'), ((8996, 9022), 'torch.min', 'torch.min', (['loss_set'], {'dim': '(1)'}), '(loss_set, dim=1)\n', (9005, 9022), False, 'import torch\n'), ((9093, 9145), 'torch.stack', 'torch.stack', (['[perms[m] for m in min_loss_idx]'], {'dim': '(0)'}), '([perms[m] for m in min_loss_idx], dim=0)\n', (9104, 9145), False, 'import torch\n'), ((12810, 12835), 'torch.unsqueeze', 'torch.unsqueeze', (['perms', '(2)'], {}), '(perms, 2)\n', (12825, 12835), False, 'import torch\n'), ((13582, 13608), 'torch.min', 'torch.min', (['loss_set'], {'dim': '(1)'}), '(loss_set, dim=1)\n', (13591, 13608), False, 'import torch\n'), ((13680, 13732), 'torch.stack', 'torch.stack', (['[perms[m] for m in min_loss_idx]'], {'dim': '(0)'}), '([perms[m] for m in min_loss_idx], dim=0)\n', (13691, 13732), False, 'import torch\n'), ((19704, 19740), 'torch.cat', 'torch.cat', (['[unfolded, margin]'], {'dim': '(2)'}), '([unfolded, margin], dim=2)\n', (19713, 19740), False, 'import torch\n'), ((28139, 28288), 'torch.nn.functional.fold', 'torch.nn.functional.fold', (['to_unfold', '(output_size, 1)'], {'kernel_size': '(self.chunk_size, 1)', 'padding': '(self.chunk_size, 0)', 'stride': '(self.hop_size, 1)'}), '(to_unfold, (output_size, 1), kernel_size=(self.\n chunk_size, 1), padding=(self.chunk_size, 0), stride=(self.hop_size, 1))\n', (28163, 28288), False, 'import torch\n'), ((13063, 13112), 'torch.einsum', 'torch.einsum', (['"""bij,pij->bp"""', '[pwl, perms_one_hot]'], {}), "('bij,pij->bp', [pwl, perms_one_hot])\n", (13075, 13112), False, 'import torch\n'), ((17838, 17862), 'torch.from_numpy', 'torch.from_numpy', (['window'], {}), '(window)\n', (17854, 17862), False, 'import torch\n'), ((18913, 18965), 'torch.nn.functional.pad', 'F.pad', (['x', '(0, self.window_size - last_frame_samples)'], {}), '(x, (0, self.window_size - last_frame_samples))\n', (18918, 18965), True, 'import torch.nn.functional as F\n'), ((23629, 23678), 'torch.autograd.set_grad_enabled', 'torch.autograd.set_grad_enabled', (['self.enable_grad'], {}), '(self.enable_grad)\n', (23660, 23678), False, 'import torch\n'), ((11211, 11238), 'torch.index_select', 'torch.index_select', (['s', '(0)', 'b'], {}), '(s, 0, b)\n', (11229, 11238), False, 'import torch\n'), ((14648, 14694), 'torch.gather', 'torch.gather', (['pwl', '(2)', 'batch_indices[..., None]'], {}), '(pwl, 2, batch_indices[..., None])\n', (14660, 14694), False, 'import torch\n'), ((19533, 19594), 'torch.zeros', 'torch.zeros', ([], {'size': '(batch, channels, self.in_margin, n_chunks)'}), '(size=(batch, channels, self.in_margin, n_chunks))\n', (19544, 19594), False, 'import torch\n'), ((22658, 22674), 'torch.stack', 'torch.stack', (['out'], {}), '(out)\n', (22669, 22674), False, 'import torch\n'), ((5396, 5416), 'torch.mean', 'torch.mean', (['min_loss'], {}), '(min_loss)\n', (5406, 5416), False, 'import torch\n'), ((13321, 13340), 'torch.arange', 'torch.arange', (['n_src'], {}), '(n_src)\n', (13333, 13340), False, 'import torch\n'), ((17762, 17798), 'scipy.signal.get_window', 'get_window', (['window', 'self.window_size'], {}), '(window, self.window_size)\n', (17772, 17798), False, 'from scipy.signal import get_window\n'), ((20604, 20660), 'torch.nn.functional.pad', 'F.pad', (['frame', '(0, self.window_size - last_frame_samples)'], {}), '(frame, (0, self.window_size - last_frame_samples))\n', (20609, 20660), True, 'import torch.nn.functional as F\n'), ((14540, 14566), 
'scipy.optimize.linear_sum_assignment', 'linear_sum_assignment', (['pwl'], {}), '(pwl)\n', (14561, 14566), False, 'from scipy.optimize import linear_sum_assignment\n')]
|
import sqlite3
import json
import os
import db_constants
import frontmatter
from datetime import datetime
def create_projects_table_query(table_name, title, subtitle, content, image_path):
return 'CREATE TABLE IF NOT EXISTS {table_name}' \
' ({title} TEXT, {subtitle} TEXT,' \
           ' {content} TEXT, {image_path} TEXT);'.format(table_name=table_name, title=title, subtitle=subtitle, content=content, image_path=image_path)
def create_blog_table_query(table_name, title, subtitle, content, date):
return 'CREATE TABLE IF NOT EXISTS {table_name}' \
' ({title} TEXT, {subtitle} TEXT,' \
           ' {content} TEXT, {date} TEXT);'.format(table_name=table_name, title=title, subtitle=subtitle, content=content, date=date)
insert_project_query = 'INSERT OR REPLACE INTO {table_name} (title,subtitle, content, image_path) VALUES (?, ?, ?, ?);'
# Helper to insert an entry in the table
def insert_project(cursor, title, subtitle, content, path):
cursor.execute(
insert_project_query.format(table_name=db_constants.project_table),
(title, subtitle, content, path)
)
insert_blog_query = 'INSERT OR REPLACE INTO {table_name} (title, subtitle, content, date) VALUES (?, ?, ?, ?);'
def insert_blog(cursor, title, subtitle, content, date):
cursor.execute(
insert_blog_query.format(table_name=db_constants.blog_table),
(title, subtitle, content, date)
)
# Search for JSON files with content to render and parse them
def updateProjectDatabase(cursor):
for x in os.listdir(os.path.join(os.getcwd(), db_constants.content_dir)):
if (x.endswith('.json')):
with open(os.path.join(os.getcwd(), db_constants.content_dir, x)) as file:
jsondata = json.loads(file.read())
                insert_project(
                    cursor,
jsondata['title'],
jsondata['subheading'],
jsondata['description'],
jsondata['url']
)
def updateDBMarkdown(cursor, directory, function):
"""
Gets the frontmatter from the markdown pages and puts it into the database
Args:
        cursor: database cursor used to run the insert statements
        directory: path to the directory that contains the markdown files
function: function to apply to all the front matter (i.e. insert all into a blog post)
"""
data_to_write = []
for x in os.listdir(os.path.join(os.getcwd(), directory)):
if (x.endswith('.md')):
with open(os.path.join(os.getcwd(), directory, x)) as file:
                # Parse the YAML front matter and markdown body into a Post object.
                data = frontmatter.loads(file.read())
data_to_write.append(data)
# sort entries by time
data_to_write = sorted(data_to_write, key=lambda x: datetime.strptime(x['date'], "%A %B %d, %Y"), reverse=True)
for data in data_to_write:
        print(data['date'])
        function(cursor, data['title'], data['subtitle'], data.content, data['date'])
"""
Specialized helper that loads blog posts from the blog folder and inserts them into the database
"""
def updateBlogPosts(cursor):
    updateDBMarkdown(cursor, db_constants.blog_dir, insert_blog)
def createProjectsTable(cursor):
return cursor.execute(
create_projects_table_query(
table_name = db_constants.project_table,
title = db_constants.project_title,
subtitle = db_constants.project_subtitle,
content = db_constants.project_content,
image_path = db_constants.project_image
)
)
def createBlogTable(cursor):
return cursor.execute(
create_blog_table_query(
table_name = db_constants.blog_table,
title = db_constants.blog_title,
subtitle = db_constants.blog_subtitle,
content = db_constants.blog_content,
date = db_constants.blog_date
)
)
def createTablesIfNotExist(cursor):
# Create the tables if they don't already exist.
createProjectsTable(cursor)
createBlogTable(cursor)
if __name__ == "__main__":
conn = sqlite3.connect(db_constants.content_path)
cursor = conn.cursor()
createTablesIfNotExist(cursor)
updateProjectDatabase(cursor)
updateBlogPosts(cursor)
conn.commit()
conn.close()
|
[
"os.getcwd",
"datetime.datetime.strptime",
"sqlite3.connect"
] |
[((3928, 3970), 'sqlite3.connect', 'sqlite3.connect', (['db_constants.content_path'], {}), '(db_constants.content_path)\n', (3943, 3970), False, 'import sqlite3\n'), ((1478, 1489), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1487, 1489), False, 'import os\n'), ((2322, 2333), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2331, 2333), False, 'import os\n'), ((2638, 2682), 'datetime.datetime.strptime', 'datetime.strptime', (["x['date']", '"""%A %B %d, %Y"""'], {}), "(x['date'], '%A %B %d, %Y')\n", (2655, 2682), False, 'from datetime import datetime\n'), ((1588, 1599), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1597, 1599), False, 'import os\n'), ((2415, 2426), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2424, 2426), False, 'import os\n')]
|