| code (string, length 22–1.05M) | apis (list, length 1–3.31k) | extract_api (string, length 75–3.25M) |
|---|---|---|
from django.db import models
PLUG_STATES = (
    (0, 'None'),
    (1, 'Some'),
    (2, 'Many'))
WIFI_STATES = (
    (0, 'Bad'),
    (1, 'Fine'),
    (2, 'Good'))
class Cafe(models.Model):
    name = models.CharField(max_length=256)
    address = models.CharField(max_length=256)
    def __str__(self):
        return self.name
class Review(models.Model):
    cafe = models.ForeignKey(Cafe, on_delete=models.CASCADE)
    comment = models.TextField(null=True)
    plug = models.IntegerField(choices=PLUG_STATES)
    wifi = models.IntegerField(choices=WIFI_STATES)
    def plug_string(self):
        if self.plug == 0:
            return "No plugs :("
        if self.plug == 1:
            return "Some plugs"
        if self.plug == 2:
            return "Lots of plugs!"
    def wifi_string(self):
        if self.wifi == 0:
            return "Bad wifi"
        if self.wifi == 1:
            return "Okay wifi"
        if self.wifi == 2:
            return "Good wifi"
class Hours(models.Model):
    cafe = models.ForeignKey(Cafe, on_delete=models.CASCADE)
    # In minutes
    open_weekday = models.IntegerField()
    open_sat = models.IntegerField()
    open_sun = models.IntegerField()
    # In minutes
    close_weekday = models.IntegerField()
    close_sat = models.IntegerField()
    close_sun = models.IntegerField()
class Prices(models.Model):
    cafe = models.ForeignKey(Cafe, on_delete=models.CASCADE)
    americano = models.IntegerField()
|
[
"django.db.models.CharField",
"django.db.models.TextField",
"django.db.models.IntegerField",
"django.db.models.ForeignKey"
] |
[((227, 259), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)'}), '(max_length=256)\n', (243, 259), False, 'from django.db import models\n'), ((274, 306), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)'}), '(max_length=256)\n', (290, 306), False, 'from django.db import models\n'), ((397, 446), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Cafe'], {'on_delete': 'models.CASCADE'}), '(Cafe, on_delete=models.CASCADE)\n', (414, 446), False, 'from django.db import models\n'), ((461, 488), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)'}), '(null=True)\n', (477, 488), False, 'from django.db import models\n'), ((500, 540), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': 'PLUG_STATES'}), '(choices=PLUG_STATES)\n', (519, 540), False, 'from django.db import models\n'), ((552, 592), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': 'WIFI_STATES'}), '(choices=WIFI_STATES)\n', (571, 592), False, 'from django.db import models\n'), ((1045, 1094), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Cafe'], {'on_delete': 'models.CASCADE'}), '(Cafe, on_delete=models.CASCADE)\n', (1062, 1094), False, 'from django.db import models\n'), ((1131, 1152), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1150, 1152), False, 'from django.db import models\n'), ((1168, 1189), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1187, 1189), False, 'from django.db import models\n'), ((1205, 1226), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1224, 1226), False, 'from django.db import models\n'), ((1264, 1285), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1283, 1285), False, 'from django.db import models\n'), ((1302, 1323), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1321, 1323), False, 'from django.db import models\n'), ((1340, 1361), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1359, 1361), False, 'from django.db import models\n'), ((1403, 1452), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Cafe'], {'on_delete': 'models.CASCADE'}), '(Cafe, on_delete=models.CASCADE)\n', (1420, 1452), False, 'from django.db import models\n'), ((1469, 1490), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1488, 1490), False, 'from django.db import models\n')]
|
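A brief usage sketch for the models above (illustrative data only; it assumes the app is installed and migrations have been run). `Cafe.__str__` and the two helper methods on `Review` drive the human-readable output:

# Hypothetical shell session (python manage.py shell); the names and values are made up.
cafe = Cafe.objects.create(name="Corner Beans", address="1 Main St")
review = Review.objects.create(cafe=cafe, comment="Cozy", plug=2, wifi=1)
print(str(cafe))              # "Corner Beans"
print(review.plug_string())   # "Lots of plugs!"
print(review.wifi_string())   # "Okay wifi"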
import torch
import numpy as np
from tqdm import tqdm
from torch.utils.data import DataLoader
class TrainingConfig:
lr=3e-4
betas=(0.9,0.995)
weight_decay=5e-4
num_workers=0
max_epochs=10
batch_size=64
ckpt_path=None #Specify a model path here. Ex: "./Model.pt"
shuffle=True
pin_memory=True
verbose=True
def __init__(self,**kwargs):
for key,value in kwargs.items():
setattr(self,key,value)
class Trainer:
def __init__(self,model,train_dataset,test_dataset,config):
self.model = model
self.train_dataset=train_dataset
self.test_dataset=test_dataset
self.config = config
self.train_losses = []
self.train_accuracies = []
self.test_losses = []
self.test_accuracies = []
self.device = "cpu"
if torch.cuda.is_available():
self.device = torch.cuda.current_device()
self.model = self.model.to(self.device)
def save_checkpoint(self):
raw_model = self.model.module if hasattr(self.model,"module") else self.model
torch.save(raw_model.state_dict(),self.config.ckpt_path)
print("Model Saved!")
def train(self):
model,config = self.model,self.config
raw_model = self.model.module if hasattr(self.model,"module") else self.model
optimizer = raw_model.configure_optimizers(config)
def run_epoch(split):
is_train = split=="train"
if is_train:
model.train()
else:
                model.eval()  # important: the model uses dropout, so it must be switched to eval mode here
data = self.train_dataset if is_train else self.test_dataset
loader = DataLoader(data,batch_size=config.batch_size,
shuffle=config.shuffle,
pin_memory=config.pin_memory,
num_workers=config.num_workers)
losses = []
accuracies = []
correct = 0
num_samples = 0
pbar = tqdm(enumerate(loader),total=len(loader)) if is_train and config.verbose else enumerate(loader)
for it,(images,targets) in pbar:
images = images.to(self.device)
targets = targets.to(self.device)
num_samples += targets.size(0)
with torch.set_grad_enabled(is_train):
#forward the model
logits,loss = model(images,targets)
loss = loss.mean()
losses.append(loss.item())
with torch.no_grad():
predictions = torch.argmax(logits,dim=1) #softmax gives prob distribution. Find the index of max prob
correct+= predictions.eq(targets).sum().item()
accuracies.append(correct/num_samples)
if is_train:
model.zero_grad()
loss.backward()
optimizer.step()
if config.verbose:
pbar.set_description(f"Epoch:{epoch+1} iteration:{it+1} | loss:{np.mean(losses)} accuracy:{np.mean(accuracies)} lr:{config.lr}")
self.train_losses.append(np.mean(losses))
self.train_accuracies.append(np.mean(accuracies))
if not is_train:
test_loss = np.mean(losses)
if config.verbose:
print(f"\nEpoch:{epoch+1} | Test Loss:{test_loss} Test Accuracy:{correct/num_samples}\n")
self.test_losses.append(test_loss)
self.test_accuracies.append(correct/num_samples)
return test_loss
best_loss = float('inf')
test_loss = float('inf')
for epoch in range(config.max_epochs):
run_epoch('train')
if self.test_dataset is not None:
test_loss = run_epoch("test")
good_model = self.test_dataset is not None and test_loss < best_loss
if config.ckpt_path is not None and good_model:
best_loss = test_loss
self.save_checkpoint()
|
[
"torch.utils.data.DataLoader",
"torch.argmax",
"numpy.mean",
"torch.cuda.is_available",
"torch.set_grad_enabled",
"torch.cuda.current_device",
"torch.no_grad"
] |
[((907, 932), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (930, 932), False, 'import torch\n'), ((961, 988), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (986, 988), False, 'import torch\n'), ((1846, 1982), 'torch.utils.data.DataLoader', 'DataLoader', (['data'], {'batch_size': 'config.batch_size', 'shuffle': 'config.shuffle', 'pin_memory': 'config.pin_memory', 'num_workers': 'config.num_workers'}), '(data, batch_size=config.batch_size, shuffle=config.shuffle,\n pin_memory=config.pin_memory, num_workers=config.num_workers)\n', (1856, 1982), False, 'from torch.utils.data import DataLoader\n'), ((3705, 3720), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (3712, 3720), True, 'import numpy as np\n'), ((2563, 2595), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['is_train'], {}), '(is_train)\n', (2585, 2595), False, 'import torch\n'), ((2826, 2841), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2839, 2841), False, 'import torch\n'), ((2878, 2905), 'torch.argmax', 'torch.argmax', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (2890, 2905), False, 'import torch\n'), ((3544, 3559), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (3551, 3559), True, 'import numpy as np\n'), ((3611, 3630), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (3618, 3630), True, 'import numpy as np\n'), ((3411, 3426), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (3418, 3426), True, 'import numpy as np\n'), ((3438, 3457), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (3445, 3457), True, 'import numpy as np\n')]
|
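The Trainer above relies on an implicit model contract that is easy to miss: `model(images, targets)` must return a `(logits, loss)` pair, and the model must implement `configure_optimizers(config)`. The following is a minimal sketch of a compatible model; it is not part of the original code, and the layer sizes (MNIST-like 28x28 inputs) are arbitrary assumptions.

import torch
import torch.nn as nn
import torch.nn.functional as F

class TinyClassifier(nn.Module):
    """Illustrative model satisfying the Trainer's expectations (hypothetical)."""
    def __init__(self, num_classes=10):
        super().__init__()
        self.net = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 128), nn.ReLU(),
                                 nn.Dropout(0.5), nn.Linear(128, num_classes))

    def forward(self, images, targets=None):
        logits = self.net(images)
        loss = F.cross_entropy(logits, targets) if targets is not None else None
        return logits, loss  # the Trainer unpacks exactly this pair

    def configure_optimizers(self, config):
        return torch.optim.AdamW(self.parameters(), lr=config.lr,
                                  betas=config.betas, weight_decay=config.weight_decay)

# trainer = Trainer(TinyClassifier(), train_dataset, test_dataset, TrainingConfig(max_epochs=2))
# trainer.train()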
#!/usr/bin/env python
#
# Copyright 2017-2018 University Of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import os
__author__ = '<NAME>'
class Type:
INSTALLED = 'INSTALLED'
STAGEABLE = 'STAGEABLE'
class TransformationCatalog:
def __init__(self, workflow_dir, filename='tc.txt'):
"""
Create a Pegasus transformation catalog.
:param workflow_dir: Path to the workflow directory
        :param filename: catalog filename (default: tc.txt)
"""
self.workflow_dir = workflow_dir
self.filename = filename
self._executables = []
self._containers = []
def add(self, executable):
"""
Add an executable to the transformation catalog.
:param executable: A DAX3 Executable object
"""
if not executable:
raise Exception('An executable should be provided.')
if executable not in self._executables:
self._executables.append(executable)
def add_container(self, container):
"""
Add a container to the transformation catalog.
:param container: A DAX3 Container object
"""
if not container:
raise Exception('A container should be provided.')
if container not in self._containers:
self._containers.append(container)
def write(self, force=False):
"""
Write the catalog to a file.
:param force: whether to overwrite the catalog file
"""
catalog_file = self.workflow_dir + '/' + self.filename
if not os.path.isfile(catalog_file) or force:
with open(catalog_file, 'w') as ppf:
# executables
for e in self._executables:
# executable name
name = e.name
if e.namespace:
name = e.namespace + '::' + name
if e.version:
name = name + ':' + e.version
ppf.write('tr %s {\n' % name)
# profiles
for p in e.profiles:
ppf.write(
'\tprofile %s "%s" "%s"\n' %
(p.namespace, p.key, p.value)
)
# pfns
installed = 'INSTALLED'
if not e.installed:
installed = 'STAGEABLE'
for pfn in e.pfns:
ppf.write('\tsite %s {\n' % pfn.site)
# profiles
for p in pfn.profiles:
ppf.write(
'\t\tprofile %s "%s" "%s"\n' %
(p.namespace, p.key, p.value)
)
ppf.write('\t\tpfn "%s"\n' % pfn.url)
if e.arch:
ppf.write('\t\tarch "%s"\n' % e.arch)
if e.os:
ppf.write('\t\tos "%s"\n' % e.os)
if e.osrelease:
ppf.write('\t\tosrelease "%s"\n' % e.osrelease)
if e.osversion:
ppf.write('\t\tosversion "%s"\n' % e.osversion)
ppf.write('\t\ttype "%s"\n' % installed)
# reference to container
if e.container:
ppf.write('\t\tcontainer "%s"\n' %
e.container.name
)
ppf.write('\t}\n')
ppf.write('}\n\n')
# containers
for c in self._containers:
ppf.write('cont %s {\n' % c.name)
ppf.write('\ttype "%s"\n' % c.type)
ppf.write('\timage "%s"\n' % c.image)
if c.imagesite:
ppf.write('\timage_site "%s"\n' % c.imagesite)
if c.dockerfile:
ppf.write('\tdockerfile "%s"\n' % c.dockerfile)
# mount
for m in c.mount:
ppf.write('\tmount "%s"\n' % m)
# profiles
for p in c.profiles:
ppf.write(
'\tprofile %s "%s" "%s"\n' %
(p.namespace, p.key, p.value)
)
ppf.write('}\n\n')
else:
print(
'\x1b[0;35mWARNING: Transformation Catalog (%s) already exists. Use "force=True" '
'to overwrite it.\n\x1b[0m' % catalog_file
)
|
[
"os.path.isfile"
] |
[((2135, 2163), 'os.path.isfile', 'os.path.isfile', (['catalog_file'], {}), '(catalog_file)\n', (2149, 2163), False, 'import os\n')]
|
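Because write() only reads plain attributes from the registered objects, the catalog can be exercised without Pegasus installed. The sketch below uses types.SimpleNamespace stand-ins purely for illustration; in real use these would be Pegasus DAX3 Executable/PFN objects, and the workflow directory is a made-up path that must already exist.

from types import SimpleNamespace

# Illustrative stand-in for a DAX3 Executable; attribute names mirror what write() reads.
pfn = SimpleNamespace(site='local', url='file:///usr/bin/echo', profiles=[])
echo = SimpleNamespace(name='echo', namespace=None, version=None, profiles=[],
                       pfns=[pfn], installed=True, arch='x86_64', os='linux',
                       osrelease=None, osversion=None, container=None)

tc = TransformationCatalog('/tmp/my-workflow')   # hypothetical, pre-existing directory
tc.add(echo)
tc.write()    # creates /tmp/my-workflow/tc.txt
tc.write()    # prints the "already exists" warning unless force=True is passed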
# -*- coding: utf-8 -*-
from PyQt4 import QtGui, QtCore
class MainWindow(QtGui.QWidget):
    def __init__(self, parent=None):
        QtGui.QMainWindow.__init__(self, parent)
        # Create the table view and its model
        table = QtGui.QTableView(parent=self)
        self.model = QtGui.QStandardItemModel(parent=self)
        self.model.setHorizontalHeaderLabels(('Name', 'Age'))
        table.setModel(self.model)
        # Create the "Add" button
        button = QtGui.QPushButton('Add', parent=self)
        # Connect the signal to its slot
        button.clicked.connect(self.add)
        #button.clicked.connect(self.info)
        # Create a vertical layout to hold the table and the button
        layout = QtGui.QVBoxLayout()
        layout.addWidget(table)
        layout.addWidget(button)
        self.setLayout(layout)
    def add(self):
        dialog = AskDialog(parent=self)
        #dialog = infoDialog(parent=self)
        #if dialog.exec_():
        #    self.model.appendRow((
        #        QtGui.QStandardItem(dialog.name()),
        #        QtGui.QStandardItem(str(dialog.age())),
        #    ))
        dialog.exec_()
        print("name=%s" % dialog.name())  # the age spin box is commented out above, so only the name is printed
        dialog.destroy()
    def info(self):
        infostr = 'info need to show'
        dialog = InfoDialog(infostr, parent=self)
        dialog.exec_()
class AskDialog(QtGui.QDialog):
    def __init__(self, parent=None):
        QtGui.QDialog.__init__(self, parent)
        self.resize(240, 100)
        # Grid layout for the QLabel, QLineEdit and QSpinBox widgets
        grid = QtGui.QGridLayout()
        grid.addWidget(QtGui.QLabel('ProjectName', parent=self), 0, 0, 1, 1)
        self.leName = QtGui.QLineEdit(parent=self)
        grid.addWidget(self.leName, 0, 1, 1, 1)
        #grid.addWidget(QtGui.QLabel('Age', parent=self), 1, 0, 1, 1)
        #self.sbAge = QtGui.QSpinBox(parent=self)
        #grid.addWidget(self.sbAge, 1, 1, 1, 1)
        # Create the button box for OK / Cancel
        buttonBox = QtGui.QDialogButtonBox(parent=self)
        buttonBox.setOrientation(QtCore.Qt.Horizontal)  # horizontal orientation
        buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)  # the OK and Cancel buttons
        # Connect signals and slots
        buttonBox.accepted.connect(self.accept)  # OK
        buttonBox.rejected.connect(self.reject)  # Cancel
        # Vertical layout holding the grid and the buttons
        layout = QtGui.QVBoxLayout()
        # Add the grid layout created above
        layout.addLayout(grid)
        # Add a spacer item to tidy up the layout
        spacerItem = QtGui.QSpacerItem(20, 48, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
        layout.addItem(spacerItem)
        # ButtonBox
        layout.addWidget(buttonBox)
        self.setLayout(layout)
    def name(self):
        return self.leName.text()
    #def age(self):
    #    return self.sbAge.value()
class InfoDialog(QtGui.QDialog):
    def __init__(self, infostr, parent=None):
        QtGui.QDialog.__init__(self, parent)
        self.resize(240, 200)
        # Grid layout for the QLabel, QLineEdit and QSpinBox widgets
        grid = QtGui.QGridLayout()
        grid.addWidget(QtGui.QLabel(infostr, parent=self), 0, 0, 1, 1)
if __name__ == '__main__':
    import sys
    app = QtGui.QApplication(sys.argv)
    mainWindow = MainWindow()
    mainWindow.show()
    sys.exit(app.exec_())
|
[
"PyQt4.QtGui.QSpacerItem",
"PyQt4.QtGui.QLineEdit",
"PyQt4.QtGui.QGridLayout",
"PyQt4.QtGui.QDialog.__init__",
"PyQt4.QtGui.QLabel",
"PyQt4.QtGui.QMainWindow.__init__",
"PyQt4.QtGui.QStandardItemModel",
"PyQt4.QtGui.QVBoxLayout",
"PyQt4.QtGui.QApplication",
"PyQt4.QtGui.QPushButton",
"PyQt4.QtGui.QDialogButtonBox",
"PyQt4.QtGui.QTableView"
] |
[((3299, 3327), 'PyQt4.QtGui.QApplication', 'QtGui.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (3317, 3327), False, 'from PyQt4 import QtGui, QtCore\n'), ((140, 180), 'PyQt4.QtGui.QMainWindow.__init__', 'QtGui.QMainWindow.__init__', (['self', 'parent'], {}), '(self, parent)\n', (166, 180), False, 'from PyQt4 import QtGui, QtCore\n'), ((228, 257), 'PyQt4.QtGui.QTableView', 'QtGui.QTableView', ([], {'parent': 'self'}), '(parent=self)\n', (244, 257), False, 'from PyQt4 import QtGui, QtCore\n'), ((280, 317), 'PyQt4.QtGui.QStandardItemModel', 'QtGui.QStandardItemModel', ([], {'parent': 'self'}), '(parent=self)\n', (304, 317), False, 'from PyQt4 import QtGui, QtCore\n'), ((461, 498), 'PyQt4.QtGui.QPushButton', 'QtGui.QPushButton', (['"""Add"""'], {'parent': 'self'}), "('Add', parent=self)\n", (478, 498), False, 'from PyQt4 import QtGui, QtCore\n'), ((677, 696), 'PyQt4.QtGui.QVBoxLayout', 'QtGui.QVBoxLayout', ([], {}), '()\n', (694, 696), False, 'from PyQt4 import QtGui, QtCore\n'), ((1435, 1471), 'PyQt4.QtGui.QDialog.__init__', 'QtGui.QDialog.__init__', (['self', 'parent'], {}), '(self, parent)\n', (1457, 1471), False, 'from PyQt4 import QtGui, QtCore\n'), ((1578, 1597), 'PyQt4.QtGui.QGridLayout', 'QtGui.QGridLayout', ([], {}), '()\n', (1595, 1597), False, 'from PyQt4 import QtGui, QtCore\n'), ((1703, 1731), 'PyQt4.QtGui.QLineEdit', 'QtGui.QLineEdit', ([], {'parent': 'self'}), '(parent=self)\n', (1718, 1731), False, 'from PyQt4 import QtGui, QtCore\n'), ((2020, 2055), 'PyQt4.QtGui.QDialogButtonBox', 'QtGui.QDialogButtonBox', ([], {'parent': 'self'}), '(parent=self)\n', (2042, 2055), False, 'from PyQt4 import QtGui, QtCore\n'), ((2437, 2456), 'PyQt4.QtGui.QVBoxLayout', 'QtGui.QVBoxLayout', ([], {}), '()\n', (2454, 2456), False, 'from PyQt4 import QtGui, QtCore\n'), ((2583, 2669), 'PyQt4.QtGui.QSpacerItem', 'QtGui.QSpacerItem', (['(20)', '(48)', 'QtGui.QSizePolicy.Minimum', 'QtGui.QSizePolicy.Expanding'], {}), '(20, 48, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.\n Expanding)\n', (2600, 2669), False, 'from PyQt4 import QtGui, QtCore\n'), ((3003, 3039), 'PyQt4.QtGui.QDialog.__init__', 'QtGui.QDialog.__init__', (['self', 'parent'], {}), '(self, parent)\n', (3025, 3039), False, 'from PyQt4 import QtGui, QtCore\n'), ((3146, 3165), 'PyQt4.QtGui.QGridLayout', 'QtGui.QGridLayout', ([], {}), '()\n', (3163, 3165), False, 'from PyQt4 import QtGui, QtCore\n'), ((1624, 1664), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['"""ProjectName"""'], {'parent': 'self'}), "('ProjectName', parent=self)\n", (1636, 1664), False, 'from PyQt4 import QtGui, QtCore\n'), ((3192, 3226), 'PyQt4.QtGui.QLabel', 'QtGui.QLabel', (['infostr'], {'parent': 'self'}), '(infostr, parent=self)\n', (3204, 3226), False, 'from PyQt4 import QtGui, QtCore\n')]
|
import yfinance as yf
from datetime import date
from datetime import timedelta
import ReportModule as Rm
import mplfinance as mpf
import matplotlib.pyplot as plt
import math
list_of_stocks = []
print(date.today().weekday())
stock = "AAPL"
zile_in_trecut = 1  # how many days have passed since Friday: if today is Monday, this should be 3
if (date.today() + timedelta(-zile_in_trecut)).weekday() == 4:
    print("This is Friday")
    weekly = yf.download(tickers=stock, interval="1wk", period="2y")
    weekly = weekly.drop([date.today() + timedelta(-1)])  # the weekly download also includes the most recent Friday, so drop that row
    print(weekly)
    print(Rm.return_report_from_3_weeks_ago())
|
[
"ReportModule.return_report_from_3_weeks_ago",
"datetime.timedelta",
"datetime.date.today",
"yfinance.download"
] |
[((422, 477), 'yfinance.download', 'yf.download', ([], {'tickers': 'stock', 'interval': '"""1wk"""', 'period': '"""2y"""'}), "(tickers=stock, interval='1wk', period='2y')\n", (433, 477), True, 'import yfinance as yf\n'), ((633, 668), 'ReportModule.return_report_from_3_weeks_ago', 'Rm.return_report_from_3_weeks_ago', ([], {}), '()\n', (666, 668), True, 'import ReportModule as Rm\n'), ((200, 212), 'datetime.date.today', 'date.today', ([], {}), '()\n', (210, 212), False, 'from datetime import date\n'), ((500, 512), 'datetime.date.today', 'date.today', ([], {}), '()\n', (510, 512), False, 'from datetime import date\n'), ((515, 528), 'datetime.timedelta', 'timedelta', (['(-1)'], {}), '(-1)\n', (524, 528), False, 'from datetime import timedelta\n'), ((325, 337), 'datetime.date.today', 'date.today', ([], {}), '()\n', (335, 337), False, 'from datetime import date\n'), ((340, 366), 'datetime.timedelta', 'timedelta', (['(-zile_in_trecut)'], {}), '(-zile_in_trecut)\n', (349, 366), False, 'from datetime import timedelta\n')]
|
"""
some helper function for setup.py file
use: copy it alongside setup.py and in the setup.py file:
from helper_setup import read_readme, activate_cmd_publish, activate_cmd_build
"""
import sys
import os
import shutil
from setuptools import find_packages
from importlib import import_module
def read(*paths):
with open(*paths, 'r') as f:
return f.read()
def read_readme():
"""will read the README.* file, it can be any extention"""
return read(next(filter(lambda f: 'README.' in f, os.listdir('.'))))
def activate_cmd_publish():
"""
need to be run in setup.py to take action,
if `python setup.py publish`: will build / upload / clean
"""
if sys.argv[-1] == 'publish':
publish()
sys.exit()
def activate_cmd_build():
"""
need to be run in setup.py to take action,
    if `python setup.py build`: will clean old artifacts and build the wheel
"""
if sys.argv[-1] == 'build':
wheel()
sys.exit()
def wheel():
clean_dirs()
check_installed_tools()
build_wheel()
def publish():
wheel()
upload_wheel()
clean_dirs()
print_git_tag_info()
def check_installed_tools():
if os.system('pip freeze | grep twine > /dev/null'):
print('twine not installed.\nUse `pip install twine`.\nExiting.')
sys.exit()
def build_wheel():
print('\nbuilding ...')
os.system('python setup.py bdist_wheel > /dev/null') # omit sdist, build only the wheel
def upload_wheel():
print('\nuploading ...')
os.system('twine upload dist/*') # upload the package to PyPI
def is_ext(ext):
ext = ext[1:] if ext[0] == '.' else ext
def with_file(file):
splited = file.split('.')
return len(splited) > 1 and splited[-1] == ext
return with_file
def get_files_ext(ext):
return filter(is_ext(ext), os.listdir('.'))
def clean_dirs():
for dir in ['dist', 'build']:
shutil.rmtree(dir, ignore_errors=True)
for egg in get_files_ext('.egg-info'):
shutil.rmtree(egg)
def print_git_tag_info():
__init__ = import_module(find_packages()[0])
print('\nYou probably want to also tag the version now:')
print(" git tag -a v{0} -m 'release {0}'".format(__init__.__version__))
print(' git push --tags')
|
[
"os.system",
"sys.exit",
"shutil.rmtree",
"os.listdir",
"setuptools.find_packages"
] |
[((1186, 1234), 'os.system', 'os.system', (['"""pip freeze | grep twine > /dev/null"""'], {}), "('pip freeze | grep twine > /dev/null')\n", (1195, 1234), False, 'import os\n'), ((1382, 1434), 'os.system', 'os.system', (['"""python setup.py bdist_wheel > /dev/null"""'], {}), "('python setup.py bdist_wheel > /dev/null')\n", (1391, 1434), False, 'import os\n'), ((1526, 1558), 'os.system', 'os.system', (['"""twine upload dist/*"""'], {}), "('twine upload dist/*')\n", (1535, 1558), False, 'import os\n'), ((741, 751), 'sys.exit', 'sys.exit', ([], {}), '()\n', (749, 751), False, 'import sys\n'), ((969, 979), 'sys.exit', 'sys.exit', ([], {}), '()\n', (977, 979), False, 'import sys\n'), ((1318, 1328), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1326, 1328), False, 'import sys\n'), ((1845, 1860), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (1855, 1860), False, 'import os\n'), ((1924, 1962), 'shutil.rmtree', 'shutil.rmtree', (['dir'], {'ignore_errors': '(True)'}), '(dir, ignore_errors=True)\n', (1937, 1962), False, 'import shutil\n'), ((2014, 2032), 'shutil.rmtree', 'shutil.rmtree', (['egg'], {}), '(egg)\n', (2027, 2032), False, 'import shutil\n'), ((2090, 2105), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (2103, 2105), False, 'from setuptools import find_packages\n'), ((499, 514), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (509, 514), False, 'import os\n')]
|
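Following the module docstring above, a minimal setup.py using these helpers might look like the sketch below; the package name and metadata are placeholders, not taken from the original project.

# setup.py -- illustrative only; 'mypackage' and its metadata are made up.
from setuptools import setup, find_packages
from helper_setup import read_readme, activate_cmd_publish, activate_cmd_build

activate_cmd_build()     # handles `python setup.py build`
activate_cmd_publish()   # handles `python setup.py publish`

setup(
    name='mypackage',
    version='0.1.0',
    packages=find_packages(),
    long_description=read_readme(),
)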
from voluptuous import Schema, Required, Optional, ALLOW_EXTRA, REMOVE_EXTRA, Any, Url, Range, All
from githubcap.enums import State
from .user import USER_SCHEMA
MILESTONE_SCHEMA = Schema({
Required("url"): Url(),
Required("html_url"): Url(),
Required("labels_url"): Url(),
Required("id"): int,
Required("number"): int,
Required("state"): Schema(Any(*State.all_values())),
Required("title"): str,
Required("description"): Schema(Any(str, None)),
Required("creator"): USER_SCHEMA,
Required("open_issues"): int,
Required("closed_issues"): int,
Required("created_at"): str,
Required("updated_at"): Schema(Any(str, None)),
Required("closed_at"): Schema(Any(str, None)),
Required("due_on"): Schema(Any(str, None))
})
|
[
"githubcap.enums.State.all_values",
"voluptuous.Required",
"voluptuous.Url",
"voluptuous.Any"
] |
[((197, 212), 'voluptuous.Required', 'Required', (['"""url"""'], {}), "('url')\n", (205, 212), False, 'from voluptuous import Schema, Required, Optional, ALLOW_EXTRA, REMOVE_EXTRA, Any, Url, Range, All\n'), ((225, 245), 'voluptuous.Required', 'Required', (['"""html_url"""'], {}), "('html_url')\n", (233, 245), False, 'from voluptuous import Schema, Required, Optional, ALLOW_EXTRA, REMOVE_EXTRA, Any, Url, Range, All\n'), ((258, 280), 'voluptuous.Required', 'Required', (['"""labels_url"""'], {}), "('labels_url')\n", (266, 280), False, 'from voluptuous import Schema, Required, Optional, ALLOW_EXTRA, REMOVE_EXTRA, Any, Url, Range, All\n'), ((293, 307), 'voluptuous.Required', 'Required', (['"""id"""'], {}), "('id')\n", (301, 307), False, 'from voluptuous import Schema, Required, Optional, ALLOW_EXTRA, REMOVE_EXTRA, Any, Url, Range, All\n'), ((318, 336), 'voluptuous.Required', 'Required', (['"""number"""'], {}), "('number')\n", (326, 336), False, 'from voluptuous import Schema, Required, Optional, ALLOW_EXTRA, REMOVE_EXTRA, Any, Url, Range, All\n'), ((347, 364), 'voluptuous.Required', 'Required', (['"""state"""'], {}), "('state')\n", (355, 364), False, 'from voluptuous import Schema, Required, Optional, ALLOW_EXTRA, REMOVE_EXTRA, Any, Url, Range, All\n'), ((404, 421), 'voluptuous.Required', 'Required', (['"""title"""'], {}), "('title')\n", (412, 421), False, 'from voluptuous import Schema, Required, Optional, ALLOW_EXTRA, REMOVE_EXTRA, Any, Url, Range, All\n'), ((432, 455), 'voluptuous.Required', 'Required', (['"""description"""'], {}), "('description')\n", (440, 455), False, 'from voluptuous import Schema, Required, Optional, ALLOW_EXTRA, REMOVE_EXTRA, Any, Url, Range, All\n'), ((485, 504), 'voluptuous.Required', 'Required', (['"""creator"""'], {}), "('creator')\n", (493, 504), False, 'from voluptuous import Schema, Required, Optional, ALLOW_EXTRA, REMOVE_EXTRA, Any, Url, Range, All\n'), ((523, 546), 'voluptuous.Required', 'Required', (['"""open_issues"""'], {}), "('open_issues')\n", (531, 546), False, 'from voluptuous import Schema, Required, Optional, ALLOW_EXTRA, REMOVE_EXTRA, Any, Url, Range, All\n'), ((557, 582), 'voluptuous.Required', 'Required', (['"""closed_issues"""'], {}), "('closed_issues')\n", (565, 582), False, 'from voluptuous import Schema, Required, Optional, ALLOW_EXTRA, REMOVE_EXTRA, Any, Url, Range, All\n'), ((593, 615), 'voluptuous.Required', 'Required', (['"""created_at"""'], {}), "('created_at')\n", (601, 615), False, 'from voluptuous import Schema, Required, Optional, ALLOW_EXTRA, REMOVE_EXTRA, Any, Url, Range, All\n'), ((626, 648), 'voluptuous.Required', 'Required', (['"""updated_at"""'], {}), "('updated_at')\n", (634, 648), False, 'from voluptuous import Schema, Required, Optional, ALLOW_EXTRA, REMOVE_EXTRA, Any, Url, Range, All\n'), ((678, 699), 'voluptuous.Required', 'Required', (['"""closed_at"""'], {}), "('closed_at')\n", (686, 699), False, 'from voluptuous import Schema, Required, Optional, ALLOW_EXTRA, REMOVE_EXTRA, Any, Url, Range, All\n'), ((729, 747), 'voluptuous.Required', 'Required', (['"""due_on"""'], {}), "('due_on')\n", (737, 747), False, 'from voluptuous import Schema, Required, Optional, ALLOW_EXTRA, REMOVE_EXTRA, Any, Url, Range, All\n'), ((214, 219), 'voluptuous.Url', 'Url', ([], {}), '()\n', (217, 219), False, 'from voluptuous import Schema, Required, Optional, ALLOW_EXTRA, REMOVE_EXTRA, Any, Url, Range, All\n'), ((247, 252), 'voluptuous.Url', 'Url', ([], {}), '()\n', (250, 252), False, 'from voluptuous import Schema, Required, Optional, ALLOW_EXTRA, 
REMOVE_EXTRA, Any, Url, Range, All\n'), ((282, 287), 'voluptuous.Url', 'Url', ([], {}), '()\n', (285, 287), False, 'from voluptuous import Schema, Required, Optional, ALLOW_EXTRA, REMOVE_EXTRA, Any, Url, Range, All\n'), ((464, 478), 'voluptuous.Any', 'Any', (['str', 'None'], {}), '(str, None)\n', (467, 478), False, 'from voluptuous import Schema, Required, Optional, ALLOW_EXTRA, REMOVE_EXTRA, Any, Url, Range, All\n'), ((657, 671), 'voluptuous.Any', 'Any', (['str', 'None'], {}), '(str, None)\n', (660, 671), False, 'from voluptuous import Schema, Required, Optional, ALLOW_EXTRA, REMOVE_EXTRA, Any, Url, Range, All\n'), ((708, 722), 'voluptuous.Any', 'Any', (['str', 'None'], {}), '(str, None)\n', (711, 722), False, 'from voluptuous import Schema, Required, Optional, ALLOW_EXTRA, REMOVE_EXTRA, Any, Url, Range, All\n'), ((756, 770), 'voluptuous.Any', 'Any', (['str', 'None'], {}), '(str, None)\n', (759, 770), False, 'from voluptuous import Schema, Required, Optional, ALLOW_EXTRA, REMOVE_EXTRA, Any, Url, Range, All\n'), ((378, 396), 'githubcap.enums.State.all_values', 'State.all_values', ([], {}), '()\n', (394, 396), False, 'from githubcap.enums import State\n')]
|
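Voluptuous schemas are callables, so validating a milestone payload is a single call. A short sketch, assuming MILESTONE_SCHEMA from the module above is importable (which also requires githubcap and the sibling user module); the payload dict is deliberately incomplete to show the error path:

from voluptuous import MultipleInvalid

payload = {"url": "https://api.github.com/repos/org/repo/milestones/1"}  # missing most required keys
try:
    MILESTONE_SCHEMA(payload)
except MultipleInvalid as exc:
    print(exc)  # lists the missing required keys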
# Copyright (c) 2016-2021 <NAME>
# Licensed under the zlib/libpng License
# https://opensource.org/licenses/Zlib
from libusb._platform import is_windows
if is_windows:
import ctypes
from ctypes import windll
from ctypes import wintypes
from ctypes import WINFUNCTYPE
from ctypes.wintypes import (
CHAR, WCHAR, BOOLEAN, BOOL, BYTE, WORD, DWORD, SHORT, USHORT, INT,
UINT, LONG, ULONG, LARGE_INTEGER, ULARGE_INTEGER, FLOAT, DOUBLE,
LPBYTE, PBYTE, LPWORD, PWORD, LPDWORD, PDWORD, LPLONG, PLONG, LPSTR,
LPCSTR, LPVOID, LPCVOID, LPVOID as PVOID, HANDLE, LPHANDLE, PHANDLE,
WPARAM, LPARAM, FILETIME, LPFILETIME,
)
from ctypes.wintypes import WPARAM as ULONG_PTR # workaround
PULONG_PTR = ctypes.POINTER(ULONG_PTR)
ULONG32 = ctypes.c_uint32
ULONGLONG = ctypes.c_uint64
DWORDLONG = ctypes.c_uint64
SIZE_T = ctypes.c_size_t
class SECURITY_ATTRIBUTES(ctypes.Structure):
_fields_ = [
("nLength", DWORD),
("lpSecurityDescriptor", LPVOID),
("bInheritHandle", BOOL),
]
LPSECURITY_ATTRIBUTES = ctypes.POINTER(SECURITY_ATTRIBUTES)
LPTHREAD_START_ROUTINE = WINFUNCTYPE(DWORD, LPVOID)
CreateThread = windll.kernel32.CreateThread
CreateThread.restype = HANDLE
CreateThread.argtypes = [LPSECURITY_ATTRIBUTES,
SIZE_T,
LPTHREAD_START_ROUTINE,
LPVOID,
DWORD,
LPDWORD]
WaitForSingleObject = windll.kernel32.WaitForSingleObject
WaitForSingleObject.restype = DWORD
WaitForSingleObject.argtypes = [HANDLE,
DWORD]
CreateSemaphore = windll.kernel32.CreateSemaphoreA
CreateSemaphore.restype = HANDLE
CreateSemaphore.argtypes = [LPSECURITY_ATTRIBUTES,
LONG,
LONG,
LPCSTR]
ReleaseSemaphore = windll.kernel32.ReleaseSemaphore
ReleaseSemaphore.restype = BOOL
ReleaseSemaphore.argtypes = [HANDLE,
LONG,
LPLONG]
Sleep = windll.kernel32.Sleep
Sleep.restype = None
Sleep.argtypes = [DWORD]
CloseHandle = windll.kernel32.CloseHandle
CloseHandle.restype = BOOL
CloseHandle.argtypes = [HANDLE]
del ctypes
|
[
"ctypes.WINFUNCTYPE",
"ctypes.POINTER"
] |
[((757, 782), 'ctypes.POINTER', 'ctypes.POINTER', (['ULONG_PTR'], {}), '(ULONG_PTR)\n', (771, 782), False, 'import ctypes\n'), ((1140, 1175), 'ctypes.POINTER', 'ctypes.POINTER', (['SECURITY_ATTRIBUTES'], {}), '(SECURITY_ATTRIBUTES)\n', (1154, 1175), False, 'import ctypes\n'), ((1206, 1232), 'ctypes.WINFUNCTYPE', 'WINFUNCTYPE', (['DWORD', 'LPVOID'], {}), '(DWORD, LPVOID)\n', (1217, 1232), False, 'from ctypes import WINFUNCTYPE\n')]
|
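These prototypes are thin ctypes wrappers over the Win32 kernel32 API, so on Windows they can be called directly with Python ints, bytes, or None. A small illustrative round trip with the semaphore functions (the names only exist when is_windows is true):

# Windows-only illustration using the prototypes declared above.
if is_windows:
    sem = CreateSemaphore(None, 1, 1, b"libusb-demo-semaphore")  # initial count 1, maximum 1
    if WaitForSingleObject(sem, 1000) == 0:   # WAIT_OBJECT_0: acquired within 1 s
        Sleep(10)                             # hold it for 10 ms
        ReleaseSemaphore(sem, 1, None)        # release one count
    CloseHandle(sem)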
import json
import re
import argparse
from difflib import SequenceMatcher
from pprint import pprint
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
from terminaltables import AsciiTable
from transformers import AutoTokenizer
import stanza
import udon2
from udon2.kernels import ConvPartialTreeKernel
def exists_in_distractors(distractors, dataset):
data = dataset["data"]
for x in data:
for alt in x["choices"]:
comment = alt["extra"].get("comment") if alt["extra"] else None
if alt["type"] == "Distractor" and (alt["text"] in distractors or (comment and comment in distractors)):
return True
return False
def all_exist_in_distractors(distractors, dataset):
data = dataset["data"]
mask = [False] * len(distractors)
for x in data:
for alt in x["choices"]:
comment = alt["extra"].get("comment") if alt["extra"] else None
if alt["type"] == "Distractor":
for i, d in enumerate(distractors):
if alt["text"] == d or (comment and comment == d):
mask[i] = True
return all(mask)
def exists_in_context(distractors, dataset):
if type(dataset) == str:
for d in distractors:
if dataset.find(d) != -1:
return True
else:
data = dataset["data"]
for x in data:
for d in distractors:
if x["context"].find(d) != -1:
return True
return False
def all_exist_in_context(distractors, dataset):
mask = [False] * len(distractors)
if type(dataset) == str:
for i, d in enumerate(distractors):
if dataset.find(d) != -1:
mask[i] = True
else:
data = dataset["data"]
for x in data:
for i, d in enumerate(distractors):
if x["context"].find(d) != -1:
mask[i] = True
return all(mask)
def is_same_context(ctx, dataset, overlap=False):
if overlap:
Nctx = len(ctx)
limit = Nctx / 4
data = dataset["data"]
for x in data:
match = SequenceMatcher(None, x["context"], ctx).find_longest_match(0, len(x["context"]), 0, Nctx)
if match.size > limit:
return True
else:
data = dataset["data"]
for x in data:
if x["context"] == ctx:
return True
return False
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', type=str, required=True, help="Report file to process")
parser.add_argument('-t', '--training-data', type=str, default="", help="Training data file")
args = parser.parse_args()
tok = AutoTokenizer.from_pretrained("KB/bert-base-swedish-cased")
training_data = json.load(open(args.training_data)) if args.training_data else None
SPECIAL_TOKENS_REGEX = r"(\[SEP\]|\[[A-Z]\]|')"
sv = stanza.Pipeline(lang="sv", processors='tokenize,lemma,pos,depparse')
so_kernel = ConvPartialTreeKernel("GRCT", includeForm=False)
so_feats_kernel = ConvPartialTreeKernel("GRCT", includeForm=False, includeFeats=True)
examples = []
report = {
"total": 0,
"correct_in_distractors": [],
"any_same_distractors": [],
"all_same_distractors": [],
"avg_length_difference": defaultdict(list),
"any_different_capitalization": [],
"any_start_with_same_word": [],
"all_start_with_same_word": [],
"subseq_repetitive_words": [],
"empty_distractors": [],
"any_exists_in_context": [],
"any_exists_in_context_and_training_ctx": [],
"any_exists_in_context_and_training_dis": [],
"any_exists_in_training_distractors": [],
"any_exists_in_training_context": [],
"all_exist_in_context": [],
"all_exist_in_context_and_training_ctx": [],
"all_exist_in_context_and_training_dis": [],
"all_exist_in_training_distractors": [],
"all_exist_in_training_context": [],
"is_context_in_training_data": [],
"context_overlaps_with_training_data": [],
"any_predicted_gold_distractors": [],
"ca_norm_tree_kernel": [],
"ca_feats_norm_tree_kernel": [],
"start_with_same_pos": 0,
"start_with_same_pos_morph": 0,
"tp": 0,
"p": 0
}
inside_example, current_id = False, -1
gen_context_position = []
with open(args.file) as f:
for line in f:
line = line.strip()
if line.startswith("[CLS]"):
inside_example = True
correct = re.sub(SPECIAL_TOKENS_REGEX, "", line.split("[SEP]")[2])
examples.append({
"text": line,
"correct": correct.strip(),
"gold": None,
"gen": None
})
current_id += 1
elif inside_example:
if examples[-1]["gen"]:
examples[-1]["gold"] = [
re.sub(SPECIAL_TOKENS_REGEX, "", x).strip()
for x in line[1:-1].split("', '")
]
else:
examples[-1]["gen"] = [
re.sub(SPECIAL_TOKENS_REGEX, "", x).strip()
for x in line[1:-1].split("', '")
]
gen, gold = examples[-1]["gen"], examples[-1]["gold"]
correct = examples[-1]["correct"]
context = examples[-1]["text"]
if gen and gold:
set_gen, set_gold = set(gen), set(gold)
ca = udon2.Importer.from_stanza(sv(correct).to_dict())[0]
ca_norm = np.sqrt(so_kernel(ca, ca))
ca_feats_norm = np.sqrt(so_feats_kernel(ca, ca))
for g in set_gen:
if g:
gd = udon2.Importer.from_stanza(sv(g).to_dict())[0]
report["ca_norm_tree_kernel"].append(
so_kernel(ca, gd) / (ca_norm * np.sqrt(so_kernel(gd, gd))))
report["ca_feats_norm_tree_kernel"].append(
so_feats_kernel(ca, gd) / (ca_feats_norm * np.sqrt(so_feats_kernel(gd, gd))))
report["tp"] += g in set_gold
report["p"] += len(set_gold)
if correct in set_gen:
report["correct_in_distractors"].append(current_id)
if len(set_gen) != len(gen):
report["any_same_distractors"].append(current_id)
if len(set_gen) == 1:
report["all_same_distractors"].append(current_id)
if set_gen & set_gold:
report["any_predicted_gold_distractors"].append(current_id)
correct_capitalized = correct[0].isupper()
if any([correct_capitalized != x[0].isupper() for x in gen if x]):
report["any_different_capitalization"].append(current_id)
# this assumes a whitespace tokenization
cwords = correct.split()
dwords = [x.split() for x in gen]
Nc, Nd = len(cwords), [len(x) for x in dwords]
diff = [abs(Nc - Ndd) for Ndd in Nd]
report["avg_length_difference"][sum(diff) / len(diff)].append(current_id)
if any([x == 0 for x in Nd]):
report["empty_distractors"].append(current_id)
same_first_word = [cwords[0] == x[0] for x in dwords if x]
all_same_first_word = all(same_first_word)
if any(same_first_word) and not all_same_first_word:
report["any_start_with_same_word"].append(current_id)
if all_same_first_word:
report["all_start_with_same_word"].append(current_id)
if any([any([y == z for y, z in zip(x[:-1], x[1:])]) for x in dwords]):
report["subseq_repetitive_words"].append(current_id)
inside_example = False
report["total"] += 1
                    if training_data and is_same_context(context, training_data):
report["is_context_in_training_data"].append(current_id)
# if is_same_context(context, training_data, overlap=True):
# report["context_overlaps_with_training_data"].append(current_id)
if training_data:
gen_in_train_ctx = exists_in_context(gen, training_data)
gen_in_train_dis = exists_in_distractors(gen, training_data)
if gen_in_train_ctx:
all_gen_in_train_ctx = all_exist_in_context(gen, training_data)
else:
all_gen_in_train_ctx = False
if gen_in_train_dis:
all_gen_in_train_dis = all_exist_in_distractors(gen, training_data)
else:
all_gen_in_train_dis = False
else:
gen_in_train_ctx, gen_in_train_dis = False, False
if exists_in_context(gen, context):
report["any_exists_in_context"].append(current_id)
if all_exist_in_context(gen, context):
report["all_exist_in_context"].append(current_id)
if all_gen_in_train_ctx:
report["all_exist_in_context_and_training_ctx"].append(current_id)
if all_gen_in_train_dis:
report["all_exist_in_context_and_training_dis"].append(current_id)
if gen_in_train_ctx:
report["any_exists_in_context_and_training_ctx"].append(current_id)
if gen_in_train_dis:
report["any_exists_in_context_and_training_dis"].append(current_id)
if gen_in_train_ctx:
report["any_exists_in_training_context"].append(current_id)
if all_gen_in_train_ctx:
report["all_exist_in_training_context"].append(current_id)
if gen_in_train_dis:
report["any_exists_in_training_distractors"].append(current_id)
if all_gen_in_train_dis:
report["all_exist_in_training_distractors"].append(current_id)
for gdis in gen:
ddp = context.find(gdis)
if ddp > -1:
gen_context_position.append(len(tok.tokenize(context[:ddp])))
else:
inside_example = False
# pprint(report)
print(len(report["ca_norm_tree_kernel"]))
mode = stats.mode(report["ca_norm_tree_kernel"])
feats_mode = stats.mode(report["ca_feats_norm_tree_kernel"])
table_data = [
["Metric", "Value"],
["Total", report["total"]],
["Any of the generated distractors matches with a gold one", "{}%".format(
round(len(report["any_predicted_gold_distractors"]) * 100 / report["total"], 2))],
["The correct answer is among generated distractors", "{}%".format(
round(len(report["correct_in_distractors"]) * 100 / report["total"], 2))],
["Any (but not all) generated distractors are the same", "{}%".format(
round(len(report["any_same_distractors"]) * 100 / report["total"], 2))],
["All generated distractors are the same", "{}%".format(
round(len(report["all_same_distractors"]) * 100 / report["total"], 2))],
["Any distractor is capitalized differently from the correct answer", "{}%".format(
round(len(report["any_different_capitalization"]) * 100 / report["total"], 2))],
["Any distractor contains repetitive words", "{}%".format(
round(len(report["subseq_repetitive_words"]) * 100 / report["total"], 2))],
["Any distractor is an empty string", "{}%".format(
round(len(report["empty_distractors"]) * 100 / report["total"], 2))],
["(A) Any distractor is in its own context", "{}%".format(
round(len(report["any_exists_in_context"]) * 100 / report["total"], 2))],
["(B) Any distractor is in any context from training data", "{}%".format(
round(len(report["any_exists_in_training_context"]) * 100 / report["total"], 2))],
["(C) Any distractor is a distractor from training data", "{}%".format(
round(len(report["any_exists_in_training_distractors"]) * 100 / report["total"], 2))],
["(A) and (B)", "{}%".format(
round(len(report["any_exists_in_context_and_training_ctx"]) * 100 / report["total"], 2))],
["(A) and (C)", "{}%".format(
round(len(report["any_exists_in_context_and_training_dis"]) * 100 / report["total"], 2))],
["(A1) All distractors are in their own context", "{}%".format(
round(len(report["all_exist_in_context"]) * 100 / report["total"], 2))],
["(B1) All distractors are in any context from training data", "{}%".format(
round(len(report["all_exist_in_training_context"]) * 100 / report["total"], 2))],
["(C1) All distractors are distractors from training data", "{}%".format(
round(len(report["all_exist_in_training_distractors"]) * 100 / report["total"], 2))],
["(A1) and (B1)", "{}%".format(
round(len(report["all_exist_in_context_and_training_ctx"]) * 100 / report["total"], 2))],
["(A1) and (C1)", "{}%".format(
round(len(report["all_exist_in_context_and_training_dis"]) * 100 / report["total"], 2))],
["Normalized conv. kernel (SO)", "{} +/- {}".format(
round(np.mean(report["ca_norm_tree_kernel"]), 2),
round(np.std(report["ca_norm_tree_kernel"]), 2))],
["Median normalized conv. kernel (SO)", "{}".format(
round(np.median(report["ca_norm_tree_kernel"]), 2))],
["Mode normalized conv. kernel (SO)", "{} ({}%)".format(
round(mode[0][0], 2), round(mode[1][0] * 100 / len(report["ca_norm_tree_kernel"]), 2))],
["Normalized conv. kernel (SO, feats)", "{} +/- {}".format(
round(np.mean(report["ca_feats_norm_tree_kernel"]), 2),
round(np.std(report["ca_feats_norm_tree_kernel"]), 2))],
["Median normalized conv. kernel (SO, feats)", "{}".format(
round(np.median(report["ca_feats_norm_tree_kernel"]), 2))],
["Mode normalized conv. kernel (SO, feats)", "{} ({}%)".format(
round(feats_mode[0][0], 2), round(feats_mode[1][0] * 100 / len(report["ca_feats_norm_tree_kernel"]), 2))],
["Distractor recall", "{}%".format(round(report["tp"] * 100 / report["p"], 2))]
# ["A context exists in training data", "{}%".format(
# round(len(report["is_context_in_training_data"]) * 100 / report["total"], 2))]
# ["A context overlaps with training data (> 25\% overlap)", "{}%".format(
# round(len(report["context_overlaps_with_training_data"]) * 100 / report["total"], 2))]
]
t = AsciiTable(table_data)
print(t.table)
plt.hist(gen_context_position, bins=range(min(gen_context_position), max(gen_context_position)))
plt.show()
|
[
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"scipy.stats.mode",
"numpy.std",
"numpy.median",
"terminaltables.AsciiTable",
"udon2.kernels.ConvPartialTreeKernel",
"difflib.SequenceMatcher",
"collections.defaultdict",
"transformers.AutoTokenizer.from_pretrained",
"numpy.mean",
"stanza.Pipeline",
"re.sub"
] |
[((2553, 2578), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2576, 2578), False, 'import argparse\n'), ((2815, 2874), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['"""KB/bert-base-swedish-cased"""'], {}), "('KB/bert-base-swedish-cased')\n", (2844, 2874), False, 'from transformers import AutoTokenizer\n'), ((3027, 3095), 'stanza.Pipeline', 'stanza.Pipeline', ([], {'lang': '"""sv"""', 'processors': '"""tokenize,lemma,pos,depparse"""'}), "(lang='sv', processors='tokenize,lemma,pos,depparse')\n", (3042, 3095), False, 'import stanza\n'), ((3112, 3160), 'udon2.kernels.ConvPartialTreeKernel', 'ConvPartialTreeKernel', (['"""GRCT"""'], {'includeForm': '(False)'}), "('GRCT', includeForm=False)\n", (3133, 3160), False, 'from udon2.kernels import ConvPartialTreeKernel\n'), ((3183, 3250), 'udon2.kernels.ConvPartialTreeKernel', 'ConvPartialTreeKernel', (['"""GRCT"""'], {'includeForm': '(False)', 'includeFeats': '(True)'}), "('GRCT', includeForm=False, includeFeats=True)\n", (3204, 3250), False, 'from udon2.kernels import ConvPartialTreeKernel\n'), ((11339, 11380), 'scipy.stats.mode', 'stats.mode', (["report['ca_norm_tree_kernel']"], {}), "(report['ca_norm_tree_kernel'])\n", (11349, 11380), False, 'from scipy import stats\n'), ((11398, 11445), 'scipy.stats.mode', 'stats.mode', (["report['ca_feats_norm_tree_kernel']"], {}), "(report['ca_feats_norm_tree_kernel'])\n", (11408, 11445), False, 'from scipy import stats\n'), ((15716, 15738), 'terminaltables.AsciiTable', 'AsciiTable', (['table_data'], {}), '(table_data)\n', (15726, 15738), False, 'from terminaltables import AsciiTable\n'), ((15864, 15874), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15872, 15874), True, 'import matplotlib.pyplot as plt\n'), ((3448, 3465), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3459, 3465), False, 'from collections import defaultdict\n'), ((2212, 2252), 'difflib.SequenceMatcher', 'SequenceMatcher', (['None', "x['context']", 'ctx'], {}), "(None, x['context'], ctx)\n", (2227, 2252), False, 'from difflib import SequenceMatcher\n'), ((14338, 14376), 'numpy.mean', 'np.mean', (["report['ca_norm_tree_kernel']"], {}), "(report['ca_norm_tree_kernel'])\n", (14345, 14376), True, 'import numpy as np\n'), ((14400, 14437), 'numpy.std', 'np.std', (["report['ca_norm_tree_kernel']"], {}), "(report['ca_norm_tree_kernel'])\n", (14406, 14437), True, 'import numpy as np\n'), ((14524, 14564), 'numpy.median', 'np.median', (["report['ca_norm_tree_kernel']"], {}), "(report['ca_norm_tree_kernel'])\n", (14533, 14564), True, 'import numpy as np\n'), ((14824, 14868), 'numpy.mean', 'np.mean', (["report['ca_feats_norm_tree_kernel']"], {}), "(report['ca_feats_norm_tree_kernel'])\n", (14831, 14868), True, 'import numpy as np\n'), ((14892, 14935), 'numpy.std', 'np.std', (["report['ca_feats_norm_tree_kernel']"], {}), "(report['ca_feats_norm_tree_kernel'])\n", (14898, 14935), True, 'import numpy as np\n'), ((15029, 15075), 'numpy.median', 'np.median', (["report['ca_feats_norm_tree_kernel']"], {}), "(report['ca_feats_norm_tree_kernel'])\n", (15038, 15075), True, 'import numpy as np\n'), ((5166, 5201), 're.sub', 're.sub', (['SPECIAL_TOKENS_REGEX', '""""""', 'x'], {}), "(SPECIAL_TOKENS_REGEX, '', x)\n", (5172, 5201), False, 'import re\n'), ((5380, 5415), 're.sub', 're.sub', (['SPECIAL_TOKENS_REGEX', '""""""', 'x'], {}), "(SPECIAL_TOKENS_REGEX, '', x)\n", (5386, 5415), False, 'import re\n')]
|
'''
From https://github.com/tsc2017/Frechet-Inception-Distance
Code derived from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/gan/python/eval/python/classifier_metrics_impl.py
Usage:
Call get_fid(images1, images2)
Args:
images1, images2: Numpy arrays with values ranging from 0 to 255 and shape in the form [N, 3, HEIGHT, WIDTH] where N, HEIGHT and WIDTH can be arbitrary.
dtype of the images is recommended to be np.uint8 to save CPU memory.
Returns:
Frechet Inception Distance between the two image distributions.
'''
import tensorflow as tf
import os, sys
import functools
import numpy as np
import time
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
tfgan = tf.contrib.gan
session = tf.InteractiveSession()
# A smaller BATCH_SIZE reduces GPU memory usage, but at the cost of a slight slowdown
BATCH_SIZE = 64
# Run images through Inception.
inception_images = tf.placeholder(tf.float32, [BATCH_SIZE, 3, None, None])
activations1 = tf.placeholder(tf.float32, [None, None], name = 'activations1')
activations2 = tf.placeholder(tf.float32, [None, None], name = 'activations2')
fcd = tfgan.eval.frechet_classifier_distance_from_activations(activations1, activations2)
def inception_activations(images = inception_images, num_splits = 1):
images = tf.transpose(images, [0, 2, 3, 1])
size = 299
images = tf.image.resize_bilinear(images, [size, size])
generated_images_list = array_ops.split(images, num_or_size_splits = num_splits)
activations = functional_ops.map_fn(
fn = functools.partial(tfgan.eval.run_inception, output_tensor = 'pool_3:0'),
elems = array_ops.stack(generated_images_list),
parallel_iterations = 1,
back_prop = False,
swap_memory = True,
name = 'RunClassifier')
activations = array_ops.concat(array_ops.unstack(activations), 0)
return activations
activations =inception_activations()
def get_inception_activations(inps):
n_batches = inps.shape[0]//BATCH_SIZE
act = np.zeros([n_batches * BATCH_SIZE, 2048], dtype = np.float32)
for i in range(n_batches):
inp = inps[i * BATCH_SIZE:(i + 1) * BATCH_SIZE] / 255. * 2 - 1
act[i * BATCH_SIZE:(i + 1) * BATCH_SIZE] = activations.eval(feed_dict = {inception_images: inp})
return act
def activations2distance(act1, act2):
return fcd.eval(feed_dict = {activations1: act1, activations2: act2})
def get_fid(images1, images2):
assert(type(images1) == np.ndarray)
assert(len(images1.shape) == 4)
assert(images1.shape[1] == 3)
assert(np.min(images1[0]) >= 0 and np.max(images1[0]) > 10), 'Image values should be in the range [0, 255]'
assert(type(images2) == np.ndarray)
assert(len(images2.shape) == 4)
assert(images2.shape[1] == 3)
assert(np.min(images2[0]) >= 0 and np.max(images2[0]) > 10), 'Image values should be in the range [0, 255]'
assert(images1.shape == images2.shape), 'The two numpy arrays must have the same shape'
print('Calculating FID with %i images from each distribution' % (images1.shape[0]))
start_time = time.time()
act1 = get_inception_activations(images1)
act2 = get_inception_activations(images2)
fid = activations2distance(act1, act2)
print('FID calculation time: %f s' % (time.time() - start_time))
return fid
|
[
"functools.partial",
"numpy.zeros",
"tensorflow.transpose",
"time.time",
"tensorflow.placeholder",
"tensorflow.python.ops.array_ops.split",
"numpy.min",
"numpy.max",
"tensorflow.python.ops.array_ops.unstack",
"tensorflow.InteractiveSession",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.image.resize_bilinear"
] |
[((780, 803), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (801, 803), True, 'import tensorflow as tf\n'), ((959, 1014), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[BATCH_SIZE, 3, None, None]'], {}), '(tf.float32, [BATCH_SIZE, 3, None, None])\n', (973, 1014), True, 'import tensorflow as tf\n'), ((1030, 1091), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, None]'], {'name': '"""activations1"""'}), "(tf.float32, [None, None], name='activations1')\n", (1044, 1091), True, 'import tensorflow as tf\n'), ((1109, 1170), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, None]'], {'name': '"""activations2"""'}), "(tf.float32, [None, None], name='activations2')\n", (1123, 1170), True, 'import tensorflow as tf\n'), ((1347, 1381), 'tensorflow.transpose', 'tf.transpose', (['images', '[0, 2, 3, 1]'], {}), '(images, [0, 2, 3, 1])\n', (1359, 1381), True, 'import tensorflow as tf\n'), ((1410, 1456), 'tensorflow.image.resize_bilinear', 'tf.image.resize_bilinear', (['images', '[size, size]'], {}), '(images, [size, size])\n', (1434, 1456), True, 'import tensorflow as tf\n'), ((1485, 1539), 'tensorflow.python.ops.array_ops.split', 'array_ops.split', (['images'], {'num_or_size_splits': 'num_splits'}), '(images, num_or_size_splits=num_splits)\n', (1500, 1539), False, 'from tensorflow.python.ops import array_ops\n'), ((2066, 2124), 'numpy.zeros', 'np.zeros', (['[n_batches * BATCH_SIZE, 2048]'], {'dtype': 'np.float32'}), '([n_batches * BATCH_SIZE, 2048], dtype=np.float32)\n', (2074, 2124), True, 'import numpy as np\n'), ((3144, 3155), 'time.time', 'time.time', ([], {}), '()\n', (3153, 3155), False, 'import time\n'), ((1880, 1910), 'tensorflow.python.ops.array_ops.unstack', 'array_ops.unstack', (['activations'], {}), '(activations)\n', (1897, 1910), False, 'from tensorflow.python.ops import array_ops\n'), ((1596, 1665), 'functools.partial', 'functools.partial', (['tfgan.eval.run_inception'], {'output_tensor': '"""pool_3:0"""'}), "(tfgan.eval.run_inception, output_tensor='pool_3:0')\n", (1613, 1665), False, 'import functools\n'), ((1685, 1723), 'tensorflow.python.ops.array_ops.stack', 'array_ops.stack', (['generated_images_list'], {}), '(generated_images_list)\n', (1700, 1723), False, 'from tensorflow.python.ops import array_ops\n'), ((2624, 2642), 'numpy.min', 'np.min', (['images1[0]'], {}), '(images1[0])\n', (2630, 2642), True, 'import numpy as np\n'), ((2652, 2670), 'numpy.max', 'np.max', (['images1[0]'], {}), '(images1[0])\n', (2658, 2670), True, 'import numpy as np\n'), ((2846, 2864), 'numpy.min', 'np.min', (['images2[0]'], {}), '(images2[0])\n', (2852, 2864), True, 'import numpy as np\n'), ((2874, 2892), 'numpy.max', 'np.max', (['images2[0]'], {}), '(images2[0])\n', (2880, 2892), True, 'import numpy as np\n'), ((3333, 3344), 'time.time', 'time.time', ([], {}), '()\n', (3342, 3344), False, 'import time\n')]
|
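Per the module docstring, the only public entry point is get_fid. A hedged usage sketch with random uint8 images (illustrative shapes only; it assumes a TF1.x environment where tf.contrib is available):

import numpy as np
# Illustrative only: two batches of 128 random 64x64 RGB images in NCHW layout.
# N should be a multiple of BATCH_SIZE (64), since the activation loop floors the batch count.
images_a = np.random.randint(0, 256, size=(128, 3, 64, 64), dtype=np.uint8)
images_b = np.random.randint(0, 256, size=(128, 3, 64, 64), dtype=np.uint8)
print(get_fid(images_a, images_b))  # FID between the two (here: meaningless) distributions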
#!/usr/bin/python3.6
import os
import re
import sys
import yaml
from glob import glob
from collections import OrderedDict
from typing import List
import numpy as np
import pandas as pd
from tqdm import tqdm
from metrics import F_score
from debug import dprint
IN_KERNEL = os.environ.get('KAGGLE_WORKING_DIR') is not None
INPUT_PATH = '../input/imet-2019-fgvc6/' if IN_KERNEL else '../input/'
NUM_ATTEMPTS = 100
NUM_FOLDS = 5
NUM_CLASSES = 1103
if __name__ == '__main__':
if len(sys.argv) < 4:
print(f'usage: {sys.argv[0]} <ensemble_name> predict1.npy ...')
sys.exit()
ensemble_name, predicts = sys.argv[1], sys.argv[2:]
level1_filenames: List[List[str]] = []
level1_train_predicts: List[List[np.array]] = []
# load labels
fold_num = np.load('folds.npy')
train_df = pd.read_csv(INPUT_PATH + 'train.csv')
def parse_labels(s: str) -> np.array:
res = np.zeros(NUM_CLASSES)
res[list(map(int, s.split()))] = 1
return res - 0.5 # we use zero threshold instead of 0.5
all_labels = np.vstack(list(map(parse_labels, train_df.attribute_ids)))
dprint(fold_num.shape)
dprint(all_labels.shape)
# build a list of models, for every model build a list of predicts
for predict in predicts:
assert 'level1_train_' in predict
m = re.match(r'(.*)_f(\d)_e\d+.*\.npy', predict)
assert m
model_path = m.group(1)
level1_fnames, level1_train = [], []
for fold in range(NUM_FOLDS):
filenames = glob(f'{model_path}_f{fold}_*.npy')
assert len(filenames) == 1 # the model must be unique in this fold
filename = filenames[0]
print('found', filename)
level1_fnames.append(filename)
level1_train.append(np.load(filename))
level1_filenames.append(level1_fnames)
level1_train_predicts.append(level1_train)
# search for the best blend weights
best_weights = np.ones(len(level1_train_predicts))
best_score = 0.0
for _ in tqdm(range(NUM_ATTEMPTS)):
# print('-' * 50)
weights = np.random.rand(len(level1_train_predicts))
weights /= sum(weights)
all_predicts = np.zeros_like(all_labels)
for lvl1_predicts, w in zip(level1_train_predicts, weights):
model_predict = np.zeros_like(all_labels)
for fold, lvl1_pred in enumerate(lvl1_predicts):
predict = lvl1_pred * w
model_predict[fold_num == fold] = predict
all_predicts += model_predict
score = F_score(all_predicts, all_labels, beta=2, threshold=0)
if score > best_score:
best_score, best_weights = score, weights
print('best_score', best_score, 'weights', weights)
# generate an ensemble description file
ensemble = []
for model, weight in zip(level1_filenames, best_weights):
model_filenames = [os.path.basename(f) for f in model]
ensemble.append({'predicts': model_filenames, 'weight': weight.item()})
filename = f'{ensemble_name}_val_{best_score:.04f}.yml'
print('saving weights to', filename)
with open(filename, 'w') as f:
yaml.dump(ensemble, f)
|
[
"numpy.load",
"numpy.zeros_like",
"metrics.F_score",
"pandas.read_csv",
"os.path.basename",
"yaml.dump",
"numpy.zeros",
"re.match",
"os.environ.get",
"debug.dprint",
"glob.glob",
"sys.exit"
] |
[((277, 313), 'os.environ.get', 'os.environ.get', (['"""KAGGLE_WORKING_DIR"""'], {}), "('KAGGLE_WORKING_DIR')\n", (291, 313), False, 'import os\n'), ((781, 801), 'numpy.load', 'np.load', (['"""folds.npy"""'], {}), "('folds.npy')\n", (788, 801), True, 'import numpy as np\n'), ((817, 854), 'pandas.read_csv', 'pd.read_csv', (["(INPUT_PATH + 'train.csv')"], {}), "(INPUT_PATH + 'train.csv')\n", (828, 854), True, 'import pandas as pd\n'), ((1125, 1147), 'debug.dprint', 'dprint', (['fold_num.shape'], {}), '(fold_num.shape)\n', (1131, 1147), False, 'from debug import dprint\n'), ((1152, 1176), 'debug.dprint', 'dprint', (['all_labels.shape'], {}), '(all_labels.shape)\n', (1158, 1176), False, 'from debug import dprint\n'), ((583, 593), 'sys.exit', 'sys.exit', ([], {}), '()\n', (591, 593), False, 'import sys\n'), ((912, 933), 'numpy.zeros', 'np.zeros', (['NUM_CLASSES'], {}), '(NUM_CLASSES)\n', (920, 933), True, 'import numpy as np\n'), ((1332, 1378), 're.match', 're.match', (['"""(.*)_f(\\\\d)_e\\\\d+.*\\\\.npy"""', 'predict'], {}), "('(.*)_f(\\\\d)_e\\\\d+.*\\\\.npy', predict)\n", (1340, 1378), False, 'import re\n'), ((2216, 2241), 'numpy.zeros_like', 'np.zeros_like', (['all_labels'], {}), '(all_labels)\n', (2229, 2241), True, 'import numpy as np\n'), ((2586, 2640), 'metrics.F_score', 'F_score', (['all_predicts', 'all_labels'], {'beta': '(2)', 'threshold': '(0)'}), '(all_predicts, all_labels, beta=2, threshold=0)\n', (2593, 2640), False, 'from metrics import F_score\n'), ((3206, 3228), 'yaml.dump', 'yaml.dump', (['ensemble', 'f'], {}), '(ensemble, f)\n', (3215, 3228), False, 'import yaml\n'), ((1534, 1569), 'glob.glob', 'glob', (['f"""{model_path}_f{fold}_*.npy"""'], {}), "(f'{model_path}_f{fold}_*.npy')\n", (1538, 1569), False, 'from glob import glob\n'), ((2340, 2365), 'numpy.zeros_like', 'np.zeros_like', (['all_labels'], {}), '(all_labels)\n', (2353, 2365), True, 'import numpy as np\n'), ((2944, 2963), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (2960, 2963), False, 'import os\n'), ((1798, 1815), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (1805, 1815), True, 'import numpy as np\n')]
|
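The tail of the blending script in the record above picks ensemble weights by plain random search: draw a weight vector, normalize it, blend the per-fold out-of-fold predictions, and keep the weights with the best F-score. A minimal self-contained sketch of that idea (hypothetical helper, not part of the record; any score_fn(blended, labels) can stand in for the script's F_score):

import numpy as np

def random_search_blend(predicts, labels, score_fn, attempts=1000, seed=0):
    # predicts: list of prediction arrays, one per model, all aligned with labels
    rng = np.random.default_rng(seed)
    best_score = -np.inf
    best_weights = np.ones(len(predicts)) / len(predicts)
    for _ in range(attempts):
        weights = rng.random(len(predicts))
        weights /= weights.sum()  # normalize so the blend is a weighted average
        blended = sum(w * p for w, p in zip(weights, predicts))
        score = score_fn(blended, labels)
        if score > best_score:
            best_score, best_weights = score, weights
    return best_weights, best_score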
#!/usr/bin/env python
#
# Database Helper class
#
# by <NAME>
# References:
#
# Copyright 2018
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import sqlite3
SETTING = 0
VALUE = 1
MESSAGE = 2
class DB (object):
def __init__(self):
self._file_path = '//var//www//html//GlanceWeb//clp.db'
self._field_dict = {}
self._message_dict = {}
def db_load(self):
self._field_dict.clear()
self.load_settings()
def get_file_path(self):
return self._file_path
def load_settings(self):
self._conn = sqlite3.connect(self._file_path)
curs=self._conn.cursor()
self._field_dict.clear()
for row in curs.execute("SELECT setting,value FROM settings"):
if row[SETTING] not in self._field_dict:
self._field_dict[row[SETTING]]= row[VALUE].strip()
self._message_dict.clear()
for row in curs.execute("select s.setting, s.value, (m.host || '/' || m.name || '/' || m.value) as message from settings s, messages m where s.value = m.id"):
if row[SETTING] not in self._message_dict:
self._message_dict[row[SETTING]]= row[MESSAGE].strip()
def get_value(self, setting):
if setting in self._field_dict:
return self._field_dict[setting]
def get_int_value(self, setting):
if setting in self._field_dict:
return int(self._field_dict[setting])
def get_message(self, setting):
if setting in self._message_dict:
return self._message_dict[setting]
def save_setting(self, setting, newvalue):
result = False
print(("saving setting - " , setting , " " , newvalue))
self._conn = sqlite3.connect(self._file_path)
curs=self._conn.cursor()
curs.execute("UPDATE settings SET value = '" + newvalue + "' WHERE setting = '" + setting + "'")
self._conn.commit()
if curs.rowcount==1:
self._field_dict[setting] = newvalue
result = True
return result
if __name__ == "__main__":
db = DB()
db.load_settings()
print(db.get_message('display12'))
|
[
"sqlite3.connect"
] |
[((1151, 1183), 'sqlite3.connect', 'sqlite3.connect', (['self._file_path'], {}), '(self._file_path)\n', (1166, 1183), False, 'import sqlite3\n'), ((2500, 2532), 'sqlite3.connect', 'sqlite3.connect', (['self._file_path'], {}), '(self._file_path)\n', (2515, 2532), False, 'import sqlite3\n')]
|
'''
Created on Jan 15, 2020
@author: gsnyder
Generate vulnerability status report
'''
from blackduck.HubRestApi import HubInstance
import argparse
import json
import time
parser = argparse.ArgumentParser("A program to create a vulnerability status report")
parser.add_argument("--file_name", default="vuln_status_report")
parser.add_argument('-f', '--format', default='CSV', choices=["CSV", "JSON"], help="Report format")
parser.add_argument('-t', '--tries', default=4, type=int, help="How many times to retry downloading the report, i.e. wait for the report to be generated")
parser.add_argument('-s', '--sleep_time', default=5, type=int, help="The amount of time to sleep in-between (re-)tries to download the report")
args = parser.parse_args()
hub = HubInstance()
class FailedReportDownload(Exception):
pass
def download_report(location, report_format, filename, retries=args.tries):
report_id = location.split("/")[-1]
if retries:
print("Retrieving generated report from {}".format(location))
# response = hub.download_vuln_status_report(location)
response = hub.execute_get(location)
if response.status_code == 200:
report_obj = response.json()
download_url = hub.get_link(report_obj, "download") + ".json"
content_url = hub.get_link(report_obj, "content")
if report_format == "CSV":
download_filename = filename + ".zip"
response = hub.execute_get(download_url, {'Content-Type': 'application/zip'})
else:
download_filename = filename + ".json"
response = hub.execute_get(content_url)
if response.status_code == 200:
if report_format == "CSV":
with open(download_filename, "wb") as f:
f.write(response.content)
print("Successfully downloaded zip file to {} for report {}".format(
download_filename, report_id))
else:
with open(download_filename, "w") as f:
json.dump(response.json(), f, indent=3)
print("Successfully downloaded json report data to {} for report {}".format(
download_filename, report_id))
else:
print("Failed to retrieve report {}".format(report_id))
print(f"Probably not ready yet, waiting {args.sleep_time} seconds then retrying...")
time.sleep(args.sleep_time)
retries -= 1
download_report(location, report_format, filename, retries)
else:
print("Failed to find report information at location {}, status code: {}".format(location, response.status_code))
else:
raise FailedReportDownload("Failed to retrieve report {} after {} retries".format(report_id, args.tries))
response = hub.create_vuln_status_report(format=args.format)
if response.status_code == 201:
print("Successfully created vulnerability status report")
location = response.headers['Location']
download_report(location, args.format, args.file_name)
else:
print("Failed to create vulnerability status report, status code returned: {}".format(response.status_code))
|
[
"blackduck.HubRestApi.HubInstance",
"argparse.ArgumentParser",
"time.sleep"
] |
[((185, 261), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""A program to create a vulnerability status report"""'], {}), "('A program to create a vulnerability status report')\n", (208, 261), False, 'import argparse\n'), ((761, 774), 'blackduck.HubRestApi.HubInstance', 'HubInstance', ([], {}), '()\n', (772, 774), False, 'from blackduck.HubRestApi import HubInstance\n'), ((2183, 2210), 'time.sleep', 'time.sleep', (['args.sleep_time'], {}), '(args.sleep_time)\n', (2193, 2210), False, 'import time\n')]
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from .mqtt_provider import MQTTProvider
import types
from azure.iot.hub.devicesdk.transport.abstract_transport import AbstractTransport
class MQTTTransport(AbstractTransport):
def __init__(self, auth_provider):
"""
Constructor for instantiating a transport
:param auth_provider: The authentication provider
"""
AbstractTransport.__init__(self, auth_provider)
self._mqtt_provider = None
self.on_transport_connected = types.FunctionType
def connect(self):
client_id = self._auth_provider.device_id
if self._auth_provider.module_id is not None:
client_id += "/" + self._auth_provider.module_id
username = self._auth_provider.hostname + "/" + client_id + "/" + "?api-version=2018-06-30"
self._mqtt_provider = MQTTProvider(client_id, self._auth_provider.hostname, username,
self._auth_provider.get_current_sas_token())
self._mqtt_provider.on_mqtt_connected = self._handle_provider_connected_state
self._mqtt_provider.connect()
def send_event(self, event):
topic = self._get_telemetry_topic()
self._mqtt_provider.publish(topic, event)
def disconnect(self):
self._mqtt_provider.disconnect()
def _handle_provider_connected_state(self, machine_state):
return self.on_transport_connected(machine_state)
def _get_telemetry_topic(self):
topic = "devices/" + self._auth_provider.device_id
if self._auth_provider.module_id is not None:
topic += "/modules/" + self._auth_provider.module_id
topic += "/messages/events/"
return topic
|
[
"azure.iot.hub.devicesdk.transport.abstract_transport.AbstractTransport.__init__"
] |
[((703, 750), 'azure.iot.hub.devicesdk.transport.abstract_transport.AbstractTransport.__init__', 'AbstractTransport.__init__', (['self', 'auth_provider'], {}), '(self, auth_provider)\n', (729, 750), False, 'from azure.iot.hub.devicesdk.transport.abstract_transport import AbstractTransport\n')]
|
# coding=utf-8
# Copyright 2022 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for TF-GAN internal inception_metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import tensorflow as tf
import tensorflow_gan as tfgan
mock = tf.compat.v1.test.mock
class FakeInceptionModule(tf.Module):
@tf.function(input_signature=[tf.TensorSpec(shape=None, dtype=tf.float32)])
def __call__(self, x):
bs = tf.shape(x)[0]
logits = tf.zeros([bs, 1008])
pool_3 = tf.ones([bs, 2048])
return {'logits': logits, 'pool_3': pool_3}
class RunInceptionTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(RunInceptionTest, self).setUp()
self.export_path = os.path.join(self.get_temp_dir(), 'my-module')
tf.saved_model.save(FakeInceptionModule(), self.export_path)
classifier_fn = tfgan.eval.classifier_fn_from_tfhub(
self.export_path, None)
def run_inception(*args, **kwargs):
return tfgan.eval.run_inception(
*args, classifier_fn=classifier_fn, **kwargs)
self.run_inception = run_inception
@parameterized.parameters(
{'num_batches': 1},
{'num_batches': 4},
)
def test_run_inception_graph(self, num_batches):
"""Test `run_inception` graph construction."""
batch_size = 8
img = tf.ones([batch_size, 299, 299, 3])
results = self.run_inception(img, num_batches=num_batches)
self.assertIsInstance(results, dict)
self.assertLen(results, 2)
self.assertIn('logits', results)
logits = results['logits']
self.assertIsInstance(logits, tf.Tensor)
logits.shape.assert_is_compatible_with([batch_size, 1008])
self.assertIn('pool_3', results)
pool = results['pool_3']
self.assertIsInstance(pool, tf.Tensor)
pool.shape.assert_is_compatible_with([batch_size, 2048])
# Check that none of the model variables are trainable.
self.assertListEqual([], tf.compat.v1.trainable_variables())
def test_run_inception_multicall(self):
"""Test that `run_inception` can be called multiple times."""
for batch_size in (7, 3, 2):
img = tf.ones([batch_size, 299, 299, 3])
self.run_inception(img)
class SampleAndRunInception(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(SampleAndRunInception, self).setUp()
self.export_path = os.path.join(self.get_temp_dir(), 'my-module')
tf.saved_model.save(FakeInceptionModule(), self.export_path)
classifier_fn = tfgan.eval.classifier_fn_from_tfhub(self.export_path, None)
def sample_and_run_inception(*args, **kwargs):
return tfgan.eval.sample_and_run_inception(
*args, classifier_fn=classifier_fn, **kwargs)
self.sample_and_run_inception = sample_and_run_inception
@parameterized.parameters(
{'num_batches': 1},
{'num_batches': 4},
)
def test_sample_and_run_inception_graph(self, num_batches):
"""Test `sample_and_run_inception` graph construction."""
batch_size = 8
def sample_fn(_):
return tf.ones([batch_size, 244, 244, 3])
sample_inputs = [1] * num_batches
results = self.sample_and_run_inception(sample_fn, sample_inputs)
self.assertIsInstance(results, dict)
self.assertLen(results, 2)
self.assertIn('logits', results)
logits = results['logits']
self.assertIsInstance(logits, tf.Tensor)
logits.shape.assert_is_compatible_with([batch_size * num_batches, 1008])
self.assertIn('pool_3', results)
pool = results['pool_3']
self.assertIsInstance(pool, tf.Tensor)
pool.shape.assert_is_compatible_with([batch_size * num_batches, 2048])
# Check that none of the model variables are trainable.
self.assertListEqual([], tf.compat.v1.trainable_variables())
class InceptionScore(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(InceptionScore, self).setUp()
self.export_path = os.path.join(self.get_temp_dir(), 'my-module')
tf.saved_model.save(FakeInceptionModule(), self.export_path)
classifier_fn = tfgan.eval.classifier_fn_from_tfhub(
self.export_path, tfgan.eval.INCEPTION_OUTPUT, True)
def inception_score(*args, **kwargs):
return tfgan.eval.inception_score(
*args, classifier_fn=classifier_fn, **kwargs)
self.inception_score = inception_score
def inception_score_streaming(*args, **kwargs):
return tfgan.eval.inception_score_streaming(
*args, classifier_fn=classifier_fn, **kwargs)
self.inception_score_streaming = inception_score_streaming
@parameterized.parameters(
{'num_batches': 1, 'streaming': True},
{'num_batches': 1, 'streaming': False},
{'num_batches': 3, 'streaming': True},
{'num_batches': 3, 'streaming': False},
)
def test_inception_score_graph(self, num_batches, streaming):
"""Test `inception_score` graph construction."""
if streaming and tf.executing_eagerly():
# streaming doesn't work in eager execution.
return
img = tf.zeros([6, 299, 299, 3])
if streaming:
score, update_op = self.inception_score_streaming(
img, num_batches=num_batches)
self.assertIsInstance(update_op, tf.Tensor)
update_op.shape.assert_has_rank(0)
else:
score = self.inception_score(img, num_batches=num_batches)
self.assertIsInstance(score, tf.Tensor)
score.shape.assert_has_rank(0)
# Check that none of the model variables are trainable.
self.assertEmpty(tf.compat.v1.trainable_variables())
class FrechetInceptionDistance(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(FrechetInceptionDistance, self).setUp()
self.export_path = os.path.join(self.get_temp_dir(), 'my-module')
tf.saved_model.save(FakeInceptionModule(), self.export_path)
classifier_fn = tfgan.eval.classifier_fn_from_tfhub(
self.export_path, tfgan.eval.INCEPTION_FINAL_POOL, True)
def frechet_inception_distance(*args, **kwargs):
return tfgan.eval.frechet_inception_distance(
*args, classifier_fn=classifier_fn, **kwargs)
self.frechet_inception_distance = frechet_inception_distance
def fid_streaming(*args, **kwargs):
return tfgan.eval.frechet_inception_distance_streaming(
*args, classifier_fn=classifier_fn, **kwargs)
self.frechet_inception_distance_streaming = fid_streaming
@parameterized.parameters(
{'num_batches': 1, 'streaming': True},
{'num_batches': 1, 'streaming': False},
{'num_batches': 3, 'streaming': True},
{'num_batches': 3, 'streaming': False},
)
def test_frechet_inception_distance_graph(self, num_batches, streaming):
"""Test `frechet_inception_distance` graph construction."""
if streaming and tf.executing_eagerly():
# streaming doesn't work in eager execution.
return
img = tf.ones([6, 299, 299, 3])
if streaming:
distance, update_op = self.frechet_inception_distance_streaming(
img, img, num_batches=num_batches)
self.assertIsInstance(update_op, tf.Tensor)
update_op.shape.assert_has_rank(0)
else:
distance = self.frechet_inception_distance(
img, img, num_batches=num_batches)
self.assertIsInstance(distance, tf.Tensor)
distance.shape.assert_has_rank(0)
# Check that none of the model variables are trainable.
self.assertEmpty(tf.compat.v1.trainable_variables())
if __name__ == '__main__':
tf.test.main()
|
[
"tensorflow.test.main",
"tensorflow.ones",
"tensorflow_gan.eval.frechet_inception_distance",
"tensorflow_gan.eval.classifier_fn_from_tfhub",
"tensorflow_gan.eval.sample_and_run_inception",
"absl.testing.parameterized.parameters",
"tensorflow_gan.eval.inception_score_streaming",
"tensorflow.executing_eagerly",
"tensorflow.shape",
"tensorflow.zeros",
"tensorflow_gan.eval.run_inception",
"tensorflow_gan.eval.inception_score",
"tensorflow.TensorSpec",
"tensorflow.compat.v1.trainable_variables",
"tensorflow_gan.eval.frechet_inception_distance_streaming"
] |
[((1719, 1783), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (["{'num_batches': 1}", "{'num_batches': 4}"], {}), "({'num_batches': 1}, {'num_batches': 4})\n", (1743, 1783), False, 'from absl.testing import parameterized\n'), ((3374, 3438), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (["{'num_batches': 1}", "{'num_batches': 4}"], {}), "({'num_batches': 1}, {'num_batches': 4})\n", (3398, 3438), False, 'from absl.testing import parameterized\n'), ((5142, 5334), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (["{'num_batches': 1, 'streaming': True}", "{'num_batches': 1, 'streaming': False}", "{'num_batches': 3, 'streaming': True}", "{'num_batches': 3, 'streaming': False}"], {}), "({'num_batches': 1, 'streaming': True}, {\n 'num_batches': 1, 'streaming': False}, {'num_batches': 3, 'streaming': \n True}, {'num_batches': 3, 'streaming': False})\n", (5166, 5334), False, 'from absl.testing import parameterized\n'), ((6951, 7143), 'absl.testing.parameterized.parameters', 'parameterized.parameters', (["{'num_batches': 1, 'streaming': True}", "{'num_batches': 1, 'streaming': False}", "{'num_batches': 3, 'streaming': True}", "{'num_batches': 3, 'streaming': False}"], {}), "({'num_batches': 1, 'streaming': True}, {\n 'num_batches': 1, 'streaming': False}, {'num_batches': 3, 'streaming': \n True}, {'num_batches': 3, 'streaming': False})\n", (6975, 7143), False, 'from absl.testing import parameterized\n'), ((8012, 8026), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (8024, 8026), True, 'import tensorflow as tf\n'), ((1085, 1105), 'tensorflow.zeros', 'tf.zeros', (['[bs, 1008]'], {}), '([bs, 1008])\n', (1093, 1105), True, 'import tensorflow as tf\n'), ((1119, 1138), 'tensorflow.ones', 'tf.ones', (['[bs, 2048]'], {}), '([bs, 2048])\n', (1126, 1138), True, 'import tensorflow as tf\n'), ((1472, 1531), 'tensorflow_gan.eval.classifier_fn_from_tfhub', 'tfgan.eval.classifier_fn_from_tfhub', (['self.export_path', 'None'], {}), '(self.export_path, None)\n', (1507, 1531), True, 'import tensorflow_gan as tfgan\n'), ((1932, 1966), 'tensorflow.ones', 'tf.ones', (['[batch_size, 299, 299, 3]'], {}), '([batch_size, 299, 299, 3])\n', (1939, 1966), True, 'import tensorflow as tf\n'), ((3092, 3151), 'tensorflow_gan.eval.classifier_fn_from_tfhub', 'tfgan.eval.classifier_fn_from_tfhub', (['self.export_path', 'None'], {}), '(self.export_path, None)\n', (3127, 3151), True, 'import tensorflow_gan as tfgan\n'), ((4634, 4727), 'tensorflow_gan.eval.classifier_fn_from_tfhub', 'tfgan.eval.classifier_fn_from_tfhub', (['self.export_path', 'tfgan.eval.INCEPTION_OUTPUT', '(True)'], {}), '(self.export_path, tfgan.eval.\n INCEPTION_OUTPUT, True)\n', (4669, 4727), True, 'import tensorflow_gan as tfgan\n'), ((5590, 5616), 'tensorflow.zeros', 'tf.zeros', (['[6, 299, 299, 3]'], {}), '([6, 299, 299, 3])\n', (5598, 5616), True, 'import tensorflow as tf\n'), ((6397, 6494), 'tensorflow_gan.eval.classifier_fn_from_tfhub', 'tfgan.eval.classifier_fn_from_tfhub', (['self.export_path', 'tfgan.eval.\n INCEPTION_FINAL_POOL', '(True)'], {}), '(self.export_path, tfgan.eval.\n INCEPTION_FINAL_POOL, True)\n', (6432, 6494), True, 'import tensorflow_gan as tfgan\n'), ((7421, 7446), 'tensorflow.ones', 'tf.ones', (['[6, 299, 299, 3]'], {}), '([6, 299, 299, 3])\n', (7428, 7446), True, 'import tensorflow as tf\n'), ((1057, 1068), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (1065, 1068), True, 'import tensorflow as tf\n'), ((1594, 1664), 'tensorflow_gan.eval.run_inception', 'tfgan.eval.run_inception', (['*args'], {'classifier_fn': 'classifier_fn'}), '(*args, classifier_fn=classifier_fn, **kwargs)\n', (1618, 1664), True, 'import tensorflow_gan as tfgan\n'), ((2542, 2576), 'tensorflow.compat.v1.trainable_variables', 'tf.compat.v1.trainable_variables', ([], {}), '()\n', (2574, 2576), True, 'import tensorflow as tf\n'), ((2732, 2766), 'tensorflow.ones', 'tf.ones', (['[batch_size, 299, 299, 3]'], {}), '([batch_size, 299, 299, 3])\n', (2739, 2766), True, 'import tensorflow as tf\n'), ((3216, 3302), 'tensorflow_gan.eval.sample_and_run_inception', 'tfgan.eval.sample_and_run_inception', (['*args'], {'classifier_fn': 'classifier_fn'}), '(*args, classifier_fn=classifier_fn, **\n kwargs)\n', (3251, 3302), True, 'import tensorflow_gan as tfgan\n'), ((3634, 3668), 'tensorflow.ones', 'tf.ones', (['[batch_size, 244, 244, 3]'], {}), '([batch_size, 244, 244, 3])\n', (3641, 3668), True, 'import tensorflow as tf\n'), ((4317, 4351), 'tensorflow.compat.v1.trainable_variables', 'tf.compat.v1.trainable_variables', ([], {}), '()\n', (4349, 4351), True, 'import tensorflow as tf\n'), ((4788, 4860), 'tensorflow_gan.eval.inception_score', 'tfgan.eval.inception_score', (['*args'], {'classifier_fn': 'classifier_fn'}), '(*args, classifier_fn=classifier_fn, **kwargs)\n', (4814, 4860), True, 'import tensorflow_gan as tfgan\n'), ((4981, 5068), 'tensorflow_gan.eval.inception_score_streaming', 'tfgan.eval.inception_score_streaming', (['*args'], {'classifier_fn': 'classifier_fn'}), '(*args, classifier_fn=classifier_fn, **\n kwargs)\n', (5017, 5068), True, 'import tensorflow_gan as tfgan\n'), ((5492, 5514), 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (5512, 5514), True, 'import tensorflow as tf\n'), ((6060, 6094), 'tensorflow.compat.v1.trainable_variables', 'tf.compat.v1.trainable_variables', ([], {}), '()\n', (6092, 6094), True, 'import tensorflow as tf\n'), ((6566, 6653), 'tensorflow_gan.eval.frechet_inception_distance', 'tfgan.eval.frechet_inception_distance', (['*args'], {'classifier_fn': 'classifier_fn'}), '(*args, classifier_fn=classifier_fn,\n **kwargs)\n', (6603, 6653), True, 'import tensorflow_gan as tfgan\n'), ((6780, 6878), 'tensorflow_gan.eval.frechet_inception_distance_streaming', 'tfgan.eval.frechet_inception_distance_streaming', (['*args'], {'classifier_fn': 'classifier_fn'}), '(*args, classifier_fn=\n classifier_fn, **kwargs)\n', (6827, 6878), True, 'import tensorflow_gan as tfgan\n'), ((7323, 7345), 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (7343, 7345), True, 'import tensorflow as tf\n'), ((7946, 7980), 'tensorflow.compat.v1.trainable_variables', 'tf.compat.v1.trainable_variables', ([], {}), '()\n', (7978, 7980), True, 'import tensorflow as tf\n'), ((977, 1020), 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': 'None', 'dtype': 'tf.float32'}), '(shape=None, dtype=tf.float32)\n', (990, 1020), True, 'import tensorflow as tf\n')]
|
# Authored by : gusdn3477
# Co-authored by : tony9402
# Link : http://boj.kr/8a53cdacfc6340c894fb47257232f244
import sys
from collections import deque
def input():
return sys.stdin.readline().rstrip()
def checkMap():
for z in range(H):
for i in range(N):
for j in range(M):
if arr[z][i][j] == 0:
return False
return True
def BFS():
    # if every tomato is already ripe (or the cell can never hold one), no days are needed
    if checkMap():
        return 0
    while queue:
        q = queue.popleft()
        z, x, y = q[0]
        for i in range(6):
            dx = x + nx[i]
            dy = y + ny[i]
            dz = z + nz[i]
            if dx < 0 or dx >= N or dy < 0 or dy >= M or dz < 0 or dz >= H:
                continue
            if arr[dz][dx][dy] == 0:
                arr[dz][dx][dy] = 1
                queue.append(((dz,dx,dy), q[1]+1))
                # the tomato ripened here becomes ripe one day after its parent,
                # so the answer is q[1] + 1 when this step completes the grid
                if checkMap():
                    return q[1] + 1
return -1
M, N, H = map(int, input().split())
arr = []
nx = [-1,0,1,0,0,0]
ny = [0,-1,0,1,0,0]
nz = [0,0,0,0,-1,1]
queue = deque()
arr = [ [ list(map(int, input().split())) for _ in range(N) ] for _ in range(H) ]
for z in range(H):
for i in range(N):
for j in range(M):
if arr[z][i][j] == 1:
arr[z][i][j] = 1
queue.append(((z,i,j),0))
ans = BFS()
print(ans)
|
[
"sys.stdin.readline",
"collections.deque"
] |
[((973, 980), 'collections.deque', 'deque', ([], {}), '()\n', (978, 980), False, 'from collections import deque\n'), ((176, 196), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (194, 196), False, 'import sys\n')]
|
#!/usr/bin/python3
import inspect
from typing import List, Union
import collections
import jk_logging
# ----------------------------------------------------------------
_ActionRecord = collections.namedtuple("__ActionRecord", [ "f", "priority", "name" ])
def _listOfActionRecordsToStr(records:list):
ret = "[ "
for r in records:
if len(ret) > 2:
ret += ", "
ret += "(" + str(r.name if r.name else "callable") + ", " + str(r.priority) + ")"
return ret + " ]"
#
# ----------------------------------------------------------------
class _State(object):
def __init__(self, state):
self.__state = state
self.__fromStates = {} # preconditions: int -> __ActionRecord[]
self.__general = [] # __ActionRecord[]
#
def registerAction(self, fromStateOrListOfStates, action:callable, priority:int, name:str):
assert action != None
if fromStateOrListOfStates is None:
self.__general.append(_ActionRecord(action, priority, name))
else:
if isinstance(fromStateOrListOfStates, (list, tuple)):
for s in fromStateOrListOfStates:
s = int(s)
if s in self.__fromStates:
actions = self.__fromStates[s]
else:
actions = []
self.__fromStates[s] = actions
actions.append(_ActionRecord(action, priority, name))
else:
s = int(fromStateOrListOfStates)
if s in self.__fromStates:
actions = self.__fromStates[s]
else:
actions = []
self.__fromStates[s] = actions
actions.append(_ActionRecord(action, priority, name))
#
def getActions(self, fromState) -> List[_ActionRecord]:
fromState = int(fromState)
ret = []
ret.extend(self.__general)
if fromState in self.__fromStates:
ret.extend(self.__fromStates[fromState])
return ret
#
def __str__(self):
return str(self.__state)
#
def __repr__(self):
return str(self.__state)
#
def dump(self, stateMap, prefix, outputFunction):
outputFunction(prefix + str(self.__state) + ":")
prefix += "\t"
for precond in self.__fromStates:
outputFunction(prefix + str(stateMap[precond]) + " -> " + str(self.__state) + " : " + _listOfActionRecordsToStr(self.__fromStates[precond]))
if len(self.__general) > 0:
outputFunction(prefix + "* -> " + str(self.__state) + " : " + _listOfActionRecordsToStr(self.__general))
#
#
# ----------------------------------------------------------------
#
# This class manages actions on state transitions. The basic concept follows this principle:
#
# * initialization phase
# * instantiate a state manager object; specify all possible states and the initial state;
# * register actions with all transitions as necessary; you can choose from
# * actions performed if a state is left
# * actions performed if a state is reached
# * actions performed on specific state transitions
# * runtime phase
# * specify state transitions using <c>switchState()</c> and get actions performed automatically in sorted order: priorities of actions are considered accordingly
#
class StateManager(object):
def __init__(self, states, startingState):
self.__states = {}
for state in states:
self.__states[int(state)] = _State(state)
self.__currentState = startingState
self.__general = [] # __ActionRecord[]
#
@property
def currentState(self) -> int:
return self.__currentState
#
def registerActionFromTo(self, fromStateOrListOfStates, toStateOrListOfStates, action:callable, priority:int, name:str = None):
assert fromStateOrListOfStates != None
assert toStateOrListOfStates != None
assert action != None
if isinstance(toStateOrListOfStates, (list, tuple)):
for s in toStateOrListOfStates:
self.__states[int(s)].registerAction(fromStateOrListOfStates, action, priority, name)
else:
self.__states[int(toStateOrListOfStates)].registerAction(fromStateOrListOfStates, action, priority, name)
#
def registerActionTo(self, toStateOrListOfStates, action:callable, priority:int, name:str = None):
assert toStateOrListOfStates != None
assert action != None
if isinstance(toStateOrListOfStates, (list, tuple)):
for s in toStateOrListOfStates:
self.__states[int(s)].registerAction(None, action, priority, name)
else:
self.__states[int(toStateOrListOfStates)].registerAction(None, action, priority, name)
#
def registerActionFrom(self, fromStateOrListOfStates, action:callable, priority:int, name:str = None):
assert fromStateOrListOfStates != None
assert action != None
for state in self.__states.values():
state.registerAction(fromStateOrListOfStates, action, priority, name)
#
def registerAction(self, action:callable, priority:int, name:str = None):
assert action != None
self.__general.append(_ActionRecord(action, priority, name))
#
#
# Switch to a state without performaing any actions.
#
def switchStateWithoutActions(self, toState, logger:jk_logging.AbstractLogger = None):
if logger != None:
logger.debug("Setting state: " + str(toState))
self.__currentState = toState
#
#
# Switch the current state to the specified state and perform all actions defined for this transition.
#
def switchState(self, toState, logger:jk_logging.AbstractLogger = None):
if self.__currentState == toState:
return None
if logger != None:
logger.debug("Switching from state " + str(self.__currentState) + " to " + str(toState))
allActions = self.__states[int(toState)].getActions(self.__currentState)
allActions.extend(self.__general)
# sort all actions so that actions with the highest priority come first
allActions.sort(key=lambda a: a.priority, reverse=True)
# remember state
self.__currentState = toState
# process all actions
for a in allActions:
if logger != None:
logger.debug("Executing: " + str(a.f))
a.f()
#
def dump(self, prefix="", outputFunction=print):
outputFunction(prefix + "Current state: " + str(self.__currentState))
outputFunction(prefix + "States:")
prefix += "\t"
for state in self.__states.values():
state.dump(self.__states, prefix, outputFunction)
if len(self.__general) > 0:
outputFunction(prefix + "\t*" + " : " + _listOfActionRecordsToStr(self.__general))
#
#
|
[
"collections.namedtuple"
] |
[((193, 260), 'collections.namedtuple', 'collections.namedtuple', (['"""__ActionRecord"""', "['f', 'priority', 'name']"], {}), "('__ActionRecord', ['f', 'priority', 'name'])\n", (215, 260), False, 'import collections\n')]
|
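A minimal usage sketch for the StateManager record above (hypothetical states and callbacks; any int()-convertible values can serve as states):

sm = StateManager(states=[0, 1, 2], startingState=0)
# runs whenever state 1 is reached, regardless of the previous state
sm.registerActionTo(1, lambda: print("entered state 1"), priority=10, name="enter1")
# runs only on the specific 1 -> 2 transition
sm.registerActionFromTo(1, 2, lambda: print("1 -> 2"), priority=5, name="oneToTwo")
sm.switchState(1)  # -> "entered state 1"
sm.switchState(2)  # -> "1 -> 2"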
from __future__ import print_function, absolute_import, division
import numpy as np
from distutils.version import LooseVersion
def allbadtonan(function):
"""
Wrapper of numpy's nansum etc.: for <=1.8, just return the function's
results. For >=1.9, any axes with all-nan values will have all-nan outputs
in the collapsed version
"""
def f(data, axis=None, keepdims=None):
if keepdims is None:
result = function(data, axis=axis)
else:
result = function(data, axis=axis, keepdims=keepdims)
if LooseVersion(np.__version__) >= LooseVersion('1.9.0') and hasattr(result, '__len__'):
if axis is None:
if np.all(np.isnan(data)):
return np.nan
else:
return result
if keepdims is None:
nans = np.all(np.isnan(data), axis=axis)
else:
nans = np.all(np.isnan(data), axis=axis, keepdims=keepdims)
result[nans] = np.nan
return result
return f
|
[
"distutils.version.LooseVersion",
"numpy.isnan"
] |
[((565, 593), 'distutils.version.LooseVersion', 'LooseVersion', (['np.__version__'], {}), '(np.__version__)\n', (577, 593), False, 'from distutils.version import LooseVersion\n'), ((597, 618), 'distutils.version.LooseVersion', 'LooseVersion', (['"""1.9.0"""'], {}), "('1.9.0')\n", (609, 618), False, 'from distutils.version import LooseVersion\n'), ((706, 720), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (714, 720), True, 'import numpy as np\n'), ((876, 890), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (884, 890), True, 'import numpy as np\n'), ((951, 965), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (959, 965), True, 'import numpy as np\n')]
|
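A short usage sketch of the allbadtonan wrapper in the record above (assuming numpy >= 1.9, where the wrapper makes all-NaN slices collapse to NaN instead of 0):

import numpy as np

nansum = allbadtonan(np.nansum)
data = np.array([[1.0, np.nan],
                 [np.nan, np.nan]])
print(nansum(data, axis=1))  # [ 1. nan] -- the all-NaN second row stays NaN rather than summing to 0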
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from . import views
from placeReservations import views as reserviews
from placeReviews import views as revviews
from qnas import views as qnaviews
app_name = "places"
place_list = views.PlaceViewSet.as_view({"post": "create", "get": "list"})
place_detail = views.PlaceViewSet.as_view(
{"get": "retrieve", "delete": "destroy", "put": "update"}
)
reservation_list = reserviews.ReservationViewSet.as_view(
{"post": "create", "get": "list"}
)
reservation_detail = reserviews.ReservationViewSet.as_view(
{"get": "retrieve", "delete": "destroy"}
)
review_list = revviews.ReviewViewSet.as_view({"get": "list", "post": "create"})
review_detail = revviews.ReviewViewSet.as_view({"get": "retrieve", "delete": "destroy"})
ques_list = qnaviews.QuestionViewSet.as_view({"get": "list", "post": "create"})
ques_detail = qnaviews.QuestionViewSet.as_view({"get": "retrieve", "delete": "destroy"})
urlpatterns = [
path("", place_list),
path("<int:pk>", place_detail),
path("<int:place_pk>/reservations", reservation_list),
path("<int:place_pk>/reservations/<int:pk>", reservation_detail),
path("<int:place_pk>/reviews", review_list),
path("<int:place_pk>/reviews/<int:pk>", review_detail),
path("<int:place_pk>/questions", ques_list),
path("<int:place_pk>/questions/<int:pk>", ques_detail),
]
|
[
"placeReviews.views.ReviewViewSet.as_view",
"placeReservations.views.ReservationViewSet.as_view",
"qnas.views.QuestionViewSet.as_view",
"django.urls.path"
] |
[((460, 532), 'placeReservations.views.ReservationViewSet.as_view', 'reserviews.ReservationViewSet.as_view', (["{'post': 'create', 'get': 'list'}"], {}), "({'post': 'create', 'get': 'list'})\n", (497, 532), True, 'from placeReservations import views as reserviews\n'), ((561, 640), 'placeReservations.views.ReservationViewSet.as_view', 'reserviews.ReservationViewSet.as_view', (["{'get': 'retrieve', 'delete': 'destroy'}"], {}), "({'get': 'retrieve', 'delete': 'destroy'})\n", (598, 640), True, 'from placeReservations import views as reserviews\n'), ((662, 727), 'placeReviews.views.ReviewViewSet.as_view', 'revviews.ReviewViewSet.as_view', (["{'get': 'list', 'post': 'create'}"], {}), "({'get': 'list', 'post': 'create'})\n", (692, 727), True, 'from placeReviews import views as revviews\n'), ((744, 816), 'placeReviews.views.ReviewViewSet.as_view', 'revviews.ReviewViewSet.as_view', (["{'get': 'retrieve', 'delete': 'destroy'}"], {}), "({'get': 'retrieve', 'delete': 'destroy'})\n", (774, 816), True, 'from placeReviews import views as revviews\n'), ((830, 897), 'qnas.views.QuestionViewSet.as_view', 'qnaviews.QuestionViewSet.as_view', (["{'get': 'list', 'post': 'create'}"], {}), "({'get': 'list', 'post': 'create'})\n", (862, 897), True, 'from qnas import views as qnaviews\n'), ((912, 986), 'qnas.views.QuestionViewSet.as_view', 'qnaviews.QuestionViewSet.as_view', (["{'get': 'retrieve', 'delete': 'destroy'}"], {}), "({'get': 'retrieve', 'delete': 'destroy'})\n", (944, 986), True, 'from qnas import views as qnaviews\n'), ((1008, 1028), 'django.urls.path', 'path', (['""""""', 'place_list'], {}), "('', place_list)\n", (1012, 1028), False, 'from django.urls import path, include\n'), ((1034, 1064), 'django.urls.path', 'path', (['"""<int:pk>"""', 'place_detail'], {}), "('<int:pk>', place_detail)\n", (1038, 1064), False, 'from django.urls import path, include\n'), ((1070, 1123), 'django.urls.path', 'path', (['"""<int:place_pk>/reservations"""', 'reservation_list'], {}), "('<int:place_pk>/reservations', reservation_list)\n", (1074, 1123), False, 'from django.urls import path, include\n'), ((1129, 1193), 'django.urls.path', 'path', (['"""<int:place_pk>/reservations/<int:pk>"""', 'reservation_detail'], {}), "('<int:place_pk>/reservations/<int:pk>', reservation_detail)\n", (1133, 1193), False, 'from django.urls import path, include\n'), ((1199, 1242), 'django.urls.path', 'path', (['"""<int:place_pk>/reviews"""', 'review_list'], {}), "('<int:place_pk>/reviews', review_list)\n", (1203, 1242), False, 'from django.urls import path, include\n'), ((1248, 1302), 'django.urls.path', 'path', (['"""<int:place_pk>/reviews/<int:pk>"""', 'review_detail'], {}), "('<int:place_pk>/reviews/<int:pk>', review_detail)\n", (1252, 1302), False, 'from django.urls import path, include\n'), ((1308, 1351), 'django.urls.path', 'path', (['"""<int:place_pk>/questions"""', 'ques_list'], {}), "('<int:place_pk>/questions', ques_list)\n", (1312, 1351), False, 'from django.urls import path, include\n'), ((1357, 1411), 'django.urls.path', 'path', (['"""<int:place_pk>/questions/<int:pk>"""', 'ques_detail'], {}), "('<int:place_pk>/questions/<int:pk>', ques_detail)\n", (1361, 1411), False, 'from django.urls import path, include\n')]
|
from flask import Blueprint, request, current_app
from kerlescan import view_helpers
from kerlescan.view_helpers import validate_uuids
from system_baseline import metrics
from system_baseline.version import app_version
from system_baseline.models import SystemBaselineMappedSystem
section = Blueprint("v1", __name__)
FACTS_MAXSIZE = 2 ** 19 # 512KB
def get_version():
"""
return the service version
"""
return {"version": app_version}
@metrics.baseline_fetch_all_requests.time()
@metrics.api_exceptions.count_exceptions()
def get_baselines_by_system_id(system_id=None):
account_number = view_helpers.get_account_number(request)
if system_id:
validate_uuids([system_id])
query = SystemBaselineMappedSystem.query.filter(
SystemBaselineMappedSystem.account == account_number,
SystemBaselineMappedSystem.system_id == system_id,
)
else:
query = SystemBaselineMappedSystem.query.filter(
SystemBaselineMappedSystem.account == account_number
)
try:
query_results = query.all()
except Exception:
message = "Unknown error when reading baselines by system id"
current_app.logger.audit(message, request=request, success=False)
raise
message = "read baselines with system"
current_app.logger.audit(message, request=request, success=True)
return [result.system_baseline_id for result in query_results]
|
[
"system_baseline.metrics.baseline_fetch_all_requests.time",
"flask.Blueprint",
"system_baseline.models.SystemBaselineMappedSystem.query.filter",
"kerlescan.view_helpers.get_account_number",
"kerlescan.view_helpers.validate_uuids",
"system_baseline.metrics.api_exceptions.count_exceptions",
"flask.current_app.logger.audit"
] |
[((294, 319), 'flask.Blueprint', 'Blueprint', (['"""v1"""', '__name__'], {}), "('v1', __name__)\n", (303, 319), False, 'from flask import Blueprint, request, current_app\n'), ((461, 503), 'system_baseline.metrics.baseline_fetch_all_requests.time', 'metrics.baseline_fetch_all_requests.time', ([], {}), '()\n', (501, 503), False, 'from system_baseline import metrics\n'), ((505, 546), 'system_baseline.metrics.api_exceptions.count_exceptions', 'metrics.api_exceptions.count_exceptions', ([], {}), '()\n', (544, 546), False, 'from system_baseline import metrics\n'), ((616, 656), 'kerlescan.view_helpers.get_account_number', 'view_helpers.get_account_number', (['request'], {}), '(request)\n', (647, 656), False, 'from kerlescan import view_helpers\n'), ((1324, 1388), 'flask.current_app.logger.audit', 'current_app.logger.audit', (['message'], {'request': 'request', 'success': '(True)'}), '(message, request=request, success=True)\n', (1348, 1388), False, 'from flask import Blueprint, request, current_app\n'), ((684, 711), 'kerlescan.view_helpers.validate_uuids', 'validate_uuids', (['[system_id]'], {}), '([system_id])\n', (698, 711), False, 'from kerlescan.view_helpers import validate_uuids\n'), ((728, 876), 'system_baseline.models.SystemBaselineMappedSystem.query.filter', 'SystemBaselineMappedSystem.query.filter', (['(SystemBaselineMappedSystem.account == account_number)', '(SystemBaselineMappedSystem.system_id == system_id)'], {}), '(SystemBaselineMappedSystem.account ==\n account_number, SystemBaselineMappedSystem.system_id == system_id)\n', (767, 876), False, 'from system_baseline.models import SystemBaselineMappedSystem\n'), ((934, 1031), 'system_baseline.models.SystemBaselineMappedSystem.query.filter', 'SystemBaselineMappedSystem.query.filter', (['(SystemBaselineMappedSystem.account == account_number)'], {}), '(SystemBaselineMappedSystem.account ==\n account_number)\n', (973, 1031), False, 'from system_baseline.models import SystemBaselineMappedSystem\n'), ((1196, 1261), 'flask.current_app.logger.audit', 'current_app.logger.audit', (['message'], {'request': 'request', 'success': '(False)'}), '(message, request=request, success=False)\n', (1220, 1261), False, 'from flask import Blueprint, request, current_app\n')]
|
# Copyright 2017-2019 typed_python Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typed_python.compiler.type_wrappers.wrapper import Wrapper
import typed_python.compiler.native_ast as native_ast
from typed_python import Int32
def tp_hash_to_py_hash(hVal):
"""Convert a typed-python hash to a regular python hash.
Python insists that its hash values are never -1, because it uses -1 as an
indicator that the exception flag is set. TypedPython doesn't have this behavior
because it uses c++ exception propagation internally. As a result, it's the
'hash' wrapper that's responsible for mapping -1 to -2.
"""
if hVal == -1:
return Int32(-2)
return hVal
class HashWrapper(Wrapper):
is_pod = True
is_empty = False
is_pass_by_ref = False
def __init__(self):
super().__init__(hash)
def getNativeLayoutType(self):
return native_ast.Type.Void()
def convert_call(self, context, expr, args, kwargs):
if len(args) == 1 and not kwargs:
hashVal = args[0].convert_hash()
if hashVal is None:
return None
return context.call_py_function(tp_hash_to_py_hash, (hashVal,), {})
return super().convert_call(context, expr, args, kwargs)
|
[
"typed_python.Int32",
"typed_python.compiler.native_ast.Type.Void"
] |
[((1199, 1208), 'typed_python.Int32', 'Int32', (['(-2)'], {}), '(-2)\n', (1204, 1208), False, 'from typed_python import Int32\n'), ((1428, 1450), 'typed_python.compiler.native_ast.Type.Void', 'native_ast.Type.Void', ([], {}), '()\n', (1448, 1450), True, 'import typed_python.compiler.native_ast as native_ast\n')]
|
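For context on the -1 to -2 mapping in the record above: CPython applies the same convention to its built-in hash, since -1 is reserved internally as an error indicator, e.g.:

# in CPython, hash() never returns -1
assert hash(-1) == -2
assert hash(-2) == -2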
import numpy as np
def calc_mass(m, l_rod):
M = np.array([[m, 0., 0.], [0., m, 0.], [0., 0., (1. / 12.) * m * l_rod * l_rod]])
return M
def calc_rot(q):
theta = q[2][0]
c, s = np.cos(theta), np.sin(theta)
R = np.array(((c, -s, 0.), (s, c, 0.), (0., 0., 1.)))
return R
# Location of the rev joint - 1st end
def calc_rD1(q, l_rod):
R = calc_rot(q)
rD = np.array([q[0][0], q[1][0], 0.]) + np.matmul(R, np.array([-l_rod / 2., 0., 0.]))
return rD
def calc_A(q, l):
rG1 = np.array([q[0][0], q[1][0], 0.])
rOrg = calc_rD1(q, l)
rOrgG1 = rOrg - rG1
A = np.array([[0., 1., rOrgG1[0]]])
return A
def calc_Qg(m_p, g):
Qg = m_p*g
return Qg
def calcDrivingForce(q, l, f):
fVec = np.array([[f],[0.],[(f*np.sin(q[2][0]))*(l/2.0)]])
return fVec
def step_sim(f, q, qd, mass, l, h):
# print(f)
g = np.array([[0.], [-9.81], [0.]])
m = calc_mass(mass, l)
W = np.zeros((4, 4))
b = np.zeros((4, 1))
Qg = calc_Qg(mass, g)
Org = np.array([0., 0., 0.])
# Compliance
# C = 1.e-8 * np.identity(6)
# start the step calculations
rO = calc_rD1(q, l)
phi = -(rO - Org) / h
A = calc_A(q, l)
W[0:3, 0:3] = m
W[0:3, 3:4] = A.transpose()
W[3:4, 0:3] = A
# W[9:15, 9:15] = C
fVec = calcDrivingForce(q,l,f)
b[0:3, ] = h * Qg + np.matmul(m, qd) + h * fVec
b[3,] = np.array([phi[1]])
X = np.linalg.solve(W, b)
qd = X[0:3, ]
qp = q
q = qp + h * qd
# print(q[2][0])
if q[2][0] > 2. * np.pi:
q[2][0] = q[2][0] - 2.*np.pi
if q[2][0] < -2. * np.pi:
q[2][0] = q[2][0] + 2. * np.pi
return q, qd
|
[
"numpy.zeros",
"numpy.sin",
"numpy.array",
"numpy.cos",
"numpy.matmul",
"numpy.linalg.solve"
] |
[((53, 141), 'numpy.array', 'np.array', (['[[m, 0.0, 0.0], [0.0, m, 0.0], [0.0, 0.0, 1.0 / 12.0 * m * l_rod * l_rod]]'], {}), '([[m, 0.0, 0.0], [0.0, m, 0.0], [0.0, 0.0, 1.0 / 12.0 * m * l_rod *\n l_rod]])\n', (61, 141), True, 'import numpy as np\n'), ((232, 286), 'numpy.array', 'np.array', (['((c, -s, 0.0), (s, c, 0.0), (0.0, 0.0, 1.0))'], {}), '(((c, -s, 0.0), (s, c, 0.0), (0.0, 0.0, 1.0)))\n', (240, 286), True, 'import numpy as np\n'), ((511, 544), 'numpy.array', 'np.array', (['[q[0][0], q[1][0], 0.0]'], {}), '([q[0][0], q[1][0], 0.0])\n', (519, 544), True, 'import numpy as np\n'), ((604, 637), 'numpy.array', 'np.array', (['[[0.0, 1.0, rOrgG1[0]]]'], {}), '([[0.0, 1.0, rOrgG1[0]]])\n', (612, 637), True, 'import numpy as np\n'), ((871, 904), 'numpy.array', 'np.array', (['[[0.0], [-9.81], [0.0]]'], {}), '([[0.0], [-9.81], [0.0]])\n', (879, 904), True, 'import numpy as np\n'), ((938, 954), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (946, 954), True, 'import numpy as np\n'), ((963, 979), 'numpy.zeros', 'np.zeros', (['(4, 1)'], {}), '((4, 1))\n', (971, 979), True, 'import numpy as np\n'), ((1017, 1042), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (1025, 1042), True, 'import numpy as np\n'), ((1394, 1412), 'numpy.array', 'np.array', (['[phi[1]]'], {}), '([phi[1]])\n', (1402, 1412), True, 'import numpy as np\n'), ((1422, 1443), 'numpy.linalg.solve', 'np.linalg.solve', (['W', 'b'], {}), '(W, b)\n', (1437, 1443), True, 'import numpy as np\n'), ((195, 208), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (201, 208), True, 'import numpy as np\n'), ((210, 223), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (216, 223), True, 'import numpy as np\n'), ((387, 420), 'numpy.array', 'np.array', (['[q[0][0], q[1][0], 0.0]'], {}), '([q[0][0], q[1][0], 0.0])\n', (395, 420), True, 'import numpy as np\n'), ((435, 469), 'numpy.array', 'np.array', (['[-l_rod / 2.0, 0.0, 0.0]'], {}), '([-l_rod / 2.0, 0.0, 0.0])\n', (443, 469), True, 'import numpy as np\n'), ((1354, 1370), 'numpy.matmul', 'np.matmul', (['m', 'qd'], {}), '(m, qd)\n', (1363, 1370), True, 'import numpy as np\n'), ((766, 781), 'numpy.sin', 'np.sin', (['q[2][0]'], {}), '(q[2][0])\n', (772, 781), True, 'import numpy as np\n')]
|
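The step_sim routine in the record above advances a single rigid rod by solving a small saddle-point (KKT-style) system each step. In the notation of that code (my reading of it, not stated in the source), the linear solve is

\[ \begin{bmatrix} M & A^{\mathsf{T}} \\ A & 0 \end{bmatrix} \begin{bmatrix} \dot{q}^{+} \\ \lambda \end{bmatrix} = \begin{bmatrix} M\dot{q} + h\,(Q_g + f) \\ \varphi_y \end{bmatrix}, \qquad \varphi = -\frac{r_O - r_{\mathrm{origin}}}{h}, \]

where the single constraint row A = [0, 1, (r_O - r_G)_x] pins the y-coordinate of the rod end O to the origin (the x-coordinate is left free), and the new generalized velocity is the first three entries of the solution.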
# Copyright (c) 2017, CNRS.
# All rights reserved.
#
# Released under the BSD 3-Clause license as published at the link below.
# https://openwsn.atlassian.net/wiki/display/OW/License
import logging
import sys
from openvisualizer.motehandler.moteconnector.openparser import parser
from openvisualizer.motehandler.moteconnector.openparser.parserexception import ParserException
from openvisualizer.utils import format_buf
log = logging.getLogger('ParserPrintf')
log.setLevel(logging.INFO)
log.addHandler(logging.NullHandler())
class ParserPrintf(parser.Parser):
HEADER_LENGTH = 2
buffer = ""
def __init__(self):
# log
log.debug('create instance')
# initialize parent class
super(ParserPrintf, self).__init__(self.HEADER_LENGTH)
# returns a string with the decimal value of a uint16_t
@staticmethod
def bytes_to_string(bytestring):
string = ''
i = 0
for byte in bytestring:
string = format(eval('{0} + {1} * 256 ** {2}'.format(string, byte, i)))
i = i + 1
return string
@staticmethod
def bytes_to_addr(bytestring):
string = ''
for byte in bytestring:
string = string + '{:02x}'.format(byte)
return string
def parse_input(self, data):
# log
log.debug('received printf {0}'.format(data))
mote_id = ParserPrintf.bytes_to_addr(data[0:2]) # addr
asn = ParserPrintf.bytes_to_string(data[2:7]) # asn
msg = "{}".format("".join([chr(c) for c in data[7:]]))
log.info("[ASN={ASN}] {MOTEID}: {MSG}".format(
ASN=asn,
MOTEID=mote_id,
MSG=msg.strip()
)
)
#sys.stdout.write("{0} {1} ".format(mote_id, asn));
#sys.stdout.write("{}".format("".join([chr(c) for c in data[7:]])))
#sys.stdout.flush()
# everything was fine
return 'error', data
|
[
"logging.getLogger",
"logging.NullHandler"
] |
[((430, 463), 'logging.getLogger', 'logging.getLogger', (['"""ParserPrintf"""'], {}), "('ParserPrintf')\n", (447, 463), False, 'import logging\n'), ((506, 527), 'logging.NullHandler', 'logging.NullHandler', ([], {}), '()\n', (525, 527), False, 'import logging\n')]
|
import numpy as np
import astropy.units as u
import classes
dist = classes.Distribution('planets3_bottomup/')
ms = np.round(np.logspace(np.log10(0.1), np.log10(13*u.Mjup.to('Mearth')),100),3)
aas = np.round(np.logspace(np.log10(0.1), np.log10(10),100), 3)[::-1]
aas2 = np.round(np.logspace(np.log10(0.04), np.log10(10),100), 3)[::-1]
spliced_aas = np.append(aas[:-51], aas2[43:-13])
np.round(np.log10(ms),3)
dist.fit(ms=ms, aas=aas)
|
[
"numpy.append",
"numpy.log10",
"astropy.units.Mjup.to",
"classes.Distribution"
] |
[((68, 110), 'classes.Distribution', 'classes.Distribution', (['"""planets3_bottomup/"""'], {}), "('planets3_bottomup/')\n", (88, 110), False, 'import classes\n'), ((349, 383), 'numpy.append', 'np.append', (['aas[:-51]', 'aas2[43:-13]'], {}), '(aas[:-51], aas2[43:-13])\n', (358, 383), True, 'import numpy as np\n'), ((393, 405), 'numpy.log10', 'np.log10', (['ms'], {}), '(ms)\n', (401, 405), True, 'import numpy as np\n'), ((137, 150), 'numpy.log10', 'np.log10', (['(0.1)'], {}), '(0.1)\n', (145, 150), True, 'import numpy as np\n'), ((220, 233), 'numpy.log10', 'np.log10', (['(0.1)'], {}), '(0.1)\n', (228, 233), True, 'import numpy as np\n'), ((235, 247), 'numpy.log10', 'np.log10', (['(10)'], {}), '(10)\n', (243, 247), True, 'import numpy as np\n'), ((291, 305), 'numpy.log10', 'np.log10', (['(0.04)'], {}), '(0.04)\n', (299, 305), True, 'import numpy as np\n'), ((307, 319), 'numpy.log10', 'np.log10', (['(10)'], {}), '(10)\n', (315, 319), True, 'import numpy as np\n'), ((164, 183), 'astropy.units.Mjup.to', 'u.Mjup.to', (['"""Mearth"""'], {}), "('Mearth')\n", (173, 183), True, 'import astropy.units as u\n')]
|
#!/usr/bin/env python3
"""This script invokes format.py in the wpilibsuite/styleguide repository.
Set the WPI_FORMAT environment variable to its location on disk before use. For
example:
WPI_FORMAT="$HOME/styleguide" ./format.py
"""
import os
import subprocess
import sys
def main():
path = os.environ.get("WPI_FORMAT")
if path == None:
print("Error: WPI_FORMAT environment variable not set")
sys.exit(1)
try:
# Run main format.py script
args = ["python3", path + "/format.py"]
args.extend(sys.argv[1:])
proc = subprocess.Popen(args)
sys.exit(proc.wait())
except FileNotFoundError:
# Run main format.py script on windows
args = ["py", "-3", path + "/format.py"]
args.extend(sys.argv[1:])
proc = subprocess.Popen(args)
sys.exit(proc.wait())
if __name__ == "__main__":
main()
|
[
"os.environ.get",
"subprocess.Popen",
"sys.exit"
] |
[((300, 328), 'os.environ.get', 'os.environ.get', (['"""WPI_FORMAT"""'], {}), "('WPI_FORMAT')\n", (314, 328), False, 'import os\n'), ((422, 433), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (430, 433), False, 'import sys\n'), ((577, 599), 'subprocess.Popen', 'subprocess.Popen', (['args'], {}), '(args)\n', (593, 599), False, 'import subprocess\n'), ((805, 827), 'subprocess.Popen', 'subprocess.Popen', (['args'], {}), '(args)\n', (821, 827), False, 'import subprocess\n')]
|
from django.db import models
from django.core.validators import MinValueValidator, MaxValueValidator
class GuacdServer(models.Model):
class Meta:
verbose_name = "Guacd Server"
verbose_name_plural = "Guacd Servers"
name = models.CharField(max_length=64, blank=False, unique=True,
default="guacd server")
hostname = models.CharField(max_length=64, blank=False,
default="localhost")
port = models.PositiveIntegerField(blank=False, default=4822,
validators=[MinValueValidator(1),
MaxValueValidator(65535)])
def __str__(self):
return self.name
|
[
"django.db.models.CharField",
"django.core.validators.MinValueValidator",
"django.core.validators.MaxValueValidator"
] |
[((248, 334), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'blank': '(False)', 'unique': '(True)', 'default': '"""guacd server"""'}), "(max_length=64, blank=False, unique=True, default=\n 'guacd server')\n", (264, 334), False, 'from django.db import models\n'), ((374, 439), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'blank': '(False)', 'default': '"""localhost"""'}), "(max_length=64, blank=False, default='localhost')\n", (390, 439), False, 'from django.db import models\n'), ((590, 610), 'django.core.validators.MinValueValidator', 'MinValueValidator', (['(1)'], {}), '(1)\n', (607, 610), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n'), ((663, 687), 'django.core.validators.MaxValueValidator', 'MaxValueValidator', (['(65535)'], {}), '(65535)\n', (680, 687), False, 'from django.core.validators import MinValueValidator, MaxValueValidator\n')]
|
from typing import List, Tuple
import vapoursynth as vs
from lvsfunc.misc import source, replace_ranges
from lvsfunc.types import Range
from vardautomation import (FSRCNNX_56_16_4_1, JAPANESE, AudioCutter,
AudioStream, BasicTool, FileInfo, FlacEncoder, Mux,
Patch, PresetBD, PresetFLAC, RunnerConfig,
SelfRunner, VideoStream, VPath, X265Encoder)
from vardefunc.misc import get_bicubic_params
from vsutil import get_w
from bento_filters import flt
core = vs.core
core.num_threads = 16
EPNUM = __file__[-5:-3]
# Sources
JPBD_NCOP = FileInfo(r'BDMV/Vol.1/BDMV/STREAM/00003.m2ts', 0, -24,
idx=lambda x: source(x, cachedir=''),
preset=[PresetBD, PresetFLAC])
JPBD_EP = FileInfo(r'BDMV/Vol.2/BDMV/STREAM/00000.m2ts', 1534, 1534+JPBD_NCOP.clip_cut.num_frames,
idx=lambda x: source(x, cachedir=''),
preset=[PresetBD, PresetFLAC])
JPBD_NCOP.name_file_final = VPath(fr"premux/{JPBD_NCOP.name} (Premux).mkv")
JPBD_NCOP.do_qpfile = True
# Common variables
replace_op: List[Range] = [(418, 526)]
op_aisle: List[Range] = [(281, 373)]
red_circle: List[Range] = [(1934, 1951), (1956, 1979), (1984, 2054)]
def main() -> vs.VideoNode:
"""Vapoursynth filtering"""
from adptvgrnMod import adptvgrnMod
from havsfunc import FastLineDarkenMOD
from vsutil import depth
src_op = JPBD_NCOP.clip_cut
src_ep = JPBD_EP.clip_cut
src = replace_ranges(src_op, src_ep, replace_op)
scaled = flt.rescaler(src, 720)
denoised = flt.denoiser(scaled, bm3d_sigma=[0.8, 0.6], bm3d_rad=1)
aa_rep = flt.clamped_aa(denoised)
trans_sraa = flt.transpose_sraa(denoised)
aa_ranges = replace_ranges(aa_rep, trans_sraa, red_circle)
darken = FastLineDarkenMOD(aa_ranges, strength=48, protection=6, luma_cap=255, threshold=2)
deband = flt.masked_deband(darken, denoised=True, deband_args={'iterations': 2, 'threshold': 5.0, 'radius': 8, 'grain': 6})
pdeband = flt.placebo_debander(darken, grain=4, deband_args={'iterations': 2, 'threshold': 8.0, 'radius': 10})
deband = replace_ranges(deband, pdeband, op_aisle)
grain = adptvgrnMod(deband, strength=0.3, luma_scaling=10, size=1.25, sharp=80, grain_chroma=False, seed=42069)
return depth(grain, 10).std.Limiter(16 << 2, [235 << 2, 240 << 2], [0, 1, 2])
class Encoding:
def __init__(self, file: FileInfo, clip: vs.VideoNode) -> None:
self.file = file
self.clip = clip
def run(self) -> None:
assert self.file.a_src
assert self.file.a_enc_cut
self.preqpfileflt()
v_encoder = X265Encoder('x265', 'settings/x265_settings_BD')
p = Patch(
file_to_fix=f'premux/{JPBD_NCOP.name[:-2]}01 (Premux).mkv',
filtered_clip=filtered,
frame_start=281,
frame_end=527,
encoder=X265Encoder('x265', 'settings/x265_settings_BD'),
file=JPBD_NCOP,
output_filename=VPath(fr"{JPBD_NCOP.name} (Premux).mkv")
)
p.run()
p.do_cleanup()
def preqpfileflt(self) -> None:
"""Pre-QP file generation filtering so the scenes match properly"""
self.file.clip_cut = replace_ranges(self.file.clip_cut, JPBD_EP.clip_cut, replace_op)
if __name__ == '__main__':
filtered = main()
filtered = filtered
Encoding(JPBD_NCOP, filtered).run()
else:
JPBD_NCOP.clip_cut.set_output(0)
FILTERED = main()
FILTERED.set_output(1)
|
[
"bento_filters.flt.transpose_sraa",
"lvsfunc.misc.replace_ranges",
"bento_filters.flt.denoiser",
"havsfunc.FastLineDarkenMOD",
"vardautomation.X265Encoder",
"vsutil.depth",
"bento_filters.flt.clamped_aa",
"bento_filters.flt.rescaler",
"lvsfunc.misc.source",
"vardautomation.VPath",
"bento_filters.flt.placebo_debander",
"adptvgrnMod.adptvgrnMod",
"bento_filters.flt.masked_deband"
] |
[((1020, 1066), 'vardautomation.VPath', 'VPath', (['f"""premux/{JPBD_NCOP.name} (Premux).mkv"""'], {}), "(f'premux/{JPBD_NCOP.name} (Premux).mkv')\n", (1025, 1066), False, 'from vardautomation import FSRCNNX_56_16_4_1, JAPANESE, AudioCutter, AudioStream, BasicTool, FileInfo, FlacEncoder, Mux, Patch, PresetBD, PresetFLAC, RunnerConfig, SelfRunner, VideoStream, VPath, X265Encoder\n'), ((1508, 1550), 'lvsfunc.misc.replace_ranges', 'replace_ranges', (['src_op', 'src_ep', 'replace_op'], {}), '(src_op, src_ep, replace_op)\n', (1522, 1550), False, 'from lvsfunc.misc import source, replace_ranges\n'), ((1565, 1587), 'bento_filters.flt.rescaler', 'flt.rescaler', (['src', '(720)'], {}), '(src, 720)\n', (1577, 1587), False, 'from bento_filters import flt\n'), ((1604, 1659), 'bento_filters.flt.denoiser', 'flt.denoiser', (['scaled'], {'bm3d_sigma': '[0.8, 0.6]', 'bm3d_rad': '(1)'}), '(scaled, bm3d_sigma=[0.8, 0.6], bm3d_rad=1)\n', (1616, 1659), False, 'from bento_filters import flt\n'), ((1674, 1698), 'bento_filters.flt.clamped_aa', 'flt.clamped_aa', (['denoised'], {}), '(denoised)\n', (1688, 1698), False, 'from bento_filters import flt\n'), ((1716, 1744), 'bento_filters.flt.transpose_sraa', 'flt.transpose_sraa', (['denoised'], {}), '(denoised)\n', (1734, 1744), False, 'from bento_filters import flt\n'), ((1761, 1807), 'lvsfunc.misc.replace_ranges', 'replace_ranges', (['aa_rep', 'trans_sraa', 'red_circle'], {}), '(aa_rep, trans_sraa, red_circle)\n', (1775, 1807), False, 'from lvsfunc.misc import source, replace_ranges\n'), ((1822, 1908), 'havsfunc.FastLineDarkenMOD', 'FastLineDarkenMOD', (['aa_ranges'], {'strength': '(48)', 'protection': '(6)', 'luma_cap': '(255)', 'threshold': '(2)'}), '(aa_ranges, strength=48, protection=6, luma_cap=255,\n threshold=2)\n', (1839, 1908), False, 'from havsfunc import FastLineDarkenMOD\n'), ((1919, 2037), 'bento_filters.flt.masked_deband', 'flt.masked_deband', (['darken'], {'denoised': '(True)', 'deband_args': "{'iterations': 2, 'threshold': 5.0, 'radius': 8, 'grain': 6}"}), "(darken, denoised=True, deband_args={'iterations': 2,\n 'threshold': 5.0, 'radius': 8, 'grain': 6})\n", (1936, 2037), False, 'from bento_filters import flt\n'), ((2048, 2152), 'bento_filters.flt.placebo_debander', 'flt.placebo_debander', (['darken'], {'grain': '(4)', 'deband_args': "{'iterations': 2, 'threshold': 8.0, 'radius': 10}"}), "(darken, grain=4, deband_args={'iterations': 2,\n 'threshold': 8.0, 'radius': 10})\n", (2068, 2152), False, 'from bento_filters import flt\n'), ((2162, 2203), 'lvsfunc.misc.replace_ranges', 'replace_ranges', (['deband', 'pdeband', 'op_aisle'], {}), '(deband, pdeband, op_aisle)\n', (2176, 2203), False, 'from lvsfunc.misc import source, replace_ranges\n'), ((2217, 2324), 'adptvgrnMod.adptvgrnMod', 'adptvgrnMod', (['deband'], {'strength': '(0.3)', 'luma_scaling': '(10)', 'size': '(1.25)', 'sharp': '(80)', 'grain_chroma': '(False)', 'seed': '(42069)'}), '(deband, strength=0.3, luma_scaling=10, size=1.25, sharp=80,\n grain_chroma=False, seed=42069)\n', (2228, 2324), False, 'from adptvgrnMod import adptvgrnMod\n'), ((2684, 2732), 'vardautomation.X265Encoder', 'X265Encoder', (['"""x265"""', '"""settings/x265_settings_BD"""'], {}), "('x265', 'settings/x265_settings_BD')\n", (2695, 2732), False, 'from vardautomation import FSRCNNX_56_16_4_1, JAPANESE, AudioCutter, AudioStream, BasicTool, FileInfo, FlacEncoder, Mux, Patch, PresetBD, PresetFLAC, RunnerConfig, SelfRunner, VideoStream, VPath, X265Encoder\n'), ((3276, 3340), 'lvsfunc.misc.replace_ranges', 'replace_ranges', (['self.file.clip_cut', 'JPBD_EP.clip_cut', 'replace_op'], {}), '(self.file.clip_cut, JPBD_EP.clip_cut, replace_op)\n', (3290, 3340), False, 'from lvsfunc.misc import source, replace_ranges\n'), ((710, 732), 'lvsfunc.misc.source', 'source', (['x'], {'cachedir': '""""""'}), "(x, cachedir='')\n", (716, 732), False, 'from lvsfunc.misc import source, replace_ranges\n'), ((918, 940), 'lvsfunc.misc.source', 'source', (['x'], {'cachedir': '""""""'}), "(x, cachedir='')\n", (924, 940), False, 'from lvsfunc.misc import source, replace_ranges\n'), ((2333, 2349), 'vsutil.depth', 'depth', (['grain', '(10)'], {}), '(grain, 10)\n', (2338, 2349), False, 'from vsutil import depth\n'), ((2937, 2985), 'vardautomation.X265Encoder', 'X265Encoder', (['"""x265"""', '"""settings/x265_settings_BD"""'], {}), "('x265', 'settings/x265_settings_BD')\n", (2948, 2985), False, 'from vardautomation import FSRCNNX_56_16_4_1, JAPANESE, AudioCutter, AudioStream, BasicTool, FileInfo, FlacEncoder, Mux, Patch, PresetBD, PresetFLAC, RunnerConfig, SelfRunner, VideoStream, VPath, X265Encoder\n'), ((3043, 3082), 'vardautomation.VPath', 'VPath', (['f"""{JPBD_NCOP.name} (Premux).mkv"""'], {}), "(f'{JPBD_NCOP.name} (Premux).mkv')\n", (3048, 3082), False, 'from vardautomation import FSRCNNX_56_16_4_1, JAPANESE, AudioCutter, AudioStream, BasicTool, FileInfo, FlacEncoder, Mux, Patch, PresetBD, PresetFLAC, RunnerConfig, SelfRunner, VideoStream, VPath, X265Encoder\n')]
|
# coding: utf-8
"""
jatdb
JSON API to DB: Fetch JSON from APIs and send to a TinyDB database. # noqa: E501
OpenAPI spec version: 0.0.2
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import jatdb_client
from jatdb_client.api.trello_api import TrelloApi # noqa: E501
from jatdb_client.rest import ApiException
class TestTrelloApi(unittest.TestCase):
"""TrelloApi unit test stubs"""
def setUp(self):
self.api = jatdb_client.api.trello_api.TrelloApi() # noqa: E501
def tearDown(self):
pass
def test_trello_model_id_put(self):
"""Test case for trello_model_id_put
Updates the models currently in db. # noqa: E501
"""
pass
def test_trello_post(self):
"""Test case for trello_post
"""
pass
def test_trello_put(self):
"""Test case for trello_put
"""
pass
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"jatdb_client.api.trello_api.TrelloApi"
] |
[((1035, 1050), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1048, 1050), False, 'import unittest\n'), ((550, 589), 'jatdb_client.api.trello_api.TrelloApi', 'jatdb_client.api.trello_api.TrelloApi', ([], {}), '()\n', (587, 589), False, 'import jatdb_client\n')]
|
import os
import dotenv
if os.path.exists('.env'):
dotenv.load_dotenv('.env')
DEBUG = True if os.getenv('DEBUG') == 'True' else False
# REQUIRED APP SETTINGS
FRONTEND_URL = os.getenv('FRONTEND_URL')
CONTROLLERS = ['auth', 'resume', 'status']
PROVIDERS = ['headhunter', 'superjob']
CLEANUP_PERIOD = 60*60*24 # sec
REAUTH_PERIOD = 60*180 # sec
PUSH_PERIOD = 60*30 # sec
JWT_HEADER_TYPE = 'JWT'
JWT_SECRET_KEY = os.getenv('JWT_SECRET_KEY', os.urandom(64))
JWT_ACCESS_TOKEN_EXPIRES = os.getenv('JWT_ACCESS_TOKEN_EXPIRES', 15) # min
SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URL', 'postgres://')
SQLALCHEMY_TRACK_MODIFICATIONS = False
REDIS_URL = os.getenv('REDIS_URL', 'redis://')
CACHE_TYPE = 'redis'
CACHE_KEY_PREFIX = 'cache'
CACHE_DEFAULT_TIMEOUT = 300
CACHE_REDIS_URL = os.getenv('REDIS_URL', 'redis://')
SENTRY_DSN = os.getenv('SENTRY_DSN', None)
SCOUT_KEY = os.getenv('SCOUT_KEY', None)
SCOUT_NAME = 'pushresume-dev' if DEBUG else 'pushresume'
SCOUT_MONITOR = True
# PROVIDERS SETTINGS
HEADHUNTER = {
'client_id': os.getenv('HH_CLIENT'),
'client_secret': os.getenv('HH_SECRET'),
'base_url': os.getenv('HH_BASE_URL'),
'authorize_url': os.getenv('HH_AUTH_URL'),
'access_token_url': os.getenv('HH_TOKEN_URL')
}
SUPERJOB = {
'client_id': os.getenv('SJ_CLIENT'),
'client_secret': os.getenv('SJ_SECRET'),
'base_url': os.getenv('SJ_BASE_URL'),
'authorize_url': os.getenv('SJ_AUTH_URL'),
'access_token_url': os.getenv('SJ_TOKEN_URL'),
'refresh_token_url': os.getenv('SJ_TOKEN_REFRESH_URL')
}
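# ---------------------------------------------------------------------------
# Illustrative note (added for clarity, not part of the original settings
# module): a module of upper-case constants like this is normally loaded into
# Flask with ``app.config.from_object``; the module name ``config`` below is
# an assumption about how this file is importable.
#
#     from flask import Flask
#     app = Flask(__name__)
#     app.config.from_object('config')
#     app.config['CACHE_TYPE']  # -> 'redis'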
|
[
"dotenv.load_dotenv",
"os.urandom",
"os.path.exists",
"os.getenv"
] |
[((28, 50), 'os.path.exists', 'os.path.exists', (['""".env"""'], {}), "('.env')\n", (42, 50), False, 'import os\n'), ((181, 206), 'os.getenv', 'os.getenv', (['"""FRONTEND_URL"""'], {}), "('FRONTEND_URL')\n", (190, 206), False, 'import os\n'), ((494, 535), 'os.getenv', 'os.getenv', (['"""JWT_ACCESS_TOKEN_EXPIRES"""', '(15)'], {}), "('JWT_ACCESS_TOKEN_EXPIRES', 15)\n", (503, 535), False, 'import os\n'), ((570, 610), 'os.getenv', 'os.getenv', (['"""DATABASE_URL"""', '"""postgres://"""'], {}), "('DATABASE_URL', 'postgres://')\n", (579, 610), False, 'import os\n'), ((663, 697), 'os.getenv', 'os.getenv', (['"""REDIS_URL"""', '"""redis://"""'], {}), "('REDIS_URL', 'redis://')\n", (672, 697), False, 'import os\n'), ((793, 827), 'os.getenv', 'os.getenv', (['"""REDIS_URL"""', '"""redis://"""'], {}), "('REDIS_URL', 'redis://')\n", (802, 827), False, 'import os\n'), ((842, 871), 'os.getenv', 'os.getenv', (['"""SENTRY_DSN"""', 'None'], {}), "('SENTRY_DSN', None)\n", (851, 871), False, 'import os\n'), ((885, 913), 'os.getenv', 'os.getenv', (['"""SCOUT_KEY"""', 'None'], {}), "('SCOUT_KEY', None)\n", (894, 913), False, 'import os\n'), ((56, 82), 'dotenv.load_dotenv', 'dotenv.load_dotenv', (['""".env"""'], {}), "('.env')\n", (74, 82), False, 'import dotenv\n'), ((451, 465), 'os.urandom', 'os.urandom', (['(64)'], {}), '(64)\n', (461, 465), False, 'import os\n'), ((1047, 1069), 'os.getenv', 'os.getenv', (['"""HH_CLIENT"""'], {}), "('HH_CLIENT')\n", (1056, 1069), False, 'import os\n'), ((1092, 1114), 'os.getenv', 'os.getenv', (['"""HH_SECRET"""'], {}), "('HH_SECRET')\n", (1101, 1114), False, 'import os\n'), ((1132, 1156), 'os.getenv', 'os.getenv', (['"""HH_BASE_URL"""'], {}), "('HH_BASE_URL')\n", (1141, 1156), False, 'import os\n'), ((1179, 1203), 'os.getenv', 'os.getenv', (['"""HH_AUTH_URL"""'], {}), "('HH_AUTH_URL')\n", (1188, 1203), False, 'import os\n'), ((1229, 1254), 'os.getenv', 'os.getenv', (['"""HH_TOKEN_URL"""'], {}), "('HH_TOKEN_URL')\n", (1238, 1254), False, 'import os\n'), ((1288, 1310), 'os.getenv', 'os.getenv', (['"""SJ_CLIENT"""'], {}), "('SJ_CLIENT')\n", (1297, 1310), False, 'import os\n'), ((1333, 1355), 'os.getenv', 'os.getenv', (['"""SJ_SECRET"""'], {}), "('SJ_SECRET')\n", (1342, 1355), False, 'import os\n'), ((1373, 1397), 'os.getenv', 'os.getenv', (['"""SJ_BASE_URL"""'], {}), "('SJ_BASE_URL')\n", (1382, 1397), False, 'import os\n'), ((1420, 1444), 'os.getenv', 'os.getenv', (['"""SJ_AUTH_URL"""'], {}), "('SJ_AUTH_URL')\n", (1429, 1444), False, 'import os\n'), ((1470, 1495), 'os.getenv', 'os.getenv', (['"""SJ_TOKEN_URL"""'], {}), "('SJ_TOKEN_URL')\n", (1479, 1495), False, 'import os\n'), ((1522, 1555), 'os.getenv', 'os.getenv', (['"""SJ_TOKEN_REFRESH_URL"""'], {}), "('SJ_TOKEN_REFRESH_URL')\n", (1531, 1555), False, 'import os\n'), ((100, 118), 'os.getenv', 'os.getenv', (['"""DEBUG"""'], {}), "('DEBUG')\n", (109, 118), False, 'import os\n')]
|
# coding: utf-8
"""
Laserfiche API
Welcome to the Laserfiche API Swagger Playground. You can try out any of our API calls against your live Laserfiche Cloud account. Visit the developer center for more details: <a href=\"https://developer.laserfiche.com\">https://developer.laserfiche.com</a><p><strong>Build# : </strong>650780</p> # noqa: E501
OpenAPI spec version: 1-alpha
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ContextHit(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'hit_type': 'HitType',
'is_annotation_hit': 'bool',
'annotation_id': 'int',
'page_number': 'int',
'page_offset': 'int',
'context': 'str',
'highlight1_offset': 'int',
'highlight1_length': 'int',
'highlight2_offset': 'int',
'highlight2_length': 'int',
'hit_width': 'int',
'edoc_hit_count': 'int',
'field_hit_count': 'int',
'field_name': 'str',
'hit_number': 'int'
}
attribute_map = {
'hit_type': 'hitType',
'is_annotation_hit': 'isAnnotationHit',
'annotation_id': 'annotationId',
'page_number': 'pageNumber',
'page_offset': 'pageOffset',
'context': 'context',
'highlight1_offset': 'highlight1Offset',
'highlight1_length': 'highlight1Length',
'highlight2_offset': 'highlight2Offset',
'highlight2_length': 'highlight2Length',
'hit_width': 'hitWidth',
'edoc_hit_count': 'edocHitCount',
'field_hit_count': 'fieldHitCount',
'field_name': 'fieldName',
'hit_number': 'hitNumber'
}
def __init__(self, hit_type=None, is_annotation_hit=None, annotation_id=None, page_number=None, page_offset=None, context=None, highlight1_offset=None, highlight1_length=None, highlight2_offset=None, highlight2_length=None, hit_width=None, edoc_hit_count=None, field_hit_count=None, field_name=None, hit_number=None): # noqa: E501
"""ContextHit - a model defined in Swagger""" # noqa: E501
self._hit_type = None
self._is_annotation_hit = None
self._annotation_id = None
self._page_number = None
self._page_offset = None
self._context = None
self._highlight1_offset = None
self._highlight1_length = None
self._highlight2_offset = None
self._highlight2_length = None
self._hit_width = None
self._edoc_hit_count = None
self._field_hit_count = None
self._field_name = None
self._hit_number = None
self.discriminator = None
if hit_type is not None:
self.hit_type = hit_type
if is_annotation_hit is not None:
self.is_annotation_hit = is_annotation_hit
if annotation_id is not None:
self.annotation_id = annotation_id
if page_number is not None:
self.page_number = page_number
if page_offset is not None:
self.page_offset = page_offset
if context is not None:
self.context = context
if highlight1_offset is not None:
self.highlight1_offset = highlight1_offset
if highlight1_length is not None:
self.highlight1_length = highlight1_length
if highlight2_offset is not None:
self.highlight2_offset = highlight2_offset
if highlight2_length is not None:
self.highlight2_length = highlight2_length
if hit_width is not None:
self.hit_width = hit_width
if edoc_hit_count is not None:
self.edoc_hit_count = edoc_hit_count
if field_hit_count is not None:
self.field_hit_count = field_hit_count
if field_name is not None:
self.field_name = field_name
if hit_number is not None:
self.hit_number = hit_number
@property
def hit_type(self):
"""Gets the hit_type of this ContextHit. # noqa: E501
:return: The hit_type of this ContextHit. # noqa: E501
:rtype: HitType
"""
return self._hit_type
@hit_type.setter
def hit_type(self, hit_type):
"""Sets the hit_type of this ContextHit.
:param hit_type: The hit_type of this ContextHit. # noqa: E501
:type: HitType
"""
self._hit_type = hit_type
@property
def is_annotation_hit(self):
"""Gets the is_annotation_hit of this ContextHit. # noqa: E501
:return: The is_annotation_hit of this ContextHit. # noqa: E501
:rtype: bool
"""
return self._is_annotation_hit
@is_annotation_hit.setter
def is_annotation_hit(self, is_annotation_hit):
"""Sets the is_annotation_hit of this ContextHit.
:param is_annotation_hit: The is_annotation_hit of this ContextHit. # noqa: E501
:type: bool
"""
self._is_annotation_hit = is_annotation_hit
@property
def annotation_id(self):
"""Gets the annotation_id of this ContextHit. # noqa: E501
:return: The annotation_id of this ContextHit. # noqa: E501
:rtype: int
"""
return self._annotation_id
@annotation_id.setter
def annotation_id(self, annotation_id):
"""Sets the annotation_id of this ContextHit.
:param annotation_id: The annotation_id of this ContextHit. # noqa: E501
:type: int
"""
self._annotation_id = annotation_id
@property
def page_number(self):
"""Gets the page_number of this ContextHit. # noqa: E501
:return: The page_number of this ContextHit. # noqa: E501
:rtype: int
"""
return self._page_number
@page_number.setter
def page_number(self, page_number):
"""Sets the page_number of this ContextHit.
:param page_number: The page_number of this ContextHit. # noqa: E501
:type: int
"""
self._page_number = page_number
@property
def page_offset(self):
"""Gets the page_offset of this ContextHit. # noqa: E501
:return: The page_offset of this ContextHit. # noqa: E501
:rtype: int
"""
return self._page_offset
@page_offset.setter
def page_offset(self, page_offset):
"""Sets the page_offset of this ContextHit.
:param page_offset: The page_offset of this ContextHit. # noqa: E501
:type: int
"""
self._page_offset = page_offset
@property
def context(self):
"""Gets the context of this ContextHit. # noqa: E501
:return: The context of this ContextHit. # noqa: E501
:rtype: str
"""
return self._context
@context.setter
def context(self, context):
"""Sets the context of this ContextHit.
:param context: The context of this ContextHit. # noqa: E501
:type: str
"""
self._context = context
@property
def highlight1_offset(self):
"""Gets the highlight1_offset of this ContextHit. # noqa: E501
:return: The highlight1_offset of this ContextHit. # noqa: E501
:rtype: int
"""
return self._highlight1_offset
@highlight1_offset.setter
def highlight1_offset(self, highlight1_offset):
"""Sets the highlight1_offset of this ContextHit.
:param highlight1_offset: The highlight1_offset of this ContextHit. # noqa: E501
:type: int
"""
self._highlight1_offset = highlight1_offset
@property
def highlight1_length(self):
"""Gets the highlight1_length of this ContextHit. # noqa: E501
:return: The highlight1_length of this ContextHit. # noqa: E501
:rtype: int
"""
return self._highlight1_length
@highlight1_length.setter
def highlight1_length(self, highlight1_length):
"""Sets the highlight1_length of this ContextHit.
:param highlight1_length: The highlight1_length of this ContextHit. # noqa: E501
:type: int
"""
self._highlight1_length = highlight1_length
@property
def highlight2_offset(self):
"""Gets the highlight2_offset of this ContextHit. # noqa: E501
:return: The highlight2_offset of this ContextHit. # noqa: E501
:rtype: int
"""
return self._highlight2_offset
@highlight2_offset.setter
def highlight2_offset(self, highlight2_offset):
"""Sets the highlight2_offset of this ContextHit.
:param highlight2_offset: The highlight2_offset of this ContextHit. # noqa: E501
:type: int
"""
self._highlight2_offset = highlight2_offset
@property
def highlight2_length(self):
"""Gets the highlight2_length of this ContextHit. # noqa: E501
:return: The highlight2_length of this ContextHit. # noqa: E501
:rtype: int
"""
return self._highlight2_length
@highlight2_length.setter
def highlight2_length(self, highlight2_length):
"""Sets the highlight2_length of this ContextHit.
:param highlight2_length: The highlight2_length of this ContextHit. # noqa: E501
:type: int
"""
self._highlight2_length = highlight2_length
@property
def hit_width(self):
"""Gets the hit_width of this ContextHit. # noqa: E501
:return: The hit_width of this ContextHit. # noqa: E501
:rtype: int
"""
return self._hit_width
@hit_width.setter
def hit_width(self, hit_width):
"""Sets the hit_width of this ContextHit.
:param hit_width: The hit_width of this ContextHit. # noqa: E501
:type: int
"""
self._hit_width = hit_width
@property
def edoc_hit_count(self):
"""Gets the edoc_hit_count of this ContextHit. # noqa: E501
:return: The edoc_hit_count of this ContextHit. # noqa: E501
:rtype: int
"""
return self._edoc_hit_count
@edoc_hit_count.setter
def edoc_hit_count(self, edoc_hit_count):
"""Sets the edoc_hit_count of this ContextHit.
:param edoc_hit_count: The edoc_hit_count of this ContextHit. # noqa: E501
:type: int
"""
self._edoc_hit_count = edoc_hit_count
@property
def field_hit_count(self):
"""Gets the field_hit_count of this ContextHit. # noqa: E501
:return: The field_hit_count of this ContextHit. # noqa: E501
:rtype: int
"""
return self._field_hit_count
@field_hit_count.setter
def field_hit_count(self, field_hit_count):
"""Sets the field_hit_count of this ContextHit.
:param field_hit_count: The field_hit_count of this ContextHit. # noqa: E501
:type: int
"""
self._field_hit_count = field_hit_count
@property
def field_name(self):
"""Gets the field_name of this ContextHit. # noqa: E501
:return: The field_name of this ContextHit. # noqa: E501
:rtype: str
"""
return self._field_name
@field_name.setter
def field_name(self, field_name):
"""Sets the field_name of this ContextHit.
:param field_name: The field_name of this ContextHit. # noqa: E501
:type: str
"""
self._field_name = field_name
@property
def hit_number(self):
"""Gets the hit_number of this ContextHit. # noqa: E501
:return: The hit_number of this ContextHit. # noqa: E501
:rtype: int
"""
return self._hit_number
@hit_number.setter
def hit_number(self, hit_number):
"""Sets the hit_number of this ContextHit.
:param hit_number: The hit_number of this ContextHit. # noqa: E501
:type: int
"""
self._hit_number = hit_number
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ContextHit, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ContextHit):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
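# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity, not part of the generated
# client): the model is a plain data container, so it can be constructed and
# serialised locally. The field values below are hypothetical.
if __name__ == "__main__":
    hit = ContextHit(page_number=1, context="example text", hit_number=3)
    print(hit.to_dict())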
|
[
"six.iteritems"
] |
[((12453, 12486), 'six.iteritems', 'six.iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (12466, 12486), False, 'import six\n')]
|
# -*- coding: utf-8 -*-
import unittest
from eprun import EPEnd
class Test_EPEnd(unittest.TestCase):
def test_EPEnd(self):
""
e=EPEnd(fp=r'files\eplusout.end')
self.assertEqual(e.line.encode(),
b'EnergyPlus Completed Successfully-- 3 Warning; 0 Severe Errors; Elapsed Time=00hr 00min 2.28sec\n')
if __name__ == '__main__':
    unittest.main()
|
[
"eprun.EPEnd"
] |
[((155, 186), 'eprun.EPEnd', 'EPEnd', ([], {'fp': '"""files\\\\eplusout.end"""'}), "(fp='files\\\\eplusout.end')\n", (160, 186), False, 'from eprun import EPEnd\n')]
|
#!/usr/bin/python2
# -*- coding: utf-8 -*-
from datetime import datetime
from flask import (Blueprint,
session as login_session,
request,
redirect,
url_for,
flash,
render_template)
from .. import database as db
from ..auth.views import login_required
catalog_blueprint = Blueprint('catalog', __name__,
template_folder='templates')
@catalog_blueprint.route('/')
@catalog_blueprint.route('/catalog/')
def catalogs():
catalogs = db.get_catalogs()
items = db.get_items()
if 'username' in login_session:
return render_template('catalogs.html',
catalogs=catalogs,
items=items)
else:
return render_template('public_catalogs.html',
catalogs=catalogs,
items=items)
@catalog_blueprint.route('/catalog/<catalog_name>/')
def catalog(catalog_name):
catalogs = db.get_catalogs()
catalog = db.get_catalog(catalog_name)
catalog_items = db.get_items(catalog.id)
item_section_title = '{catalog_name} Items ({num_items} item{suffix})'\
.format(catalog_name=catalog.name,
num_items=len(catalog_items),
suffix='' if len(catalog_items) == 1 else 's')
if 'user_id' in login_session:
return render_template('catalog.html',
catalog=catalog,
catalogs=catalogs,
catalog_items=catalog_items,
item_section_title=item_section_title)
else:
return render_template('public_catalog.html',
catalog=catalog,
catalogs=catalogs,
catalog_items=catalog_items,
item_section_title=item_section_title)
@catalog_blueprint.route('/catalog/<catalog_name>/<item_name>')
def catalog_item(catalog_name, item_name):
catalog = db.get_catalog(catalog_name)
catalog_item = db.get_item(catalog.id, item_name)
if 'username' not in login_session:
return render_template('public_catalog_item.html',
catalog_item=catalog_item)
else:
creator = db.get_user_info(catalog_item.user_id)
if creator is not None and login_session['user_id'] == creator.id:
return render_template('catalog_item.html',
catalog_name=catalog_name,
catalog_item=catalog_item)
else:
return render_template('public_catalog_item.html',
catalog_item=catalog_item)
@catalog_blueprint.route('/catalog///add',
methods=['GET', 'POST'])
@login_required
def add_catalog_item():
catalogs = db.get_catalogs()
if request.method == 'POST':
category_name = request.form['category']
catalog = db.get_catalog(category_name)
user_id = login_session['user_id']
db.add_catalog_item(creation_date=datetime.now(),
catalog_id=catalog.id,
name=request.form['name'],
description=request.form['description'],
user_id=user_id)
flash("Item added", "success")
return redirect(url_for('catalog.catalogs'))
else:
return render_template('add_catalog_item.html', catalogs=catalogs)
@catalog_blueprint.route('/catalog/<catalog_name>/<item_name>/edit',
methods=['GET', 'POST'])
@login_required
def edit_catalog_item(catalog_name, item_name):
catalog = db.get_catalog(catalog_name)
catalog_item = db.get_item(catalog.id, item_name)
creator = db.get_user_info(catalog_item.user_id)
if creator is None or login_session['user_id'] != creator.id:
flash("You are not allowed to access there", "error")
return redirect('/')
catalogs = db.get_catalogs()
if request.method == 'POST':
catalog_item.name = request.form['name']
catalog_item.description = request.form['description']
catalog = db.get_catalog(request.form['category'])
catalog_item.catalog_id = catalog.id
db.edit_catalog_item(catalog_item)
flash("Item successfully edited", "success")
return redirect(url_for('catalog.catalog', catalog_name=catalog_name))
else:
return render_template('edit_catalog_item.html',
catalogs=catalogs,
catalog=catalog,
catalog_item=catalog_item)
@catalog_blueprint.route('/catalog/<catalog_name>/<item_name>/delete',
methods=['GET', 'POST'])
@login_required
def delete_catalog_item(catalog_name, item_name):
catalog = db.get_catalog(catalog_name)
catalog_item = db.get_item(catalog.id, item_name)
creator = db.get_user_info(catalog_item.user_id)
if creator is None or login_session['user_id'] != creator.id:
flash("You are not allowed to access there", "error")
return redirect('/')
if request.method == 'POST':
db.delete_catalog_item(catalog_item)
flash("Item successfully deleted", "success")
return redirect(url_for('catalog.catalog', catalog_name=catalog_name))
else:
return render_template('delete_catalog_item.html',
catalog=catalog,
catalog_item=catalog_item)
|
[
"flask.flash",
"flask.Blueprint",
"flask.redirect",
"flask.url_for",
"flask.render_template",
"datetime.datetime.now"
] |
[((388, 447), 'flask.Blueprint', 'Blueprint', (['"""catalog"""', '__name__'], {'template_folder': '"""templates"""'}), "('catalog', __name__, template_folder='templates')\n", (397, 447), False, 'from flask import Blueprint, session as login_session, request, redirect, url_for, flash, render_template\n'), ((675, 739), 'flask.render_template', 'render_template', (['"""catalogs.html"""'], {'catalogs': 'catalogs', 'items': 'items'}), "('catalogs.html', catalogs=catalogs, items=items)\n", (690, 739), False, 'from flask import Blueprint, session as login_session, request, redirect, url_for, flash, render_template\n'), ((827, 898), 'flask.render_template', 'render_template', (['"""public_catalogs.html"""'], {'catalogs': 'catalogs', 'items': 'items'}), "('public_catalogs.html', catalogs=catalogs, items=items)\n", (842, 898), False, 'from flask import Blueprint, session as login_session, request, redirect, url_for, flash, render_template\n'), ((1442, 1581), 'flask.render_template', 'render_template', (['"""catalog.html"""'], {'catalog': 'catalog', 'catalogs': 'catalogs', 'catalog_items': 'catalog_items', 'item_section_title': 'item_section_title'}), "('catalog.html', catalog=catalog, catalogs=catalogs,\n catalog_items=catalog_items, item_section_title=item_section_title)\n", (1457, 1581), False, 'from flask import Blueprint, session as login_session, request, redirect, url_for, flash, render_template\n'), ((1727, 1873), 'flask.render_template', 'render_template', (['"""public_catalog.html"""'], {'catalog': 'catalog', 'catalogs': 'catalogs', 'catalog_items': 'catalog_items', 'item_section_title': 'item_section_title'}), "('public_catalog.html', catalog=catalog, catalogs=catalogs,\n catalog_items=catalog_items, item_section_title=item_section_title)\n", (1742, 1873), False, 'from flask import Blueprint, session as login_session, request, redirect, url_for, flash, render_template\n'), ((2255, 2325), 'flask.render_template', 'render_template', (['"""public_catalog_item.html"""'], {'catalog_item': 'catalog_item'}), "('public_catalog_item.html', catalog_item=catalog_item)\n", (2270, 2325), False, 'from flask import Blueprint, session as login_session, request, redirect, url_for, flash, render_template\n'), ((3445, 3475), 'flask.flash', 'flash', (['"""Item added"""', '"""success"""'], {}), "('Item added', 'success')\n", (3450, 3475), False, 'from flask import Blueprint, session as login_session, request, redirect, url_for, flash, render_template\n'), ((3554, 3613), 'flask.render_template', 'render_template', (['"""add_catalog_item.html"""'], {'catalogs': 'catalogs'}), "('add_catalog_item.html', catalogs=catalogs)\n", (3569, 3613), False, 'from flask import Blueprint, session as login_session, request, redirect, url_for, flash, render_template\n'), ((4023, 4076), 'flask.flash', 'flash', (['"""You are not allowed to access there"""', '"""error"""'], {}), "('You are not allowed to access there', 'error')\n", (4028, 4076), False, 'from flask import Blueprint, session as login_session, request, redirect, url_for, flash, render_template\n'), ((4092, 4105), 'flask.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (4100, 4105), False, 'from flask import Blueprint, session as login_session, request, redirect, url_for, flash, render_template\n'), ((4440, 4484), 'flask.flash', 'flash', (['"""Item successfully edited"""', '"""success"""'], {}), "('Item successfully edited', 'success')\n", (4445, 4484), False, 'from flask import Blueprint, session as login_session, request, redirect, url_for, flash, 
render_template\n'), ((4589, 4698), 'flask.render_template', 'render_template', (['"""edit_catalog_item.html"""'], {'catalogs': 'catalogs', 'catalog': 'catalog', 'catalog_item': 'catalog_item'}), "('edit_catalog_item.html', catalogs=catalogs, catalog=\n catalog, catalog_item=catalog_item)\n", (4604, 4698), False, 'from flask import Blueprint, session as login_session, request, redirect, url_for, flash, render_template\n'), ((5200, 5253), 'flask.flash', 'flash', (['"""You are not allowed to access there"""', '"""error"""'], {}), "('You are not allowed to access there', 'error')\n", (5205, 5253), False, 'from flask import Blueprint, session as login_session, request, redirect, url_for, flash, render_template\n'), ((5269, 5282), 'flask.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (5277, 5282), False, 'from flask import Blueprint, session as login_session, request, redirect, url_for, flash, render_template\n'), ((5370, 5415), 'flask.flash', 'flash', (['"""Item successfully deleted"""', '"""success"""'], {}), "('Item successfully deleted', 'success')\n", (5375, 5415), False, 'from flask import Blueprint, session as login_session, request, redirect, url_for, flash, render_template\n'), ((5520, 5612), 'flask.render_template', 'render_template', (['"""delete_catalog_item.html"""'], {'catalog': 'catalog', 'catalog_item': 'catalog_item'}), "('delete_catalog_item.html', catalog=catalog, catalog_item=\n catalog_item)\n", (5535, 5612), False, 'from flask import Blueprint, session as login_session, request, redirect, url_for, flash, render_template\n'), ((2518, 2612), 'flask.render_template', 'render_template', (['"""catalog_item.html"""'], {'catalog_name': 'catalog_name', 'catalog_item': 'catalog_item'}), "('catalog_item.html', catalog_name=catalog_name,\n catalog_item=catalog_item)\n", (2533, 2612), False, 'from flask import Blueprint, session as login_session, request, redirect, url_for, flash, render_template\n'), ((2712, 2782), 'flask.render_template', 'render_template', (['"""public_catalog_item.html"""'], {'catalog_item': 'catalog_item'}), "('public_catalog_item.html', catalog_item=catalog_item)\n", (2727, 2782), False, 'from flask import Blueprint, session as login_session, request, redirect, url_for, flash, render_template\n'), ((3500, 3527), 'flask.url_for', 'url_for', (['"""catalog.catalogs"""'], {}), "('catalog.catalogs')\n", (3507, 3527), False, 'from flask import Blueprint, session as login_session, request, redirect, url_for, flash, render_template\n'), ((4509, 4562), 'flask.url_for', 'url_for', (['"""catalog.catalog"""'], {'catalog_name': 'catalog_name'}), "('catalog.catalog', catalog_name=catalog_name)\n", (4516, 4562), False, 'from flask import Blueprint, session as login_session, request, redirect, url_for, flash, render_template\n'), ((5440, 5493), 'flask.url_for', 'url_for', (['"""catalog.catalog"""'], {'catalog_name': 'catalog_name'}), "('catalog.catalog', catalog_name=catalog_name)\n", (5447, 5493), False, 'from flask import Blueprint, session as login_session, request, redirect, url_for, flash, render_template\n'), ((3201, 3215), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3213, 3215), False, 'from datetime import datetime\n')]
|
# Generated by Django 3.1.3 on 2020-11-27 13:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0007_auto_20201127_0452'),
]
operations = [
migrations.AlterField(
model_name='donation',
name='amount',
field=models.DecimalField(decimal_places=4, max_digits=50),
),
]
|
[
"django.db.models.DecimalField"
] |
[((334, 386), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(4)', 'max_digits': '(50)'}), '(decimal_places=4, max_digits=50)\n', (353, 386), False, 'from django.db import migrations, models\n')]
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, <NAME>.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Test module for file ui/commands.py
"""
import os
import shutil
from test import _common
from test._common import unittest
from beets import library
from beets import ui
from beets.ui import commands
class QueryTest(_common.TestCase):
def setUp(self):
super(QueryTest, self).setUp()
self.libdir = os.path.join(self.temp_dir, 'testlibdir')
os.mkdir(self.libdir)
# Add a file to the library but don't copy it in yet.
self.lib = library.Library(':memory:', self.libdir)
# Alternate destination directory.
self.otherdir = os.path.join(self.temp_dir, 'testotherdir')
def add_item(self, filename='srcfile', templatefile='full.mp3'):
itempath = os.path.join(self.libdir, filename)
shutil.copy(os.path.join(_common.RSRC, templatefile), itempath)
item = library.Item.from_path(itempath)
self.lib.add(item)
return item, itempath
def add_album(self, items):
album = self.lib.add_album(items)
return album
def check_do_query(self, num_items, num_albums,
q=(), album=False, also_items=True):
items, albums = commands._do_query(
self.lib, q, album, also_items)
self.assertEqual(len(items), num_items)
self.assertEqual(len(albums), num_albums)
def test_query_empty(self):
with self.assertRaises(ui.UserError):
commands._do_query(self.lib, (), False)
def test_query_empty_album(self):
with self.assertRaises(ui.UserError):
commands._do_query(self.lib, (), True)
def test_query_item(self):
self.add_item()
self.check_do_query(1, 0, album=False)
self.add_item()
self.check_do_query(2, 0, album=False)
def test_query_album(self):
item, itempath = self.add_item()
self.add_album([item])
self.check_do_query(1, 1, album=True)
self.check_do_query(0, 1, album=True, also_items=False)
item, itempath = self.add_item()
item2, itempath = self.add_item()
self.add_album([item, item2])
self.check_do_query(3, 2, album=True)
self.check_do_query(0, 2, album=True, also_items=False)
class FieldsTest(_common.LibTestCase):
def setUp(self):
super(FieldsTest, self).setUp()
self.io.install()
def tearDown(self):
self.io.restore()
def remove_keys(self, l, text):
for i in text:
try:
l.remove(i)
except ValueError:
pass
def test_fields_func(self):
commands.fields_func(self.lib, [], [])
items = library.Item.all_keys()
albums = library.Album.all_keys()
output = self.io.stdout.get().split()
self.remove_keys(items, output)
self.remove_keys(albums, output)
self.assertEqual(len(items), 0)
self.assertEqual(len(albums), 0)
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
[
"beets.library.Item.all_keys",
"os.mkdir",
"test._common.unittest.TestLoader",
"beets.library.Item.from_path",
"beets.library.Album.all_keys",
"beets.ui.commands.fields_func",
"os.path.join",
"test._common.unittest.main",
"beets.ui.commands._do_query",
"beets.library.Library"
] |
[((3698, 3732), 'test._common.unittest.main', 'unittest.main', ([], {'defaultTest': '"""suite"""'}), "(defaultTest='suite')\n", (3711, 3732), False, 'from test._common import unittest\n'), ((988, 1029), 'os.path.join', 'os.path.join', (['self.temp_dir', '"""testlibdir"""'], {}), "(self.temp_dir, 'testlibdir')\n", (1000, 1029), False, 'import os\n'), ((1038, 1059), 'os.mkdir', 'os.mkdir', (['self.libdir'], {}), '(self.libdir)\n', (1046, 1059), False, 'import os\n'), ((1142, 1182), 'beets.library.Library', 'library.Library', (['""":memory:"""', 'self.libdir'], {}), "(':memory:', self.libdir)\n", (1157, 1182), False, 'from beets import library\n'), ((1251, 1294), 'os.path.join', 'os.path.join', (['self.temp_dir', '"""testotherdir"""'], {}), "(self.temp_dir, 'testotherdir')\n", (1263, 1294), False, 'import os\n'), ((1384, 1419), 'os.path.join', 'os.path.join', (['self.libdir', 'filename'], {}), '(self.libdir, filename)\n', (1396, 1419), False, 'import os\n'), ((1507, 1539), 'beets.library.Item.from_path', 'library.Item.from_path', (['itempath'], {}), '(itempath)\n', (1529, 1539), False, 'from beets import library\n'), ((1830, 1880), 'beets.ui.commands._do_query', 'commands._do_query', (['self.lib', 'q', 'album', 'also_items'], {}), '(self.lib, q, album, also_items)\n', (1848, 1880), False, 'from beets.ui import commands\n'), ((3258, 3296), 'beets.ui.commands.fields_func', 'commands.fields_func', (['self.lib', '[]', '[]'], {}), '(self.lib, [], [])\n', (3278, 3296), False, 'from beets.ui import commands\n'), ((3313, 3336), 'beets.library.Item.all_keys', 'library.Item.all_keys', ([], {}), '()\n', (3334, 3336), False, 'from beets import library\n'), ((3354, 3378), 'beets.library.Album.all_keys', 'library.Album.all_keys', ([], {}), '()\n', (3376, 3378), False, 'from beets import library\n'), ((1440, 1480), 'os.path.join', 'os.path.join', (['_common.RSRC', 'templatefile'], {}), '(_common.RSRC, templatefile)\n', (1452, 1480), False, 'import os\n'), ((2083, 2122), 'beets.ui.commands._do_query', 'commands._do_query', (['self.lib', '()', '(False)'], {}), '(self.lib, (), False)\n', (2101, 2122), False, 'from beets.ui import commands\n'), ((2220, 2258), 'beets.ui.commands._do_query', 'commands._do_query', (['self.lib', '()', '(True)'], {}), '(self.lib, (), True)\n', (2238, 2258), False, 'from beets.ui import commands\n'), ((3615, 3636), 'test._common.unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (3634, 3636), False, 'from test._common import unittest\n')]
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-locals,too-many-statements,too-many-branches,protected-access
"""API for graph traversing."""
import threading
import re
import tvm
from tvm import relay, autotvm
from tvm.relay import transform
from tvm.relay.expr import Call, TupleGetItem, Var, Constant, Tuple
from tvm.relay.function import Function
from tvm.relay.ty import TupleType, TensorType
from tvm.autotvm.task import TaskExtractEnv
from .utils import has_multiple_inputs, is_boundary_node, is_skipped_node
from .._base import OPT_OUT_OP
def expr2graph(expr, target_ops, node_dict, node_list, tvm_target):
"""Convert relay expr to graph data structure
and fetch workloads of target operators.
Parameters
----------
expr : tvm.relay.Expr.Function
Input relay function expression.
target_ops: List of tvm.ir.Op
List of target relay ops
node_dict : dictionary from tvm.relay.Expr to int
Dictionary to record node index
node_list : list of dictionary
List of nodes which contains all expr in the input relay function.
Each node will be stored as a dictionary in the format of
{"op": str, "node": tvm.relay.expr, "inputs": [int], "types": [tvm.relay.Type],
"name": str, "workloads": [tuple], "topi_op": [function]}
tvm_target : tvm.target
The TVM target object.
"""
# TODO(@kevinthesun, @icemelon9): Currently graph tuning pass relies on the fact
# that # autotvm tasks == # ops. But this won't be true after having relay op
# strategy. We need to find a solution to fix this.
env = TaskExtractEnv.get(allow_duplicate=True)
env.reset(target_ops)
# pylint: disable=not-context-manager
with env:
_expr2graph_impl(expr, target_ops, node_dict, node_list, tvm_target)
task_pos = 0
for node_entry in node_list:
if node_entry["op"] in target_ops:
task_name, args = env.task_collection[task_pos]
task = autotvm.task.create(task_name, args, target=tvm_target)
node_entry["workloads"] = [task.workload]
node_entry["topi_op"] = [task_name]
task_pos += 1
def _infer_type(node):
"""A method to infer the type of a relay expression."""
mod = tvm.IRModule.from_expr(node)
mod = transform.InferType()(mod)
entry = mod["main"]
return entry if isinstance(node, relay.Function) else entry.body
def _replace_device_with_tracing(target):
"""This is to replace -device=XXX with -device=tracing in the tvm_target string.
    It is a stand-alone function for testability.
    We need to have device=tracing in order to fetch the workloads; it is not used
    for anything beyond that, so it is safe to override the device here only."""
target = str(target)
if "-device" in target:
return re.sub("-device=[^\\-$]+", "-device=tracing ", target).strip(" ")
return target + " -device=tracing"
def _expr2graph_impl(expr, target_ops, node_dict, node_list, tvm_target):
"""Implementation to convert relay expr to graph data structure"""
def _traverse_expr(node):
if node in node_dict:
return
node_index = len(node_list)
node_entry = {"node": node, "inputs": [], "types": [], "op": None, "name": None}
if isinstance(node, Call):
op = node.op
node_entry["op"] = node.op
for arg in node.args:
in_node_idx = node_dict[arg]
if isinstance(arg, (Tuple, TupleGetItem)):
node_entry["inputs"] += node_list[in_node_idx]["inputs"]
else:
node_entry["inputs"].append([in_node_idx, 0, 0])
infer_out = _infer_type(node)
out_type = infer_out._checked_type_
if isinstance(out_type, TensorType):
node_entry["types"].append(out_type)
elif isinstance(out_type, TupleType):
for tupe_type in out_type.fields:
node_entry["types"].append(tupe_type)
else:
raise RuntimeError(
"Unsupported output type %s in operator %s" % (type(out_type), op.name)
)
# Utilize tracing target to fetch workload with topo-order.
# Since we only need workload, dummy target can be used to
# create task.
if op in target_ops:
params = []
for i, input_idx in enumerate(node_entry["inputs"]):
input_node_entry = node_list[input_idx[0]]
input_type = input_node_entry["types"][input_idx[1]]
if not isinstance(input_node_entry["node"], (Var, Constant, Call)):
raise RuntimeError(
"Graph tuner can only tune target "
"operators with input node of type "
"relay.expr.Var/Constant/Call. Now "
"find a target op %s with input type %s"
% (op, str(type(input_node_entry["node"])))
)
free_var = relay.Var("var_%d" % i, input_type)
params.append(free_var)
call = relay.Call(node.op, params, node.attrs)
mod = tvm.IRModule.from_expr(relay.Function(params, call))
relay.backend.te_compiler.get().clear()
tracing_target = _replace_device_with_tracing(tvm_target)
build_thread = threading.Thread(target=relay.build, args=(mod, tracing_target))
build_thread.start()
build_thread.join()
elif isinstance(node, Var):
node_entry["name"] = node.name_hint
node_entry["types"] = [node.type_annotation]
elif isinstance(node, Function):
# Ignore root node since it equals to input function expression
if node != expr:
_expr2graph_impl(node, target_ops, node_dict, node_list, tvm_target)
return
elif isinstance(node, TupleGetItem):
in_node_idx = node_dict[node.tuple_value]
node_entry["inputs"].append([in_node_idx, node.index, 0])
elif isinstance(node, Tuple):
for tuple_item in node:
in_node_idx = node_dict[tuple_item]
if isinstance(tuple_item, TupleGetItem):
node_entry["inputs"] += node_list[in_node_idx]["inputs"]
elif isinstance(tuple_item, Tuple):
raise RuntimeError("Graph tuner doesn't support nested tuple.")
else:
node_entry["inputs"].append([in_node_idx, 0, 0])
elif isinstance(node, Constant):
node_entry["name"] = "Constant_" + str(node_index)
node_entry["types"] = [node.checked_type]
elif isinstance(node, tvm.ir.Op):
return
else:
raise RuntimeError(
"Not supported relay node type in graph tuning: %s" % str(type(node))
)
node_dict[node] = node_index
node_list.append(node_entry)
relay.analysis.post_order_visit(expr, _traverse_expr)
def get_direct_ancestor(node_list, visited_dict, target_ops, node_idx, input_names):
"""Given a node_list in relay function and a node index, return the
closest ancestor which has op_name as operator name or is multi_input operator.
If node has multiple inputs, multiple ancestor nodes will be returned.
Parameters
----------
node_list : list of dict of str to object
List of all nodes in a graph.
visited_dict : dict of int to int
Nodes and corresponding ancestors which have been visited.
target_ops: List of str
List of target relay base op name
node_idx : int
Input node index.
input_names : list of str
Names of graph input nodes.
Returns
-------
out : list of int
List of ancestor node index.
"""
if node_idx in visited_dict:
return visited_dict[node_idx]
node = node_list[node_idx]
if is_boundary_node(node, input_names):
return [node_idx]
node_direct_ancestor = []
for item_idx in node["inputs"]:
item = node_list[item_idx[0]]
is_multiple_inputs = has_multiple_inputs(node_list, item_idx[0], input_names, OPT_OUT_OP)
if item["op"] in target_ops or is_multiple_inputs:
node_direct_ancestor.append(item_idx[0])
else:
tmp = get_direct_ancestor(node_list, visited_dict, target_ops, item_idx[0], input_names)
for tmp_item in tmp:
if tmp_item not in node_direct_ancestor:
node_direct_ancestor.append(tmp_item)
visited_dict[node_idx] = node_direct_ancestor
return node_direct_ancestor
def get_in_nodes(node_list, target_ops, input_names):
"""Create a dictionary mapping from op_name nodes or multi-input
nodes to closest input ancestors.
Parameters
----------
node_list : list of dict of str to object
List of all nodes in a graph.
target_ops: List of str
List of target relay op
input_names : list of str
Names of graph input nodes.
Returns
-------
out : dict of int to list of int
Dictionary maps node index to closest input ancestors.
"""
visited_dict = {}
in_node_dict = {}
for i, node in enumerate(node_list):
if is_boundary_node(node, input_names) or is_skipped_node(node):
continue
get_direct_ancestor(node_list, visited_dict, target_ops, i, input_names)
for key, val in visited_dict.items():
node = node_list[key]
is_multiple_inputs = has_multiple_inputs(node_list, key, input_names, OPT_OUT_OP)
if node["op"] in target_ops or is_multiple_inputs:
in_node_dict[key] = val
# Reduce boundary nodes
out_node_dict = get_out_nodes(in_node_dict)
has_reduced_node = True
while has_reduced_node:
boundary_nodes = []
for key, val in in_node_dict.items():
node = node_list[key]
is_boundary = True
# Target ops can't be boundary nodes
if node["op"] not in target_ops:
for input_idx in val:
in_node = node_list[input_idx]
if not is_boundary_node(in_node, input_names) and input_idx in in_node_dict:
is_boundary = False
else:
val.remove(input_idx)
if is_boundary:
boundary_nodes.append(key)
if boundary_nodes:
for idx in boundary_nodes:
if idx in in_node_dict:
del in_node_dict[idx]
else:
has_reduced_node = False
# Remove empty nodes to ignore pre-computed sub-graph
has_empty_node = True
while has_empty_node:
empty_nodes = []
for key, val in in_node_dict.items():
if not val:
empty_nodes.append(key)
if empty_nodes:
has_empty_node = True
for node in empty_nodes:
del in_node_dict[node]
if node in out_node_dict:
for out_node in out_node_dict[node]:
in_node_dict[out_node].remove(node)
else:
has_empty_node = False
return in_node_dict
def get_out_nodes(in_node_dict):
"""Create output dictionary from input dictionary.
Parameters
----------
in_node_dict : dict of int to list of int
Dictionary maps node index to closest input ancestors.
It can be created with get_in_nodes.
Returns
-------
out : dict of int to list of int
Dictionary maps node index to closest output nodes.
"""
out_node_dict = {}
for key in in_node_dict:
out_node_dict[key] = []
for key, val in in_node_dict.items():
for item in val:
if item in out_node_dict:
out_node_dict[item].append(key)
else:
out_node_dict[item] = [key]
return out_node_dict
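# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity, not part of the original module):
# get_out_nodes simply inverts an input-ancestor dictionary, so it can be
# exercised with a hand-made mapping and no TVM graph. The node indices below
# are hypothetical.
if __name__ == "__main__":
    example_in_node_dict = {2: [0, 1], 3: [2]}
    # Expected result: {2: [3], 3: [], 0: [2], 1: [2]}
    print(get_out_nodes(example_in_node_dict))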
|
[
"threading.Thread",
"tvm.relay.backend.te_compiler.get",
"tvm.relay.transform.InferType",
"tvm.relay.Function",
"tvm.relay.Call",
"tvm.relay.Var",
"tvm.relay.analysis.post_order_visit",
"tvm.autotvm.task.TaskExtractEnv.get",
"tvm.autotvm.task.create",
"tvm.IRModule.from_expr",
"re.sub"
] |
[((2385, 2425), 'tvm.autotvm.task.TaskExtractEnv.get', 'TaskExtractEnv.get', ([], {'allow_duplicate': '(True)'}), '(allow_duplicate=True)\n', (2403, 2425), False, 'from tvm.autotvm.task import TaskExtractEnv\n'), ((3068, 3096), 'tvm.IRModule.from_expr', 'tvm.IRModule.from_expr', (['node'], {}), '(node)\n', (3090, 3096), False, 'import tvm\n'), ((7983, 8036), 'tvm.relay.analysis.post_order_visit', 'relay.analysis.post_order_visit', (['expr', '_traverse_expr'], {}), '(expr, _traverse_expr)\n', (8014, 8036), False, 'from tvm import relay, autotvm\n'), ((3107, 3128), 'tvm.relay.transform.InferType', 'transform.InferType', ([], {}), '()\n', (3126, 3128), False, 'from tvm.relay import transform\n'), ((2777, 2832), 'tvm.autotvm.task.create', 'autotvm.task.create', (['task_name', 'args'], {'target': 'tvm_target'}), '(task_name, args, target=tvm_target)\n', (2796, 2832), False, 'from tvm import relay, autotvm\n'), ((3637, 3691), 're.sub', 're.sub', (['"""-device=[^\\\\-$]+"""', '"""-device=tracing """', 'target'], {}), "('-device=[^\\\\-$]+', '-device=tracing ', target)\n", (3643, 3691), False, 'import re\n'), ((6078, 6117), 'tvm.relay.Call', 'relay.Call', (['node.op', 'params', 'node.attrs'], {}), '(node.op, params, node.attrs)\n', (6088, 6117), False, 'from tvm import relay, autotvm\n'), ((6354, 6418), 'threading.Thread', 'threading.Thread', ([], {'target': 'relay.build', 'args': '(mod, tracing_target)'}), '(target=relay.build, args=(mod, tracing_target))\n', (6370, 6418), False, 'import threading\n'), ((5975, 6010), 'tvm.relay.Var', 'relay.Var', (["('var_%d' % i)", 'input_type'], {}), "('var_%d' % i, input_type)\n", (5984, 6010), False, 'from tvm import relay, autotvm\n'), ((6163, 6191), 'tvm.relay.Function', 'relay.Function', (['params', 'call'], {}), '(params, call)\n', (6177, 6191), False, 'from tvm import relay, autotvm\n'), ((6209, 6240), 'tvm.relay.backend.te_compiler.get', 'relay.backend.te_compiler.get', ([], {}), '()\n', (6238, 6240), False, 'from tvm import relay, autotvm\n')]
|
"""
This module describes the unlogged state of the default game.
The setting STATE_UNLOGGED should be set to the python path
of the state instance in this module.
"""
from evennia.commands.cmdset import CmdSet
from evennia.commands.default import unloggedin
class UnloggedinCmdSet(CmdSet):
"""
Sets up the unlogged cmdset.
"""
key = "DefaultUnloggedin"
priority = 0
def at_cmdset_creation(self):
"Populate the cmdset"
self.add(unloggedin.CmdUnconnectedConnect())
self.add(unloggedin.CmdUnconnectedCreate())
self.add(unloggedin.CmdUnconnectedQuit())
self.add(unloggedin.CmdUnconnectedLook())
self.add(unloggedin.CmdUnconnectedHelp())
self.add(unloggedin.CmdUnconnectedDefaultUser())
self.add(unloggedin.CmdUnconnectedGuest())
self.add(unloggedin.CmdUnconnectedEncoding())
self.add(unloggedin.CmdUnconnectedScreenreader())
self.add(unloggedin.CmdUnconnectedInfo())
|
[
"evennia.commands.default.unloggedin.CmdUnconnectedQuit",
"evennia.commands.default.unloggedin.CmdUnconnectedConnect",
"evennia.commands.default.unloggedin.CmdUnconnectedDefaultUser",
"evennia.commands.default.unloggedin.CmdUnconnectedGuest",
"evennia.commands.default.unloggedin.CmdUnconnectedLook",
"evennia.commands.default.unloggedin.CmdUnconnectedEncoding",
"evennia.commands.default.unloggedin.CmdUnconnectedCreate",
"evennia.commands.default.unloggedin.CmdUnconnectedScreenreader",
"evennia.commands.default.unloggedin.CmdUnconnectedInfo",
"evennia.commands.default.unloggedin.CmdUnconnectedHelp"
] |
[((471, 505), 'evennia.commands.default.unloggedin.CmdUnconnectedConnect', 'unloggedin.CmdUnconnectedConnect', ([], {}), '()\n', (503, 505), False, 'from evennia.commands.default import unloggedin\n'), ((524, 557), 'evennia.commands.default.unloggedin.CmdUnconnectedCreate', 'unloggedin.CmdUnconnectedCreate', ([], {}), '()\n', (555, 557), False, 'from evennia.commands.default import unloggedin\n'), ((576, 607), 'evennia.commands.default.unloggedin.CmdUnconnectedQuit', 'unloggedin.CmdUnconnectedQuit', ([], {}), '()\n', (605, 607), False, 'from evennia.commands.default import unloggedin\n'), ((626, 657), 'evennia.commands.default.unloggedin.CmdUnconnectedLook', 'unloggedin.CmdUnconnectedLook', ([], {}), '()\n', (655, 657), False, 'from evennia.commands.default import unloggedin\n'), ((676, 707), 'evennia.commands.default.unloggedin.CmdUnconnectedHelp', 'unloggedin.CmdUnconnectedHelp', ([], {}), '()\n', (705, 707), False, 'from evennia.commands.default import unloggedin\n'), ((726, 764), 'evennia.commands.default.unloggedin.CmdUnconnectedDefaultUser', 'unloggedin.CmdUnconnectedDefaultUser', ([], {}), '()\n', (762, 764), False, 'from evennia.commands.default import unloggedin\n'), ((783, 815), 'evennia.commands.default.unloggedin.CmdUnconnectedGuest', 'unloggedin.CmdUnconnectedGuest', ([], {}), '()\n', (813, 815), False, 'from evennia.commands.default import unloggedin\n'), ((834, 869), 'evennia.commands.default.unloggedin.CmdUnconnectedEncoding', 'unloggedin.CmdUnconnectedEncoding', ([], {}), '()\n', (867, 869), False, 'from evennia.commands.default import unloggedin\n'), ((888, 927), 'evennia.commands.default.unloggedin.CmdUnconnectedScreenreader', 'unloggedin.CmdUnconnectedScreenreader', ([], {}), '()\n', (925, 927), False, 'from evennia.commands.default import unloggedin\n'), ((946, 977), 'evennia.commands.default.unloggedin.CmdUnconnectedInfo', 'unloggedin.CmdUnconnectedInfo', ([], {}), '()\n', (975, 977), False, 'from evennia.commands.default import unloggedin\n')]
|
import os
from argparse import ArgumentParser
from warnings import warn
from pytorch_lightning import Trainer
from pytorch_lightning import loggers as pl_loggers
from pytorch_lightning.loggers import WandbLogger
from data.dataloaders import DataProvider
from models.separation_framework import CUNET_Framework
def main(args):
dict_args = vars(args)
model_name = dict_args['model_name']
if dict_args['dev_mode']:
warn('You are in a DEVELOPMENT MODE!')
if dict_args['gpus'] > 1:
warn('# gpu and num_workers should be 1, Not implemented: museval for distributed parallel')
dict_args['gpus'] = 1
if model_name == 'cunet':
model = CUNET_Framework(**dict_args)
else:
raise NotImplementedError
if dict_args['log_system'] == 'wandb':
logger = WandbLogger(project='source_separation', tags=model_name, offline=False,
id=dict_args['run_id'] + 'eval')
logger.log_hyperparams(model.hparams)
elif dict_args['log_system'] == 'tensorboard':
if not os.path.exists(temp_args.tensorboard_path):
os.mkdir(temp_args.tensorboard_path)
logger = pl_loggers.TensorBoardLogger(temp_args.tensorboard_path, name=model_name)
else:
logger = True # default
ckpt_path = '{}/{}/{}/{}_epoch={}.ckpt'.format(
dict_args['checkpoints_path'],
dict_args['model_name'],
dict_args['run_id'],
dict_args['model_name'],
dict_args['epoch'])
assert (ckpt_path is not None)
model = model.load_from_checkpoint(ckpt_path)
data_provider = DataProvider(**dict_args)
n_fft, hop_length, num_frame = [dict_args[key] for key in ['n_fft', 'hop_length', 'num_frame']]
test_dataloader = data_provider.get_test_dataloader(n_fft, hop_length, num_frame)
trainer = Trainer(
gpus=dict_args['gpus'],
logger=logger,
precision=16 if dict_args['float16'] else 32
)
trainer.test(model, test_dataloader)
if __name__ == '__main__':
parser = ArgumentParser()
parser = Trainer.add_argparse_args(parser)
parser.add_argument('--model_name', type=str)
parser.add_argument('--checkpoints_path', type=str, default='checkpoints')
parser.add_argument('--log_system', type=str, default=True)
parser.add_argument('--float16', type=bool, default=False)
parser.add_argument('--run_id', type=str)
parser.add_argument('--epoch', type=int)
temp_args, _ = parser.parse_known_args()
if temp_args.model_name == "cunet":
parser = CUNET_Framework.add_model_specific_args(parser)
else:
warn("no model name")
raise NotImplementedError
parser = DataProvider.add_data_provider_args(parser)
args = parser.parse_args()
# train
main(args)
|
[
"os.mkdir",
"models.separation_framework.CUNET_Framework.add_model_specific_args",
"pytorch_lightning.Trainer",
"pytorch_lightning.Trainer.add_argparse_args",
"argparse.ArgumentParser",
"os.path.exists",
"data.dataloaders.DataProvider.add_data_provider_args",
"data.dataloaders.DataProvider",
"pytorch_lightning.loggers.WandbLogger",
"pytorch_lightning.loggers.TensorBoardLogger",
"warnings.warn",
"models.separation_framework.CUNET_Framework"
] |
[((1616, 1641), 'data.dataloaders.DataProvider', 'DataProvider', ([], {}), '(**dict_args)\n', (1628, 1641), False, 'from data.dataloaders import DataProvider\n'), ((1843, 1940), 'pytorch_lightning.Trainer', 'Trainer', ([], {'gpus': "dict_args['gpus']", 'logger': 'logger', 'precision': "(16 if dict_args['float16'] else 32)"}), "(gpus=dict_args['gpus'], logger=logger, precision=16 if dict_args[\n 'float16'] else 32)\n", (1850, 1940), False, 'from pytorch_lightning import Trainer\n'), ((2051, 2067), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (2065, 2067), False, 'from argparse import ArgumentParser\n'), ((2081, 2114), 'pytorch_lightning.Trainer.add_argparse_args', 'Trainer.add_argparse_args', (['parser'], {}), '(parser)\n', (2106, 2114), False, 'from pytorch_lightning import Trainer\n'), ((2701, 2744), 'data.dataloaders.DataProvider.add_data_provider_args', 'DataProvider.add_data_provider_args', (['parser'], {}), '(parser)\n', (2736, 2744), False, 'from data.dataloaders import DataProvider\n'), ((437, 475), 'warnings.warn', 'warn', (['"""You are in a DEVELOPMENT MODE!"""'], {}), "('You are in a DEVELOPMENT MODE!')\n", (441, 475), False, 'from warnings import warn\n'), ((515, 617), 'warnings.warn', 'warn', (['"""# gpu and num_workers should be 1, Not implemented: museval for distributed parallel"""'], {}), "(\n '# gpu and num_workers should be 1, Not implemented: museval for distributed parallel'\n )\n", (519, 617), False, 'from warnings import warn\n'), ((685, 713), 'models.separation_framework.CUNET_Framework', 'CUNET_Framework', ([], {}), '(**dict_args)\n', (700, 713), False, 'from models.separation_framework import CUNET_Framework\n'), ((819, 929), 'pytorch_lightning.loggers.WandbLogger', 'WandbLogger', ([], {'project': '"""source_separation"""', 'tags': 'model_name', 'offline': '(False)', 'id': "(dict_args['run_id'] + 'eval')"}), "(project='source_separation', tags=model_name, offline=False, id\n =dict_args['run_id'] + 'eval')\n", (830, 929), False, 'from pytorch_lightning.loggers import WandbLogger\n'), ((2565, 2612), 'models.separation_framework.CUNET_Framework.add_model_specific_args', 'CUNET_Framework.add_model_specific_args', (['parser'], {}), '(parser)\n', (2604, 2612), False, 'from models.separation_framework import CUNET_Framework\n'), ((2631, 2652), 'warnings.warn', 'warn', (['"""no model name"""'], {}), "('no model name')\n", (2635, 2652), False, 'from warnings import warn\n'), ((1177, 1250), 'pytorch_lightning.loggers.TensorBoardLogger', 'pl_loggers.TensorBoardLogger', (['temp_args.tensorboard_path'], {'name': 'model_name'}), '(temp_args.tensorboard_path, name=model_name)\n', (1205, 1250), True, 'from pytorch_lightning import loggers as pl_loggers\n'), ((1067, 1109), 'os.path.exists', 'os.path.exists', (['temp_args.tensorboard_path'], {}), '(temp_args.tensorboard_path)\n', (1081, 1109), False, 'import os\n'), ((1123, 1159), 'os.mkdir', 'os.mkdir', (['temp_args.tensorboard_path'], {}), '(temp_args.tensorboard_path)\n', (1131, 1159), False, 'import os\n')]
|
r"""Postprocessing Laplace equation.
A basic postprocessing step in finite element analysis is evaluating linear
forms over the solution. For the Poisson equation, the integral
of the solution (normalized by the area) is the 'Boussinesq k-factor'; for
the square it's roughly 0.03514, for the circle 1/Pi/8 = 0.03979. Linear forms
are easily evaluated in skfem using the 1-D arrays assembled using the
@LinearForm decorator. In :ref:`poisson`, the linear form required for simple
integration happens to be the same one used on the right-hand side of the
differential equation, so it's already to hand.
Another is interpolation; i.e. evaluation of the solution at a
specified point which isn't necessarily a node of the mesh. For this
problem, the maximum of the solution (normalized by the area) is the
'Boussinesq k'-factor'; by symmetry, this occurs for squares (k' =
0.07363) and circles (k' = 1/Pi/4) at the centre and so can be
evaluated by interpolation.
"""
from pathlib import Path
from skfem import *
from skfem.models.poisson import laplace, unit_load
from skfem.io.json import from_file
import numpy as np
m = MeshTri.init_circle(4)
basis = InteriorBasis(m, ElementTriP2())
A = asm(laplace, basis)
b = asm(unit_load, basis)
x = solve(*condense(A, b, D=basis.find_dofs()))
area = sum(b)
k = b @ x / area**2
k1, = basis.probes(np.zeros((2, 1)))(x) / area
if __name__ == '__main__':
from skfem.visuals.matplotlib import plot, show
print('area = {:.4f} (exact = {:.4f})'.format(area, np.pi))
print('k = {:.5f} (exact = 1/8/pi = {:.5f})'.format(k, 1/np.pi/8))
print("k' = {:.5f} (exact = 1/4/pi = {:.5f})".format(k1, 1/np.pi/4))
plot(basis, x)
show()
|
[
"skfem.visuals.matplotlib.show",
"skfem.visuals.matplotlib.plot",
"numpy.zeros"
] |
[((1669, 1683), 'skfem.visuals.matplotlib.plot', 'plot', (['basis', 'x'], {}), '(basis, x)\n', (1673, 1683), False, 'from skfem.visuals.matplotlib import plot, show\n'), ((1688, 1694), 'skfem.visuals.matplotlib.show', 'show', ([], {}), '()\n', (1692, 1694), False, 'from skfem.visuals.matplotlib import plot, show\n'), ((1347, 1363), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (1355, 1363), True, 'import numpy as np\n')]
|
"""
This benchmark compares the performance of different models in
predicting tissue based on gene expression
"""
import argparse
import os
import numpy as np
import pandas as pd
import sklearn.metrics
import yaml
from sklearn.preprocessing import MinMaxScaler
from saged import utils, datasets, models
AVAILABLE_TISSUES = ['Blood', 'Breast', 'Stem Cell', 'Cervix', 'Brain', 'Kidney',
'Umbilical Cord', 'Lung', 'Epithelium', 'Prostate', 'Liver',
'Heart', 'Skin', 'Colon', 'Bone Marrow', 'Muscle', 'Tonsil', 'Blood Vessel',
'Spinal Cord', 'Testis', 'Placenta', 'Bladder', 'Adipose Tisse', 'Ovary',
'Melanoma', 'Adrenal Gland', 'Bone', 'Pancreas', 'Penis',
'Universal reference', 'Spleen', 'Brain reference', 'Large Intestine',
'Esophagus', 'Small Intestine', 'Embryonic kidney', 'Thymus', 'Stomach',
'Endometrium', 'Glioblastoma', 'Gall bladder', 'Lymph Nodes', 'Airway',
'Appendix', 'Thyroid', 'Retina', 'Bowel tissue', 'Foreskin', 'Sperm', 'Foot',
'Cerebellum', 'Cerebral cortex', 'Salivary Gland', 'Duodenum'
]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('dataset_config',
help='The yaml formatted dataset configuration file. For more information '
'about this file read the comments in the example_dataset.yml file')
parser.add_argument('supervised_config',
help='The yaml formatted model configuration file. For more information '
'about this file read the comments in the example_model.yml file')
parser.add_argument('out_file',
help='The file to save the results to')
parser.add_argument('--tissue1',
help='The first tissue to be predicted from the data',
default='Blood', choices=AVAILABLE_TISSUES)
parser.add_argument('--tissue2',
help='The second tissue to be predicted from the data',
default='Breast', choices=AVAILABLE_TISSUES)
parser.add_argument('--neptune_config',
help='A yaml formatted file containing init information for '
'neptune logging')
parser.add_argument('--seed',
help='The random seed to be used in splitting data',
type=int,
default=42)
parser.add_argument('--num_splits',
help='The number of splits to use in cross-validation',
type=int,
default=5)
parser.add_argument('--batch_correction_method',
help='The method to use to correct for batch effects',
default=None)
parser.add_argument('--all_tissue', help='Predict all common tissues in the dataset',
default=False, action='store_true')
parser.add_argument('--biobert', help='Add biobert embeddings as features the model can use',
default=False, action='store_true')
args = parser.parse_args()
with open(args.dataset_config) as in_file:
dataset_config = yaml.safe_load(in_file)
expression_df, sample_to_label, sample_to_study = utils.load_recount_data(args.dataset_config)
if args.biobert:
embeddings = utils.load_biobert_embeddings(args.dataset_config)
# These indices are correct, the expression dataframe is genes x samples currently
placeholder_array = np.ones((embeddings.shape[1], expression_df.shape[1]))
with open(dataset_config['metadata_path'], 'r') as in_file:
header = in_file.readline()
header = header.replace('"', '')
header = header.strip().split('\t')
# Add one to the indices to account for the index column in metadata not present in the
# header
sample_index = header.index('external_id') + 1
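            # Illustration of why the "+ 1" is needed (field values here are made
            # up): the header might read "external_id\ttissue\t...", while each data
            # row reads "<row index>\tSRR0000001\tBlood\t...", i.e. rows carry a
            # leading index column that the header lacks, shifting positions by one.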
for line_number, metadata_line in enumerate(in_file):
line = metadata_line.strip().split('\t')
sample = line[sample_index]
sample = sample.replace('"', '')
# Not all samples with metadata are in compendium
if sample not in expression_df.columns:
continue
index_in_df = expression_df.columns.get_loc(sample)
placeholder_array[:, index_in_df] = embeddings[line_number, :]
# 0-1 normalize embeddings to match scale of expression
scaler = MinMaxScaler()
placeholder_array = scaler.fit_transform(placeholder_array.T).T
embedding_df = pd.DataFrame(placeholder_array, columns=expression_df.columns)
expression_df = pd.concat([expression_df, embedding_df], axis='rows')
all_data = datasets.RefineBioMixedDataset(expression_df, sample_to_label, sample_to_study)
labeled_data = all_data.get_labeled()
labels_to_keep = None
if args.all_tissue:
# Keep all labels with at least ten studies in the dataset
labels_to_keep = ['Blood', 'Breast', 'Stem Cell', 'Cervix', 'Brain', 'Kidney',
'Umbilical Cord', 'Lung', 'Epithelium', 'Prostate', 'Liver',
'Heart', 'Skin', 'Colon', 'Bone Marrow', 'Muscle', 'Tonsil',
'Blood Vessel', 'Spinal Cord', 'Testis', 'Placenta'
]
else:
labels_to_keep = [args.tissue1, args.tissue2]
labeled_data.subset_samples_to_labels(labels_to_keep)
# Correct for batch effects
if args.batch_correction_method is not None:
labeled_data = all_data.get_labeled()
labeled_data.subset_samples_to_labels(labels_to_keep)
labeled_data = datasets.correct_batch_effects(labeled_data, args.batch_correction_method)
labeled_data.recode()
label_encoder = labeled_data.get_label_encoder()
    # Get cross-validation splits (args.num_splits folds; five by default)
print('CV splitting')
labeled_splits = labeled_data.get_cv_splits(num_splits=args.num_splits, seed=args.seed)
# Train the model on each fold
accuracies = []
balanced_accuracies = []
f1_scores = []
supervised_train_studies = []
supervised_train_sample_names = []
supervised_val_sample_names = []
supervised_train_sample_counts = []
subset_percents = []
for i in range(len(labeled_splits)):
for subset_number in range(1, 11, 1):
# The new neptune version doesn't have a create_experiment function so you have to
# reinitialize per-model
neptune_run = None
# Parse config file
if args.neptune_config is not None:
with open(args.neptune_config) as neptune_file:
neptune_config = yaml.safe_load(neptune_file)
neptune_run = utils.initialize_neptune(neptune_config)
subset_percent = subset_number * .1
train_list = labeled_splits[:i] + labeled_splits[i+1:]
# Extract the train and test datasets
LabeledDatasetClass = type(labeled_data)
train_data = LabeledDatasetClass.from_list(train_list)
val_data = labeled_splits[i]
# This isn't strictly necessary since we're checking whether both classes are present,
# but it's safer
train_data.set_label_encoder(label_encoder)
val_data.set_label_encoder(label_encoder)
if not args.all_tissue:
train_data = utils.subset_to_equal_ratio(train_data, val_data, args.tissue1,
args.tissue2, args.seed)
# Now that the ratio is correct, actually subset the samples
train_data = train_data.subset_samples(subset_percent,
args.seed)
# Skip entries where there is only data for one class
if len(train_data.get_classes()) <= 1 or len(val_data.get_classes()) <= 1:
continue
if args.neptune_config is not None:
neptune_run['samples'] = len(train_data.get_samples())
neptune_run['studies'] = len(train_data.get_studies())
print('Samples: {}'.format(len(train_data.get_samples())))
print('Studies: {}'.format(len(train_data.get_studies())))
print('Val data: {}'.format(len(val_data)))
input_size = len(train_data.get_features())
output_size = len(train_data.get_classes())
print('Classes: {}'.format(output_size))
with open(args.supervised_config) as supervised_file:
supervised_config = yaml.safe_load(supervised_file)
supervised_config['input_size'] = input_size
supervised_config['output_size'] = output_size
if 'save_path' in supervised_config:
# Append script-specific information to model save file
save_path = supervised_config['save_path']
# Remove extension
save_path = os.path.splitext(save_path)[0]
if args.all_tissue and args.biobert:
extra_info = 'all_tissue_biobert'
elif args.all_tissue:
extra_info = 'all_tissue'
elif args.biobert:
extra_info = 'biobert'
else:
extra_info = '{}-{}'.format(args.tissue1, args.tissue2)
extra_info = '{}_{}_{}'.format(extra_info, i, args.seed)
save_path = os.path.join(save_path + '_predict_{}.pt'.format(extra_info))
supervised_config['save_path'] = save_path
supervised_model_type = supervised_config.pop('name')
SupervisedClass = getattr(models, supervised_model_type)
supervised_model = SupervisedClass(**supervised_config)
supervised_model.fit(train_data, neptune_run)
predictions, true_labels = supervised_model.evaluate(val_data)
supervised_model.free_memory()
accuracy = sklearn.metrics.accuracy_score(true_labels, predictions)
positive_label_encoding = train_data.get_label_encoding(args.tissue1)
balanced_acc = sklearn.metrics.balanced_accuracy_score(true_labels, predictions)
if args.all_tissue:
f1_score = 'NA'
else:
f1_score = sklearn.metrics.f1_score(true_labels, predictions,
pos_label=positive_label_encoding,
average='binary')
accuracies.append(accuracy)
balanced_accuracies.append(balanced_acc)
f1_scores.append(f1_score)
supervised_train_studies.append(','.join(train_data.get_studies()))
supervised_train_sample_names.append(','.join(train_data.get_samples()))
supervised_val_sample_names.append(','.join(val_data.get_samples()))
supervised_train_sample_counts.append(len(train_data))
subset_percents.append(subset_percent)
train_data.reset_filters()
val_data.reset_filters()
with open(args.out_file, 'w') as out_file:
# Write header
out_file.write('accuracy\tbalanced_accuracy\tf1_score\ttrain studies\ttrain samples\t'
'val samples\ttrain sample count\tfraction of data used\n')
result_iterator = zip(accuracies,
balanced_accuracies,
f1_scores,
supervised_train_studies,
supervised_train_sample_names,
supervised_val_sample_names,
supervised_train_sample_counts,
subset_percents
)
for stats in result_iterator:
stat_strings = [str(item) for item in stats]
out_str = '\t'.join(stat_strings)
out_file.write(f'{out_str}\n')
|
[
"saged.utils.load_recount_data",
"pandas.DataFrame",
"argparse.ArgumentParser",
"saged.utils.initialize_neptune",
"sklearn.preprocessing.MinMaxScaler",
"saged.datasets.correct_batch_effects",
"numpy.ones",
"saged.datasets.RefineBioMixedDataset",
"saged.utils.subset_to_equal_ratio",
"yaml.safe_load",
"os.path.splitext",
"saged.utils.load_biobert_embeddings",
"pandas.concat"
] |
[((1267, 1292), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1290, 1292), False, 'import argparse\n'), ((3451, 3495), 'saged.utils.load_recount_data', 'utils.load_recount_data', (['args.dataset_config'], {}), '(args.dataset_config)\n', (3474, 3495), False, 'from saged import utils, datasets, models\n'), ((5032, 5111), 'saged.datasets.RefineBioMixedDataset', 'datasets.RefineBioMixedDataset', (['expression_df', 'sample_to_label', 'sample_to_study'], {}), '(expression_df, sample_to_label, sample_to_study)\n', (5062, 5111), False, 'from saged import utils, datasets, models\n'), ((3373, 3396), 'yaml.safe_load', 'yaml.safe_load', (['in_file'], {}), '(in_file)\n', (3387, 3396), False, 'import yaml\n'), ((3538, 3588), 'saged.utils.load_biobert_embeddings', 'utils.load_biobert_embeddings', (['args.dataset_config'], {}), '(args.dataset_config)\n', (3567, 3588), False, 'from saged import utils, datasets, models\n'), ((3709, 3763), 'numpy.ones', 'np.ones', (['(embeddings.shape[1], expression_df.shape[1])'], {}), '((embeddings.shape[1], expression_df.shape[1]))\n', (3716, 3763), True, 'import numpy as np\n'), ((5976, 6050), 'saged.datasets.correct_batch_effects', 'datasets.correct_batch_effects', (['labeled_data', 'args.batch_correction_method'], {}), '(labeled_data, args.batch_correction_method)\n', (6006, 6050), False, 'from saged import utils, datasets, models\n'), ((4752, 4766), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (4764, 4766), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((4871, 4933), 'pandas.DataFrame', 'pd.DataFrame', (['placeholder_array'], {'columns': 'expression_df.columns'}), '(placeholder_array, columns=expression_df.columns)\n', (4883, 4933), True, 'import pandas as pd\n'), ((4962, 5015), 'pandas.concat', 'pd.concat', (['[expression_df, embedding_df]'], {'axis': '"""rows"""'}), "([expression_df, embedding_df], axis='rows')\n", (4971, 5015), True, 'import pandas as pd\n'), ((7741, 7834), 'saged.utils.subset_to_equal_ratio', 'utils.subset_to_equal_ratio', (['train_data', 'val_data', 'args.tissue1', 'args.tissue2', 'args.seed'], {}), '(train_data, val_data, args.tissue1, args.\n tissue2, args.seed)\n', (7768, 7834), False, 'from saged import utils, datasets, models\n'), ((8927, 8958), 'yaml.safe_load', 'yaml.safe_load', (['supervised_file'], {}), '(supervised_file)\n', (8941, 8958), False, 'import yaml\n'), ((7003, 7031), 'yaml.safe_load', 'yaml.safe_load', (['neptune_file'], {}), '(neptune_file)\n', (7017, 7031), False, 'import yaml\n'), ((7066, 7106), 'saged.utils.initialize_neptune', 'utils.initialize_neptune', (['neptune_config'], {}), '(neptune_config)\n', (7090, 7106), False, 'from saged import utils, datasets, models\n'), ((9346, 9373), 'os.path.splitext', 'os.path.splitext', (['save_path'], {}), '(save_path)\n', (9362, 9373), False, 'import os\n')]
|
import numpy as np
import aesara
import aesara.tensor as tt
class Mlp:
def __init__(self, nfeatures=100, noutputs=10, nhiddens=50, rng=None):
if rng is None:
rng = 0
if isinstance(rng, int):
rng = np.random.RandomState(rng)
self.rng = rng
self.nfeatures = nfeatures
self.noutputs = noutputs
self.nhiddens = nhiddens
x = tt.dmatrix("x")
wh = aesara.shared(self.rng.normal(0, 1, (nfeatures, nhiddens)), borrow=True)
bh = aesara.shared(np.zeros(nhiddens), borrow=True)
h = tt.nnet.sigmoid(tt.dot(x, wh) + bh)
wy = aesara.shared(self.rng.normal(0, 1, (nhiddens, noutputs)))
by = aesara.shared(np.zeros(noutputs), borrow=True)
y = tt.nnet.softmax(tt.dot(h, wy) + by)
self.inputs = [x]
self.outputs = [y]
class OfgNested:
def __init__(self):
x, y, z = tt.scalars("xyz")
e = x * y
op = aesara.OpFromGraph([x, y], [e])
e2 = op(x, y) + z
op2 = aesara.OpFromGraph([x, y, z], [e2])
e3 = op2(x, y, z) + z
self.inputs = [x, y, z]
self.outputs = [e3]
class Ofg:
def __init__(self):
x, y, z = tt.scalars("xyz")
e = tt.nnet.sigmoid((x + y + z) ** 2)
op = aesara.OpFromGraph([x, y, z], [e])
e2 = op(x, y, z) + op(z, y, x)
self.inputs = [x, y, z]
self.outputs = [e2]
class OfgSimple:
def __init__(self):
x, y, z = tt.scalars("xyz")
e = tt.nnet.sigmoid((x + y + z) ** 2)
op = aesara.OpFromGraph([x, y, z], [e])
e2 = op(x, y, z)
self.inputs = [x, y, z]
self.outputs = [e2]
|
[
"aesara.tensor.dmatrix",
"aesara.tensor.nnet.sigmoid",
"aesara.tensor.dot",
"numpy.zeros",
"numpy.random.RandomState",
"aesara.OpFromGraph",
"aesara.tensor.scalars"
] |
[((408, 423), 'aesara.tensor.dmatrix', 'tt.dmatrix', (['"""x"""'], {}), "('x')\n", (418, 423), True, 'import aesara.tensor as tt\n'), ((914, 931), 'aesara.tensor.scalars', 'tt.scalars', (['"""xyz"""'], {}), "('xyz')\n", (924, 931), True, 'import aesara.tensor as tt\n'), ((963, 994), 'aesara.OpFromGraph', 'aesara.OpFromGraph', (['[x, y]', '[e]'], {}), '([x, y], [e])\n', (981, 994), False, 'import aesara\n'), ((1035, 1070), 'aesara.OpFromGraph', 'aesara.OpFromGraph', (['[x, y, z]', '[e2]'], {}), '([x, y, z], [e2])\n', (1053, 1070), False, 'import aesara\n'), ((1217, 1234), 'aesara.tensor.scalars', 'tt.scalars', (['"""xyz"""'], {}), "('xyz')\n", (1227, 1234), True, 'import aesara.tensor as tt\n'), ((1247, 1280), 'aesara.tensor.nnet.sigmoid', 'tt.nnet.sigmoid', (['((x + y + z) ** 2)'], {}), '((x + y + z) ** 2)\n', (1262, 1280), True, 'import aesara.tensor as tt\n'), ((1294, 1328), 'aesara.OpFromGraph', 'aesara.OpFromGraph', (['[x, y, z]', '[e]'], {}), '([x, y, z], [e])\n', (1312, 1328), False, 'import aesara\n'), ((1490, 1507), 'aesara.tensor.scalars', 'tt.scalars', (['"""xyz"""'], {}), "('xyz')\n", (1500, 1507), True, 'import aesara.tensor as tt\n'), ((1520, 1553), 'aesara.tensor.nnet.sigmoid', 'tt.nnet.sigmoid', (['((x + y + z) ** 2)'], {}), '((x + y + z) ** 2)\n', (1535, 1553), True, 'import aesara.tensor as tt\n'), ((1567, 1601), 'aesara.OpFromGraph', 'aesara.OpFromGraph', (['[x, y, z]', '[e]'], {}), '([x, y, z], [e])\n', (1585, 1601), False, 'import aesara\n'), ((244, 270), 'numpy.random.RandomState', 'np.random.RandomState', (['rng'], {}), '(rng)\n', (265, 270), True, 'import numpy as np\n'), ((537, 555), 'numpy.zeros', 'np.zeros', (['nhiddens'], {}), '(nhiddens)\n', (545, 555), True, 'import numpy as np\n'), ((718, 736), 'numpy.zeros', 'np.zeros', (['noutputs'], {}), '(noutputs)\n', (726, 736), True, 'import numpy as np\n'), ((598, 611), 'aesara.tensor.dot', 'tt.dot', (['x', 'wh'], {}), '(x, wh)\n', (604, 611), True, 'import aesara.tensor as tt\n'), ((779, 792), 'aesara.tensor.dot', 'tt.dot', (['h', 'wy'], {}), '(h, wy)\n', (785, 792), True, 'import aesara.tensor as tt\n')]
|
from __future__ import print_function
import os
import sys
import traceback
from flask import Flask, g
from werkzeug.contrib.fixers import ProxyFix
from werkzeug.routing import BaseConverter
from config_parser import parse_config
from database import create_session
from decorators import template_renderer
from log_configuration import LogConfiguration
from mailer import Mailer
from mod_auth.controllers import mod_auth
from mod_ci.controllers import mod_ci
from mod_deploy.controllers import mod_deploy
from mod_home.controllers import mod_home
from mod_regression.controllers import mod_regression
from mod_sample.controllers import mod_sample
from mod_test.controllers import mod_test
from mod_upload.controllers import mod_upload
from mod_customized.controllers import mod_customized
app = Flask(__name__)
app.wsgi_app = ProxyFix(app.wsgi_app)
# Load config
config = parse_config('config')
app.config.from_mapping(config)
try:
app.config['DEBUG'] = os.environ['DEBUG']
except KeyError:
app.config['DEBUG'] = False
# Init logger
log_configuration = LogConfiguration(app.root_path, 'platform', app.config['DEBUG'])
log = log_configuration.create_logger("Platform")
def install_secret_keys(application, secret_session='secret_key', secret_csrf='secret_csrf'):
"""
Configure the SECRET_KEY from a file in the instance directory.
If the file does not exist, print instructions to create it from a shell with a random key, then exit.
"""
do_exit = False
session_file_path = os.path.join(application.root_path, secret_session)
csrf_file_path = os.path.join(application.root_path, secret_csrf)
try:
with open(session_file_path, 'rb') as session_file:
application.config['SECRET_KEY'] = session_file.read()
except IOError:
traceback.print_exc()
print('Error: No secret key. Create it with:')
        if not os.path.isdir(os.path.dirname(session_file_path)):
            print('mkdir -p', os.path.dirname(session_file_path))
        print('head -c 24 /dev/urandom >', session_file_path)
do_exit = True
try:
with open(csrf_file_path, 'rb') as csrf_file:
application.config['CSRF_SESSION_KEY'] = csrf_file.read()
except IOError:
print('Error: No secret CSRF key. Create it with:')
        if not os.path.isdir(os.path.dirname(csrf_file_path)):
            print('mkdir -p', os.path.dirname(csrf_file_path))
        print('head -c 24 /dev/urandom >', csrf_file_path)
do_exit = True
if do_exit:
sys.exit(1)
if 'TESTING' not in os.environ or os.environ['TESTING'] == 'False':
install_secret_keys(app)
# Expose submenu method for jinja templates
def sub_menu_open(menu_entries, active_route):
for menu_entry in menu_entries:
if 'route' in menu_entry and menu_entry['route'] == active_route:
return True
return False
app.jinja_env.globals.update(sub_menu_open=sub_menu_open)
app.jinja_env.add_extension('jinja2.ext.loopcontrols')
# Add datetime format filter
def date_time_format(value, fmt='%Y-%m-%d %H:%M:%S'):
return value.strftime(fmt)
def get_github_issue_link(issue_id):
return 'https://www.github.com/{org}/{repo}/issues/{id}'.format(
org=config.get('GITHUB_OWNER', ''),
repo=config.get('GITHUB_REPOSITORY', ''),
id=issue_id
)
def filename(filepath):
return os.path.basename(filepath)
app.jinja_env.filters['date'] = date_time_format
app.jinja_env.filters['issue_link'] = get_github_issue_link
app.jinja_env.filters['filename'] = filename
class RegexConverter(BaseConverter):
def __init__(self, url_map, *items):
super(RegexConverter, self).__init__(url_map)
self.regex = items[0]
# Allow regexes in routes
app.url_map.converters['regex'] = RegexConverter
@app.errorhandler(404)
@template_renderer('404.html', 404)
def not_found(error):
return
@app.errorhandler(500)
@template_renderer('500.html', 500)
def internal_error(error):
log.debug('500 error: {err}'.format(err=error))
log.debug('Stacktrace:')
log.debug(traceback.format_exc())
return
@app.errorhandler(403)
@template_renderer('403.html', 403)
def forbidden(error):
user_name = 'Guest' if g.user is None else g.user.name
user_role = 'Guest' if g.user is None else g.user.role.value
log.debug('{u} (role: {r}) tried to access {page}'.format(u=user_name, r=user_role, page=error.description))
return {
'user_role': user_role,
'endpoint': error.description
}
@app.before_request
def before_request():
g.menu_entries = {}
g.db = create_session(app.config['DATABASE_URI'])
g.mailer = Mailer(
app.config.get('EMAIL_DOMAIN', ''), app.config.get('EMAIL_API_KEY', ''), 'CCExtractor.org CI Platform'
)
g.version = "0.1"
g.log = log
g.github = get_github_config(app.config)
def get_github_config(config):
return {
'deploy_key': config.get('GITHUB_DEPLOY_KEY', ''),
'ci_key': config.get('GITHUB_CI_KEY', ''),
'bot_token': config.get('GITHUB_TOKEN', ''),
'bot_name': config.get('GITHUB_BOT', ''),
'repository_owner': config.get('GITHUB_OWNER', ''),
'repository': config.get('GITHUB_REPOSITORY', '')
}
@app.teardown_appcontext
def teardown(exception):
db = g.get('db', None)
if db is not None:
db.remove()
# Register blueprints
app.register_blueprint(mod_auth, url_prefix='/account') # Needs to be first
app.register_blueprint(mod_upload, url_prefix='/upload')
app.register_blueprint(mod_regression, url_prefix='/regression')
app.register_blueprint(mod_sample, url_prefix='/sample')
app.register_blueprint(mod_home)
app.register_blueprint(mod_deploy)
app.register_blueprint(mod_test, url_prefix="/test")
app.register_blueprint(mod_ci)
app.register_blueprint(mod_customized, url_prefix='/custom')
if __name__ == '__main__':
# Run in development mode; Werkzeug server
# Load variables for running (if defined)
ssl_context = host = None
proto = 'https'
key = app.config.get('SSL_KEY', 'cert/key.key')
cert = app.config.get('SSL_CERT', 'cert/cert.cert')
if len(key) == 0 or len(cert) == 0:
ssl_context = 'adhoc'
else:
ssl_context = (cert, key)
    server_name = app.config.get('SERVER_NAME', '0.0.0.0')
port = app.config.get('SERVER_PORT', 443)
print('Server should be running soon on {0}://{1}:{2}'.format(proto, server_name, port))
if server_name != '127.0.0.1':
host = '0.0.0.0'
app.run(host, port, app.config['DEBUG'], ssl_context=ssl_context)
|
[
"werkzeug.contrib.fixers.ProxyFix",
"traceback.print_exc",
"flask.g.get",
"os.path.basename",
"config_parser.parse_config",
"os.path.dirname",
"flask.Flask",
"traceback.format_exc",
"log_configuration.LogConfiguration",
"decorators.template_renderer",
"database.create_session",
"os.path.join",
"sys.exit"
] |
[((800, 815), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (805, 815), False, 'from flask import Flask, g\n'), ((831, 853), 'werkzeug.contrib.fixers.ProxyFix', 'ProxyFix', (['app.wsgi_app'], {}), '(app.wsgi_app)\n', (839, 853), False, 'from werkzeug.contrib.fixers import ProxyFix\n'), ((877, 899), 'config_parser.parse_config', 'parse_config', (['"""config"""'], {}), "('config')\n", (889, 899), False, 'from config_parser import parse_config\n'), ((1067, 1131), 'log_configuration.LogConfiguration', 'LogConfiguration', (['app.root_path', '"""platform"""', "app.config['DEBUG']"], {}), "(app.root_path, 'platform', app.config['DEBUG'])\n", (1083, 1131), False, 'from log_configuration import LogConfiguration\n'), ((3813, 3847), 'decorators.template_renderer', 'template_renderer', (['"""404.html"""', '(404)'], {}), "('404.html', 404)\n", (3830, 3847), False, 'from decorators import template_renderer\n'), ((3907, 3941), 'decorators.template_renderer', 'template_renderer', (['"""500.html"""', '(500)'], {}), "('500.html', 500)\n", (3924, 3941), False, 'from decorators import template_renderer\n'), ((4125, 4159), 'decorators.template_renderer', 'template_renderer', (['"""403.html"""', '(403)'], {}), "('403.html', 403)\n", (4142, 4159), False, 'from decorators import template_renderer\n'), ((1514, 1565), 'os.path.join', 'os.path.join', (['application.root_path', 'secret_session'], {}), '(application.root_path, secret_session)\n', (1526, 1565), False, 'import os\n'), ((1587, 1635), 'os.path.join', 'os.path.join', (['application.root_path', 'secret_csrf'], {}), '(application.root_path, secret_csrf)\n', (1599, 1635), False, 'import os\n'), ((3362, 3388), 'os.path.basename', 'os.path.basename', (['filepath'], {}), '(filepath)\n', (3378, 3388), False, 'import os\n'), ((4588, 4630), 'database.create_session', 'create_session', (["app.config['DATABASE_URI']"], {}), "(app.config['DATABASE_URI'])\n", (4602, 4630), False, 'from database import create_session\n'), ((5298, 5315), 'flask.g.get', 'g.get', (['"""db"""', 'None'], {}), "('db', None)\n", (5303, 5315), False, 'from flask import Flask, g\n'), ((2511, 2522), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2519, 2522), False, 'import sys\n'), ((4064, 4086), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (4084, 4086), False, 'import traceback\n'), ((1800, 1821), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1819, 1821), False, 'import traceback\n'), ((1906, 1935), 'os.path.dirname', 'os.path.dirname', (['session_file'], {}), '(session_file)\n', (1921, 1935), False, 'import os\n'), ((1968, 1997), 'os.path.dirname', 'os.path.dirname', (['session_file'], {}), '(session_file)\n', (1983, 1997), False, 'import os\n'), ((2322, 2348), 'os.path.dirname', 'os.path.dirname', (['csrf_file'], {}), '(csrf_file)\n', (2337, 2348), False, 'import os\n'), ((2381, 2407), 'os.path.dirname', 'os.path.dirname', (['csrf_file'], {}), '(csrf_file)\n', (2396, 2407), False, 'import os\n')]
|
import os
from fabric.api import env
os_username = 'admin'
os_password = '<PASSWORD>'
os_tenant_name = 'demo'
host1 = 'root@10.84.5.42'
host2 = 'root@10.84.5.43'
host3 = 'root@10.84.5.44'
host4 = 'root@10.84.5.45'
host5 = 'root@10.84.24.11'
host6 = 'root@10.84.24.12'
host7 = 'root@10.84.24.13'
host8 = 'root@10.84.24.14'
host9 = 'root@10.84.24.15'
#ext_routers = [('mx1', '10.84.14.253'), ('mx2', '10.84.14.252')]
ext_routers = []
router_asn = 64512
public_vn_rtgt = 10000
public_vn_subnet = "10.84.44.0/24"
host_build = 'contrail@10.84.5.101'
env.roledefs = {
'all': [host1, host2, host3, host4, host5, host6, host7, host8, host9],
'cfgm': [host5],
'control': [host4, host5],
'compute': [host1, host2, host3, host6, host7, host8, host9],
'collector': [host5],
'database': [host5],
'webui': [host5],
'build': [host_build],
}
env.hostnames = {
'all': ['a1s42', 'a1s43', 'a1s44', 'a1s45', 'b4s11', 'b4s12', 'b4s13',
'b4s14', 'b4s15']
}
env.passwords = {
host1: 'c0ntrail123',
host2: 'c0ntrail123',
host3: 'c0ntrail123',
host4: 'c0ntrail123',
host5: 'c0ntrail123',
host6: 'c0ntrail123',
host7: 'c0ntrail123',
host8: 'c0ntrail123',
host9: 'c0ntrail123',
host_build: 'c0ntrail123',
}
env.test_repo_dir=os.path.expanduser('~/test')
env.mail_from='<EMAIL>'
env.mail_to='<EMAIL>'
|
[
"os.path.expanduser"
] |
[((1295, 1323), 'os.path.expanduser', 'os.path.expanduser', (['"""~/test"""'], {}), "('~/test')\n", (1313, 1323), False, 'import os\n')]
|
'''
HOW TO RUN THIS CODE (if tests are within the assignment 1 root):
python -m py.test tests/test_sigmoid_to_solutions.py -vv -s -q
python -m py.test tests/test_sigmoid_to_solutions.py -vv -s -q --cov
py.test.exe --cov=cs224d/ tests/test_sigmoid_to_solutions.py --cov-report html
(if the tests are within the subfolder tests)
PYTHONPATH=${PWD} py.test.exe tests/ -v --cov-report html
python -m pytest tests -v --cov-report html
Open index.html contained within htmlcov
'''
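# Editorial note (assumption, not from the original file): the commands above rely
# on pytest and the pytest-cov plugin being installed, e.g. `pip install pytest
# pytest-cov`; `py.test.exe` is the Windows launcher, while the `python -m pytest`
# form works on any platform.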
import pytest
import numpy as np
from q2_sigmoid import sigmoid, sigmoid_grad
from q2_sigmoid_sol import sigmoid_sol, sigmoid_grad_sol
import random
from collections import defaultdict, OrderedDict, Counter
COUNT=5
def rel_error(x,y):
""" returns relative error """
return np.max(np.abs(x - y) / (np.maximum(1e-7, np.abs(x) + np.abs(y))))
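# Worked illustration (editorial addition): for scalars x = 1.0 and y = 1.001,
# |x - y| = 1e-3 and |x| + |y| = 2.001, so rel_error(x, y) ~= 5.0e-4.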
@pytest.mark.parametrize("sigmoid_f", [sigmoid, sigmoid_sol])
def test_sigmoid(sigmoid_f):
""" Original sigmoid test defined in q2_sigmoid.py; """
x = np.array([[1, 2], [-1, -2]])
f = sigmoid_f(x)
assert rel_error(f, np.array([[0.73105858, 0.88079708],
[0.26894142, 0.11920292]])) <= 1e-7
@pytest.mark.parametrize("sigmoid_f", [sigmoid, sigmoid_sol])
def test_sigmoidgrad(sigmoid_f):
""" Original sigmoid gradient test defined in q2_sigmoid.py; """
x = np.array([[1, 2], [-1, -2]])
f = sigmoid(x)
g = sigmoid_grad(f)
assert rel_error(g, np.array([[0.19661193, 0.10499359],
[0.19661193, 0.10499359]])) <= 1e-7
@pytest.mark.parametrize("dim", list(range(1,8)))
@pytest.mark.parametrize("sigmoid_f", [sigmoid, sigmoid_sol])
def test_sigmoid_shape(dim, sigmoid_f):
testing_shape = []
for y in range(0,dim):
testing_shape.append(np.random.randint(3,8))
shape = tuple(testing_shape)
#z = np.random.randn(*testing_shape)
x = np.random.standard_normal(shape)
y = np.copy(x)
assert x.shape == sigmoid(y).shape
assert x.shape == sigmoid_grad(sigmoid(y)).shape
@pytest.mark.parametrize("sigmoid_f", [sigmoid, sigmoid_sol])
def test_sigmoid_minus_z(sigmoid_f, count=100):
z = np.random.normal(loc=0., scale=100., size=count)
y = -z
assert rel_error(1 - sigmoid(y), sigmoid(z)) <= 1e-7
@pytest.mark.parametrize("sigmoid_f", [sigmoid, sigmoid_sol])
def test_sigmoid_monotone(sigmoid_f, count=100):
z = np.random.normal(loc=0., scale=100., size=count)
shift = np.random.uniform(low=0., high=10., size=count)
    assert np.all(sigmoid(z + shift) - sigmoid(z) >= 0)
    assert np.all(sigmoid(z - shift) - sigmoid(z) <= 0)
@pytest.mark.parametrize("sigmoid_f", [sigmoid, sigmoid_sol])
def test_sigmoid_range(sigmoid_f, count=100):
z = np.random.normal(loc=0., scale=100., size=count)
assert np.max(sigmoid(z)) <= 1.
    assert np.min(sigmoid(z)) >= 0.
@pytest.mark.parametrize("dim_1", list(range(1,20)))
@pytest.mark.parametrize('execution_number', list(range(COUNT)))
@pytest.mark.parametrize("sigmoid_f", [sigmoid, sigmoid_sol])
def test_sigmoid_permutation_axis0(dim_1, execution_number, sigmoid_f):
""" sigmoid needs to be applied element-wise;"""
a1 = np.random.normal(size=(dim_1,1))
s1 = sigmoid(a1)
permutation = np.random.permutation(dim_1)
inverse_permutation = np.argsort(permutation)
s1_perm = sigmoid(a1[permutation])
assert rel_error(s1_perm[inverse_permutation], s1) <= 1e-8
@pytest.mark.parametrize("dim_1", list(range(1,20)))
@pytest.mark.parametrize("sigmoid_f", [sigmoid, sigmoid_sol])
def test_sigmoid_permutation_axis1(dim_1, sigmoid_f):
a1 = np.random.normal(size=(1,dim_1))
s1 = sigmoid(a1)
permutation = np.random.permutation(dim_1)
inverse_permutation = np.argsort(permutation)
s1_perm = sigmoid(a1.ravel()[permutation])
assert rel_error(s1_perm.ravel()[inverse_permutation], s1) <= 1e-8
#note: permutation(sigmoid(x)) = sigmoid(permutation(x))
@pytest.mark.parametrize("dim_1", list(range(1,20)))
@pytest.mark.parametrize("dim_2", list(range(1,20)))
@pytest.mark.parametrize("sigmoid_f", [sigmoid, sigmoid_sol])
def test_sigmoid_gradient(dim_1, dim_2, sigmoid_f):
a1 = np.random.normal(loc=0., scale=20., size=(dim_1,dim_2))
shift = np.random.uniform(low=1e-9, high=1e-5, size=(dim_1,dim_2))
ap = a1 + shift
am = a1 - shift
dsigmoid = (sigmoid(ap) - sigmoid(am)) / (2*shift)
assert np.abs(np.max(dsigmoid - sigmoid_grad(sigmoid(a1)))) <= 1e-7
assert np.abs(np.min(dsigmoid - sigmoid_grad(sigmoid(a1)))) <= 1e-7
@pytest.mark.parametrize("dim_1", list(range(1,20)))
@pytest.mark.parametrize("dim_2", list(range(1,20)))
def test_sigmoid_matches_solution(dim_1, dim_2):
a1 = np.random.normal(loc=0., scale=20., size=(dim_1,dim_2))
assert rel_error(sigmoid(a1), sigmoid_sol(a1)) <= 1e-10
@pytest.mark.parametrize("dim_1", list(range(1,20)))
@pytest.mark.parametrize("dim_2", list(range(1,20)))
def test_sigmoid_grad_matches_solution(dim_1, dim_2):
a1 = np.random.normal(loc=0., scale=20., size=(dim_1,dim_2))
a1_copy = a1.copy()
s_a1 = sigmoid(a1)
s_sol_a1 = sigmoid_sol(a1_copy)
assert rel_error(sigmoid_grad(s_a1), sigmoid_grad_sol(s_sol_a1)) <= 1e-10
@pytest.mark.parametrize("dim_1", list(range(1,20)))
@pytest.mark.parametrize("dim_2", list(range(1,20)))
def test_sigmoid_grad_direct_matches_solution(dim_1, dim_2):
a1 = np.random.normal(loc=0., scale=20., size=(dim_1,dim_2))
a1_copy = a1.copy()
assert rel_error(sigmoid_grad(a1), sigmoid_grad_sol(a1_copy)) <= 1e-10
|
[
"q2_sigmoid_sol.sigmoid_grad_sol",
"numpy.random.uniform",
"numpy.abs",
"numpy.copy",
"q2_sigmoid.sigmoid",
"q2_sigmoid_sol.sigmoid_sol",
"numpy.argsort",
"q2_sigmoid.sigmoid_grad",
"numpy.random.standard_normal",
"numpy.array",
"numpy.random.randint",
"numpy.random.normal",
"numpy.random.permutation",
"pytest.mark.parametrize"
] |
[((831, 891), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sigmoid_f"""', '[sigmoid, sigmoid_sol]'], {}), "('sigmoid_f', [sigmoid, sigmoid_sol])\n", (854, 891), False, 'import pytest\n'), ((1145, 1205), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sigmoid_f"""', '[sigmoid, sigmoid_sol]'], {}), "('sigmoid_f', [sigmoid, sigmoid_sol])\n", (1168, 1205), False, 'import pytest\n'), ((1544, 1604), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sigmoid_f"""', '[sigmoid, sigmoid_sol]'], {}), "('sigmoid_f', [sigmoid, sigmoid_sol])\n", (1567, 1604), False, 'import pytest\n'), ((1976, 2036), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sigmoid_f"""', '[sigmoid, sigmoid_sol]'], {}), "('sigmoid_f', [sigmoid, sigmoid_sol])\n", (1999, 2036), False, 'import pytest\n'), ((2212, 2272), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sigmoid_f"""', '[sigmoid, sigmoid_sol]'], {}), "('sigmoid_f', [sigmoid, sigmoid_sol])\n", (2235, 2272), False, 'import pytest\n'), ((2557, 2617), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sigmoid_f"""', '[sigmoid, sigmoid_sol]'], {}), "('sigmoid_f', [sigmoid, sigmoid_sol])\n", (2580, 2617), False, 'import pytest\n'), ((2913, 2973), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sigmoid_f"""', '[sigmoid, sigmoid_sol]'], {}), "('sigmoid_f', [sigmoid, sigmoid_sol])\n", (2936, 2973), False, 'import pytest\n'), ((3440, 3500), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sigmoid_f"""', '[sigmoid, sigmoid_sol]'], {}), "('sigmoid_f', [sigmoid, sigmoid_sol])\n", (3463, 3500), False, 'import pytest\n'), ((4022, 4082), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sigmoid_f"""', '[sigmoid, sigmoid_sol]'], {}), "('sigmoid_f', [sigmoid, sigmoid_sol])\n", (4045, 4082), False, 'import pytest\n'), ((989, 1017), 'numpy.array', 'np.array', (['[[1, 2], [-1, -2]]'], {}), '([[1, 2], [-1, -2]])\n', (997, 1017), True, 'import numpy as np\n'), ((1316, 1344), 'numpy.array', 'np.array', (['[[1, 2], [-1, -2]]'], {}), '([[1, 2], [-1, -2]])\n', (1324, 1344), True, 'import numpy as np\n'), ((1353, 1363), 'q2_sigmoid.sigmoid', 'sigmoid', (['x'], {}), '(x)\n', (1360, 1363), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((1372, 1387), 'q2_sigmoid.sigmoid_grad', 'sigmoid_grad', (['f'], {}), '(f)\n', (1384, 1387), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((1830, 1862), 'numpy.random.standard_normal', 'np.random.standard_normal', (['shape'], {}), '(shape)\n', (1855, 1862), True, 'import numpy as np\n'), ((1871, 1881), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (1878, 1881), True, 'import numpy as np\n'), ((2093, 2143), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(100.0)', 'size': 'count'}), '(loc=0.0, scale=100.0, size=count)\n', (2109, 2143), True, 'import numpy as np\n'), ((2334, 2384), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(100.0)', 'size': 'count'}), '(loc=0.0, scale=100.0, size=count)\n', (2350, 2384), True, 'import numpy as np\n'), ((2395, 2444), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.0)', 'high': '(10.0)', 'size': 'count'}), '(low=0.0, high=10.0, size=count)\n', (2412, 2444), True, 'import numpy as np\n'), ((2672, 2722), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(100.0)', 'size': 'count'}), '(loc=0.0, scale=100.0, size=count)\n', (2688, 2722), True, 'import numpy as np\n'), ((3117, 3150), 'numpy.random.normal', 
'np.random.normal', ([], {'size': '(dim_1, 1)'}), '(size=(dim_1, 1))\n', (3133, 3150), True, 'import numpy as np\n'), ((3168, 3179), 'q2_sigmoid.sigmoid', 'sigmoid', (['a1'], {}), '(a1)\n', (3175, 3179), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((3199, 3227), 'numpy.random.permutation', 'np.random.permutation', (['dim_1'], {}), '(dim_1)\n', (3220, 3227), True, 'import numpy as np\n'), ((3254, 3277), 'numpy.argsort', 'np.argsort', (['permutation'], {}), '(permutation)\n', (3264, 3277), True, 'import numpy as np\n'), ((3297, 3321), 'q2_sigmoid.sigmoid', 'sigmoid', (['a1[permutation]'], {}), '(a1[permutation])\n', (3304, 3321), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((3573, 3606), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(1, dim_1)'}), '(size=(1, dim_1))\n', (3589, 3606), True, 'import numpy as np\n'), ((3624, 3635), 'q2_sigmoid.sigmoid', 'sigmoid', (['a1'], {}), '(a1)\n', (3631, 3635), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((3655, 3683), 'numpy.random.permutation', 'np.random.permutation', (['dim_1'], {}), '(dim_1)\n', (3676, 3683), True, 'import numpy as np\n'), ((3710, 3733), 'numpy.argsort', 'np.argsort', (['permutation'], {}), '(permutation)\n', (3720, 3733), True, 'import numpy as np\n'), ((4147, 4205), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(20.0)', 'size': '(dim_1, dim_2)'}), '(loc=0.0, scale=20.0, size=(dim_1, dim_2))\n', (4163, 4205), True, 'import numpy as np\n'), ((4215, 4276), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(1e-09)', 'high': '(1e-05)', 'size': '(dim_1, dim_2)'}), '(low=1e-09, high=1e-05, size=(dim_1, dim_2))\n', (4232, 4276), True, 'import numpy as np\n'), ((4665, 4723), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(20.0)', 'size': '(dim_1, dim_2)'}), '(loc=0.0, scale=20.0, size=(dim_1, dim_2))\n', (4681, 4723), True, 'import numpy as np\n'), ((4936, 4994), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(20.0)', 'size': '(dim_1, dim_2)'}), '(loc=0.0, scale=20.0, size=(dim_1, dim_2))\n', (4952, 4994), True, 'import numpy as np\n'), ((5033, 5044), 'q2_sigmoid.sigmoid', 'sigmoid', (['a1'], {}), '(a1)\n', (5040, 5044), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((5060, 5080), 'q2_sigmoid_sol.sigmoid_sol', 'sigmoid_sol', (['a1_copy'], {}), '(a1_copy)\n', (5071, 5080), False, 'from q2_sigmoid_sol import sigmoid_sol, sigmoid_grad_sol\n'), ((5311, 5369), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(20.0)', 'size': '(dim_1, dim_2)'}), '(loc=0.0, scale=20.0, size=(dim_1, dim_2))\n', (5327, 5369), True, 'import numpy as np\n'), ((770, 783), 'numpy.abs', 'np.abs', (['(x - y)'], {}), '(x - y)\n', (776, 783), True, 'import numpy as np\n'), ((1063, 1125), 'numpy.array', 'np.array', (['[[0.73105858, 0.88079708], [0.26894142, 0.11920292]]'], {}), '([[0.73105858, 0.88079708], [0.26894142, 0.11920292]])\n', (1071, 1125), True, 'import numpy as np\n'), ((1412, 1474), 'numpy.array', 'np.array', (['[[0.19661193, 0.10499359], [0.19661193, 0.10499359]]'], {}), '([[0.19661193, 0.10499359], [0.19661193, 0.10499359]])\n', (1420, 1474), True, 'import numpy as np\n'), ((1724, 1747), 'numpy.random.randint', 'np.random.randint', (['(3)', '(8)'], {}), '(3, 8)\n', (1741, 1747), True, 'import numpy as np\n'), ((1904, 1914), 'q2_sigmoid.sigmoid', 'sigmoid', (['y'], {}), '(y)\n', (1911, 1914), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((2190, 2200), 
'q2_sigmoid.sigmoid', 'sigmoid', (['z'], {}), '(z)\n', (2197, 2200), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((2739, 2749), 'q2_sigmoid.sigmoid', 'sigmoid', (['z'], {}), '(z)\n', (2746, 2749), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((2775, 2785), 'q2_sigmoid.sigmoid', 'sigmoid', (['z'], {}), '(z)\n', (2782, 2785), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((4331, 4342), 'q2_sigmoid.sigmoid', 'sigmoid', (['ap'], {}), '(ap)\n', (4338, 4342), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((4345, 4356), 'q2_sigmoid.sigmoid', 'sigmoid', (['am'], {}), '(am)\n', (4352, 4356), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((4742, 4753), 'q2_sigmoid.sigmoid', 'sigmoid', (['a1'], {}), '(a1)\n', (4749, 4753), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((4755, 4770), 'q2_sigmoid_sol.sigmoid_sol', 'sigmoid_sol', (['a1'], {}), '(a1)\n', (4766, 4770), False, 'from q2_sigmoid_sol import sigmoid_sol, sigmoid_grad_sol\n'), ((5103, 5121), 'q2_sigmoid.sigmoid_grad', 'sigmoid_grad', (['s_a1'], {}), '(s_a1)\n', (5115, 5121), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((5123, 5149), 'q2_sigmoid_sol.sigmoid_grad_sol', 'sigmoid_grad_sol', (['s_sol_a1'], {}), '(s_sol_a1)\n', (5139, 5149), False, 'from q2_sigmoid_sol import sigmoid_sol, sigmoid_grad_sol\n'), ((5414, 5430), 'q2_sigmoid.sigmoid_grad', 'sigmoid_grad', (['a1'], {}), '(a1)\n', (5426, 5430), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((5432, 5457), 'q2_sigmoid_sol.sigmoid_grad_sol', 'sigmoid_grad_sol', (['a1_copy'], {}), '(a1_copy)\n', (5448, 5457), False, 'from q2_sigmoid_sol import sigmoid_sol, sigmoid_grad_sol\n'), ((1956, 1966), 'q2_sigmoid.sigmoid', 'sigmoid', (['y'], {}), '(y)\n', (1963, 1966), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((2178, 2188), 'q2_sigmoid.sigmoid', 'sigmoid', (['y'], {}), '(y)\n', (2185, 2188), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((2461, 2479), 'q2_sigmoid.sigmoid', 'sigmoid', (['(z + shift)'], {}), '(z + shift)\n', (2468, 2479), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((2482, 2492), 'q2_sigmoid.sigmoid', 'sigmoid', (['z'], {}), '(z)\n', (2489, 2492), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((2517, 2535), 'q2_sigmoid.sigmoid', 'sigmoid', (['(z - shift)'], {}), '(z - shift)\n', (2524, 2535), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((2538, 2548), 'q2_sigmoid.sigmoid', 'sigmoid', (['z'], {}), '(z)\n', (2545, 2548), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((804, 813), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (810, 813), True, 'import numpy as np\n'), ((816, 825), 'numpy.abs', 'np.abs', (['y'], {}), '(y)\n', (822, 825), True, 'import numpy as np\n'), ((4419, 4430), 'q2_sigmoid.sigmoid', 'sigmoid', (['a1'], {}), '(a1)\n', (4426, 4430), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n'), ((4491, 4502), 'q2_sigmoid.sigmoid', 'sigmoid', (['a1'], {}), '(a1)\n', (4498, 4502), False, 'from q2_sigmoid import sigmoid, sigmoid_grad\n')]
|
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions that generate and alter :ref:`image<images>` color.
"""
import functools
import logging
import numpy
import imagecat.color
import imagecat.color.brewer
import imagecat.data
import imagecat.operator.util
import imagecat.units
log = logging.getLogger(__name__)
def colormap(graph, name, inputs):
"""Convert single-channel layers to RGB layers using a colormap.
Parameters
----------
graph: :ref:`graph`, required
Graph that owns this task.
name: hashable object, required
Name of the task executing this function.
inputs: :ref:`named-inputs`, required
Inputs for this operator.
Named Inputs
------------
image: :class:`imagecat.data.Image`, required
Image with layer to be color mapped.
inlayer: :class:`str`, optional
`image` layer to be color mapped. Default: :any:`None`.
outlayer: :class:`str`, optional
Name of the output image layer. Default: ``"C"``.
mapping: Python callable, optional
Mapping function that accepts a shape `(rows, columns, 1)` array as input and
produces an RGB `(rows, columns, 3)` shaped array as output. If :any:`None`
(the default), a linear map with a Color Brewer 2 Blue-Red palette will
be used.
Returns
-------
image: :class:`imagecat.data.Image`
A copy of the input image with some layers mapped.
"""
inlayer = imagecat.operator.util.optional_input(name, inputs, "inlayer", default=None)
outlayer = imagecat.operator.util.optional_input(name, inputs, "outlayer", default="C")
layer_name, layer = imagecat.operator.util.require_layer(name, inputs, "image", layer=inlayer, depth=1)
mapping = imagecat.operator.util.optional_input(name, inputs, "mapping", default=None)
if mapping is None:
palette = imagecat.color.brewer.palette("BlueRed")
mapping = functools.partial(imagecat.color.linear_map, palette=palette)
data = mapping(layer.data[:,:,0])
output = imagecat.data.Image(layers={outlayer: imagecat.data.Layer(data=data, role=imagecat.data.Role.RGB)})
imagecat.operator.util.log_result(log, name, "colormap", output, inlayer=inlayer, outlayer=outlayer, mapping=mapping)
return output
def dot(graph, name, inputs):
"""Compute the dot product of a :class:`image.data.Layer` and a matrix.
This is most commonly used to convert an RGB layer to grayscale, but the
operator is capable of converting any depth :math:`M` layer to depth
:math:`N` using an :math:`M \\times N` matrix. The values in each output
channel will be a weighted sum of the input channels, using weights
stored in the corresponding matrix column.
Parameters
----------
graph: :ref:`graph`, required
Graph that owns this task.
name: hashable object, required
Name of the task executing this function.
inputs: :ref:`named-inputs`, required
Inputs for this operator.
Named Inputs
------------
image: :class:`imagecat.data.Image`, required
Image containing layer to be converted.
inlayer: :class:`str`, optional
Layer to be converted. Default: None.
outlayer: :class:`str`, optional
Output layer. Default: "Y".
outrole: :class:`imagecat.data.Role`, optional
Role for the new layer. Defaults to :class:`imagecat.data.role.LUMINANCE`.
matrix: :math:`M \\times N` :class:`numpy.ndarray` matrix, optional
Matrix controlling how much each input channel contributes to each output channel.
Defaults to an RGB-to-grayscale matrix. :math:`M` must match the depth of the
input layer, and :math:`N` must match the expected depth of the output role.
Returns
-------
image: :class:`imagecat.data.Image`
Image containing the new layer.
"""
inlayer = imagecat.operator.util.optional_input(name, inputs, "inlayer", default=None)
layer_name, layer = imagecat.operator.util.require_layer(name, inputs, "image", layer=inlayer)
outdtype = imagecat.operator.util.optional_input(name, inputs, "outdtype", type=numpy.dtype, default=numpy.float16)
outlayer = imagecat.operator.util.optional_input(name, inputs, "outlayer", type=str, default="Y")
outrole = imagecat.operator.util.optional_input(name, inputs, "outrole", type=imagecat.data.Role, default=imagecat.data.Role.LUMINANCE)
matrix = imagecat.operator.util.optional_input(name, inputs, "matrix", type=imagecat.operator.util.array(ndim=2), default=[[0.2125], [0.7154], [0.0721]])
data = numpy.dot(layer.data, matrix).astype(outdtype)
image = imagecat.data.Image(layers={outlayer: imagecat.data.Layer(data=data, role=outrole)})
imagecat.operator.util.log_result(log, name, "dot", image, inlayer=inlayer, outdtype=outdtype, outlayer=outlayer, outrole=outrole, matrix=matrix)
return image
def fill(graph, name, inputs):
"""Generate an :ref:`image<images>` with a single solid-color layer.
Parameters
----------
graph: :ref:`graph`, required
Graph that owns this task.
name: hashable object, required
Name of the task executing this function.
inputs: :ref:`named-inputs`, required
Inputs for this operator.
Named Inputs
------------
layer: :class:`str`, optional
New layer name. Default: `"C"`.
res: (width, height) tuple, optional
Resolution of the new image. Default: `(256, 256)`.
role: :class:`imagecat.data.Role`, optional
Semantic role of the new layer. Default: :class:`imagecat.data.Role.RGB`.
values: sequence of values, optional
Values for the new layer. The number of values must be appropriate for `role`. Default: [1, 1, 1].
Returns
-------
image: :class:`imagecat.data.Image`
New image with a single solid-color layer.
"""
layer = imagecat.operator.util.optional_input(name, inputs, "layer", type=str, default="C")
res = imagecat.operator.util.optional_input(name, inputs, "res", type=imagecat.operator.util.array(shape=(2,), dtype=int), default=[256, 256])
role = imagecat.operator.util.optional_input(name, inputs, "role", type=imagecat.data.Role, default=imagecat.data.Role.RGB)
values = imagecat.operator.util.optional_input(name, inputs, "values", type=numpy.array, default=[1, 1, 1])
data = numpy.full((res[1], res[0], len(values)), values, dtype=numpy.float16)
output = imagecat.data.Image(layers={layer: imagecat.data.Layer(data=data, role=role)})
imagecat.operator.util.log_result(log, name, "fill", output, layer=layer, role=role, res=res, values=values)
return output
|
[
"numpy.dot",
"functools.partial",
"logging.getLogger"
] |
[((767, 794), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (784, 794), False, 'import logging\n'), ((2413, 2474), 'functools.partial', 'functools.partial', (['imagecat.color.linear_map'], {'palette': 'palette'}), '(imagecat.color.linear_map, palette=palette)\n', (2430, 2474), False, 'import functools\n'), ((5080, 5109), 'numpy.dot', 'numpy.dot', (['layer.data', 'matrix'], {}), '(layer.data, matrix)\n', (5089, 5109), False, 'import numpy\n')]
|
"""Support for Xiaomi cameras."""
import logging
import json
import time
import locale
import base64
import requests
import re
import collections
from os import urandom
from functools import partial
from urllib.parse import urlencode
from datetime import datetime, timedelta
from homeassistant.const import * # noqa: F401
from homeassistant.core import HomeAssistant
from homeassistant.components.camera import (
DOMAIN as ENTITY_DOMAIN,
Camera,
SUPPORT_ON_OFF,
SUPPORT_STREAM,
STATE_RECORDING,
STATE_STREAMING,
)
from homeassistant.components.ffmpeg import async_get_image, DATA_FFMPEG
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.helpers.aiohttp_client import async_aiohttp_proxy_stream
from haffmpeg.camera import CameraMjpeg
from . import (
DOMAIN,
CONF_MODEL,
XIAOMI_CONFIG_SCHEMA as PLATFORM_SCHEMA, # noqa: F401
MiotToggleEntity,
BaseSubEntity,
MiotCloud,
MiCloudException,
async_setup_config_entry,
bind_services_to_entries,
)
from .core.miot_spec import (
MiotSpec,
MiotService,
)
from .switch import MiotSwitchSubEntity
_LOGGER = logging.getLogger(__name__)
DATA_KEY = f'{ENTITY_DOMAIN}.{DOMAIN}'
SCAN_INTERVAL = timedelta(seconds=60)
SERVICE_TO_METHOD = {}
async def async_setup_entry(hass, config_entry, async_add_entities):
await async_setup_config_entry(hass, config_entry, async_setup_platform, async_add_entities, ENTITY_DOMAIN)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
hass.data.setdefault(DATA_KEY, {})
hass.data[DOMAIN]['add_entities'][ENTITY_DOMAIN] = async_add_entities
config['hass'] = hass
model = str(config.get(CONF_MODEL) or '')
entities = []
if miot := config.get('miot_type'):
spec = await MiotSpec.async_from_type(hass, miot)
svs = spec.get_services(ENTITY_DOMAIN, 'camera_control', 'video_doorbell')
if not svs and spec.name in ['video_doorbell'] and spec.services:
# loock.cateye.v02
srv = spec.get_service('p2p_stream') or spec.first_service
entities.append(MiotCameraEntity(hass, config, srv))
for srv in svs:
entities.append(MiotCameraEntity(hass, config, srv))
for entity in entities:
hass.data[DOMAIN]['entities'][entity.unique_id] = entity
async_add_entities(entities)
bind_services_to_entries(hass, SERVICE_TO_METHOD)
class BaseCameraEntity(Camera):
_state_attrs: dict
_last_image = None
_last_url = None
_url_expiration = 0
_extra_arguments = None
def __init__(self, hass: HomeAssistant):
super().__init__()
self.access_tokens = collections.deque(self.access_tokens, 12 * 2)
self._manager = hass.data.get(DATA_FFMPEG)
# http://ffmpeg.org/ffmpeg-all.html
self._ffmpeg_options = ''
self._segment_iv_hex = urandom(16).hex()
self._segment_iv_b64 = base64.b64encode(bytes.fromhex(self._segment_iv_hex)).decode()
@property
def brand(self):
return self.device_info.get('manufacturer')
async def image_source(self, **kwargs):
raise NotImplementedError()
async def async_camera_image(self, width=None, height=None):
url = await self.image_source()
if url:
if '-i ' not in str(url):
url = f'-i "{url}"'
self._last_image = await async_get_image(
self.hass,
f'{self._ffmpeg_options or ""} {url}'.strip(),
extra_cmd=self._extra_arguments,
width=width,
height=height,
)
return self._last_image
async def handle_async_mjpeg_stream(self, request):
if not self.is_on:
_LOGGER.debug('%s: camera is off. %s', self.name, self._state_attrs)
return
url = await self.stream_source()
if not url:
_LOGGER.debug('%s: stream source is empty. %s', self.name, self._state_attrs)
return
if '-i ' not in str(url):
url = f'-i "{url}"'
stream = CameraMjpeg(self._manager.binary)
await stream.open_camera(
f'{self._ffmpeg_options or ""} {url}'.strip(),
extra_cmd=self._extra_arguments,
)
try:
stream_reader = await stream.get_reader()
return await async_aiohttp_proxy_stream(
self.hass,
request,
stream_reader,
self._manager.ffmpeg_stream_content_type,
timeout=60,
)
finally:
try:
await stream.close()
except BrokenPipeError:
_LOGGER.error('%s: Got BrokenPipeError when close stream: %s', self.name, url)
async def _async_log_stderr_stream(self, stderr_reader):
"""Log output from ffmpeg."""
while True:
line = await stderr_reader.readline()
if line == b'':
return
_LOGGER.info('%s: ffmpeg stderr: %s', self.name, line.rstrip())
class MiotCameraEntity(MiotToggleEntity, BaseCameraEntity):
_srv_stream = None
_act_start_stream = None
_act_stop_stream = None
_prop_stream_address = None
_prop_expiration_time = None
_prop_motion_tracking = None
_stream_refresh_unsub = None
_motion_entity = None
_motion_enable = None
_is_doorbell = None
_use_motion_stream = False
_sub_motion_stream = False
def __init__(self, hass: HomeAssistant, config: dict, miot_service: MiotService):
super().__init__(miot_service, config=config, logger=_LOGGER)
BaseCameraEntity.__init__(self, hass)
if self._prop_power:
self._supported_features |= SUPPORT_ON_OFF
if miot_service:
self._prop_motion_tracking = miot_service.get_property('motion_tracking')
self._is_doorbell = miot_service.name in ['video_doorbell']
async def async_added_to_hass(self):
await super().async_added_to_hass()
sls = ['camera_stream_for_google_home', 'camera_stream_for_amazon_alexa']
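        # Pick the first cloud stream service that offers a start action;
        # the 'use_rtsp_stream' option flips the preference order.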
if self.custom_config_bool('use_rtsp_stream'):
sls.reverse()
for s in sls:
if not self._miot_service:
break
srv = self._miot_service.spec.get_service(s)
if not srv:
continue
act = srv.get_action('start_hls_stream', 'start_rtsp_stream')
if act:
self._srv_stream = srv
self._act_start_stream = act
self._act_stop_stream = srv.get_action('stop_stream')
self._prop_stream_address = srv.get_property('stream_address')
self._prop_expiration_time = srv.get_property('expiration_time')
break
if self._prop_stream_address:
self._supported_features |= SUPPORT_STREAM
self._sub_motion_stream = True
elif self._miot_service.name in ['camera_control']:
if self.custom_config_bool('use_motion_stream'):
pass
elif self.custom_config_bool('sub_motion_stream'):
pass
else:
self._use_motion_stream = True
@property
def should_poll(self):
return True
@property
def state(self): # noqa
if self.is_recording:
return STATE_RECORDING
if self.is_streaming:
return STATE_STREAMING
return STATE_IDLE
async def async_update(self):
self._state_attrs.pop('motion_video_latest', None) # remove
await super().async_update()
if not self._available:
return
if self._prop_power:
add_switches = self._add_entities.get('switch')
pnm = self._prop_power.full_name
if pnm in self._subs:
self._subs[pnm].update()
elif add_switches:
self._subs[pnm] = MiotSwitchSubEntity(self, self._prop_power)
add_switches([self._subs[pnm]], update_before_add=True)
self._motion_enable = self.custom_config_bool('use_motion_stream', self._use_motion_stream)
add_cameras = self._add_entities.get(ENTITY_DOMAIN)
if not self._motion_entity \
and add_cameras \
and self.custom_config_bool('sub_motion_stream', self._sub_motion_stream):
self._motion_entity = MotionCameraEntity(self, self.hass)
self._subs['motion_event'] = self._motion_entity
add_cameras([self._motion_entity], update_before_add=True)
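        # Fetch the latest motion event from the Xiaomi cloud, limited to the
        # last 7 days (the API expects millisecond timestamps).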
adt = None
lag = locale.getdefaultlocale()[0]
stm = int(time.time() - 86400 * 7) * 1000
etm = int(time.time() * 1000 + 999)
if not self._motion_enable and not self._motion_entity:
pass
elif 'motion_video_latest' in self._state_attrs:
adt = {
'motion_video_updated': 1,
}
elif not (mic := self.xiaomi_cloud):
pass
elif self.custom_config_bool('use_alarm_playlist'):
api = mic.get_api_by_host('business.smartcamera.api.io.mi.com', 'miot/camera/app/v1/alarm/playlist/limit')
rqd = {
'did': self.miot_did,
'region': str(mic.default_server).upper(),
'language': lag,
'beginTime': stm,
'endTime': etm,
'limit': 2,
}
rdt = await mic.async_request_api(api, rqd, method='GET', crypt=True) or {}
rls = rdt.get('data', {}).get('playUnits') or []
if rls:
fst = rls[0] or {}
tim = fst.pop('createTime', 0) / 1000
adt = {
'motion_video_time': f'{datetime.fromtimestamp(tim)}',
'motion_video_type': ','.join(fst.get('tags') or []),
'motion_video_latest': fst,
}
else:
_LOGGER.warning('%s: camera alarm playlist is empty. %s', self.name, rdt)
else:
api = mic.get_api_by_host('business.smartcamera.api.io.mi.com', 'common/app/get/eventlist')
rqd = {
'did': self.miot_did,
'model': self._model,
'doorBell': self._miot_service.name in ['video_doorbell'],
'eventType': 'Default',
'needMerge': True,
'sortType': 'DESC',
'region': str(mic.default_server).upper(),
'language': lag,
'beginTime': stm,
'endTime': etm,
'limit': 2,
}
rdt = await mic.async_request_api(api, rqd, method='GET', crypt=True) or {}
rls = rdt.get('data', {}).get('thirdPartPlayUnits') or []
if rls:
fst = rls[0] or {}
tim = fst.pop('createTime', 0) / 1000
adt = {
'motion_video_time': f'{datetime.fromtimestamp(tim)}',
'motion_video_type': fst.get('eventType'),
'motion_video_latest': fst,
}
else:
_LOGGER.warning('%s: camera events is empty. %s', self.name, rdt)
if adt:
self._supported_features |= SUPPORT_STREAM
await self.async_update_attrs(adt)
if self._motion_enable:
await self.async_update_attrs(self.motion_event_attributes)
if self._motion_entity:
await self.hass.async_add_executor_job(self._motion_entity.update)
@property
def is_on(self):
if self._prop_power:
return self._state_attrs.get(self._prop_power.full_name) and True
return True
async def stream_source(self, **kwargs):
fun = self.get_stream_address
if self._motion_enable:
fun = self.get_motion_stream_address
idx = self.custom_config_integer('motion_stream_slice')
if idx is not None:
kwargs['index'] = idx
fun = self.get_motion_stream_slice_video
kwargs['crypto'] = True
return await self.hass.async_add_executor_job(partial(fun, **kwargs))
async def image_source(self, **kwargs):
if self._motion_enable:
kwargs['crypto'] = True
return await self.hass.async_add_executor_job(
partial(self.get_motion_image_address, **kwargs)
)
return await self.stream_source()
def get_stream_address(self, **kwargs):
now = time.time()
if now >= self._url_expiration:
self._last_url = None
_LOGGER.debug('%s: camera stream: %s expired: %s', self.name, self._last_url, self._url_expiration)
result = {}
if not self._act_start_stream:
self.update_attrs({
                'miot_error': 'Unsupported start hls/rtsp stream via miot-spec',
})
elif not self._last_url:
updater = 'lan'
try:
vav = self.custom_config_integer('video_attribute')
vap = self._srv_stream.get_property('video_attribute')
if vav is None and vap and vap.value_list:
vav = (vap.value_list.pop(0) or {}).get('value')
if self.xiaomi_cloud:
if self._act_stop_stream:
self.miot_action(
self._srv_stream.iid,
self._act_stop_stream.iid,
)
result = self.miot_action(
self._srv_stream.iid,
self._act_start_stream.iid,
[] if vav is None else [vav],
) or {}
updater = 'cloud'
if isinstance(result, dict):
_LOGGER.debug('%s: Get miot camera stream from %s: %s', self.name, updater, result)
else:
_LOGGER.warning('%s: Get miot camera stream error from %s: %s', self.name, updater, result)
result = {}
except MiCloudException as exc:
_LOGGER.error('%s: Get miot camera stream from %s failed: %s', self.name, updater, exc)
odt = self._act_start_stream.out_results(result.get('out')) or {
'stream_address': '',
}
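        # Refresh the URL 10s before the reported expiration, or assume a
        # lifetime of roughly 4.5 minutes when the device reports none.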
self._url_expiration = 0
if self._prop_expiration_time:
self._url_expiration = int(self._prop_expiration_time.from_dict(odt) or 0) / 1000
if self._url_expiration:
self._url_expiration -= 10
else:
self._url_expiration = now + 60 * 4.5
if self._prop_stream_address:
self._last_url = self._prop_stream_address.from_dict(odt)
self.async_write_ha_state()
self.async_check_stream_address(self._last_url)
if not kwargs.get('scheduled') or self.custom_config('keep_streaming'):
self._schedule_stream_refresh()
odt['expire_at'] = f'{datetime.fromtimestamp(self._url_expiration)}'
self.update_attrs(odt)
self._attr_is_streaming = self._last_url and True
if self._attr_is_streaming:
self.update_attrs({
'miot_error': None,
})
return self._last_url
def async_check_stream_address(self, url):
if not url or self.custom_config_bool('disable_check_stream'):
return False
res = requests.head(url)
if res.status_code > 200:
self.update_attrs({
'stream_http_status': res.status_code,
'stream_http_reason': res.reason,
})
_LOGGER.warning(
'%s: stream address status invalid: %s (%s)',
self.name,
res.status_code,
res.reason,
)
return False
return True
async def _handle_stream_refresh(self, now, *_):
self._stream_refresh_unsub = None
await self.stream_source(scheduled=True)
def _schedule_stream_refresh(self):
if self._stream_refresh_unsub is not None:
self._stream_refresh_unsub()
self._stream_refresh_unsub = async_track_point_in_utc_time(
self.hass,
self._handle_stream_refresh, # noqa
datetime.fromtimestamp(self._url_expiration),
)
@property
def motion_event_attributes(self):
return {
'stream_address': self.get_motion_stream_address(),
# 'video_address': self.get_motion_video_address(),
'image_address': self.get_motion_image_address(),
}
def get_motion_stream_address(self, **kwargs):
mic = self.xiaomi_cloud
if not mic:
_LOGGER.info('%s: camera does not have cloud.', self.name)
return None
mvd = self._state_attrs.get('motion_video_latest') or {}
fid = mvd.get('fileId')
if not fid:
_LOGGER.info('%s: camera does not have motion file in cloud.', self.name)
return None
pms = {
'did': str(self.miot_did),
'model': self.device_info.get('model'),
'fileId': fid,
            'isAlarm': bool(mvd.get('isAlarm')),
'videoCodec': 'H265',
}
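        # Build an RC4-signed m3u8 playlist URL for the latest motion clip.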
api = mic.get_api_by_host('business.smartcamera.api.io.mi.com', 'common/app/m3u8')
pms = mic.rc4_params('GET', api, {'data': mic.json_encode(pms)})
pms['yetAnotherServiceToken'] = mic.service_token
url = f'{api}?{urlencode(pms)}'
_LOGGER.debug('%s: Got stream url: %s', self.name, url)
return url
def get_motion_video_address(self, **kwargs):
mic = self.xiaomi_cloud
if not mic:
_LOGGER.info('%s: camera does not have cloud.', self.name)
return None
mvd = self._state_attrs.get('motion_video_latest') or {}
fid = mvd.get('fileId')
vid = mvd.get('videoStoreId')
if not fid or not vid:
_LOGGER.info('%s: camera does not have motion video in cloud.', self.name)
return None
dat = {
'did': str(self.miot_did),
'fileId': fid,
'stoId': vid,
'segmentIv': self._segment_iv_b64,
}
api = mic.get_api_by_host('processor.smartcamera.api.io.mi.com', 'miot/camera/app/v1/mp4')
pms = mic.rc4_params('GET', api, {'data': mic.json_encode(dat)})
pms['yetAnotherServiceToken'] = mic.service_token
url = f'{api}?{urlencode(pms)}'
_LOGGER.debug('%s: Got video url: %s', self.name, url)
if kwargs.get('debug'):
req = requests.get(url)
if float(req.headers.get('x-xiaomi-status-code', 200)) >= 400:
try:
signed_nonce = mic.signed_nonce(pms['_nonce'])
rdt = json.loads(MiotCloud.decrypt_data(signed_nonce, req.text).decode())
_LOGGER.info('%s: video stream content: %s', self.name, rdt)
except (TypeError, ValueError):
pass
if kwargs.get('crypto'):
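            # Return ffmpeg arguments that decrypt the segment via the crypto+
            # protocol, using a key derived from the account's ssecurity.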
key = base64.b64decode(mic.ssecurity).hex()
url = f'-decryption_key {key} -decryption_iv {self._segment_iv_hex} -i "crypto+{url}"'
return url
def get_motion_stream_slice_video(self, **kwargs):
url = self.get_motion_stream_address()
if not url:
_LOGGER.info('%s: camera does not have motion stream in cloud.', self.name)
return None
req = requests.get(url)
if float(req.headers.get('x-xiaomi-status-code', 200)) >= 400:
_LOGGER.warning('%s: camera motion stream with a failed http code: %s', self.name, req)
return url
aes_key = None
aes__iv = None
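        # Extract the AES-128 key URI and IV from the HLS playlist's key line,
        # then pick one media segment URL to decrypt and play back.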
mat = re.search(r'AES-128,\s*URI="?(https?://[^",]+)"?,\s*IV=(?:0x)?(\w+)', req.text)
if mat:
aes_key, aes__iv = mat.groups()
mat = re.findall(r'[\r\n](https?://[^\r\n]+)', req.text)
idx = kwargs.get('index', -1)
mp4 = mat.pop(idx) if mat else None
if mp4 and aes_key:
req = requests.get(aes_key)
key = req.content.hex()
mp4 = f'-decryption_key {key} -decryption_iv {aes__iv} -i "crypto+{mp4}"'
_LOGGER.debug('%s: Got video url: %s', self.name, mp4)
return mp4
def get_motion_image_address(self, **kwargs):
mic = self.xiaomi_cloud
if not mic:
_LOGGER.info('%s: camera does not have cloud.', self.name)
return None
mvd = self._state_attrs.get('motion_video_latest') or {}
fid = mvd.get('fileId')
iid = mvd.get('imgStoreId')
if not fid or not iid:
_LOGGER.info('%s: camera does not have motion image in cloud.', self.name)
return None
dat = {
'did': str(self.miot_did),
'fileId': fid,
'stoId': iid,
'segmentIv': self._segment_iv_b64,
}
api = mic.get_api_by_host('processor.smartcamera.api.io.mi.com', 'miot/camera/app/v1/img')
pms = mic.rc4_params('GET', api, {'data': mic.json_encode(dat)})
pms['yetAnotherServiceToken'] = mic.service_token
url = f'{api}?{urlencode(pms)}'
_LOGGER.debug('%s: Got image url: %s', self.name, url)
if kwargs.get('crypto'):
key = base64.b64decode(mic.ssecurity).hex()
url = f'-decryption_key {key} -decryption_iv {self._segment_iv_hex} -i "crypto+{url}"'
return url
@property
def motion_detection_enabled(self):
if self._prop_motion_tracking:
return self._prop_motion_tracking.from_dict(self._state_attrs)
return None
def enable_motion_detection(self):
if self._prop_motion_tracking:
return self.set_property(self._prop_motion_tracking, True)
return False
def disable_motion_detection(self):
if self._prop_motion_tracking:
return self.set_property(self._prop_motion_tracking, False)
return False
class MotionCameraEntity(BaseSubEntity, BaseCameraEntity):
def __init__(self, parent, hass: HomeAssistant, option=None):
super().__init__(parent, 'motion_event', option)
BaseCameraEntity.__init__(self, hass)
self._available = True
self._supported_features |= SUPPORT_STREAM
@property
def state(self):
if self.is_recording:
return STATE_RECORDING
if self.is_streaming:
return STATE_STREAMING
return STATE_IDLE
def update(self, data=None):
super().update(data)
        self._available = bool(self.parent_attributes.get('motion_video_latest'))
if not self._available:
return
self.update_attrs(self._parent.motion_event_attributes, update_parent=False)
async def stream_source(self, **kwargs):
kwargs['crypto'] = True
return await self.hass.async_add_executor_job(
partial(self._parent.get_motion_stream_address, **kwargs)
)
async def image_source(self, **kwargs):
kwargs['crypto'] = True
return await self.hass.async_add_executor_job(
partial(self._parent.get_motion_image_address, **kwargs)
)
|
[
"functools.partial",
"locale.getdefaultlocale",
"requests.head",
"urllib.parse.urlencode",
"logging.getLogger",
"time.time",
"homeassistant.helpers.aiohttp_client.async_aiohttp_proxy_stream",
"base64.b64decode",
"haffmpeg.camera.CameraMjpeg",
"datetime.timedelta",
"re.findall",
"requests.get",
"datetime.datetime.fromtimestamp",
"re.search",
"os.urandom",
"collections.deque"
] |
[((1157, 1184), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1174, 1184), False, 'import logging\n'), ((1240, 1261), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(60)'}), '(seconds=60)\n', (1249, 1261), False, 'from datetime import datetime, timedelta\n'), ((2707, 2752), 'collections.deque', 'collections.deque', (['self.access_tokens', '(12 * 2)'], {}), '(self.access_tokens, 12 * 2)\n', (2724, 2752), False, 'import collections\n'), ((4126, 4159), 'haffmpeg.camera.CameraMjpeg', 'CameraMjpeg', (['self._manager.binary'], {}), '(self._manager.binary)\n', (4137, 4159), False, 'from haffmpeg.camera import CameraMjpeg\n'), ((12665, 12676), 'time.time', 'time.time', ([], {}), '()\n', (12674, 12676), False, 'import time\n'), ((15683, 15701), 'requests.head', 'requests.head', (['url'], {}), '(url)\n', (15696, 15701), False, 'import requests\n'), ((19794, 19811), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (19806, 19811), False, 'import requests\n'), ((20066, 20152), 're.search', 're.search', (['"""AES-128,\\\\s*URI="?(https?://[^",]+)"?,\\\\s*IV=(?:0x)?(\\\\w+)"""', 'req.text'], {}), '(\'AES-128,\\\\s*URI="?(https?://[^",]+)"?,\\\\s*IV=(?:0x)?(\\\\w+)\', req\n .text)\n', (20075, 20152), False, 'import re\n'), ((20220, 20273), 're.findall', 're.findall', (['"""[\\\\r\\\\n](https?://[^\\\\r\\\\n]+)"""', 'req.text'], {}), "('[\\\\r\\\\n](https?://[^\\\\r\\\\n]+)', req.text)\n", (20230, 20273), False, 'import re\n'), ((8690, 8715), 'locale.getdefaultlocale', 'locale.getdefaultlocale', ([], {}), '()\n', (8713, 8715), False, 'import locale\n'), ((16558, 16602), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['self._url_expiration'], {}), '(self._url_expiration)\n', (16580, 16602), False, 'from datetime import datetime, timedelta\n'), ((18909, 18926), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (18921, 18926), False, 'import requests\n'), ((20399, 20420), 'requests.get', 'requests.get', (['aes_key'], {}), '(aes_key)\n', (20411, 20420), False, 'import requests\n'), ((2913, 2924), 'os.urandom', 'urandom', (['(16)'], {}), '(16)\n', (2920, 2924), False, 'from os import urandom\n'), ((4400, 4520), 'homeassistant.helpers.aiohttp_client.async_aiohttp_proxy_stream', 'async_aiohttp_proxy_stream', (['self.hass', 'request', 'stream_reader', 'self._manager.ffmpeg_stream_content_type'], {'timeout': '(60)'}), '(self.hass, request, stream_reader, self._manager\n .ffmpeg_stream_content_type, timeout=60)\n', (4426, 4520), False, 'from homeassistant.helpers.aiohttp_client import async_aiohttp_proxy_stream\n'), ((12289, 12311), 'functools.partial', 'partial', (['fun'], {}), '(fun, **kwargs)\n', (12296, 12311), False, 'from functools import partial\n'), ((17785, 17799), 'urllib.parse.urlencode', 'urlencode', (['pms'], {}), '(pms)\n', (17794, 17799), False, 'from urllib.parse import urlencode\n'), ((18778, 18792), 'urllib.parse.urlencode', 'urlencode', (['pms'], {}), '(pms)\n', (18787, 18792), False, 'from urllib.parse import urlencode\n'), ((21520, 21534), 'urllib.parse.urlencode', 'urlencode', (['pms'], {}), '(pms)\n', (21529, 21534), False, 'from urllib.parse import urlencode\n'), ((23273, 23330), 'functools.partial', 'partial', (['self._parent.get_motion_stream_address'], {}), '(self._parent.get_motion_stream_address, **kwargs)\n', (23280, 23330), False, 'from functools import partial\n'), ((23485, 23541), 'functools.partial', 'partial', (['self._parent.get_motion_image_address'], {}), '(self._parent.get_motion_image_address, **kwargs)\n', 
(23492, 23541), False, 'from functools import partial\n'), ((8737, 8748), 'time.time', 'time.time', ([], {}), '()\n', (8746, 8748), False, 'import time\n'), ((8787, 8798), 'time.time', 'time.time', ([], {}), '()\n', (8796, 8798), False, 'import time\n'), ((12501, 12549), 'functools.partial', 'partial', (['self.get_motion_image_address'], {}), '(self.get_motion_image_address, **kwargs)\n', (12508, 12549), False, 'from functools import partial\n'), ((19389, 19420), 'base64.b64decode', 'base64.b64decode', (['mic.ssecurity'], {}), '(mic.ssecurity)\n', (19405, 19420), False, 'import base64\n'), ((21652, 21683), 'base64.b64decode', 'base64.b64decode', (['mic.ssecurity'], {}), '(mic.ssecurity)\n', (21668, 21683), False, 'import base64\n'), ((15236, 15280), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['self._url_expiration'], {}), '(self._url_expiration)\n', (15258, 15280), False, 'from datetime import datetime, timedelta\n'), ((9853, 9880), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['tim'], {}), '(tim)\n', (9875, 9880), False, 'from datetime import datetime, timedelta\n'), ((11067, 11094), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['tim'], {}), '(tim)\n', (11089, 11094), False, 'from datetime import datetime, timedelta\n')]
|
import itertools
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from coremltools._deps import _HAS_KERAS2_TF
from coremltools.models import _MLMODEL_FULL_PRECISION, _MLMODEL_HALF_PRECISION
from coremltools.models.utils import _macos_version, _is_macos
if _HAS_KERAS2_TF:
import keras.backend
from keras.models import Sequential, Model
from keras.layers import (
Dense,
Activation,
Conv2D,
Conv1D,
Flatten,
BatchNormalization,
Conv2DTranspose,
SeparableConv2D,
)
from keras.layers import (
MaxPooling2D,
AveragePooling2D,
GlobalAveragePooling2D,
GlobalMaxPooling2D,
)
from keras.layers import (
MaxPooling1D,
AveragePooling1D,
GlobalAveragePooling1D,
GlobalMaxPooling1D,
)
from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout
from keras.layers import Add, Concatenate
from keras.layers import add, multiply, concatenate, dot, maximum, average
from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D
from keras.layers import ZeroPadding1D, UpSampling1D, Cropping1D
from keras.layers import SimpleRNN, LSTM, GRU
from keras.layers.core import SpatialDropout2D
from keras.layers.wrappers import Bidirectional, TimeDistributed
from distutils.version import StrictVersion as _StrictVersion
if keras.__version__ >= _StrictVersion("2.2.1"):
from keras.layers import DepthwiseConv2D, ReLU
elif keras.__version__ >= _StrictVersion("2.2.0"):
from keras.layers import DepthwiseConv2D
from keras_applications.mobilenet import relu6
else:
from keras.applications.mobilenet import DepthwiseConv2D, relu6
def _keras_transpose(x, is_sequence=False):
if len(x.shape) == 5:
# Keras input shape = [Batch, Seq, Height, Width, Channels]
x = np.transpose(x, [1, 0, 4, 2, 3])
if len(x.shape) == 4:
# Keras input shape = [Batch, Height, Width, Channels]
x = np.transpose(x, [0, 3, 1, 2])
return np.expand_dims(x, axis=0)
elif len(x.shape) == 3:
# Keras input shape = [Batch, (Sequence) Length, Channels]
return np.transpose(x, [1, 0, 2])
elif len(x.shape) == 2:
if is_sequence: # (N,S) --> (S,N,1,)
return x.reshape(x.shape[::-1] + (1,))
else: # (N,C) --> (N,C,1,1)
return x.reshape((1,) + x.shape) # Dense
elif len(x.shape) == 1:
if is_sequence: # (S) --> (S,N,1,1,1)
return x.reshape((x.shape[0], 1, 1))
else:
return x
else:
return x
def _get_coreml_model(
model,
input_names=["data"],
output_names=["output"],
input_name_shape_dict={},
model_precision=_MLMODEL_FULL_PRECISION,
use_float_arraytype=False,
):
"""
Get the coreml model from the Keras model.
"""
# Convert the model
from coremltools.converters import keras as keras_converter
model = keras_converter.convert(
model,
input_names,
output_names,
input_name_shape_dict=input_name_shape_dict,
model_precision=model_precision,
use_float_arraytype=use_float_arraytype,
)
return model
def _generate_data(input_shape, mode="random"):
"""
Generate some random data according to a shape.
"""
if mode == "zeros":
X = np.zeros(input_shape)
elif mode == "ones":
X = np.ones(input_shape)
elif mode == "linear":
        X = np.array(range(np.prod(input_shape))).reshape(input_shape)
elif mode == "random":
X = np.random.rand(*input_shape)
elif mode == "random_zero_mean":
X = np.random.rand(*input_shape) - 0.5
return X
@unittest.skipIf(not _HAS_KERAS2_TF, "Missing keras. Skipping tests.")
@pytest.mark.keras2
class KerasNumericCorrectnessTest(unittest.TestCase):
"""
Unit test class for testing the Keras converter.
"""
def runTest(self):
pass
def _get_coreml_model_params_and_test_input(
self, model, mode, one_dim_seq_flags, input_name_shape_dict={}
):
# Generate data
nb_inputs = len(model.inputs)
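        # Multi-input models get one named feature per input ("data_0", ...);
        # single-input models use the default "data" feature name.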
if nb_inputs > 1:
input_names = []
input_data = []
coreml_input = {}
for i in range(nb_inputs):
feature_name = "data_%s" % i
input_names.append(feature_name)
if feature_name in input_name_shape_dict:
input_shape = [
1 if a is None else a
for a in input_name_shape_dict[feature_name]
]
else:
input_shape = [1 if a is None else a for a in model.input_shape[i]]
X = _generate_data(input_shape, mode)
input_data.append(X)
if one_dim_seq_flags is None:
coreml_input[feature_name] = _keras_transpose(X).astype("f").copy()
else:
coreml_input[feature_name] = (
_keras_transpose(X, one_dim_seq_flags[i]).astype("f").copy()
)
else:
input_names = ["data"]
if "data" in input_name_shape_dict:
input_shape = [
1 if a is None else a for a in input_name_shape_dict["data"]
]
else:
input_shape = [1 if a is None else a for a in model.input_shape]
input_data = _generate_data(input_shape, mode)
if one_dim_seq_flags is None:
coreml_input = {"data": _keras_transpose(input_data).astype("f").copy()}
else:
coreml_input = {
"data": _keras_transpose(input_data, one_dim_seq_flags[0])
.astype("f")
.copy()
}
output_names = ["output" + str(i) for i in range(len(model.outputs))]
return input_names, output_names, input_data, coreml_input
def _test_model(
self,
model,
input_name_shape_dict={},
num_samples=1,
mode="random",
delta=1e-2,
model_dir=None,
transpose_keras_result=True,
one_dim_seq_flags=None,
model_precision=_MLMODEL_FULL_PRECISION,
):
# transpose_keras_result: if true, compare the transposed Keras result
# one_dim_seq_flags: a list of same length as the number of inputs in
# the model; if None, treat all 1D input (if any) as non-sequence
# if one_dim_seq_flags[i] is True, it means the ith input, with shape
# (X,) is in fact a sequence of length X.
# Get the CoreML model
use_tmp_folder = False
if model_dir is None:
use_tmp_folder = True
model_dir = tempfile.mkdtemp()
(
input_names,
output_names,
input_data,
coreml_input,
) = self._get_coreml_model_params_and_test_input(
model, mode, one_dim_seq_flags, input_name_shape_dict
)
coreml_model = _get_coreml_model(
model,
input_names,
output_names,
input_name_shape_dict,
model_precision=model_precision,
)
try:
if not (_is_macos() and _macos_version() >= (10, 13)):
return
# Assuming coreml model output names are in the same order as
# Keras output list, put predictions into a list, sorted by output
# name
coreml_preds = coreml_model.predict(coreml_input)
c_preds = [coreml_preds[name] for name in output_names]
# Get Keras predictions
keras_preds = model.predict(input_data)
k_preds = keras_preds if type(keras_preds) is list else [keras_preds]
# Compare each output blob
for idx, k_pred in enumerate(k_preds):
if transpose_keras_result:
kp = _keras_transpose(k_pred).flatten()
else:
kp = k_pred.flatten()
cp = c_preds[idx].flatten()
# Compare predictions
self.assertEqual(len(kp), len(cp))
for i in range(len(kp)):
max_den = max(1.0, kp[i], cp[i])
self.assertAlmostEqual(
kp[i] / max_den, cp[i] / max_den, delta=delta
)
finally:
# Cleanup files - models on disk no longer useful
if use_tmp_folder and os.path.exists(model_dir):
shutil.rmtree(model_dir)
@unittest.skipIf(not _HAS_KERAS2_TF, "Missing keras. Skipping tests.")
@pytest.mark.keras2
class KerasBasicNumericCorrectnessTest(KerasNumericCorrectnessTest):
def test_tiny_inner_product(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
# Define a model
model = Sequential()
model.add(Dense(2, input_shape=(2,)))
# Test all zeros
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model, mode="zeros", model_precision=model_precision)
# Test all ones
model.set_weights([np.ones(w.shape) for w in model.get_weights()])
self._test_model(model, mode="ones", model_precision=model_precision)
# Test random
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model, model_precision=model_precision)
def test_tiny_inner_product_half_precision(self):
self.test_tiny_inner_product(model_precision=_MLMODEL_HALF_PRECISION)
def test_inner_product_random(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
# Define a model
model = Sequential()
model.add(Dense(1000, input_shape=(100,)))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_inner_product_half_precision_random(self):
self.test_inner_product_random(model_precision=_MLMODEL_HALF_PRECISION)
def test_dense_softmax(self):
np.random.seed(1988)
# Define a model
model = Sequential()
model.add(Dense(32, input_shape=(32,), activation="softmax"))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_dense_elu(self):
np.random.seed(1988)
# Define a model
model = Sequential()
model.add(Dense(32, input_shape=(32,), activation="elu"))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_dense_selu(self):
np.random.seed(1988)
# Define a model
model = Sequential()
model.add(Dense(32, input_shape=(32,), activation="selu"))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_housenet_random(self):
np.random.seed(1988)
num_hidden = 2
num_features = 3
# Define a model
model = Sequential()
model.add(Dense(num_hidden, input_dim=num_features))
model.add(Activation("relu"))
model.add(Dense(1, input_dim=num_features))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_conv_ones(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
input_dim = 10
input_shape = (input_dim, input_dim, 1)
num_kernels, kernel_height, kernel_width = 3, 5, 5
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
# Set some random weights
model.set_weights([np.ones(w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_tiny_conv_ones_half_precision(self):
self.test_tiny_conv_ones(model_precision=_MLMODEL_HALF_PRECISION)
def test_tiny_conv_random(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
input_dim = 10
input_shape = (input_dim, input_dim, 1)
num_kernels, kernel_height, kernel_width = 3, 5, 5
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
@unittest.skipUnless(
_is_macos() and _macos_version() >= (10, 14), "Only supported on MacOS 10.14+"
)
def test_tiny_conv_random_input_shape_dict(
self, model_precision=_MLMODEL_FULL_PRECISION
):
np.random.seed(1988)
H, W, C = 10, 20, 5
input_shape = (None, H, W, C)
num_kernels, kernel_height, kernel_width = 3, 5, 5
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=(None, None, C),
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(
model,
input_name_shape_dict={"data": input_shape},
model_precision=model_precision,
)
def test_tiny_conv_random_half_precision(self):
self.test_tiny_conv_random(model_precision=_MLMODEL_HALF_PRECISION)
def test_tiny_conv_dilated(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
input_dim = 10
input_shape = (input_dim, input_dim, 1)
num_kernels, kernel_height, kernel_width = 3, 5, 5
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
dilation_rate=(2, 2),
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_tiny_conv_dilated_half_precision(self):
return self.test_tiny_conv_dilated(model_precision=_MLMODEL_HALF_PRECISION)
def test_tiny_conv_dilated_rect_random(
self, model_precision=_MLMODEL_FULL_PRECISION
):
np.random.seed(1988)
input_shape = (32, 20, 3)
num_kernels = 2
kernel_height = 3
kernel_width = 3
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
dilation_rate=(2, 2),
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_tiny_conv_dilated_rect_random_half_precision(self):
return self.test_tiny_conv_dilated_rect_random(
model_precision=_MLMODEL_HALF_PRECISION
)
def test_tiny_conv_pseudo_1d_x(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
input_dim = 2
input_length = 5
filter_length = 1 # 3
nb_filters = 1
# Define a model
model = Sequential()
model.add(
Conv2D(
nb_filters,
kernel_size=(1, filter_length),
input_shape=(1, input_length, input_dim),
padding="valid",
)
)
# Set some random weights
model.set_weights([np.ones(w.shape) for w in model.get_weights()])
self._test_model(model, mode="linear", model_precision=model_precision)
def test_tiny_conv_pseudo_1d_x_half_precision(self):
return self.test_tiny_conv_pseudo_1d_x(model_precision=_MLMODEL_HALF_PRECISION)
def test_tiny_conv1d_same_random(self):
np.random.seed(1988)
input_dim = 2
input_length = 10
filter_length = 3
nb_filters = 4
model = Sequential()
model.add(
Conv1D(
nb_filters,
kernel_size=filter_length,
padding="same",
input_shape=(input_length, input_dim),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_conv1d_same_random_input_shape_dict(self):
np.random.seed(1988)
input_dim = 2
input_length = 10
filter_length = 3
nb_filters = 4
model = Sequential()
model.add(
Conv1D(
nb_filters,
kernel_size=filter_length,
padding="same",
input_shape=(None, input_dim),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(
model, input_name_shape_dict={"data": (None, input_length, input_dim)}
)
def test_large_input_length_conv1d_same_random(
self, model_precision=_MLMODEL_FULL_PRECISION
):
np.random.seed(1988)
input_dim = 2
input_length = 80
filter_length = 3
nb_filters = 4
model = Sequential()
model.add(
Conv1D(
nb_filters,
kernel_size=filter_length,
padding="same",
input_shape=(input_length, input_dim),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_large_input_length_conv1d_same_random_half_precision(self):
return self.test_large_input_length_conv1d_same_random(
model_precision=_MLMODEL_HALF_PRECISION
)
def test_tiny_conv1d_valid_random(self):
np.random.seed(1988)
input_dim = 2
input_length = 10
filter_length = 3
nb_filters = 4
model = Sequential()
model.add(
Conv1D(
nb_filters,
kernel_size=filter_length,
padding="valid",
input_shape=(input_length, input_dim),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_conv1d_dilated_random(self):
np.random.seed(1988)
input_shape = (20, 1)
num_kernels = 2
filter_length = 3
# Define a model
model = Sequential()
model.add(
Conv1D(
num_kernels,
kernel_size=filter_length,
padding="valid",
input_shape=input_shape,
dilation_rate=3,
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_conv_rect_kernel_x(self):
np.random.seed(1988)
input_dim = 10
input_shape = (input_dim, input_dim, 1)
num_kernels = 3
kernel_height = 1
kernel_width = 5
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
padding="same",
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_conv_rect_kernel_y(self):
np.random.seed(1988)
input_dim = 10
input_shape = (input_dim, input_dim, 1)
num_kernels = 3
kernel_height = 5
kernel_width = 1
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
padding="valid",
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_conv_rect_kernel_xy(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
input_dim = 10
input_shape = (input_dim, input_dim, 1)
num_kernels = 3
kernel_height = 5
kernel_width = 3
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
padding="valid",
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_tiny_conv_rect_kernel_xy_half_precision(self):
self.test_tiny_conv_rect_kernel_xy(model_precision=_MLMODEL_HALF_PRECISION)
def test_flatten(self):
model = Sequential()
model.add(Flatten(input_shape=(2, 2, 2)))
self._test_model(model, mode="linear")
def test_conv_dense(self, model_precision=_MLMODEL_FULL_PRECISION):
input_shape = (48, 48, 3)
model = Sequential()
model.add(Conv2D(32, (3, 3), activation="relu", input_shape=input_shape))
model.add(Flatten())
model.add(Dense(10, activation="softmax"))
# Get the coreml model
self._test_model(model, model_precision=model_precision)
def test_conv_dense_half_precision(self):
return self.test_conv_dense(model_precision=_MLMODEL_HALF_PRECISION)
def test_conv_batchnorm_random(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
input_dim = 10
input_shape = (input_dim, input_dim, 3)
num_kernels = 3
kernel_height = 5
kernel_width = 5
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
model.add(BatchNormalization(epsilon=1e-5))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model, model_precision=model_precision)
def test_conv_batchnorm_random_half_precision(self):
return self.test_conv_batchnorm_random(model_precision=_MLMODEL_HALF_PRECISION)
def test_conv_batchnorm_no_gamma_no_beta(
self, model_precision=_MLMODEL_FULL_PRECISION
):
np.random.seed(1988)
input_dim = 10
input_shape = (input_dim, input_dim, 3)
num_kernels = 3
kernel_height = 5
kernel_width = 5
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
model.add(BatchNormalization(center=False, scale=False, epsilon=1e-5))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model, model_precision=model_precision)
def test_conv_batchnorm_no_gamma_no_beta_half_precision(self):
return self.test_conv_batchnorm_no_gamma_no_beta(
model_precision=_MLMODEL_HALF_PRECISION
)
def test_tiny_deconv_random(self):
# In Keras 2, deconvolution auto computes the output shape.
np.random.seed(1988)
input_dim = 13
input_shape = (input_dim, input_dim, 5)
num_kernels = 16
kernel_height = 3
kernel_width = 3
# Define a model
model = Sequential()
model.add(
Conv2DTranspose(
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
input_shape=input_shape,
padding="valid",
use_bias=False,
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_deconv_random_same_padding(self):
np.random.seed(1988)
input_dim = 14
input_shape = (input_dim, input_dim, 3)
num_kernels = 16
kernel_height = 3
kernel_width = 3
# Define a model
model = Sequential()
model.add(
Conv2DTranspose(
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
input_shape=input_shape,
padding="same",
strides=(2, 2),
use_bias=True,
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_depthwise_conv_same_pad(self):
np.random.seed(1988)
input_dim = 16
input_shape = (input_dim, input_dim, 3)
depth_multiplier = 1
kernel_height = 3
kernel_width = 3
# Define a model
model = Sequential()
model.add(
DepthwiseConv2D(
depth_multiplier=depth_multiplier,
kernel_size=(kernel_height, kernel_width),
input_shape=input_shape,
padding="same",
strides=(1, 1),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_depthwise_conv_valid_pad(self):
np.random.seed(1988)
input_dim = 16
input_shape = (input_dim, input_dim, 3)
depth_multiplier = 1
kernel_height = 3
kernel_width = 3
# Define a model
model = Sequential()
model.add(
DepthwiseConv2D(
depth_multiplier=depth_multiplier,
kernel_size=(kernel_height, kernel_width),
input_shape=input_shape,
padding="valid",
strides=(1, 1),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_depthwise_conv_same_pad_depth_multiplier(self):
np.random.seed(1988)
input_dim = 16
input_shape = (input_dim, input_dim, 3)
depth_multiplier = 4
kernel_height = 3
kernel_width = 3
# Define a model
model = Sequential()
model.add(
DepthwiseConv2D(
depth_multiplier=depth_multiplier,
kernel_size=(kernel_height, kernel_width),
input_shape=input_shape,
padding="same",
strides=(1, 1),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_depthwise_conv_valid_pad_depth_multiplier(self):
np.random.seed(1988)
input_dim = 16
input_shape = (input_dim, input_dim, 3)
depth_multiplier = 2
kernel_height = 3
kernel_width = 3
# Define a model
model = Sequential()
model.add(
DepthwiseConv2D(
depth_multiplier=depth_multiplier,
kernel_size=(kernel_height, kernel_width),
input_shape=input_shape,
padding="valid",
strides=(1, 1),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_separable_conv_valid(self):
np.random.seed(1988)
input_dim = 16
input_shape = (input_dim, input_dim, 3)
depth_multiplier = 1
kernel_height = 3
kernel_width = 3
num_kernels = 4
# Define a model
model = Sequential()
model.add(
SeparableConv2D(
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
padding="valid",
strides=(1, 1),
depth_multiplier=depth_multiplier,
input_shape=input_shape,
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_separable_conv_same_fancy(self):
np.random.seed(1988)
input_dim = 16
input_shape = (input_dim, input_dim, 3)
depth_multiplier = 1
kernel_height = 3
kernel_width = 3
num_kernels = 4
# Define a model
model = Sequential()
model.add(
SeparableConv2D(
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
padding="same",
strides=(2, 2),
activation="relu",
depth_multiplier=depth_multiplier,
input_shape=input_shape,
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_separable_conv_valid_depth_multiplier(self):
np.random.seed(1988)
input_dim = 16
input_shape = (input_dim, input_dim, 3)
depth_multiplier = 5
kernel_height = 3
kernel_width = 3
num_kernels = 40
# Define a model
model = Sequential()
model.add(
SeparableConv2D(
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
padding="valid",
strides=(1, 1),
depth_multiplier=depth_multiplier,
input_shape=input_shape,
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_separable_conv_same_fancy_depth_multiplier(
self, model_precision=_MLMODEL_FULL_PRECISION
):
np.random.seed(1988)
input_dim = 16
input_shape = (input_dim, input_dim, 3)
depth_multiplier = 2
kernel_height = 3
kernel_width = 3
num_kernels = 40
# Define a model
model = Sequential()
model.add(
SeparableConv2D(
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
padding="same",
strides=(2, 2),
activation="relu",
depth_multiplier=depth_multiplier,
input_shape=input_shape,
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_tiny_separable_conv_same_fancy_depth_multiplier_half_precision(self):
return self.test_tiny_separable_conv_same_fancy_depth_multiplier(
model_precision=_MLMODEL_HALF_PRECISION
)
def test_tiny_separable_conv_dilated(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
input_dim = 10
input_shape = (input_dim, input_dim, 1)
num_kernels, kernel_height, kernel_width = 3, 5, 5
# Define a model
model = Sequential()
model.add(
SeparableConv2D(
input_shape=input_shape,
dilation_rate=(2, 2),
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_tiny_separable_conv_dilated_half_precision(self):
return self.test_tiny_separable_conv_dilated(
model_precision=_MLMODEL_HALF_PRECISION
)
def test_tiny_separable_conv_dilated_rect_random(
self, model_precision=_MLMODEL_FULL_PRECISION
):
np.random.seed(1988)
input_shape = (32, 20, 3)
num_kernels = 2
kernel_height = 3
kernel_width = 3
# Define a model
model = Sequential()
model.add(
SeparableConv2D(
input_shape=input_shape,
dilation_rate=(2, 2),
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_tiny_separable_conv_dilated_rect_random_half_precision(self):
return self.test_tiny_separable_conv_dilated_rect_random(
model_precision=_MLMODEL_HALF_PRECISION
)
def test_max_pooling_no_overlap(self):
# no_overlap: pool_size = strides
model = Sequential()
model.add(
MaxPooling2D(
input_shape=(16, 16, 3), pool_size=(2, 2), strides=None, padding="valid"
)
)
self._test_model(model)
def test_max_pooling_overlap_multiple(self):
# input shape is multiple of pool_size, strides != pool_size
model = Sequential()
model.add(
MaxPooling2D(
input_shape=(18, 18, 3),
pool_size=(3, 3),
strides=(2, 2),
padding="valid",
)
)
self._test_model(model)
def test_max_pooling_overlap_odd(self):
model = Sequential()
model.add(
MaxPooling2D(
input_shape=(16, 16, 3),
pool_size=(3, 3),
strides=(2, 2),
padding="valid",
)
)
self._test_model(model)
def test_max_pooling_overlap_same(self):
model = Sequential()
model.add(
MaxPooling2D(
input_shape=(16, 16, 3),
pool_size=(3, 3),
strides=(2, 2),
padding="same",
)
)
self._test_model(model)
def test_global_max_pooling(self):
model = Sequential()
model.add(GlobalMaxPooling2D(input_shape=(16, 16, 3)))
self._test_model(model)
def test_average_pooling_no_overlap(self):
# no_overlap: pool_size = strides
model = Sequential()
model.add(
AveragePooling2D(
input_shape=(16, 16, 3), pool_size=(2, 2), strides=None, padding="valid"
)
)
self._test_model(model, delta=1e-2)
def test_average_pooling_inception_config_1(self):
# no_overlap: pool_size = strides
model = Sequential()
model.add(
AveragePooling2D(
input_shape=(16, 16, 3),
pool_size=(3, 3),
strides=(1, 1),
padding="same",
)
)
self._test_model(model, delta=1e-2)
def test_global_average_pooling(self):
model = Sequential()
model.add(GlobalAveragePooling2D(input_shape=(16, 16, 3)))
self._test_model(model)
def test_max_pooling_1d(self):
model = Sequential()
model.add(MaxPooling1D(input_shape=(16, 3), pool_size=4))
self._test_model(model)
def test_global_max_pooling_1d(self):
np.random.seed(1988)
input_dim = 2
input_length = 10
filter_length = 3
nb_filters = 4
model = Sequential()
model.add(
Conv1D(
nb_filters,
kernel_size=filter_length,
padding="same",
input_shape=(input_length, input_dim),
)
)
model.add(GlobalMaxPooling1D())
self._test_model(model)
def test_average_pooling_1d(self):
np.random.seed(1988)
input_dim = 2
input_length = 10
filter_length = 3
nb_filters = 4
model = Sequential()
model.add(
Conv1D(
nb_filters,
kernel_size=filter_length,
padding="same",
input_shape=(input_length, input_dim),
)
)
model.add(AveragePooling1D(pool_size=2))
self._test_model(model)
def test_global_average_pooling_1d(self):
np.random.seed(1988)
input_dim = 2
input_length = 10
filter_length = 3
nb_filters = 4
model = Sequential()
model.add(
Conv1D(
nb_filters,
kernel_size=filter_length,
padding="same",
input_shape=(input_length, input_dim),
)
)
model.add(GlobalAveragePooling1D())
self._test_model(model)
def test_tiny_conv_upsample_random(self):
np.random.seed(1988)
input_dim = 10
input_shape = (input_dim, input_dim, 1)
num_kernels = 3
kernel_height = 5
kernel_width = 5
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
model.add(UpSampling2D(size=2))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_conv_upsample_1d_random(self):
np.random.seed(1988)
input_dim = 2
input_length = 10
filter_length = 3
nb_filters = 4
model = Sequential()
model.add(
Conv1D(
nb_filters,
kernel_size=filter_length,
padding="same",
input_shape=(input_length, input_dim),
)
)
model.add(UpSampling1D(size=2))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_conv_crop_1d_random(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
input_dim = 2
input_length = 10
filter_length = 3
nb_filters = 4
model = Sequential()
model.add(
Conv1D(
nb_filters,
kernel_size=filter_length,
padding="same",
input_shape=(input_length, input_dim),
)
)
model.add(Cropping1D(cropping=2))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_tiny_conv_crop_1d_random_half_precision(self):
return self.test_tiny_conv_crop_1d_random(
model_precision=_MLMODEL_HALF_PRECISION
)
def test_tiny_conv_pad_1d_random(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
input_dim = 2
input_length = 10
filter_length = 3
nb_filters = 4
model = Sequential()
model.add(
Conv1D(
nb_filters,
kernel_size=filter_length,
padding="same",
input_shape=(input_length, input_dim),
)
)
model.add(ZeroPadding1D(padding=2))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_tiny_conv_pad_1d_random_half_precision(self):
return self.test_tiny_conv_pad_1d_random(
model_precision=_MLMODEL_HALF_PRECISION
)
def test_tiny_conv_causal_1d(self):
np.random.seed(1988)
model = Sequential()
model.add(Conv1D(1, 3, input_shape=(10, 1), use_bias=False, padding="causal"))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model)
def test_embedding(self, model_precision=_MLMODEL_FULL_PRECISION):
model = Sequential()
num_inputs = 10
num_outputs = 3
model.add(Embedding(num_inputs, num_outputs))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model, model_precision=model_precision)
def test_embedding_half_precision(self):
return self.test_embedding(model_precision=_MLMODEL_HALF_PRECISION)
def test_embedding_seq(self, model_precision=_MLMODEL_FULL_PRECISION):
model = Sequential()
num_inputs = 10
num_outputs = 3
model.add(Embedding(num_inputs, num_outputs, input_length=7))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(
model, one_dim_seq_flags=[True], model_precision=model_precision
)
def test_embedding_seq_half_precision(self):
return self.test_embedding_seq(model_precision=_MLMODEL_HALF_PRECISION)
def test_tiny_no_sequence_simple_rnn_random(self):
np.random.seed(1988)
input_dim = 10
input_length = 1
num_channels = 1
# Define a model
model = Sequential()
model.add(SimpleRNN(num_channels, input_shape=(input_length, input_dim)))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_sequence_simple_rnn_random(self):
np.random.seed(1988)
input_dim = 2
input_length = 4
num_channels = 3
# Define a model
model = Sequential()
model.add(SimpleRNN(num_channels, input_shape=(input_length, input_dim)))
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model)
def test_tiny_seq2seq_rnn_random(self):
np.random.seed(1988)
input_dim = 2
input_length = 4
num_channels = 3
# Define a model
model = Sequential()
model.add(
SimpleRNN(
num_channels,
input_shape=(input_length, input_dim),
return_sequences=True,
)
)
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model)
def test_rnn_seq(self):
np.random.seed(1988)
input_dim = 11
input_length = 5
# Define a model
model = Sequential()
model.add(
SimpleRNN(20, input_shape=(input_length, input_dim), return_sequences=False)
)
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model)
def test_rnn_seq_backwards(self):
np.random.seed(1988)
input_dim = 11
input_length = 5
# Define a model
model = Sequential()
model.add(
SimpleRNN(
20,
input_shape=(input_length, input_dim),
return_sequences=False,
go_backwards=True,
)
)
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model)
def test_medium_no_sequence_simple_rnn_random(self):
np.random.seed(1988)
input_dim = 10
input_length = 1
num_channels = 10
# Define a model
model = Sequential()
model.add(SimpleRNN(num_channels, input_shape=(input_length, input_dim)))
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model)
def test_tiny_no_sequence_lstm_zeros(self):
np.random.seed(1988)
input_dim = 1
input_length = 1
num_channels = 1
model = Sequential()
model.add(
LSTM(
num_channels,
input_shape=(input_length, input_dim),
implementation=1,
recurrent_activation="sigmoid",
)
)
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
self._test_model(model, mode="zeros")
def test_tiny_no_sequence_lstm_ones(self):
np.random.seed(1988)
input_dim = 1
input_length = 1
num_channels = 1
model = Sequential()
model.add(
LSTM(
num_channels,
input_shape=(input_length, input_dim),
implementation=1,
recurrent_activation="sigmoid",
)
)
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
self._test_model(model, mode="ones")
def test_small_no_sequence_lstm_zeros(self):
np.random.seed(1988)
input_dim = 10
input_length = 1
num_channels = 1
model = Sequential()
model.add(
LSTM(
num_channels,
input_shape=(input_length, input_dim),
implementation=2,
recurrent_activation="sigmoid",
)
)
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
self._test_model(model, mode="zeros")
def test_small_no_sequence_lstm_ones(self):
np.random.seed(1988)
input_dim = 10
input_length = 1
num_channels = 1
model = Sequential()
model.add(
LSTM(
num_channels,
input_shape=(input_length, input_dim),
implementation=2,
recurrent_activation="sigmoid",
)
)
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
self._test_model(model, mode="ones")
def test_lstm_seq(self):
np.random.seed(1988)
input_dim = 11
input_length = 5
model = Sequential()
model.add(
LSTM(20, input_shape=(input_length, input_dim), return_sequences=False)
)
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
self._test_model(model)
def test_lstm_seq_backwards(self):
np.random.seed(1988)
input_dim = 11
input_length = 5
model = Sequential()
model.add(
LSTM(
20,
input_shape=(input_length, input_dim),
return_sequences=False,
go_backwards=True,
)
)
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
self._test_model(model)
def test_medium_no_sequence_lstm_random(self):
np.random.seed(1988)
input_dim = 10
input_length = 1
num_channels = 10
# Define a model
model = Sequential()
model.add(
LSTM(
num_channels,
input_shape=(input_length, input_dim),
recurrent_activation="sigmoid",
)
)
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model)
def test_tiny_no_sequence_lstm_zeros_gpu(self):
np.random.seed(1988)
input_dim = 1
input_length = 1
num_channels = 1
# Define a model
model = Sequential()
model.add(
LSTM(
num_channels,
input_shape=(input_length, input_dim),
implementation=2,
recurrent_activation="sigmoid",
)
)
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model, mode="zeros")
def test_small_no_sequence_lstm_random(self):
np.random.seed(1988)
input_dim = 10
input_length = 1
num_channels = 1
# Define a model
model = Sequential()
model.add(
LSTM(
num_channels,
input_shape=(input_length, input_dim),
implementation=2,
recurrent_activation="sigmoid",
)
)
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model)
def test_tiny_no_sequence_gru_random(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
input_dim = 1
input_length = 1
num_channels = 1
num_samples = 1
# Define a model
model = Sequential()
model.add(
GRU(
num_channels,
input_shape=(input_length, input_dim),
recurrent_activation="sigmoid",
)
)
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_tiny_no_sequence_gru_random_half_precision(self):
return self.test_tiny_no_sequence_gru_random(
model_precision=_MLMODEL_HALF_PRECISION
)
def test_small_no_sequence_gru_random(self):
np.random.seed(1988)
input_dim = 10
input_length = 1
num_channels = 1
# Define a model
model = Sequential()
model.add(
GRU(
num_channels,
input_shape=(input_length, input_dim),
recurrent_activation="sigmoid",
)
)
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model)
def test_medium_no_sequence_gru_random(
self, model_precision=_MLMODEL_FULL_PRECISION
):
np.random.seed(1988)
input_dim = 10
input_length = 1
num_channels = 10
# Define a model
model = Sequential()
model.add(
GRU(
num_channels,
input_shape=(input_length, input_dim),
recurrent_activation="sigmoid",
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_medium_no_sequence_gru_random_half_precision(self):
return self.test_medium_no_sequence_gru_random(
model_precision=_MLMODEL_HALF_PRECISION
)
def test_gru_seq(self):
np.random.seed(1988)
input_dim = 11
input_length = 5
# Define a model
model = Sequential()
model.add(
GRU(20, input_shape=(input_length, input_dim), return_sequences=False)
)
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model)
def test_gru_seq_backwards(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
input_dim = 11
input_length = 5
# Define a model
model = Sequential()
model.add(
GRU(
20,
input_shape=(input_length, input_dim),
return_sequences=False,
go_backwards=True,
)
)
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_gru_seq_backwards_half_precision(self):
return self.test_gru_seq_backwards(model_precision=_MLMODEL_HALF_PRECISION)
def test_tiny_no_sequence_bidir_random(
self, model_precision=_MLMODEL_FULL_PRECISION
):
np.random.seed(1988)
input_dim = 1
input_length = 1
num_channels = 1
num_samples = 1
# Define a model
model = Sequential()
model.add(
Bidirectional(
LSTM(num_channels, implementation=1, recurrent_activation="sigmoid"),
input_shape=(input_length, input_dim),
)
)
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_tiny_no_sequence_bidir_random_half_precision(self):
return self.test_tiny_no_sequence_bidir_random(
model_precision=_MLMODEL_HALF_PRECISION
)
def test_tiny_no_sequence_bidir_random_gpu(
self, model_precision=_MLMODEL_FULL_PRECISION
):
np.random.seed(1988)
input_dim = 1
input_length = 1
num_channels = 1
num_samples = 1
# Define a model
model = Sequential()
model.add(
Bidirectional(
LSTM(num_channels, implementation=2, recurrent_activation="sigmoid"),
input_shape=(input_length, input_dim),
)
)
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_tiny_no_sequence_bidir_random_gpu_half_precision(self):
return self.test_tiny_no_sequence_bidir_random_gpu(
model_precision=_MLMODEL_HALF_PRECISION
)
def test_small_no_sequence_bidir_random(self):
np.random.seed(1988)
input_dim = 10
input_length = 1
num_channels = 1
# Define a model
model = Sequential()
model.add(
Bidirectional(
LSTM(num_channels, implementation=2, recurrent_activation="sigmoid"),
input_shape=(input_length, input_dim),
)
)
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model)
def test_medium_no_sequence_bidir_random(self):
np.random.seed(1988)
input_dim = 10
input_length = 1
num_channels = 10
# Define a model
model = Sequential()
model.add(
Bidirectional(
LSTM(num_channels, implementation=2, recurrent_activation="sigmoid"),
input_shape=(input_length, input_dim),
)
)
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model)
def test_medium_bidir_random_return_seq_false(self):
np.random.seed(1988)
input_dim = 7
input_length = 5
num_channels = 10
# Define a model
model = Sequential()
model.add(
Bidirectional(
LSTM(
num_channels,
return_sequences=False,
implementation=2,
recurrent_activation="sigmoid",
),
input_shape=(input_length, input_dim),
)
)
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model)
def test_medium_bidir_random_return_seq_true(self):
np.random.seed(1988)
input_dim = 7
input_length = 5
num_channels = 10
# Define a model
model = Sequential()
model.add(
Bidirectional(
LSTM(
num_channels,
return_sequences=True,
implementation=2,
recurrent_activation="sigmoid",
),
input_shape=(input_length, input_dim),
)
)
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model)
def test_bilstm_merge_modes(self):
# issue 157
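# Build the same bidirectional LSTM model once for every supported merge mode
# and check that each variant converts and predicts correctly.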
def get_model(input_dim, fc_size, rnn_size, output_dim, merge_mode):
input_data = Input(name="the_input", shape=(None, input_dim))
x = TimeDistributed(Dense(fc_size, name="fc1", activation="relu",))(
input_data
)
x = Bidirectional(
LSTM(
rnn_size,
return_sequences=True,
activation="relu",
kernel_initializer="he_normal",
),
merge_mode=merge_mode,
)(x)
y_pred = TimeDistributed(
Dense(output_dim, name="y_pred", activation="softmax")
)(x)
model = Model([input_data], [y_pred])
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
return model
input_dim = 26
fc_size = 512
rnn_size = 512
output_dim = 29
for merge_mode in ["concat", "sum", "mul", "ave"]:
model = get_model(input_dim, fc_size, rnn_size, output_dim, merge_mode)
self._test_model(model)
def test_tiny_conv_elu_random(self):
np.random.seed(1988)
# Define a model
from keras.layers.advanced_activations import ELU
model = Sequential()
model.add(Conv2D(input_shape=(10, 10, 3), filters=3, kernel_size=(5, 5)))
model.add(ELU(alpha=0.8))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model)
def test_tiny_conv_prelu_random(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
# Define a model
from keras.layers.advanced_activations import PReLU
model = Sequential()
model.add(
Conv2D(
input_shape=(10, 10, 3), filters=3, kernel_size=(5, 5), padding="same"
)
)
model.add(PReLU(shared_axes=[1, 2]))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model, model_precision=model_precision)
def test_tiny_conv_prelu_random_half_precision(self):
return self.test_tiny_conv_prelu_random(model_precision=_MLMODEL_HALF_PRECISION)
def test_tiny_conv_leaky_relu_random(self):
np.random.seed(1988)
# Define a model
from keras.layers.advanced_activations import LeakyReLU
model = Sequential()
model.add(
Conv2D(
input_shape=(10, 10, 3), filters=3, kernel_size=(5, 5), padding="same"
)
)
model.add(LeakyReLU(alpha=0.3))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model)
def test_tiny_conv_thresholded_relu_random(self):
np.random.seed(1988)
# Define a model
from keras.layers.advanced_activations import ThresholdedReLU
model = Sequential()
model.add(
Conv2D(
input_shape=(10, 10, 3), filters=3, kernel_size=(5, 5), padding="same"
)
)
model.add(ThresholdedReLU(theta=0.8))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model)
def test_tiny_concat_random(self):
np.random.seed(1988)
input_dim = 10
num_channels = 6
# Define a model
input_tensor = Input(shape=(input_dim,))
x1 = Dense(num_channels)(input_tensor)
x2 = Dense(num_channels)(x1)
x3 = Dense(num_channels)(x1)
x4 = concatenate([x2, x3])
x5 = Dense(num_channels)(x4)
model = Model(inputs=[input_tensor], outputs=[x5])
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model)
def test_tiny_concat_seq_random(self):
np.random.seed(1988)
max_features = 10
embedding_dims = 4
seq_len = 5
num_channels = 6
# Define a model
input_tensor = Input(shape=(seq_len,))
x1 = Embedding(max_features, embedding_dims)(input_tensor)
x2 = Embedding(max_features, embedding_dims)(input_tensor)
x3 = concatenate([x1, x2], axis=1)
model = Model(inputs=[input_tensor], outputs=[x3])
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model, one_dim_seq_flags=[True])
def test_lstm_concat_dense_random(self):
np.random.seed(1988)
vocab_size = 1250
seq_length = 5
units = 32
# Define a model
input = Input(shape=(seq_length,))
pos = Input(shape=(seq_length, 1))
embedding = Embedding(vocab_size, 50, input_length=seq_length)(input)
concat = Concatenate(axis=2)([embedding, pos])
model = LSTM(units, return_sequences=True, stateful=False)(concat)
model = LSTM(units, return_sequences=False)(model)
model = Dense(100, activation="relu")(model)
model = Dense(vocab_size, activation="softmax")(model)
model = Model(inputs=[input, pos], outputs=model)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model, one_dim_seq_flags=[True, True])
def test_tiny_add_random(self):
np.random.seed(1988)
input_dim = 10
num_channels = 6
# Define a model
input_tensor = Input(shape=(input_dim,))
x1 = Dense(num_channels)(input_tensor)
x2 = Dense(num_channels)(x1)
x3 = Dense(num_channels)(x1)
x4 = add([x2, x3])
x5 = Dense(num_channels)(x4)
model = Model(inputs=[input_tensor], outputs=[x5])
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model)
def test_tiny_mul_random(self):
np.random.seed(1988)
input_dim = 10
num_channels = 6
# Define a model
input_tensor = Input(shape=(input_dim,))
x1 = Dense(num_channels)(input_tensor)
x2 = Dense(num_channels)(x1)
x3 = Dense(num_channels)(x1)
x4 = multiply([x2, x3])
x5 = Dense(num_channels)(x4)
model = Model(inputs=[input_tensor], outputs=[x5])
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model)
def test_tiny_cos_random(self):
np.random.seed(1988)
input_dim = 10
num_channels = 6
# Define a model
input_tensor = Input(shape=(input_dim,))
x1 = Dense(num_channels)(input_tensor)
x2 = Dense(num_channels)(x1)
x3 = Dense(num_channels)(x1)
x4 = dot([x2, x3], axes=-1, normalize=True)
x5 = Dense(num_channels)(x4)
model = Model(inputs=[input_tensor], outputs=[x5])
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model)
def test_zeropad_simple(self):
input_shape = (48, 48, 3)
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=input_shape))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model)
def test_zeropad_fancy(self):
input_shape = (48, 48, 3)
model = Sequential()
model.add(ZeroPadding2D(((2, 5), (3, 4)), input_shape=input_shape))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model)
def test_crop_simple(self):
input_shape = (48, 48, 3)
model = Sequential()
model.add(Cropping2D(cropping=((2, 5), (2, 5)), input_shape=input_shape))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model)
def test_tiny_permute(self):
# When the input blob is a 3D array (D1, D2, D3), Keras assumes the axes mean
# (D1=H, D2=W, D3=C), while CoreML assumes (D1=C, D2=H, D3=W)
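# e.g. a Keras blob of shape (4, 3, 2) is read as (H=4, W=3, C=2), which CoreML lays out as (C=2, H=4, W=3)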
import itertools
for permute_order in list(itertools.permutations([1, 2, 3])):
model = Sequential()
model.add(Permute(permute_order, input_shape=(4, 3, 2)))
self._test_model(model, transpose_keras_result=True)
def test_reshape_3d(self):
model = Sequential()
model.add(Reshape((10, 1, 6), input_shape=(5, 4, 3)))
self._test_model(model, mode="linear")
def test_tiny_conv_dense_random(self):
np.random.seed(1988)
num_samples = 1
input_dim = 8
input_shape = (input_dim, input_dim, 3)
num_kernels = 2
kernel_height = 5
kernel_width = 5
hidden_dim = 4
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(hidden_dim))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model)
def test_tiny_conv_dropout_random(self):
np.random.seed(1988)
num_samples = 1
input_dim = 8
input_shape = (input_dim, input_dim, 3)
num_kernels = 2
kernel_height = 5
kernel_width = 5
hidden_dim = 4
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
model.add(SpatialDropout2D(0.5))
model.add(Flatten())
model.add(Dense(hidden_dim))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model)
def test_tiny_dense_tanh_fused_random(self):
np.random.seed(1988)
num_samples = 1
input_dim = 3
hidden_dim = 4
# Define a model
model = Sequential()
model.add(Dense(hidden_dim, input_shape=(input_dim,), activation="tanh"))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model)
def test_tiny_conv_relu_fused_random(self):
np.random.seed(1988)
num_samples = 1
input_dim = 8
input_shape = (input_dim, input_dim, 3)
num_kernels = 2
kernel_height = 5
kernel_width = 5
hidden_dim = 4
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
activation="relu",
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model)
def test_tiny_time_distributed(self):
# as the first layer in a model
model = Sequential()
model.add(TimeDistributed(Dense(8), input_shape=(10, 16)))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model)
def test_tiny_sequence_lstm(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
input_dim = 1
input_length = 2
num_channels = 1
# Define a model
model = Sequential()
model.add(
LSTM(
num_channels,
input_shape=(input_length, input_dim),
implementation=1,
recurrent_activation="sigmoid",
)
)
# Set some random weights
model.set_weights(
[(np.random.rand(*w.shape) - 0.5) * 0.2 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model, delta=1e-4, model_precision=model_precision)
def test_tiny_sequence_lstm_half_precision(self):
return self.test_tiny_sequence_lstm(model_precision=_MLMODEL_HALF_PRECISION)
def test_tiny_spatial_bn(self):
np.random.seed(1988)
x_in = Input(shape=(7, 7, 2))
x = ZeroPadding2D(padding=(1, 1))(x_in)
x = BatchNormalization(axis=2)(x)
model = Model(x_in, x)
self._test_model(model, delta=1e-2)
def test_embedding_fixed_length(self):
sequence_length = 5
vocab_size = 10
embed_channels = 4
dense_units = sequence_length * embed_channels
model = Sequential()
model.add(Embedding(vocab_size, embed_channels, input_length=sequence_length))
model.add(Flatten())
model.add(Dense(dense_units))
model.add(Dense(20))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model, one_dim_seq_flags=[True])
def test_conv1d_flatten(self, delta=1e-2):
model = Sequential()
model.add(AveragePooling1D(2, input_shape=(64, 9)))
model.add(Conv1D(16, 1, padding="same", activation="relu", use_bias=False))
model.add(MaxPooling1D(2))
model.add(Flatten())
model.add(Dense(units=7, activation="softmax", use_bias=False))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model, delta=delta)
def test_dense_fused_act_in_td(self):
np.random.seed(1988)
x_in = Input(shape=(10, 2))
x = TimeDistributed(Dense(6, activation="softmax"))(x_in)
model = Model(inputs=[x_in], outputs=[x])
self._test_model(model, delta=1e-4)
def test_conv_batch_1d(self):
np.random.seed(1988)
vocabulary_size = 4
embedding_dimension = 6
input_length = 10
model = Sequential()
model.add(
Embedding(
vocabulary_size,
embedding_dimension,
input_length=input_length,
trainable=True,
)
)
model.add(Conv1D(5, 2))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(MaxPooling1D(2))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model, one_dim_seq_flags=[True])
def test_lstm_td(self):
np.random.seed(1988)
input_dim = 2
input_length = 4
num_channels = 3
# Define a model
model = Sequential()
model.add(
SimpleRNN(
num_channels,
return_sequences=True,
input_shape=(input_length, input_dim),
)
)
model.add(TimeDistributed(Dense(5)))
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model)
# Making sure that giant channel sizes get handled correctly
def test_large_channel_gpu(self):
input_shape = (20, 20, 3)
num_channels = 2049
kernel_size = 3
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
filters=num_channels,
kernel_size=(kernel_size, kernel_size),
)
)
model.set_weights(
[(np.random.rand(*w.shape) - 0.5) * 0.2 for w in model.get_weights()]
)
self._test_model(model, delta=1e-2)
@pytest.mark.xfail(raises=Exception)
def test_large_batch_gpu(self):
batch_size = 2049
num_channels = 4
kernel_size = 3
model = Sequential()
model.add(
TimeDistributed(Dense(num_channels), input_shape=(batch_size, kernel_size))
)
model.set_weights(
[(np.random.rand(*w.shape) - 0.5) * 0.2 for w in model.get_weights()]
)
self._test_model(model, delta=1e-2)
@unittest.skipIf(not _HAS_KERAS2_TF, "Missing keras. Skipping tests.")
@pytest.mark.keras2
class KerasTopologyCorrectnessTest(KerasNumericCorrectnessTest):
def test_dangling_merge_left(self):
x1 = Input(shape=(4,), name="input1")
x2 = Input(shape=(5,), name="input2")
y1 = Dense(6, name="dense")(x2)
z = concatenate([x1, y1])
model = Model(inputs=[x1, x2], outputs=[z])
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model)
def test_dangling_merge_right(self):
x1 = Input(shape=(4,), name="input1")
x2 = Input(shape=(5,), name="input2")
y1 = Dense(6, name="dense")(x2)
z = concatenate([y1, x1])
model = Model(inputs=[x1, x2], outputs=[z])
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model)
def test_shared_vision(self):
digit_input = Input(shape=(27, 27, 1))
x = Conv2D(64, (3, 3))(digit_input)
x = Conv2D(64, (3, 3))(x)
out = Flatten()(x)
vision_model = Model(inputs=[digit_input], outputs=[out])
# then define the tell-digits-apart model
digit_a = Input(shape=(27, 27, 1))
digit_b = Input(shape=(27, 27, 1))
# the vision model will be shared, weights and all
out_a = vision_model(digit_a)
out_b = vision_model(digit_b)
concatenated = concatenate([out_a, out_b])
out = Dense(1, activation="sigmoid")(concatenated)
model = Model(inputs=[digit_a, digit_b], outputs=out)
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model)
def test_tiny_weight_sharing(self):
# - Dense1 -----------
# x - | |- Merge
# - Dense1 - Dense2 --
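# The same Dense layer instance produces both y1 and y2, so its weights are
# shared between the two branches before the final merge.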
x = Input(shape=(3,))
dense = Dense(4)
y1 = dense(x)
y2 = dense(x)
y3 = Dense(4)(y2)
z = concatenate([y1, y3])
model = Model(inputs=[x], outputs=[z])
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model, mode="random", delta=1e-2)
def test_tiny_multiple_outputs(self):
x = Input(shape=(3,))
y1 = Dense(4)(x)
y2 = Dense(5)(x)
model = Model([x], [y1, y2])
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model, mode="random", delta=1e-2)
def test_intermediate_outputs_dense(self):
x = Input(shape=(3,))
y = Dense(4, name="intermediate_dense_y")(x)
z = Dense(5, name="intermediate_dense_z")(y)
model = Model([x], [y, z])
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model, mode="random", delta=1e-2)
def test_intermediate_outputs_conv2d(self):
x = Input(shape=(8, 8, 3))
y = Conv2D(4, (3, 3), name="intermediate_conv2d_1")(x)
z = Conv2D(5, (3, 3), name="intermediate_conv2d_2")(y)
model = Model([x], [y, z])
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model, mode="random", delta=1e-2)
def test_intermediate_outputs_conv2d_fused_act(self):
x = Input(shape=(8, 8, 3))
y = Conv2D(4, (3, 3), name="intermediate_conv2d_1_fused", activation="relu")(x)
z = Conv2D(5, (3, 3), name="intermediate_conv2d_2_fused", activation="relu")(y)
model = Model([x], [y, z])
model.set_weights([np.random.rand(*w.shape) - 0.5 for w in model.get_weights()])
self._test_model(model, mode="random", delta=1e-2)
def test_intermediate_outputs_conv1d(self):
x = Input(shape=(10, 3))
y = Conv1D(4, 3, name="intermediate_conv1d_1")(x)
z = Conv1D(5, 3, name="intermediate_conv1d_2")(y)
model = Model([x], [y, z])
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model, mode="random", delta=1e-2)
def test_intermediate_outputs_conv1d_fused_act(self):
x = Input(shape=(10, 3))
y = Conv1D(4, 3, name="intermediate_conv1d_1_fused", activation="relu")(x)
z = Conv1D(5, 3, name="intermediate_conv1d_2_fused", activation="relu")(y)
model = Model([x], [y, z])
model.set_weights([np.random.rand(*w.shape) - 0.5 for w in model.get_weights()])
self._test_model(model, mode="random", delta=1e-2)
def test_intermediate_rcnn_1d(self):
x_in = Input(shape=(10, 2))
# Conv block 1
x = Conv1D(3, 3, padding="same", name="interm_rcnn_conv1")(x_in)
x = BatchNormalization(axis=-1, name="interm_rcnn_bn1")(x)
x = Activation("elu")(x)
x = MaxPooling1D(pool_size=2, name="interm_rcnn_pool1")(x)
out1 = x # out1.shape = (5,3)
x = GRU(6, name="gru1")(x)
out2 = x
model = Model(x_in, [out1, out2])
# model = Model(x_in, [out2])
self._test_model(model, mode="random_zero_mean", delta=1e-2)
def test_tiny_mobilenet_arch(self, model_precision=_MLMODEL_FULL_PRECISION):
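# ReLU6 (ReLU clipped at 6) exists as a dedicated layer only in newer Keras
# releases; older versions fall back to the relu6 activation function.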
def ReLU6(x, name):
if keras.__version__ >= _StrictVersion("2.2.1"):
return ReLU(6.0, name=name)(x)
else:
return Activation(relu6, name=name)(x)
img_input = Input(shape=(32, 32, 3))
x = Conv2D(
4, (3, 3), padding="same", use_bias=False, strides=(2, 2), name="conv1"
)(img_input)
x = BatchNormalization(axis=-1, name="conv1_bn")(x)
x = ReLU6(x, name="conv1_relu")
x = DepthwiseConv2D(
(3, 3),
padding="same",
depth_multiplier=1,
strides=(1, 1),
use_bias=False,
name="conv_dw_1",
)(x)
x = BatchNormalization(axis=-1, name="conv_dw_1_bn")(x)
x = ReLU6(x, name="conv_dw_1_relu")
x = Conv2D(
8, (1, 1), padding="same", use_bias=False, strides=(1, 1), name="conv_pw_1"
)(x)
x = BatchNormalization(axis=-1, name="conv_pw_1_bn")(x)
x = ReLU6(x, name="conv_pw_1_relu")
x = DepthwiseConv2D(
(3, 3),
padding="same",
depth_multiplier=1,
strides=(2, 2),
use_bias=False,
name="conv_dw_2",
)(x)
x = BatchNormalization(axis=-1, name="conv_dw_2_bn")(x)
x = ReLU6(x, name="conv_dw_2_relu")
x = Conv2D(
8, (1, 1), padding="same", use_bias=False, strides=(2, 2), name="conv_pw_2"
)(x)
x = BatchNormalization(axis=-1, name="conv_pw_2_bn")(x)
x = ReLU6(x, name="conv_pw_2_relu")
model = Model(inputs=[img_input], outputs=[x])
self._test_model(model, delta=1e-2, model_precision=model_precision)
def test_tiny_mobilenet_arch_half_precision(self):
self.test_tiny_mobilenet_arch(model_precision=_MLMODEL_HALF_PRECISION)
def test_tiny_xception(self, model_precision=_MLMODEL_FULL_PRECISION):
img_input = Input(shape=(32, 32, 3))
x = Conv2D(2, (3, 3), strides=(2, 2), use_bias=False, name="block1_conv1")(
img_input
)
x = BatchNormalization(name="block1_conv1_bn")(x)
x = Activation("relu", name="block1_conv1_act")(x)
x = Conv2D(4, (3, 3), use_bias=False, name="block1_conv2")(x)
x = BatchNormalization(name="block1_conv2_bn")(x)
x = Activation("relu", name="block1_conv2_act")(x)
residual = Conv2D(8, (1, 1), strides=(2, 2), padding="same", use_bias=False)(x)
residual = BatchNormalization()(residual)
x = SeparableConv2D(
8, (3, 3), padding="same", use_bias=False, name="block2_sepconv1"
)(x)
x = BatchNormalization(name="block2_sepconv1_bn")(x)
x = Activation("relu", name="block2_sepconv2_act")(x)
x = SeparableConv2D(
8, (3, 3), padding="same", use_bias=False, name="block2_sepconv2"
)(x)
x = BatchNormalization(name="block2_sepconv2_bn")(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding="same", name="block2_pool")(x)
x = add([x, residual])
residual = Conv2D(16, (1, 1), strides=(2, 2), padding="same", use_bias=False)(x)
residual = BatchNormalization()(residual)
model = Model(inputs=[img_input], outputs=[residual])
self._test_model(model, delta=1e-2, model_precision=model_precision)
def test_tiny_xception_half_precision(self):
return self.test_tiny_xception(model_precision=_MLMODEL_HALF_PRECISION)
def test_nested_model_giving_output(self):
base_model = Sequential()
base_model.add(Conv2D(32, (1, 1), input_shape=(4, 4, 3)))
top_model = Sequential()
top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
top_model.add(Dense(16, activation="relu"))
top_model.add(Dense(1, activation="sigmoid"))
model = Model(inputs=base_model.input, outputs=top_model(base_model.output))
self._test_model(model)
# similar to issue 269
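# Conv2D/MaxPooling2D blocks are wrapped in TimeDistributed so each timestep of
# the (1, 30, 30, 3) input is convolved independently before the LSTM.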
def test_time_distributed_conv(self):
model = Sequential()
model.add(
TimeDistributed(
Conv2D(64, (3, 3), activation="relu"), input_shape=(1, 30, 30, 3)
)
)
model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(1, 1))))
model.add(TimeDistributed(Conv2D(32, (4, 4), activation="relu")))
model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))
model.add(TimeDistributed(Conv2D(32, (4, 4), activation="relu")))
model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))
model.add(TimeDistributed(Flatten()))
model.add(Dropout(0.5))
model.add(LSTM(32, return_sequences=False, dropout=0.5))
model.add(Dense(10, activation="sigmoid"))
self._test_model(model)
@pytest.mark.slow
@pytest.mark.keras2
@unittest.skipIf(not _HAS_KERAS2_TF, "Missing keras. Skipping tests.")
class KerasNumericCorrectnessStressTest(KerasNumericCorrectnessTest):
"""
Unit test class for testing all parameter combinations of a
particular layer.
"""
def _run_test(
self,
model,
param,
model_dir=None,
delta=1e-2,
transpose_keras_result=True,
one_dim_seq_flags=None,
model_precision=_MLMODEL_FULL_PRECISION,
):
""" Run a test on a particular model
"""
use_tmp_folder = False
if model_dir is None:
use_tmp_folder = True
model_dir = tempfile.mkdtemp()
model_path = os.path.join(model_dir, "keras.mlmodel")
# Generate some random data
nb_inputs = len(model.inputs)
if nb_inputs > 1:
input_names = []
input_data = []
coreml_input = {}
for i in range(nb_inputs):
input_shape = [1 if a is None else a for a in model.input_shape[i]]
X = _generate_data(input_shape)
feature_name = "data_%s" % i
input_names.append(feature_name)
input_data.append(X)
if one_dim_seq_flags is None:
coreml_input[feature_name] = _keras_transpose(X).astype("f")
else:
coreml_input[feature_name] = _keras_transpose(
X, one_dim_seq_flags[i]
).astype("f")
else:
input_shape = [1 if a is None else a for a in model.input_shape]
input_names = ["data"]
input_data = _generate_data(input_shape)
if one_dim_seq_flags is None:
coreml_input = {"data": _keras_transpose(input_data).astype("f")}
else:
coreml_input = {
"data": _keras_transpose(input_data, one_dim_seq_flags[0]).astype(
"f"
)
}
# Make predictions
if transpose_keras_result:
keras_preds = _keras_transpose(model.predict(input_data)).flatten()
else:
keras_preds = model.predict(input_data).flatten()
# Get the model
coreml_model = _get_coreml_model(
model, input_names, ["output"], model_precision=model_precision
)
if _is_macos() and _macos_version() >= (10, 13):
# get prediction
coreml_preds = coreml_model.predict(coreml_input)["output"].flatten()
if use_tmp_folder:
shutil.rmtree(model_dir)
self.assertEqual(
len(coreml_preds),
len(keras_preds),
msg="Failed test case %s. Lengths wrong (%s vs %s)"
% (param, len(coreml_preds), len(keras_preds)),
)
for i in range(len(keras_preds)):
max_den = max(1.0, keras_preds[i], coreml_preds[i])
self.assertAlmostEqual(
keras_preds[i] / max_den,
coreml_preds[i] / max_den,
delta=delta,
msg="Failed test case %s. Predictions wrong (%s vs %s)"
% (param, coreml_preds[i], keras_preds[i]),
)
@pytest.mark.slow
def test_activation_layer_params(self):
options = dict(
activation=[
"tanh",
"relu",
"sigmoid",
"softmax",
"softplus",
"softsign",
"hard_sigmoid",
"elu",
]
)
# Define a function that tests a model
num_channels = 10
input_dim = 10
def build_model(x):
model = Sequential()
model.add(Dense(num_channels, input_dim=input_dim))
model.add(Activation(**dict(zip(options.keys(), x))))
return x, model
# Iterate through all combinations
product = itertools.product(*options.values())
args = [build_model(p) for p in product]
# Test the cases
print("Testing a total of %s cases. This could take a while" % len(args))
for param, model in args:
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._run_test(model, param)
@pytest.mark.slow
def test_dense_layer_params(self):
options = dict(
activation=[
"relu",
"softmax",
"tanh",
"sigmoid",
"softplus",
"softsign",
"elu",
"hard_sigmoid",
],
use_bias=[True, False],
)
# Define a function that tests a model
input_shape = (10,)
num_channels = 10
def build_model(x):
kwargs = dict(zip(options.keys(), x))
model = Sequential()
model.add(Dense(num_channels, input_shape=input_shape, **kwargs))
return x, model
# Iterate through all combinations
product = itertools.product(*options.values())
args = [build_model(p) for p in product]
# Test the cases
print("Testing a total of %s cases. This could take a while" % len(args))
for param, model in args:
self._run_test(model, param)
@pytest.mark.slow
def test_upsample_layer_params(self):
options = dict(size=[(2, 2), (3, 3), (4, 4), (5, 5)])
np.random.seed(1988)
input_dim = 10
input_shape = (input_dim, input_dim, 1)
X = np.random.rand(1, *input_shape)
# Define a function that tests a model
def build_model(x):
kwargs = dict(zip(options.keys(), x))
model = Sequential()
model.add(Conv2D(filters=5, kernel_size=(7, 7), input_shape=input_shape))
model.add(UpSampling2D(**kwargs))
return x, model
# Iterate through all combinations
product = itertools.product(*options.values())
args = [build_model(p) for p in product]
# Test the cases
print("Testing a total of %s cases. This could take a while" % len(args))
for param, model in args:
self._run_test(model, param)
@pytest.mark.slow
def test_conv_layer_params(self, model_precision=_MLMODEL_FULL_PRECISION):
options = dict(
activation=[
"relu",
"tanh",
"sigmoid",
], # keras does not support softmax on 4-D
use_bias=[True, False],
padding=["same", "valid"],
filters=[1, 3, 5],
kernel_size=[[5, 5]], # fails when sizes are different
)
# Define a function that tests a model
input_shape = (10, 10, 1)
def build_model(x):
kwargs = dict(zip(options.keys(), x))
model = Sequential()
model.add(Conv2D(input_shape=input_shape, **kwargs))
return x, model
# Iterate through all combinations
product = itertools.product(*options.values())
args = [build_model(p) for p in product]
# Test the cases
print("Testing a total of %s cases. This could take a while" % len(args))
for param, model in args:
self._run_test(model, param, model_precision=model_precision)
@pytest.mark.keras2
def test_conv_layer_params_half_precision(self):
return self.test_conv_layer_params(model_precision=_MLMODEL_HALF_PRECISION)
@pytest.mark.slow
def test_dense_elementwise_params(self):
options = dict(modes=[add, multiply, concatenate, average, maximum])
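# Exercise each merge function (add, multiply, concatenate, average, maximum)
# on the outputs of two small Dense branches.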
def build_model(mode):
x1 = Input(shape=(3,))
x2 = Input(shape=(3,))
y1 = Dense(4)(x1)
y2 = Dense(4)(x2)
z = mode([y1, y2])
model = Model([x1, x2], z)
return mode, model
product = itertools.product(*options.values())
args = [build_model(p[0]) for p in product]
print("Testing a total of %s cases. This could take a while" % len(args))
for param, model in args:
self._run_test(model, param)
def test_vgg_16_tiny(self):
input_shape = (48, 48, 3)
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=input_shape))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(Flatten())
model.add(Dense(32, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(32, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(1000)) # activation='softmax'
# Set some random weights
model.set_weights(
[(np.random.rand(*w.shape) - 0.5) * 0.2 for w in model.get_weights()]
)
# Get the coreml model
self._test_model(model)
def test_vgg_16_tiny_no_pooling(self):
input_shape = (48, 48, 3)
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=input_shape))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(Flatten())
model.add(Dense(32, activation="relu"))
# model.add(Dropout(0.5))
model.add(Dense(32, activation="relu"))
# model.add(Dropout(0.5))
model.add(Dense(1000)) # activation='softmax'
# Set some random weights
model.set_weights(
[(np.random.rand(*w.shape) - 0.5) * 0.2 for w in model.get_weights()]
)
# Get the coreml model
self._test_model(model)
def test_vgg_16_tiny_no_pooling_no_padding(
self, model_precision=_MLMODEL_FULL_PRECISION
):
input_shape = (48, 48, 3)
model = Sequential()
model.add(Conv2D(32, (3, 3), activation="relu", input_shape=input_shape))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(Flatten())
model.add(Dense(32, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(32, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(1000, activation="softmax"))
# Get the coreml model
self._test_model(model, model_precision=model_precision)
def test_vgg_16_tiny_no_pooling_no_padding_half_precision(self):
return self.test_vgg_16_tiny_no_pooling_no_padding(
model_precision=_MLMODEL_HALF_PRECISION
)
def test_imdb_fasttext_first_2(self):
max_features = 10
max_len = 6
embedding_dims = 4
pool_length = 2
model = Sequential()
model.add(Embedding(max_features, embedding_dims, input_length=max_len))
# we add an AveragePooling1D, which will average the embeddings
# of all words in the document
model.add(AveragePooling1D(pool_size=pool_length))
self._test_model(model, one_dim_seq_flags=[True])
def test_tiny_mcrnn_td(self):
model = Sequential()
model.add(Conv2D(3, (1, 1), input_shape=(2, 4, 4), padding="same"))
model.add(AveragePooling2D(pool_size=(2, 2)))
model.add(Reshape((2, 3)))
model.add(TimeDistributed(Dense(5)))
self._test_model(model)
def test_tiny_mcrnn_recurrent(self):
model = Sequential()
model.add(Conv2D(3, (1, 1), input_shape=(2, 4, 4), padding="same"))
model.add(AveragePooling2D(pool_size=(2, 2)))
model.add(Reshape((2, 3)))
model.add(LSTM(5, recurrent_activation="sigmoid"))
self._test_model(model)
def test_tiny_mcrnn_music_tagger(self):
x_in = Input(shape=(4, 6, 1))
x = ZeroPadding2D(padding=(0, 1))(x_in)
x = BatchNormalization(axis=2, name="bn_0_freq")(x)
# Conv block 1
x = Conv2D(2, (3, 3), padding="same", name="conv1")(x)
x = BatchNormalization(axis=3, name="bn1")(x)
x = Activation("elu")(x)
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name="pool1")(x)
# Conv block 2
x = Conv2D(4, (3, 3), padding="same", name="conv2")(x)
x = BatchNormalization(axis=3, name="bn2")(x)
x = Activation("elu")(x)
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name="pool2")(x)
# Should get you (1,1,2,4)
x = Reshape((2, 4))(x)
x = GRU(32, return_sequences=True, name="gru1")(x)
x = GRU(32, return_sequences=False, name="gru2")(x)
# Create model.
model = Model(x_in, x)
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model, mode="random_zero_mean", delta=1e-2)
def test_tiny_apple_manual(self):
model = Sequential()
model.add(LSTM(3, input_shape=(4, 5), recurrent_activation="sigmoid"))
model.add(Dense(5))
model.add(Activation("softmax"))
self._test_model(model)
def test_tiny_image_captioning_image_branch(self):
img_input_1 = Input(shape=(16, 16, 3))
x = Conv2D(2, (3, 3))(img_input_1)
x = Flatten()(x)
img_model = Model(inputs=[img_input_1], outputs=[x])
img_input = Input(shape=(16, 16, 3))
x = img_model(img_input)
x = Dense(8, name="cap_dense")(x)
x = Reshape((1, 8), name="cap_reshape")(x)
image_branch = Model(inputs=[img_input], outputs=[x])
self._test_model(image_branch)
def test_tiny_image_captioning_feature_merge(self):
img_input_1 = Input(shape=(16, 16, 3))
x = Conv2D(2, (3, 3))(img_input_1)
x = Flatten()(x)
img_model = Model([img_input_1], [x])
img_input = Input(shape=(16, 16, 3))
x = img_model(img_input)
x = Dense(8, name="cap_dense")(x)
x = Reshape((1, 8), name="cap_reshape")(x)
sentence_input = Input(shape=(5,)) # max_length = 5
y = Embedding(8, 8, name="cap_embedding")(sentence_input)
z = concatenate([x, y], axis=1, name="cap_merge")
combined_model = Model(inputs=[img_input, sentence_input], outputs=[z])
self._test_model(combined_model, one_dim_seq_flags=[False, True])
def test_tiny_image_captioning(self):
# use a conv layer as an image feature branch
img_input_1 = Input(shape=(16, 16, 3))
x = Conv2D(2, (3, 3))(img_input_1)
x = Flatten()(x)
img_model = Model(inputs=[img_input_1], outputs=[x])
img_input = Input(shape=(16, 16, 3))
x = img_model(img_input)
x = Dense(8, name="cap_dense")(x)
x = Reshape((1, 8), name="cap_reshape")(x)
sentence_input = Input(shape=(5,)) # max_length = 5
y = Embedding(8, 8, name="cap_embedding")(sentence_input)
z = concatenate([x, y], axis=1, name="cap_merge")
z = LSTM(4, return_sequences=True, name="cap_lstm")(z)
z = TimeDistributed(Dense(8), name="cap_timedistributed")(z)
combined_model = Model(inputs=[img_input, sentence_input], outputs=[z])
self._test_model(combined_model, one_dim_seq_flags=[False, True])
def test_tiny_babi_rnn(self):
vocab_size = 10
embed_hidden_size = 8
story_maxlen = 5
query_maxlen = 5
input_tensor_1 = Input(shape=(story_maxlen,))
x1 = Embedding(vocab_size, embed_hidden_size)(input_tensor_1)
x1 = Dropout(0.3)(x1)
input_tensor_2 = Input(shape=(query_maxlen,))
x2 = Embedding(vocab_size, embed_hidden_size)(input_tensor_2)
x2 = Dropout(0.3)(x2)
x2 = LSTM(embed_hidden_size, return_sequences=False)(x2)
x2 = RepeatVector(story_maxlen)(x2)
x3 = add([x1, x2])
x3 = LSTM(embed_hidden_size, return_sequences=False)(x3)
x3 = Dropout(0.3)(x3)
x3 = Dense(vocab_size, activation="softmax")(x3)
model = Model(inputs=[input_tensor_1, input_tensor_2], outputs=[x3])
self._test_model(model, one_dim_seq_flags=[True, True])
def test_clickbait_cnn(self, model_precision=_MLMODEL_FULL_PRECISION):
# from: https://github.com/saurabhmathur96/clickbait-detector
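# Embedding followed by three Conv1D + BatchNormalization + ReLU blocks,
# then max-pooled down to a single sigmoid output.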
vocabulary_size = 500
embedding_dimension = 30
input_length = 20
model = Sequential()
model.add(
Embedding(
vocabulary_size,
embedding_dimension,
input_length=input_length,
trainable=True,
)
)
model.add(Conv1D(32, 2))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Conv1D(32, 2))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Conv1D(32, 2))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(MaxPooling1D(17))
model.add(Flatten())
model.add(Dense(1, use_bias=True))
model.add(BatchNormalization())
model.add(Activation("sigmoid"))
self._test_model(
model, one_dim_seq_flags=[True], model_precision=model_precision
)
def test_clickbait_cnn_half_precision(self):
return self.test_clickbait_cnn(model_precision=_MLMODEL_HALF_PRECISION)
def test_model_with_duplicated_edges(self):
# Create a simple model
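# The activation output feeds both the Cropping1D branch and the Conv1D branch,
# so the graph contains duplicated edges out of a single blob.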
inputs = Input(shape=(20, 20))
activation = Activation("relu")(inputs)
cropping = Cropping1D(cropping=(1, 1))(activation)
conv1d = Conv1D(20, 3, padding="valid")(activation)
outputs = Add()([conv1d, cropping])
model = Model(inputs, outputs)
self._test_model(model)
@unittest.skipIf(not _HAS_KERAS2_TF, "Missing keras. Skipping tests.")
@pytest.mark.keras2
class KerasBasicConversionTest(KerasNumericCorrectnessTest):
def test_float_arraytype_flag(self):
np.random.seed(1988)
# Define a model
model = Sequential()
model.add(Dense(1000, input_shape=(100,)))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Convert model
from coremltools.converters import keras as keras_converter
coreml_model = keras_converter.convert(model, use_float_arraytype=True)
spec = coreml_model.get_spec()
from coremltools.proto import Model_pb2 as _Model_pb2
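# With use_float_arraytype=True, both the input and output multi-arrays in the
# generated spec should be typed as FLOAT32.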
self.assertEqual(
spec.description.input[0].type.multiArrayType.dataType,
_Model_pb2.ArrayFeatureType.FLOAT32,
)
self.assertEqual(
spec.description.output[0].type.multiArrayType.dataType,
_Model_pb2.ArrayFeatureType.FLOAT32,
)
if __name__ == "__main__":
unittest.main()
# suite = unittest.TestSuite()
# suite.addTest(KerasBasicNumericCorrectnessTest("test_lstm_concat_dense_random"))
# unittest.TextTestRunner().run(suite)
|
[
"numpy.random.seed",
"distutils.version.StrictVersion",
"keras.layers.dot",
"keras.layers.Cropping2D",
"numpy.ones",
"keras.models.Model",
"numpy.product",
"keras.layers.ZeroPadding1D",
"keras.layers.ZeroPadding2D",
"keras.layers.Input",
"keras.layers.Cropping1D",
"keras.layers.concatenate",
"keras.layers.Reshape",
"os.path.join",
"keras.layers.core.SpatialDropout2D",
"shutil.rmtree",
"unittest.main",
"unittest.skipIf",
"coremltools.models.utils._is_macos",
"itertools.permutations",
"keras.layers.GRU",
"numpy.transpose",
"keras.layers.Flatten",
"os.path.exists",
"coremltools.models.utils._macos_version",
"keras.layers.GlobalAveragePooling2D",
"keras.layers.MaxPooling1D",
"tempfile.mkdtemp",
"keras.layers.Permute",
"keras.layers.GlobalMaxPooling2D",
"keras.layers.MaxPooling2D",
"keras.layers.GlobalMaxPooling1D",
"coremltools.converters.keras.convert",
"keras.layers.AveragePooling1D",
"keras.layers.SimpleRNN",
"keras.layers.Dropout",
"keras.layers.Conv2DTranspose",
"keras.layers.Concatenate",
"keras.layers.AveragePooling2D",
"keras.layers.ReLU",
"keras.layers.Conv2D",
"keras.layers.UpSampling2D",
"keras.layers.RepeatVector",
"keras.layers.BatchNormalization",
"pytest.mark.xfail",
"keras.layers.advanced_activations.ThresholdedReLU",
"keras.layers.GlobalAveragePooling1D",
"keras.layers.Activation",
"keras.layers.LSTM",
"keras.layers.add",
"numpy.zeros",
"numpy.expand_dims",
"keras.layers.Conv1D",
"keras.applications.mobilenet.DepthwiseConv2D",
"keras.layers.UpSampling1D",
"keras.layers.advanced_activations.PReLU",
"keras.layers.multiply",
"keras.layers.Dense",
"keras.layers.advanced_activations.LeakyReLU",
"keras.layers.Embedding",
"keras.layers.Add",
"numpy.random.rand",
"keras.models.Sequential",
"keras.layers.SeparableConv2D",
"keras.layers.advanced_activations.ELU"
] |
[((3825, 3894), 'unittest.skipIf', 'unittest.skipIf', (['(not _HAS_KERAS2_TF)', '"""Missing keras. Skipping tests."""'], {}), "(not _HAS_KERAS2_TF, 'Missing keras. Skipping tests.')\n", (3840, 3894), False, 'import unittest\n'), ((8791, 8860), 'unittest.skipIf', 'unittest.skipIf', (['(not _HAS_KERAS2_TF)', '"""Missing keras. Skipping tests."""'], {}), "(not _HAS_KERAS2_TF, 'Missing keras. Skipping tests.')\n", (8806, 8860), False, 'import unittest\n'), ((78144, 78213), 'unittest.skipIf', 'unittest.skipIf', (['(not _HAS_KERAS2_TF)', '"""Missing keras. Skipping tests."""'], {}), "(not _HAS_KERAS2_TF, 'Missing keras. Skipping tests.')\n", (78159, 78213), False, 'import unittest\n'), ((88196, 88265), 'unittest.skipIf', 'unittest.skipIf', (['(not _HAS_KERAS2_TF)', '"""Missing keras. Skipping tests."""'], {}), "(not _HAS_KERAS2_TF, 'Missing keras. Skipping tests.')\n", (88211, 88265), False, 'import unittest\n'), ((109425, 109494), 'unittest.skipIf', 'unittest.skipIf', (['(not _HAS_KERAS2_TF)', '"""Missing keras. Skipping tests."""'], {}), "(not _HAS_KERAS2_TF, 'Missing keras. Skipping tests.')\n", (109440, 109494), False, 'import unittest\n'), ((3073, 3258), 'coremltools.converters.keras.convert', 'keras_converter.convert', (['model', 'input_names', 'output_names'], {'input_name_shape_dict': 'input_name_shape_dict', 'model_precision': 'model_precision', 'use_float_arraytype': 'use_float_arraytype'}), '(model, input_names, output_names,\n input_name_shape_dict=input_name_shape_dict, model_precision=\n model_precision, use_float_arraytype=use_float_arraytype)\n', (3096, 3258), True, 'from coremltools.converters import keras as keras_converter\n'), ((77681, 77716), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'raises': 'Exception'}), '(raises=Exception)\n', (77698, 77716), False, 'import pytest\n'), ((110483, 110498), 'unittest.main', 'unittest.main', ([], {}), '()\n', (110496, 110498), False, 'import unittest\n'), ((1492, 1515), 'distutils.version.StrictVersion', '_StrictVersion', (['"""2.2.1"""'], {}), "('2.2.1')\n", (1506, 1515), True, 'from distutils.version import StrictVersion as _StrictVersion\n'), ((1965, 1997), 'numpy.transpose', 'np.transpose', (['x', '[1, 0, 4, 2, 3]'], {}), '(x, [1, 0, 4, 2, 3])\n', (1977, 1997), True, 'import numpy as np\n'), ((2099, 2128), 'numpy.transpose', 'np.transpose', (['x', '[0, 3, 1, 2]'], {}), '(x, [0, 3, 1, 2])\n', (2111, 2128), True, 'import numpy as np\n'), ((2144, 2169), 'numpy.expand_dims', 'np.expand_dims', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (2158, 2169), True, 'import numpy as np\n'), ((3476, 3497), 'numpy.zeros', 'np.zeros', (['input_shape'], {}), '(input_shape)\n', (3484, 3497), True, 'import numpy as np\n'), ((9038, 9058), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (9052, 9058), True, 'import numpy as np\n'), ((9101, 9113), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (9111, 9113), False, 'from keras.models import Sequential, Model\n'), ((9921, 9941), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (9935, 9941), True, 'import numpy as np\n'), ((9984, 9996), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (9994, 9996), False, 'from keras.models import Sequential, Model\n'), ((10443, 10463), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (10457, 10463), True, 'import numpy as np\n'), ((10506, 10518), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (10516, 10518), False, 'from keras.models import Sequential, Model\n'), ((10810, 10830), 
'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (10824, 10830), True, 'import numpy as np\n'), ((10873, 10885), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (10883, 10885), False, 'from keras.models import Sequential, Model\n'), ((11174, 11194), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (11188, 11194), True, 'import numpy as np\n'), ((11237, 11249), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (11247, 11249), False, 'from keras.models import Sequential, Model\n'), ((11544, 11564), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (11558, 11564), True, 'import numpy as np\n'), ((11655, 11667), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (11665, 11667), False, 'from keras.models import Sequential, Model\n'), ((12086, 12106), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (12100, 12106), True, 'import numpy as np\n'), ((12279, 12291), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (12289, 12291), False, 'from keras.models import Sequential, Model\n'), ((12911, 12931), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (12925, 12931), True, 'import numpy as np\n'), ((13104, 13116), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (13114, 13116), False, 'from keras.models import Sequential, Model\n'), ((13769, 13789), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (13783, 13789), True, 'import numpy as np\n'), ((13957, 13969), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (13967, 13969), False, 'from keras.models import Sequential, Model\n'), ((14698, 14718), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (14712, 14718), True, 'import numpy as np\n'), ((14891, 14903), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (14901, 14903), False, 'from keras.models import Sequential, Model\n'), ((15609, 15629), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (15623, 15629), True, 'import numpy as np\n'), ((15781, 15793), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (15791, 15793), False, 'from keras.models import Sequential, Model\n'), ((16523, 16543), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (16537, 16543), True, 'import numpy as np\n'), ((16686, 16698), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (16696, 16698), False, 'from keras.models import Sequential, Model\n'), ((17317, 17337), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (17331, 17337), True, 'import numpy as np\n'), ((17451, 17463), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (17461, 17463), False, 'from keras.models import Sequential, Model\n'), ((17937, 17957), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (17951, 17957), True, 'import numpy as np\n'), ((18071, 18083), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (18081, 18083), False, 'from keras.models import Sequential, Model\n'), ((18688, 18708), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (18702, 18708), True, 'import numpy as np\n'), ((18822, 18834), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (18832, 18834), False, 'from keras.models import Sequential, Model\n'), ((19525, 19545), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (19539, 19545), True, 'import numpy as np\n'), ((19659, 19671), 'keras.models.Sequential', 'Sequential', ([], 
{}), '()\n', (19669, 19671), False, 'from keras.models import Sequential, Model\n'), ((20132, 20152), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (20146, 20152), True, 'import numpy as np\n'), ((20275, 20287), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (20285, 20287), False, 'from keras.models import Sequential, Model\n'), ((20765, 20785), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (20779, 20785), True, 'import numpy as np\n'), ((20974, 20986), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (20984, 20986), False, 'from keras.models import Sequential, Model\n'), ((21454, 21474), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (21468, 21474), True, 'import numpy as np\n'), ((21663, 21675), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (21673, 21675), False, 'from keras.models import Sequential, Model\n'), ((22186, 22206), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (22200, 22206), True, 'import numpy as np\n'), ((22395, 22407), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (22405, 22407), False, 'from keras.models import Sequential, Model\n'), ((23046, 23058), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (23056, 23058), False, 'from keras.models import Sequential, Model\n'), ((23279, 23291), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (23289, 23291), False, 'from keras.models import Sequential, Model\n'), ((23767, 23787), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (23781, 23787), True, 'import numpy as np\n'), ((23976, 23988), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (23986, 23988), False, 'from keras.models import Sequential, Model\n'), ((24684, 24704), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (24698, 24704), True, 'import numpy as np\n'), ((24893, 24905), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (24903, 24905), False, 'from keras.models import Sequential, Model\n'), ((25670, 25690), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (25684, 25690), True, 'import numpy as np\n'), ((25880, 25892), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (25890, 25892), False, 'from keras.models import Sequential, Model\n'), ((26410, 26430), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (26424, 26430), True, 'import numpy as np\n'), ((26620, 26632), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (26630, 26632), False, 'from keras.models import Sequential, Model\n'), ((27177, 27197), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (27191, 27197), True, 'import numpy as np\n'), ((27391, 27403), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (27401, 27403), False, 'from keras.models import Sequential, Model\n'), ((27932, 27952), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (27946, 27952), True, 'import numpy as np\n'), ((28146, 28158), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (28156, 28158), False, 'from keras.models import Sequential, Model\n'), ((28704, 28724), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (28718, 28724), True, 'import numpy as np\n'), ((28918, 28930), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (28928, 28930), False, 'from keras.models import Sequential, Model\n'), ((29476, 29496), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), 
'(1988)\n', (29490, 29496), True, 'import numpy as np\n'), ((29690, 29702), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (29700, 29702), False, 'from keras.models import Sequential, Model\n'), ((30228, 30248), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (30242, 30248), True, 'import numpy as np\n'), ((30466, 30478), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (30476, 30478), False, 'from keras.models import Sequential, Model\n'), ((31046, 31066), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (31060, 31066), True, 'import numpy as np\n'), ((31284, 31296), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (31294, 31296), False, 'from keras.models import Sequential, Model\n'), ((31910, 31930), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (31924, 31930), True, 'import numpy as np\n'), ((32149, 32161), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (32159, 32161), False, 'from keras.models import Sequential, Model\n'), ((32802, 32822), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (32816, 32822), True, 'import numpy as np\n'), ((33041, 33053), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (33051, 33053), False, 'from keras.models import Sequential, Model\n'), ((33946, 33966), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (33960, 33966), True, 'import numpy as np\n'), ((34139, 34151), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (34149, 34151), False, 'from keras.models import Sequential, Model\n'), ((34918, 34938), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (34932, 34938), True, 'import numpy as np\n'), ((35090, 35102), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (35100, 35102), False, 'from keras.models import Sequential, Model\n'), ((35871, 35883), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (35881, 35883), False, 'from keras.models import Sequential, Model\n'), ((36209, 36221), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (36219, 36221), False, 'from keras.models import Sequential, Model\n'), ((36524, 36536), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (36534, 36536), False, 'from keras.models import Sequential, Model\n'), ((36840, 36852), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (36850, 36852), False, 'from keras.models import Sequential, Model\n'), ((37149, 37161), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (37159, 37161), False, 'from keras.models import Sequential, Model\n'), ((37363, 37375), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (37373, 37375), False, 'from keras.models import Sequential, Model\n'), ((37696, 37708), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (37706, 37708), False, 'from keras.models import Sequential, Model\n'), ((38025, 38037), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (38035, 38037), False, 'from keras.models import Sequential, Model\n'), ((38189, 38201), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (38199, 38201), False, 'from keras.models import Sequential, Model\n'), ((38351, 38371), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (38365, 38371), True, 'import numpy as np\n'), ((38485, 38497), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (38495, 38497), False, 'from keras.models import Sequential, Model\n'), ((38839, 38859), 'numpy.random.seed', 
'np.random.seed', (['(1988)'], {}), '(1988)\n', (38853, 38859), True, 'import numpy as np\n'), ((38973, 38985), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (38983, 38985), False, 'from keras.models import Sequential, Model\n'), ((39343, 39363), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (39357, 39363), True, 'import numpy as np\n'), ((39477, 39489), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (39487, 39489), False, 'from keras.models import Sequential, Model\n'), ((39842, 39862), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (39856, 39862), True, 'import numpy as np\n'), ((40051, 40063), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (40061, 40063), False, 'from keras.models import Sequential, Model\n'), ((40544, 40564), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (40558, 40564), True, 'import numpy as np\n'), ((40678, 40690), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (40688, 40690), False, 'from keras.models import Sequential, Model\n'), ((41229, 41249), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (41243, 41249), True, 'import numpy as np\n'), ((41363, 41375), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (41373, 41375), False, 'from keras.models import Sequential, Model\n'), ((42122, 42142), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (42136, 42142), True, 'import numpy as np\n'), ((42256, 42268), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (42266, 42268), False, 'from keras.models import Sequential, Model\n'), ((42970, 42990), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (42984, 42990), True, 'import numpy as np\n'), ((43007, 43019), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (43017, 43019), False, 'from keras.models import Sequential, Model\n'), ((43310, 43322), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (43320, 43322), False, 'from keras.models import Sequential, Model\n'), ((43788, 43800), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (43798, 43800), False, 'from keras.models import Sequential, Model\n'), ((44310, 44330), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (44324, 44330), True, 'import numpy as np\n'), ((44446, 44458), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (44456, 44458), False, 'from keras.models import Sequential, Model\n'), ((44784, 44804), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (44798, 44804), True, 'import numpy as np\n'), ((44919, 44931), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (44929, 44931), False, 'from keras.models import Sequential, Model\n'), ((45283, 45303), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (45297, 45303), True, 'import numpy as np\n'), ((45418, 45430), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (45428, 45430), False, 'from keras.models import Sequential, Model\n'), ((45874, 45894), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (45888, 45894), True, 'import numpy as np\n'), ((45985, 45997), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (45995, 45997), False, 'from keras.models import Sequential, Model\n'), ((46379, 46399), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (46393, 46399), True, 'import numpy as np\n'), ((46490, 46502), 'keras.models.Sequential', 'Sequential', ([], {}), 
'()\n', (46500, 46502), False, 'from keras.models import Sequential, Model\n'), ((47001, 47021), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (47015, 47021), True, 'import numpy as np\n'), ((47138, 47150), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (47148, 47150), False, 'from keras.models import Sequential, Model\n'), ((47506, 47526), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (47520, 47526), True, 'import numpy as np\n'), ((47616, 47628), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (47626, 47628), False, 'from keras.models import Sequential, Model\n'), ((48077, 48097), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (48091, 48097), True, 'import numpy as np\n'), ((48187, 48199), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (48197, 48199), False, 'from keras.models import Sequential, Model\n'), ((48649, 48669), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (48663, 48669), True, 'import numpy as np\n'), ((48760, 48772), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (48770, 48772), False, 'from keras.models import Sequential, Model\n'), ((49222, 49242), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (49236, 49242), True, 'import numpy as np\n'), ((49333, 49345), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (49343, 49345), False, 'from keras.models import Sequential, Model\n'), ((49775, 49795), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (49789, 49795), True, 'import numpy as np\n'), ((49861, 49873), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (49871, 49873), False, 'from keras.models import Sequential, Model\n'), ((50185, 50205), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (50199, 50205), True, 'import numpy as np\n'), ((50271, 50283), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (50281, 50283), False, 'from keras.models import Sequential, Model\n'), ((50705, 50725), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (50719, 50725), True, 'import numpy as np\n'), ((50842, 50854), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (50852, 50854), False, 'from keras.models import Sequential, Model\n'), ((51326, 51346), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (51340, 51346), True, 'import numpy as np\n'), ((51461, 51473), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (51471, 51473), False, 'from keras.models import Sequential, Model\n'), ((51991, 52011), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (52005, 52011), True, 'import numpy as np\n'), ((52127, 52139), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (52137, 52139), False, 'from keras.models import Sequential, Model\n'), ((52682, 52702), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (52696, 52702), True, 'import numpy as np\n'), ((52841, 52853), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (52851, 52853), False, 'from keras.models import Sequential, Model\n'), ((53534, 53554), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (53548, 53554), True, 'import numpy as np\n'), ((53670, 53682), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (53680, 53682), False, 'from keras.models import Sequential, Model\n'), ((54206, 54226), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (54220, 54226), 
True, 'import numpy as np\n'), ((54343, 54355), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (54353, 54355), False, 'from keras.models import Sequential, Model\n'), ((54985, 55005), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (54999, 55005), True, 'import numpy as np\n'), ((55096, 55108), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (55106, 55108), False, 'from keras.models import Sequential, Model\n'), ((55525, 55545), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (55539, 55545), True, 'import numpy as np\n'), ((55636, 55648), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (55646, 55648), False, 'from keras.models import Sequential, Model\n'), ((56360, 56380), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (56374, 56380), True, 'import numpy as np\n'), ((56519, 56531), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (56529, 56531), False, 'from keras.models import Sequential, Model\n'), ((57294, 57314), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (57308, 57314), True, 'import numpy as np\n'), ((57453, 57465), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (57463, 57465), False, 'from keras.models import Sequential, Model\n'), ((58178, 58198), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (58192, 58198), True, 'import numpy as np\n'), ((58314, 58326), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (58324, 58326), False, 'from keras.models import Sequential, Model\n'), ((58815, 58835), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (58829, 58835), True, 'import numpy as np\n'), ((58952, 58964), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (58962, 58964), False, 'from keras.models import Sequential, Model\n'), ((59458, 59478), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (59472, 59478), True, 'import numpy as np\n'), ((59594, 59606), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (59604, 59606), False, 'from keras.models import Sequential, Model\n'), ((60222, 60242), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (60236, 60242), True, 'import numpy as np\n'), ((60358, 60370), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (60368, 60370), False, 'from keras.models import Sequential, Model\n'), ((62198, 62218), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (62212, 62218), True, 'import numpy as np\n'), ((62320, 62332), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (62330, 62332), False, 'from keras.models import Sequential, Model\n'), ((62690, 62710), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (62704, 62710), True, 'import numpy as np\n'), ((62814, 62826), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (62824, 62826), False, 'from keras.models import Sequential, Model\n'), ((63408, 63428), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (63422, 63428), True, 'import numpy as np\n'), ((63536, 63548), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (63546, 63548), False, 'from keras.models import Sequential, Model\n'), ((63950, 63970), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (63964, 63970), True, 'import numpy as np\n'), ((64084, 64096), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (64094, 64096), False, 'from keras.models import Sequential, Model\n'), 
((64489, 64509), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (64503, 64509), True, 'import numpy as np\n'), ((64607, 64632), 'keras.layers.Input', 'Input', ([], {'shape': '(input_dim,)'}), '(shape=(input_dim,))\n', (64612, 64632), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((64767, 64788), 'keras.layers.concatenate', 'concatenate', (['[x2, x3]'], {}), '([x2, x3])\n', (64778, 64788), False, 'from keras.layers import add, multiply, concatenate, dot, maximum, average\n'), ((64843, 64885), 'keras.models.Model', 'Model', ([], {'inputs': '[input_tensor]', 'outputs': '[x5]'}), '(inputs=[input_tensor], outputs=[x5])\n', (64848, 64885), False, 'from keras.models import Sequential, Model\n'), ((65120, 65140), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (65134, 65140), True, 'import numpy as np\n'), ((65288, 65311), 'keras.layers.Input', 'Input', ([], {'shape': '(seq_len,)'}), '(shape=(seq_len,))\n', (65293, 65311), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((65459, 65488), 'keras.layers.concatenate', 'concatenate', (['[x1, x2]'], {'axis': '(1)'}), '([x1, x2], axis=1)\n', (65470, 65488), False, 'from keras.layers import add, multiply, concatenate, dot, maximum, average\n'), ((65506, 65548), 'keras.models.Model', 'Model', ([], {'inputs': '[input_tensor]', 'outputs': '[x3]'}), '(inputs=[input_tensor], outputs=[x3])\n', (65511, 65548), False, 'from keras.models import Sequential, Model\n'), ((65811, 65831), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (65825, 65831), True, 'import numpy as np\n'), ((65942, 65968), 'keras.layers.Input', 'Input', ([], {'shape': '(seq_length,)'}), '(shape=(seq_length,))\n', (65947, 65968), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((65983, 66011), 'keras.layers.Input', 'Input', ([], {'shape': '(seq_length, 1)'}), '(shape=(seq_length, 1))\n', (65988, 66011), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((66412, 66453), 'keras.models.Model', 'Model', ([], {'inputs': '[input, pos]', 'outputs': 'model'}), '(inputs=[input, pos], outputs=model)\n', (66417, 66453), False, 'from keras.models import Sequential, Model\n'), ((66713, 66733), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (66727, 66733), True, 'import numpy as np\n'), ((66831, 66856), 'keras.layers.Input', 'Input', ([], {'shape': '(input_dim,)'}), '(shape=(input_dim,))\n', (66836, 66856), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((66991, 67004), 'keras.layers.add', 'add', (['[x2, x3]'], {}), '([x2, x3])\n', (66994, 67004), False, 'from keras.layers import add, multiply, concatenate, dot, maximum, average\n'), ((67059, 67101), 'keras.models.Model', 'Model', ([], {'inputs': '[input_tensor]', 'outputs': '[x5]'}), '(inputs=[input_tensor], outputs=[x5])\n', (67064, 67101), False, 'from keras.models import Sequential, Model\n'), ((67329, 67349), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (67343, 67349), True, 'import numpy as np\n'), ((67447, 67472), 'keras.layers.Input', 'Input', ([], {'shape': '(input_dim,)'}), '(shape=(input_dim,))\n', (67452, 67472), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((67607, 67625), 'keras.layers.multiply', 'multiply', (['[x2, x3]'], {}), '([x2, x3])\n', 
(67615, 67625), False, 'from keras.layers import add, multiply, concatenate, dot, maximum, average\n'), ((67680, 67722), 'keras.models.Model', 'Model', ([], {'inputs': '[input_tensor]', 'outputs': '[x5]'}), '(inputs=[input_tensor], outputs=[x5])\n', (67685, 67722), False, 'from keras.models import Sequential, Model\n'), ((67950, 67970), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (67964, 67970), True, 'import numpy as np\n'), ((68068, 68093), 'keras.layers.Input', 'Input', ([], {'shape': '(input_dim,)'}), '(shape=(input_dim,))\n', (68073, 68093), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((68228, 68266), 'keras.layers.dot', 'dot', (['[x2, x3]'], {'axes': '(-1)', 'normalize': '(True)'}), '([x2, x3], axes=-1, normalize=True)\n', (68231, 68266), False, 'from keras.layers import add, multiply, concatenate, dot, maximum, average\n'), ((68321, 68363), 'keras.models.Model', 'Model', ([], {'inputs': '[input_tensor]', 'outputs': '[x5]'}), '(inputs=[input_tensor], outputs=[x5])\n', (68326, 68363), False, 'from keras.models import Sequential, Model\n'), ((68633, 68645), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (68643, 68645), False, 'from keras.models import Sequential, Model\n'), ((68980, 68992), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (68990, 68992), False, 'from keras.models import Sequential, Model\n'), ((69335, 69347), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (69345, 69347), False, 'from keras.models import Sequential, Model\n'), ((70110, 70122), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (70120, 70122), False, 'from keras.models import Sequential, Model\n'), ((70284, 70304), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (70298, 70304), True, 'import numpy as np\n'), ((70539, 70551), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (70549, 70551), False, 'from keras.models import Sequential, Model\n'), ((71086, 71106), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (71100, 71106), True, 'import numpy as np\n'), ((71341, 71353), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (71351, 71353), False, 'from keras.models import Sequential, Model\n'), ((71901, 71921), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (71915, 71921), True, 'import numpy as np\n'), ((72033, 72045), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (72043, 72045), False, 'from keras.models import Sequential, Model\n'), ((72367, 72387), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (72381, 72387), True, 'import numpy as np\n'), ((72622, 72634), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (72632, 72634), False, 'from keras.models import Sequential, Model\n'), ((73151, 73163), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (73161, 73163), False, 'from keras.models import Sequential, Model\n'), ((73438, 73458), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (73452, 73458), True, 'import numpy as np\n'), ((73573, 73585), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (73583, 73585), False, 'from keras.models import Sequential, Model\n'), ((74262, 74282), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (74276, 74282), True, 'import numpy as np\n'), ((74298, 74320), 'keras.layers.Input', 'Input', ([], {'shape': '(7, 7, 2)'}), '(shape=(7, 7, 2))\n', (74303, 74320), False, 'from 
keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((74427, 74441), 'keras.models.Model', 'Model', (['x_in', 'x'], {}), '(x_in, x)\n', (74432, 74441), False, 'from keras.models import Sequential, Model\n'), ((74682, 74694), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (74692, 74694), False, 'from keras.models import Sequential, Model\n'), ((75084, 75096), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (75094, 75096), False, 'from keras.models import Sequential, Model\n'), ((75557, 75577), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (75571, 75577), True, 'import numpy as np\n'), ((75593, 75613), 'keras.layers.Input', 'Input', ([], {'shape': '(10, 2)'}), '(shape=(10, 2))\n', (75598, 75613), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((75696, 75729), 'keras.models.Model', 'Model', ([], {'inputs': '[x_in]', 'outputs': '[x]'}), '(inputs=[x_in], outputs=[x])\n', (75701, 75729), False, 'from keras.models import Sequential, Model\n'), ((75818, 75838), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (75832, 75838), True, 'import numpy as np\n'), ((75942, 75954), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (75952, 75954), False, 'from keras.models import Sequential, Model\n'), ((76492, 76512), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (76506, 76512), True, 'import numpy as np\n'), ((76627, 76639), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (76637, 76639), False, 'from keras.models import Sequential, Model\n'), ((77299, 77311), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (77309, 77311), False, 'from keras.models import Sequential, Model\n'), ((77846, 77858), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (77856, 77858), False, 'from keras.models import Sequential, Model\n'), ((78353, 78385), 'keras.layers.Input', 'Input', ([], {'shape': '(4,)', 'name': '"""input1"""'}), "(shape=(4,), name='input1')\n", (78358, 78385), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((78399, 78431), 'keras.layers.Input', 'Input', ([], {'shape': '(5,)', 'name': '"""input2"""'}), "(shape=(5,), name='input2')\n", (78404, 78431), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((78484, 78505), 'keras.layers.concatenate', 'concatenate', (['[x1, y1]'], {}), '([x1, y1])\n', (78495, 78505), False, 'from keras.layers import add, multiply, concatenate, dot, maximum, average\n'), ((78522, 78557), 'keras.models.Model', 'Model', ([], {'inputs': '[x1, x2]', 'outputs': '[z]'}), '(inputs=[x1, x2], outputs=[z])\n', (78527, 78557), False, 'from keras.models import Sequential, Model\n'), ((78731, 78763), 'keras.layers.Input', 'Input', ([], {'shape': '(4,)', 'name': '"""input1"""'}), "(shape=(4,), name='input1')\n", (78736, 78763), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((78777, 78809), 'keras.layers.Input', 'Input', ([], {'shape': '(5,)', 'name': '"""input2"""'}), "(shape=(5,), name='input2')\n", (78782, 78809), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((78862, 78883), 'keras.layers.concatenate', 'concatenate', (['[y1, x1]'], {}), '([y1, x1])\n', (78873, 78883), False, 'from keras.layers import add, multiply, concatenate, dot, maximum, average\n'), ((78900, 78935), 'keras.models.Model', 'Model', 
([], {'inputs': '[x1, x2]', 'outputs': '[z]'}), '(inputs=[x1, x2], outputs=[z])\n', (78905, 78935), False, 'from keras.models import Sequential, Model\n'), ((79110, 79134), 'keras.layers.Input', 'Input', ([], {'shape': '(27, 27, 1)'}), '(shape=(27, 27, 1))\n', (79115, 79134), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((79264, 79306), 'keras.models.Model', 'Model', ([], {'inputs': '[digit_input]', 'outputs': '[out]'}), '(inputs=[digit_input], outputs=[out])\n', (79269, 79306), False, 'from keras.models import Sequential, Model\n'), ((79376, 79400), 'keras.layers.Input', 'Input', ([], {'shape': '(27, 27, 1)'}), '(shape=(27, 27, 1))\n', (79381, 79400), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((79419, 79443), 'keras.layers.Input', 'Input', ([], {'shape': '(27, 27, 1)'}), '(shape=(27, 27, 1))\n', (79424, 79443), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((79604, 79631), 'keras.layers.concatenate', 'concatenate', (['[out_a, out_b]'], {}), '([out_a, out_b])\n', (79615, 79631), False, 'from keras.layers import add, multiply, concatenate, dot, maximum, average\n'), ((79707, 79752), 'keras.models.Model', 'Model', ([], {'inputs': '[digit_a, digit_b]', 'outputs': 'out'}), '(inputs=[digit_a, digit_b], outputs=out)\n', (79712, 79752), False, 'from keras.models import Sequential, Model\n'), ((80035, 80052), 'keras.layers.Input', 'Input', ([], {'shape': '(3,)'}), '(shape=(3,))\n', (80040, 80052), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((80069, 80077), 'keras.layers.Dense', 'Dense', (['(4)'], {}), '(4)\n', (80074, 80077), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((80160, 80181), 'keras.layers.concatenate', 'concatenate', (['[y1, y3]'], {}), '([y1, y3])\n', (80171, 80181), False, 'from keras.layers import add, multiply, concatenate, dot, maximum, average\n'), ((80198, 80228), 'keras.models.Model', 'Model', ([], {'inputs': '[x]', 'outputs': '[z]'}), '(inputs=[x], outputs=[z])\n', (80203, 80228), False, 'from keras.models import Sequential, Model\n'), ((80427, 80444), 'keras.layers.Input', 'Input', ([], {'shape': '(3,)'}), '(shape=(3,))\n', (80432, 80444), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((80511, 80531), 'keras.models.Model', 'Model', (['[x]', '[y1, y2]'], {}), '([x], [y1, y2])\n', (80516, 80531), False, 'from keras.models import Sequential, Model\n'), ((80735, 80752), 'keras.layers.Input', 'Input', ([], {'shape': '(3,)'}), '(shape=(3,))\n', (80740, 80752), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((80875, 80893), 'keras.models.Model', 'Model', (['[x]', '[y, z]'], {}), '([x], [y, z])\n', (80880, 80893), False, 'from keras.models import Sequential, Model\n'), ((81098, 81120), 'keras.layers.Input', 'Input', ([], {'shape': '(8, 8, 3)'}), '(shape=(8, 8, 3))\n', (81103, 81120), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((81261, 81279), 'keras.models.Model', 'Model', (['[x]', '[y, z]'], {}), '([x], [y, z])\n', (81266, 81279), False, 'from keras.models import Sequential, Model\n'), ((81494, 81516), 'keras.layers.Input', 'Input', ([], {'shape': '(8, 8, 3)'}), '(shape=(8, 8, 3))\n', (81499, 81516), False, 'from keras.layers import 
Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((81707, 81725), 'keras.models.Model', 'Model', (['[x]', '[y, z]'], {}), '([x], [y, z])\n', (81712, 81725), False, 'from keras.models import Sequential, Model\n'), ((81936, 81956), 'keras.layers.Input', 'Input', ([], {'shape': '(10, 3)'}), '(shape=(10, 3))\n', (81941, 81956), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((82087, 82105), 'keras.models.Model', 'Model', (['[x]', '[y, z]'], {}), '([x], [y, z])\n', (82092, 82105), False, 'from keras.models import Sequential, Model\n'), ((82319, 82339), 'keras.layers.Input', 'Input', ([], {'shape': '(10, 3)'}), '(shape=(10, 3))\n', (82324, 82339), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((82520, 82538), 'keras.models.Model', 'Model', (['[x]', '[y, z]'], {}), '([x], [y, z])\n', (82525, 82538), False, 'from keras.models import Sequential, Model\n'), ((82745, 82765), 'keras.layers.Input', 'Input', ([], {'shape': '(10, 2)'}), '(shape=(10, 2))\n', (82750, 82765), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((83137, 83162), 'keras.models.Model', 'Model', (['x_in', '[out1, out2]'], {}), '(x_in, [out1, out2])\n', (83142, 83162), False, 'from keras.models import Sequential, Model\n'), ((83582, 83606), 'keras.layers.Input', 'Input', ([], {'shape': '(32, 32, 3)'}), '(shape=(32, 32, 3))\n', (83587, 83606), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((84943, 84981), 'keras.models.Model', 'Model', ([], {'inputs': '[img_input]', 'outputs': '[x]'}), '(inputs=[img_input], outputs=[x])\n', (84948, 84981), False, 'from keras.models import Sequential, Model\n'), ((85291, 85315), 'keras.layers.Input', 'Input', ([], {'shape': '(32, 32, 3)'}), '(shape=(32, 32, 3))\n', (85296, 85315), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((86401, 86419), 'keras.layers.add', 'add', (['[x, residual]'], {}), '([x, residual])\n', (86404, 86419), False, 'from keras.layers import add, multiply, concatenate, dot, maximum, average\n'), ((86577, 86622), 'keras.models.Model', 'Model', ([], {'inputs': '[img_input]', 'outputs': '[residual]'}), '(inputs=[img_input], outputs=[residual])\n', (86582, 86622), False, 'from keras.models import Sequential, Model\n'), ((86900, 86912), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (86910, 86912), False, 'from keras.models import Sequential, Model\n'), ((87000, 87012), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (87010, 87012), False, 'from keras.models import Sequential, Model\n'), ((87395, 87407), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (87405, 87407), False, 'from keras.models import Sequential, Model\n'), ((88877, 88917), 'os.path.join', 'os.path.join', (['model_dir', '"""keras.mlmodel"""'], {}), "(model_dir, 'keras.mlmodel')\n", (88889, 88917), False, 'import os\n'), ((93770, 93790), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (93784, 93790), True, 'import numpy as np\n'), ((93874, 93905), 'numpy.random.rand', 'np.random.rand', (['(1)', '*input_shape'], {}), '(1, *input_shape)\n', (93888, 93905), True, 'import numpy as np\n'), ((96592, 96604), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (96602, 96604), False, 'from keras.models import Sequential, Model\n'), ((98748, 98760), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (98758, 
98760), False, 'from keras.models import Sequential, Model\n'), ((100974, 100986), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (100984, 100986), False, 'from keras.models import Sequential, Model\n'), ((102447, 102459), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (102457, 102459), False, 'from keras.models import Sequential, Model\n'), ((102821, 102833), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (102831, 102833), False, 'from keras.models import Sequential, Model\n'), ((103136, 103148), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (103146, 103148), False, 'from keras.models import Sequential, Model\n'), ((103467, 103489), 'keras.layers.Input', 'Input', ([], {'shape': '(4, 6, 1)'}), '(shape=(4, 6, 1))\n', (103472, 103489), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((104323, 104337), 'keras.models.Model', 'Model', (['x_in', 'x'], {}), '(x_in, x)\n', (104328, 104337), False, 'from keras.models import Sequential, Model\n'), ((104545, 104557), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (104555, 104557), False, 'from keras.models import Sequential, Model\n'), ((104817, 104841), 'keras.layers.Input', 'Input', ([], {'shape': '(16, 16, 3)'}), '(shape=(16, 16, 3))\n', (104822, 104841), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((104930, 104970), 'keras.models.Model', 'Model', ([], {'inputs': '[img_input_1]', 'outputs': '[x]'}), '(inputs=[img_input_1], outputs=[x])\n', (104935, 104970), False, 'from keras.models import Sequential, Model\n'), ((104992, 105016), 'keras.layers.Input', 'Input', ([], {'shape': '(16, 16, 3)'}), '(shape=(16, 16, 3))\n', (104997, 105016), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((105166, 105204), 'keras.models.Model', 'Model', ([], {'inputs': '[img_input]', 'outputs': '[x]'}), '(inputs=[img_input], outputs=[x])\n', (105171, 105204), False, 'from keras.models import Sequential, Model\n'), ((105324, 105348), 'keras.layers.Input', 'Input', ([], {'shape': '(16, 16, 3)'}), '(shape=(16, 16, 3))\n', (105329, 105348), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((105437, 105462), 'keras.models.Model', 'Model', (['[img_input_1]', '[x]'], {}), '([img_input_1], [x])\n', (105442, 105462), False, 'from keras.models import Sequential, Model\n'), ((105484, 105508), 'keras.layers.Input', 'Input', ([], {'shape': '(16, 16, 3)'}), '(shape=(16, 16, 3))\n', (105489, 105508), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((105661, 105678), 'keras.layers.Input', 'Input', ([], {'shape': '(5,)'}), '(shape=(5,))\n', (105666, 105678), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((105775, 105820), 'keras.layers.concatenate', 'concatenate', (['[x, y]'], {'axis': '(1)', 'name': '"""cap_merge"""'}), "([x, y], axis=1, name='cap_merge')\n", (105786, 105820), False, 'from keras.layers import add, multiply, concatenate, dot, maximum, average\n'), ((105847, 105901), 'keras.models.Model', 'Model', ([], {'inputs': '[img_input, sentence_input]', 'outputs': '[z]'}), '(inputs=[img_input, sentence_input], outputs=[z])\n', (105852, 105901), False, 'from keras.models import Sequential, Model\n'), ((106094, 106118), 'keras.layers.Input', 'Input', ([], {'shape': '(16, 16, 3)'}), '(shape=(16, 16, 3))\n', (106099, 106118), False, 
'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((106207, 106247), 'keras.models.Model', 'Model', ([], {'inputs': '[img_input_1]', 'outputs': '[x]'}), '(inputs=[img_input_1], outputs=[x])\n', (106212, 106247), False, 'from keras.models import Sequential, Model\n'), ((106269, 106293), 'keras.layers.Input', 'Input', ([], {'shape': '(16, 16, 3)'}), '(shape=(16, 16, 3))\n', (106274, 106293), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((106446, 106463), 'keras.layers.Input', 'Input', ([], {'shape': '(5,)'}), '(shape=(5,))\n', (106451, 106463), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((106560, 106605), 'keras.layers.concatenate', 'concatenate', (['[x, y]'], {'axis': '(1)', 'name': '"""cap_merge"""'}), "([x, y], axis=1, name='cap_merge')\n", (106571, 106605), False, 'from keras.layers import add, multiply, concatenate, dot, maximum, average\n'), ((106764, 106818), 'keras.models.Model', 'Model', ([], {'inputs': '[img_input, sentence_input]', 'outputs': '[z]'}), '(inputs=[img_input, sentence_input], outputs=[z])\n', (106769, 106818), False, 'from keras.models import Sequential, Model\n'), ((107058, 107086), 'keras.layers.Input', 'Input', ([], {'shape': '(story_maxlen,)'}), '(shape=(story_maxlen,))\n', (107063, 107086), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((107213, 107241), 'keras.layers.Input', 'Input', ([], {'shape': '(query_maxlen,)'}), '(shape=(query_maxlen,))\n', (107218, 107241), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((107465, 107478), 'keras.layers.add', 'add', (['[x1, x2]'], {}), '([x1, x2])\n', (107468, 107478), False, 'from keras.layers import add, multiply, concatenate, dot, maximum, average\n'), ((107648, 107708), 'keras.models.Model', 'Model', ([], {'inputs': '[input_tensor_1, input_tensor_2]', 'outputs': '[x3]'}), '(inputs=[input_tensor_1, input_tensor_2], outputs=[x3])\n', (107653, 107708), False, 'from keras.models import Sequential, Model\n'), ((108026, 108038), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (108036, 108038), False, 'from keras.models import Sequential, Model\n'), ((109119, 109140), 'keras.layers.Input', 'Input', ([], {'shape': '(20, 20)'}), '(shape=(20, 20))\n', (109124, 109140), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((109368, 109389), 'keras.models.Model', 'Model', (['inputs', 'ouputs'], {}), '(inputs, ouputs)\n', (109373, 109389), False, 'from keras.models import Sequential, Model\n'), ((109625, 109645), 'numpy.random.seed', 'np.random.seed', (['(1988)'], {}), '(1988)\n', (109639, 109645), True, 'import numpy as np\n'), ((109687, 109699), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (109697, 109699), False, 'from keras.models import Sequential, Model\n'), ((109984, 110040), 'coremltools.converters.keras.convert', 'keras_converter.convert', (['model'], {'use_float_arraytype': '(True)'}), '(model, use_float_arraytype=True)\n', (110007, 110040), True, 'from coremltools.converters import keras as keras_converter\n'), ((1602, 1625), 'distutils.version.StrictVersion', '_StrictVersion', (['"""2.2.0"""'], {}), "('2.2.0')\n", (1616, 1625), True, 'from distutils.version import StrictVersion as _StrictVersion\n'), ((2280, 2306), 'numpy.transpose', 'np.transpose', (['x', '[1, 0, 2]'], {}), '(x, [1, 0, 2])\n', (2292, 2306), True, 
'import numpy as np\n'), ((3535, 3555), 'numpy.ones', 'np.ones', (['input_shape'], {}), '(input_shape)\n', (3542, 3555), True, 'import numpy as np\n'), ((6941, 6959), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (6957, 6959), False, 'import tempfile\n'), ((9132, 9158), 'keras.layers.Dense', 'Dense', (['(2)'], {'input_shape': '(2,)'}), '(2, input_shape=(2,))\n', (9137, 9158), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((10015, 10046), 'keras.layers.Dense', 'Dense', (['(1000)'], {'input_shape': '(100,)'}), '(1000, input_shape=(100,))\n', (10020, 10046), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((10537, 10587), 'keras.layers.Dense', 'Dense', (['(32)'], {'input_shape': '(32,)', 'activation': '"""softmax"""'}), "(32, input_shape=(32,), activation='softmax')\n", (10542, 10587), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((10904, 10950), 'keras.layers.Dense', 'Dense', (['(32)'], {'input_shape': '(32,)', 'activation': '"""elu"""'}), "(32, input_shape=(32,), activation='elu')\n", (10909, 10950), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((11268, 11315), 'keras.layers.Dense', 'Dense', (['(32)'], {'input_shape': '(32,)', 'activation': '"""selu"""'}), "(32, input_shape=(32,), activation='selu')\n", (11273, 11315), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((11686, 11727), 'keras.layers.Dense', 'Dense', (['num_hidden'], {'input_dim': 'num_features'}), '(num_hidden, input_dim=num_features)\n', (11691, 11727), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((11747, 11765), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (11757, 11765), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((11785, 11817), 'keras.layers.Dense', 'Dense', (['(1)'], {'input_dim': 'num_features'}), '(1, input_dim=num_features)\n', (11790, 11817), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((12323, 12423), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': 'input_shape', 'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)'}), '(input_shape=input_shape, filters=num_kernels, kernel_size=(\n kernel_height, kernel_width))\n', (12329, 12423), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((13148, 13248), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': 'input_shape', 'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)'}), '(input_shape=input_shape, filters=num_kernels, kernel_size=(\n kernel_height, kernel_width))\n', (13154, 13248), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((14001, 14105), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': '(None, None, C)', 'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)'}), 
'(input_shape=(None, None, C), filters=num_kernels, kernel_size=(\n kernel_height, kernel_width))\n', (14007, 14105), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((13567, 13578), 'coremltools.models.utils._is_macos', '_is_macos', ([], {}), '()\n', (13576, 13578), False, 'from coremltools.models.utils import _macos_version, _is_macos\n'), ((14935, 15056), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': 'input_shape', 'dilation_rate': '(2, 2)', 'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)'}), '(input_shape=input_shape, dilation_rate=(2, 2), filters=num_kernels,\n kernel_size=(kernel_height, kernel_width))\n', (14941, 15056), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((15825, 15946), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': 'input_shape', 'dilation_rate': '(2, 2)', 'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)'}), '(input_shape=input_shape, dilation_rate=(2, 2), filters=num_kernels,\n kernel_size=(kernel_height, kernel_width))\n', (15831, 15946), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((16730, 16843), 'keras.layers.Conv2D', 'Conv2D', (['nb_filters'], {'kernel_size': '(1, filter_length)', 'input_shape': '(1, input_length, input_dim)', 'padding': '"""valid"""'}), "(nb_filters, kernel_size=(1, filter_length), input_shape=(1,\n input_length, input_dim), padding='valid')\n", (16736, 16843), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((17495, 17600), 'keras.layers.Conv1D', 'Conv1D', (['nb_filters'], {'kernel_size': 'filter_length', 'padding': '"""same"""', 'input_shape': '(input_length, input_dim)'}), "(nb_filters, kernel_size=filter_length, padding='same', input_shape=(\n input_length, input_dim))\n", (17501, 17600), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((18115, 18212), 'keras.layers.Conv1D', 'Conv1D', (['nb_filters'], {'kernel_size': 'filter_length', 'padding': '"""same"""', 'input_shape': '(None, input_dim)'}), "(nb_filters, kernel_size=filter_length, padding='same', input_shape=(\n None, input_dim))\n", (18121, 18212), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((18866, 18971), 'keras.layers.Conv1D', 'Conv1D', (['nb_filters'], {'kernel_size': 'filter_length', 'padding': '"""same"""', 'input_shape': '(input_length, input_dim)'}), "(nb_filters, kernel_size=filter_length, padding='same', input_shape=(\n input_length, input_dim))\n", (18872, 18971), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((19703, 19809), 'keras.layers.Conv1D', 'Conv1D', (['nb_filters'], {'kernel_size': 'filter_length', 'padding': '"""valid"""', 'input_shape': '(input_length, input_dim)'}), "(nb_filters, kernel_size=filter_length, padding='valid', input_shape=\n (input_length, input_dim))\n", (19709, 19809), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((20319, 20429), 'keras.layers.Conv1D', 'Conv1D', (['num_kernels'], 
{'kernel_size': 'filter_length', 'padding': '"""valid"""', 'input_shape': 'input_shape', 'dilation_rate': '(3)'}), "(num_kernels, kernel_size=filter_length, padding='valid', input_shape\n =input_shape, dilation_rate=3)\n", (20325, 20429), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((21018, 21134), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': 'input_shape', 'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)', 'padding': '"""same"""'}), "(input_shape=input_shape, filters=num_kernels, kernel_size=(\n kernel_height, kernel_width), padding='same')\n", (21024, 21134), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((21707, 21824), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': 'input_shape', 'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)', 'padding': '"""valid"""'}), "(input_shape=input_shape, filters=num_kernels, kernel_size=(\n kernel_height, kernel_width), padding='valid')\n", (21713, 21824), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((22439, 22556), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': 'input_shape', 'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)', 'padding': '"""valid"""'}), "(input_shape=input_shape, filters=num_kernels, kernel_size=(\n kernel_height, kernel_width), padding='valid')\n", (22445, 22556), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((23077, 23107), 'keras.layers.Flatten', 'Flatten', ([], {'input_shape': '(2, 2, 2)'}), '(input_shape=(2, 2, 2))\n', (23084, 23107), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((23310, 23372), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""', 'input_shape': 'input_shape'}), "(32, (3, 3), activation='relu', input_shape=input_shape)\n", (23316, 23372), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((23392, 23401), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (23399, 23401), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((23421, 23452), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""softmax"""'}), "(10, activation='softmax')\n", (23426, 23452), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((24020, 24120), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': 'input_shape', 'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)'}), '(input_shape=input_shape, filters=num_kernels, kernel_size=(\n kernel_height, kernel_width))\n', (24026, 24120), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((24207, 24240), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'epsilon': '(1e-05)'}), '(epsilon=1e-05)\n', (24225, 24240), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((24937, 25037), 
'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': 'input_shape', 'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)'}), '(input_shape=input_shape, filters=num_kernels, kernel_size=(\n kernel_height, kernel_width))\n', (24943, 25037), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((25124, 25184), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'center': '(False)', 'scale': '(False)', 'epsilon': '(1e-05)'}), '(center=False, scale=False, epsilon=1e-05)\n', (25142, 25184), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((25924, 26065), 'keras.layers.Conv2DTranspose', 'Conv2DTranspose', ([], {'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)', 'input_shape': 'input_shape', 'padding': '"""valid"""', 'use_bias': '(False)'}), "(filters=num_kernels, kernel_size=(kernel_height,\n kernel_width), input_shape=input_shape, padding='valid', use_bias=False)\n", (25939, 26065), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((26664, 26823), 'keras.layers.Conv2DTranspose', 'Conv2DTranspose', ([], {'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)', 'input_shape': 'input_shape', 'padding': '"""same"""', 'strides': '(2, 2)', 'use_bias': '(True)'}), "(filters=num_kernels, kernel_size=(kernel_height,\n kernel_width), input_shape=input_shape, padding='same', strides=(2, 2),\n use_bias=True)\n", (26679, 26823), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((27435, 27594), 'keras.applications.mobilenet.DepthwiseConv2D', 'DepthwiseConv2D', ([], {'depth_multiplier': 'depth_multiplier', 'kernel_size': '(kernel_height, kernel_width)', 'input_shape': 'input_shape', 'padding': '"""same"""', 'strides': '(1, 1)'}), "(depth_multiplier=depth_multiplier, kernel_size=(\n kernel_height, kernel_width), input_shape=input_shape, padding='same',\n strides=(1, 1))\n", (27450, 27594), False, 'from keras.applications.mobilenet import DepthwiseConv2D, relu6\n'), ((28190, 28350), 'keras.applications.mobilenet.DepthwiseConv2D', 'DepthwiseConv2D', ([], {'depth_multiplier': 'depth_multiplier', 'kernel_size': '(kernel_height, kernel_width)', 'input_shape': 'input_shape', 'padding': '"""valid"""', 'strides': '(1, 1)'}), "(depth_multiplier=depth_multiplier, kernel_size=(\n kernel_height, kernel_width), input_shape=input_shape, padding='valid',\n strides=(1, 1))\n", (28205, 28350), False, 'from keras.applications.mobilenet import DepthwiseConv2D, relu6\n'), ((28962, 29121), 'keras.applications.mobilenet.DepthwiseConv2D', 'DepthwiseConv2D', ([], {'depth_multiplier': 'depth_multiplier', 'kernel_size': '(kernel_height, kernel_width)', 'input_shape': 'input_shape', 'padding': '"""same"""', 'strides': '(1, 1)'}), "(depth_multiplier=depth_multiplier, kernel_size=(\n kernel_height, kernel_width), input_shape=input_shape, padding='same',\n strides=(1, 1))\n", (28977, 29121), False, 'from keras.applications.mobilenet import DepthwiseConv2D, relu6\n'), ((29734, 29894), 'keras.applications.mobilenet.DepthwiseConv2D', 'DepthwiseConv2D', ([], {'depth_multiplier': 'depth_multiplier', 'kernel_size': '(kernel_height, kernel_width)', 'input_shape': 'input_shape', 'padding': '"""valid"""', 'strides': '(1, 1)'}), 
"(depth_multiplier=depth_multiplier, kernel_size=(\n kernel_height, kernel_width), input_shape=input_shape, padding='valid',\n strides=(1, 1))\n", (29749, 29894), False, 'from keras.applications.mobilenet import DepthwiseConv2D, relu6\n'), ((30510, 30691), 'keras.layers.SeparableConv2D', 'SeparableConv2D', ([], {'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)', 'padding': '"""valid"""', 'strides': '(1, 1)', 'depth_multiplier': 'depth_multiplier', 'input_shape': 'input_shape'}), "(filters=num_kernels, kernel_size=(kernel_height,\n kernel_width), padding='valid', strides=(1, 1), depth_multiplier=\n depth_multiplier, input_shape=input_shape)\n", (30525, 30691), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((31328, 31526), 'keras.layers.SeparableConv2D', 'SeparableConv2D', ([], {'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)', 'padding': '"""same"""', 'strides': '(2, 2)', 'activation': '"""relu"""', 'depth_multiplier': 'depth_multiplier', 'input_shape': 'input_shape'}), "(filters=num_kernels, kernel_size=(kernel_height,\n kernel_width), padding='same', strides=(2, 2), activation='relu',\n depth_multiplier=depth_multiplier, input_shape=input_shape)\n", (31343, 31526), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((32193, 32374), 'keras.layers.SeparableConv2D', 'SeparableConv2D', ([], {'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)', 'padding': '"""valid"""', 'strides': '(1, 1)', 'depth_multiplier': 'depth_multiplier', 'input_shape': 'input_shape'}), "(filters=num_kernels, kernel_size=(kernel_height,\n kernel_width), padding='valid', strides=(1, 1), depth_multiplier=\n depth_multiplier, input_shape=input_shape)\n", (32208, 32374), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((33085, 33283), 'keras.layers.SeparableConv2D', 'SeparableConv2D', ([], {'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)', 'padding': '"""same"""', 'strides': '(2, 2)', 'activation': '"""relu"""', 'depth_multiplier': 'depth_multiplier', 'input_shape': 'input_shape'}), "(filters=num_kernels, kernel_size=(kernel_height,\n kernel_width), padding='same', strides=(2, 2), activation='relu',\n depth_multiplier=depth_multiplier, input_shape=input_shape)\n", (33100, 33283), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((34183, 34314), 'keras.layers.SeparableConv2D', 'SeparableConv2D', ([], {'input_shape': 'input_shape', 'dilation_rate': '(2, 2)', 'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)'}), '(input_shape=input_shape, dilation_rate=(2, 2), filters=\n num_kernels, kernel_size=(kernel_height, kernel_width))\n', (34198, 34314), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((35134, 35265), 'keras.layers.SeparableConv2D', 'SeparableConv2D', ([], {'input_shape': 'input_shape', 'dilation_rate': '(2, 2)', 'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)'}), '(input_shape=input_shape, dilation_rate=(2, 2), filters=\n num_kernels, kernel_size=(kernel_height, kernel_width))\n', (35149, 35265), False, 'from keras.layers import Dense, Activation, Conv2D, 
Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((35915, 36005), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'input_shape': '(16, 16, 3)', 'pool_size': '(2, 2)', 'strides': 'None', 'padding': '"""valid"""'}), "(input_shape=(16, 16, 3), pool_size=(2, 2), strides=None,\n padding='valid')\n", (35927, 36005), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((36253, 36345), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'input_shape': '(18, 18, 3)', 'pool_size': '(3, 3)', 'strides': '(2, 2)', 'padding': '"""valid"""'}), "(input_shape=(18, 18, 3), pool_size=(3, 3), strides=(2, 2),\n padding='valid')\n", (36265, 36345), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((36568, 36660), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'input_shape': '(16, 16, 3)', 'pool_size': '(3, 3)', 'strides': '(2, 2)', 'padding': '"""valid"""'}), "(input_shape=(16, 16, 3), pool_size=(3, 3), strides=(2, 2),\n padding='valid')\n", (36580, 36660), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((36884, 36975), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'input_shape': '(16, 16, 3)', 'pool_size': '(3, 3)', 'strides': '(2, 2)', 'padding': '"""same"""'}), "(input_shape=(16, 16, 3), pool_size=(3, 3), strides=(2, 2),\n padding='same')\n", (36896, 36975), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((37180, 37223), 'keras.layers.GlobalMaxPooling2D', 'GlobalMaxPooling2D', ([], {'input_shape': '(16, 16, 3)'}), '(input_shape=(16, 16, 3))\n', (37198, 37223), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((37407, 37501), 'keras.layers.AveragePooling2D', 'AveragePooling2D', ([], {'input_shape': '(16, 16, 3)', 'pool_size': '(2, 2)', 'strides': 'None', 'padding': '"""valid"""'}), "(input_shape=(16, 16, 3), pool_size=(2, 2), strides=None,\n padding='valid')\n", (37423, 37501), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((37740, 37835), 'keras.layers.AveragePooling2D', 'AveragePooling2D', ([], {'input_shape': '(16, 16, 3)', 'pool_size': '(3, 3)', 'strides': '(1, 1)', 'padding': '"""same"""'}), "(input_shape=(16, 16, 3), pool_size=(3, 3), strides=(1, 1),\n padding='same')\n", (37756, 37835), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((38056, 38103), 'keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {'input_shape': '(16, 16, 3)'}), '(input_shape=(16, 16, 3))\n', (38078, 38103), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((38220, 38266), 'keras.layers.MaxPooling1D', 'MaxPooling1D', ([], {'input_shape': '(16, 3)', 'pool_size': '(4)'}), '(input_shape=(16, 3), pool_size=4)\n', (38232, 38266), False, 'from keras.layers import MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, GlobalMaxPooling1D\n'), ((38529, 38634), 'keras.layers.Conv1D', 'Conv1D', (['nb_filters'], {'kernel_size': 'filter_length', 'padding': '"""same"""', 'input_shape': '(input_length, input_dim)'}), "(nb_filters, kernel_size=filter_length, padding='same', input_shape=(\n input_length, input_dim))\n", (38535, 38634), False, 'from keras.layers import Dense, 
Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((38737, 38757), 'keras.layers.GlobalMaxPooling1D', 'GlobalMaxPooling1D', ([], {}), '()\n', (38755, 38757), False, 'from keras.layers import MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, GlobalMaxPooling1D\n'), ((39017, 39122), 'keras.layers.Conv1D', 'Conv1D', (['nb_filters'], {'kernel_size': 'filter_length', 'padding': '"""same"""', 'input_shape': '(input_length, input_dim)'}), "(nb_filters, kernel_size=filter_length, padding='same', input_shape=(\n input_length, input_dim))\n", (39023, 39122), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((39225, 39254), 'keras.layers.AveragePooling1D', 'AveragePooling1D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (39241, 39254), False, 'from keras.layers import MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, GlobalMaxPooling1D\n'), ((39521, 39626), 'keras.layers.Conv1D', 'Conv1D', (['nb_filters'], {'kernel_size': 'filter_length', 'padding': '"""same"""', 'input_shape': '(input_length, input_dim)'}), "(nb_filters, kernel_size=filter_length, padding='same', input_shape=(\n input_length, input_dim))\n", (39527, 39626), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((39729, 39753), 'keras.layers.GlobalAveragePooling1D', 'GlobalAveragePooling1D', ([], {}), '()\n', (39751, 39753), False, 'from keras.layers import MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, GlobalMaxPooling1D\n'), ((40095, 40195), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': 'input_shape', 'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)'}), '(input_shape=input_shape, filters=num_kernels, kernel_size=(\n kernel_height, kernel_width))\n', (40101, 40195), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((40282, 40302), 'keras.layers.UpSampling2D', 'UpSampling2D', ([], {'size': '(2)'}), '(size=2)\n', (40294, 40302), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((40722, 40827), 'keras.layers.Conv1D', 'Conv1D', (['nb_filters'], {'kernel_size': 'filter_length', 'padding': '"""same"""', 'input_shape': '(input_length, input_dim)'}), "(nb_filters, kernel_size=filter_length, padding='same', input_shape=(\n input_length, input_dim))\n", (40728, 40827), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((40930, 40950), 'keras.layers.UpSampling1D', 'UpSampling1D', ([], {'size': '(2)'}), '(size=2)\n', (40942, 40950), False, 'from keras.layers import ZeroPadding1D, UpSampling1D, Cropping1D\n'), ((41407, 41512), 'keras.layers.Conv1D', 'Conv1D', (['nb_filters'], {'kernel_size': 'filter_length', 'padding': '"""same"""', 'input_shape': '(input_length, input_dim)'}), "(nb_filters, kernel_size=filter_length, padding='same', input_shape=(\n input_length, input_dim))\n", (41413, 41512), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((41615, 41637), 'keras.layers.Cropping1D', 'Cropping1D', ([], {'cropping': '(2)'}), '(cropping=2)\n', (41625, 41637), False, 'from keras.layers import ZeroPadding1D, UpSampling1D, Cropping1D\n'), ((42300, 42405), 'keras.layers.Conv1D', 'Conv1D', (['nb_filters'], 
{'kernel_size': 'filter_length', 'padding': '"""same"""', 'input_shape': '(input_length, input_dim)'}), "(nb_filters, kernel_size=filter_length, padding='same', input_shape=(\n input_length, input_dim))\n", (42306, 42405), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((42508, 42532), 'keras.layers.ZeroPadding1D', 'ZeroPadding1D', ([], {'padding': '(2)'}), '(padding=2)\n', (42521, 42532), False, 'from keras.layers import ZeroPadding1D, UpSampling1D, Cropping1D\n'), ((43038, 43105), 'keras.layers.Conv1D', 'Conv1D', (['(1)', '(3)'], {'input_shape': '(10, 1)', 'use_bias': '(False)', 'padding': '"""causal"""'}), "(1, 3, input_shape=(10, 1), use_bias=False, padding='causal')\n", (43044, 43105), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((43389, 43423), 'keras.layers.Embedding', 'Embedding', (['num_inputs', 'num_outputs'], {}), '(num_inputs, num_outputs)\n', (43398, 43423), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((43867, 43917), 'keras.layers.Embedding', 'Embedding', (['num_inputs', 'num_outputs'], {'input_length': '(7)'}), '(num_inputs, num_outputs, input_length=7)\n', (43876, 43917), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((44477, 44539), 'keras.layers.SimpleRNN', 'SimpleRNN', (['num_channels'], {'input_shape': '(input_length, input_dim)'}), '(num_channels, input_shape=(input_length, input_dim))\n', (44486, 44539), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((44950, 45012), 'keras.layers.SimpleRNN', 'SimpleRNN', (['num_channels'], {'input_shape': '(input_length, input_dim)'}), '(num_channels, input_shape=(input_length, input_dim))\n', (44959, 45012), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((45462, 45551), 'keras.layers.SimpleRNN', 'SimpleRNN', (['num_channels'], {'input_shape': '(input_length, input_dim)', 'return_sequences': '(True)'}), '(num_channels, input_shape=(input_length, input_dim),\n return_sequences=True)\n', (45471, 45551), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((46029, 46105), 'keras.layers.SimpleRNN', 'SimpleRNN', (['(20)'], {'input_shape': '(input_length, input_dim)', 'return_sequences': '(False)'}), '(20, input_shape=(input_length, input_dim), return_sequences=False)\n', (46038, 46105), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((46534, 46633), 'keras.layers.SimpleRNN', 'SimpleRNN', (['(20)'], {'input_shape': '(input_length, input_dim)', 'return_sequences': '(False)', 'go_backwards': '(True)'}), '(20, input_shape=(input_length, input_dim), return_sequences=False,\n go_backwards=True)\n', (46543, 46633), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((47169, 47231), 'keras.layers.SimpleRNN', 'SimpleRNN', (['num_channels'], {'input_shape': '(input_length, input_dim)'}), '(num_channels, input_shape=(input_length, input_dim))\n', (47178, 47231), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((47660, 47771), 'keras.layers.LSTM', 'LSTM', (['num_channels'], {'input_shape': '(input_length, input_dim)', 'implementation': '(1)', 'recurrent_activation': '"""sigmoid"""'}), "(num_channels, input_shape=(input_length, input_dim), implementation=1,\n recurrent_activation='sigmoid')\n", (47664, 47771), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((48231, 48342), 'keras.layers.LSTM', 'LSTM', 
(['num_channels'], {'input_shape': '(input_length, input_dim)', 'implementation': '(1)', 'recurrent_activation': '"""sigmoid"""'}), "(num_channels, input_shape=(input_length, input_dim), implementation=1,\n recurrent_activation='sigmoid')\n", (48235, 48342), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((48804, 48915), 'keras.layers.LSTM', 'LSTM', (['num_channels'], {'input_shape': '(input_length, input_dim)', 'implementation': '(2)', 'recurrent_activation': '"""sigmoid"""'}), "(num_channels, input_shape=(input_length, input_dim), implementation=2,\n recurrent_activation='sigmoid')\n", (48808, 48915), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((49377, 49488), 'keras.layers.LSTM', 'LSTM', (['num_channels'], {'input_shape': '(input_length, input_dim)', 'implementation': '(2)', 'recurrent_activation': '"""sigmoid"""'}), "(num_channels, input_shape=(input_length, input_dim), implementation=2,\n recurrent_activation='sigmoid')\n", (49381, 49488), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((49905, 49976), 'keras.layers.LSTM', 'LSTM', (['(20)'], {'input_shape': '(input_length, input_dim)', 'return_sequences': '(False)'}), '(20, input_shape=(input_length, input_dim), return_sequences=False)\n', (49909, 49976), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((50315, 50409), 'keras.layers.LSTM', 'LSTM', (['(20)'], {'input_shape': '(input_length, input_dim)', 'return_sequences': '(False)', 'go_backwards': '(True)'}), '(20, input_shape=(input_length, input_dim), return_sequences=False,\n go_backwards=True)\n', (50319, 50409), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((50886, 50979), 'keras.layers.LSTM', 'LSTM', (['num_channels'], {'input_shape': '(input_length, input_dim)', 'recurrent_activation': '"""sigmoid"""'}), "(num_channels, input_shape=(input_length, input_dim),\n recurrent_activation='sigmoid')\n", (50890, 50979), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((51505, 51616), 'keras.layers.LSTM', 'LSTM', (['num_channels'], {'input_shape': '(input_length, input_dim)', 'implementation': '(2)', 'recurrent_activation': '"""sigmoid"""'}), "(num_channels, input_shape=(input_length, input_dim), implementation=2,\n recurrent_activation='sigmoid')\n", (51509, 51616), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((52171, 52282), 'keras.layers.LSTM', 'LSTM', (['num_channels'], {'input_shape': '(input_length, input_dim)', 'implementation': '(2)', 'recurrent_activation': '"""sigmoid"""'}), "(num_channels, input_shape=(input_length, input_dim), implementation=2,\n recurrent_activation='sigmoid')\n", (52175, 52282), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((52885, 52977), 'keras.layers.GRU', 'GRU', (['num_channels'], {'input_shape': '(input_length, input_dim)', 'recurrent_activation': '"""sigmoid"""'}), "(num_channels, input_shape=(input_length, input_dim),\n recurrent_activation='sigmoid')\n", (52888, 52977), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((53714, 53806), 'keras.layers.GRU', 'GRU', (['num_channels'], {'input_shape': '(input_length, input_dim)', 'recurrent_activation': '"""sigmoid"""'}), "(num_channels, input_shape=(input_length, input_dim),\n recurrent_activation='sigmoid')\n", (53717, 53806), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((54387, 54479), 'keras.layers.GRU', 'GRU', (['num_channels'], {'input_shape': '(input_length, input_dim)', 'recurrent_activation': '"""sigmoid"""'}), "(num_channels, input_shape=(input_length, input_dim),\n 
recurrent_activation='sigmoid')\n", (54390, 54479), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((55140, 55210), 'keras.layers.GRU', 'GRU', (['(20)'], {'input_shape': '(input_length, input_dim)', 'return_sequences': '(False)'}), '(20, input_shape=(input_length, input_dim), return_sequences=False)\n', (55143, 55210), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((55680, 55773), 'keras.layers.GRU', 'GRU', (['(20)'], {'input_shape': '(input_length, input_dim)', 'return_sequences': '(False)', 'go_backwards': '(True)'}), '(20, input_shape=(input_length, input_dim), return_sequences=False,\n go_backwards=True)\n', (55683, 55773), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((61083, 61131), 'keras.layers.Input', 'Input', ([], {'name': '"""the_input"""', 'shape': '(None, input_dim)'}), "(name='the_input', shape=(None, input_dim))\n", (61088, 61131), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((61692, 61721), 'keras.models.Model', 'Model', (['[input_data]', '[y_pred]'], {}), '([input_data], [y_pred])\n', (61697, 61721), False, 'from keras.models import Sequential, Model\n'), ((62351, 62413), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': '(10, 10, 3)', 'filters': '(3)', 'kernel_size': '(5, 5)'}), '(input_shape=(10, 10, 3), filters=3, kernel_size=(5, 5))\n', (62357, 62413), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((62433, 62447), 'keras.layers.advanced_activations.ELU', 'ELU', ([], {'alpha': '(0.8)'}), '(alpha=0.8)\n', (62436, 62447), False, 'from keras.layers.advanced_activations import ELU\n'), ((62858, 62936), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': '(10, 10, 3)', 'filters': '(3)', 'kernel_size': '(5, 5)', 'padding': '"""same"""'}), "(input_shape=(10, 10, 3), filters=3, kernel_size=(5, 5), padding='same')\n", (62864, 62936), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((62995, 63020), 'keras.layers.advanced_activations.PReLU', 'PReLU', ([], {'shared_axes': '[1, 2]'}), '(shared_axes=[1, 2])\n', (63000, 63020), False, 'from keras.layers.advanced_activations import PReLU\n'), ((63580, 63658), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': '(10, 10, 3)', 'filters': '(3)', 'kernel_size': '(5, 5)', 'padding': '"""same"""'}), "(input_shape=(10, 10, 3), filters=3, kernel_size=(5, 5), padding='same')\n", (63586, 63658), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((63717, 63737), 'keras.layers.advanced_activations.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.3)'}), '(alpha=0.3)\n', (63726, 63737), False, 'from keras.layers.advanced_activations import LeakyReLU\n'), ((64128, 64206), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': '(10, 10, 3)', 'filters': '(3)', 'kernel_size': '(5, 5)', 'padding': '"""same"""'}), "(input_shape=(10, 10, 3), filters=3, kernel_size=(5, 5), padding='same')\n", (64134, 64206), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((64265, 64291), 'keras.layers.advanced_activations.ThresholdedReLU', 'ThresholdedReLU', ([], {'theta': '(0.8)'}), '(theta=0.8)\n', (64280, 64291), False, 'from keras.layers.advanced_activations import ThresholdedReLU\n'), ((64646, 64665), 'keras.layers.Dense', 'Dense', 
(['num_channels'], {}), '(num_channels)\n', (64651, 64665), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((64693, 64712), 'keras.layers.Dense', 'Dense', (['num_channels'], {}), '(num_channels)\n', (64698, 64712), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((64730, 64749), 'keras.layers.Dense', 'Dense', (['num_channels'], {}), '(num_channels)\n', (64735, 64749), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((64802, 64821), 'keras.layers.Dense', 'Dense', (['num_channels'], {}), '(num_channels)\n', (64807, 64821), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((65325, 65364), 'keras.layers.Embedding', 'Embedding', (['max_features', 'embedding_dims'], {}), '(max_features, embedding_dims)\n', (65334, 65364), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((65392, 65431), 'keras.layers.Embedding', 'Embedding', (['max_features', 'embedding_dims'], {}), '(max_features, embedding_dims)\n', (65401, 65431), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((66032, 66082), 'keras.layers.Embedding', 'Embedding', (['vocab_size', '(50)'], {'input_length': 'seq_length'}), '(vocab_size, 50, input_length=seq_length)\n', (66041, 66082), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((66107, 66126), 'keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(2)'}), '(axis=2)\n', (66118, 66126), False, 'from keras.layers import Add, Concatenate\n'), ((66161, 66211), 'keras.layers.LSTM', 'LSTM', (['units'], {'return_sequences': '(True)', 'stateful': '(False)'}), '(units, return_sequences=True, stateful=False)\n', (66165, 66211), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((66236, 66271), 'keras.layers.LSTM', 'LSTM', (['units'], {'return_sequences': '(False)'}), '(units, return_sequences=False)\n', (66240, 66271), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((66295, 66324), 'keras.layers.Dense', 'Dense', (['(100)'], {'activation': '"""relu"""'}), "(100, activation='relu')\n", (66300, 66324), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((66348, 66387), 'keras.layers.Dense', 'Dense', (['vocab_size'], {'activation': '"""softmax"""'}), "(vocab_size, activation='softmax')\n", (66353, 66387), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((66870, 66889), 'keras.layers.Dense', 'Dense', (['num_channels'], {}), '(num_channels)\n', (66875, 66889), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((66917, 66936), 'keras.layers.Dense', 'Dense', (['num_channels'], {}), '(num_channels)\n', (66922, 66936), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((66954, 66973), 'keras.layers.Dense', 'Dense', (['num_channels'], {}), '(num_channels)\n', (66959, 66973), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, 
Conv2DTranspose, SeparableConv2D\n'), ((67018, 67037), 'keras.layers.Dense', 'Dense', (['num_channels'], {}), '(num_channels)\n', (67023, 67037), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((67486, 67505), 'keras.layers.Dense', 'Dense', (['num_channels'], {}), '(num_channels)\n', (67491, 67505), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((67533, 67552), 'keras.layers.Dense', 'Dense', (['num_channels'], {}), '(num_channels)\n', (67538, 67552), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((67570, 67589), 'keras.layers.Dense', 'Dense', (['num_channels'], {}), '(num_channels)\n', (67575, 67589), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((67639, 67658), 'keras.layers.Dense', 'Dense', (['num_channels'], {}), '(num_channels)\n', (67644, 67658), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((68107, 68126), 'keras.layers.Dense', 'Dense', (['num_channels'], {}), '(num_channels)\n', (68112, 68126), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((68154, 68173), 'keras.layers.Dense', 'Dense', (['num_channels'], {}), '(num_channels)\n', (68159, 68173), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((68191, 68210), 'keras.layers.Dense', 'Dense', (['num_channels'], {}), '(num_channels)\n', (68196, 68210), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((68280, 68299), 'keras.layers.Dense', 'Dense', (['num_channels'], {}), '(num_channels)\n', (68285, 68299), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((68664, 68710), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {'input_shape': 'input_shape'}), '((1, 1), input_shape=input_shape)\n', (68677, 68710), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((69011, 69067), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['((2, 5), (3, 4))'], {'input_shape': 'input_shape'}), '(((2, 5), (3, 4)), input_shape=input_shape)\n', (69024, 69067), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((69366, 69428), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': '((2, 5), (2, 5))', 'input_shape': 'input_shape'}), '(cropping=((2, 5), (2, 5)), input_shape=input_shape)\n', (69376, 69428), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((69859, 69892), 'itertools.permutations', 'itertools.permutations', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (69881, 69892), False, 'import itertools\n'), ((69915, 69927), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (69925, 69927), False, 'from keras.models import Sequential, Model\n'), ((70141, 70183), 'keras.layers.Reshape', 'Reshape', (['(10, 1, 6)'], {'input_shape': '(5, 4, 3)'}), '((10, 1, 6), input_shape=(5, 4, 3))\n', (70148, 70183), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, 
Dropout\n'), ((70583, 70683), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': 'input_shape', 'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)'}), '(input_shape=input_shape, filters=num_kernels, kernel_size=(\n kernel_height, kernel_width))\n', (70589, 70683), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((70770, 70782), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (70777, 70782), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((70802, 70811), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (70809, 70811), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((70831, 70848), 'keras.layers.Dense', 'Dense', (['hidden_dim'], {}), '(hidden_dim)\n', (70836, 70848), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((71385, 71485), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': 'input_shape', 'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)'}), '(input_shape=input_shape, filters=num_kernels, kernel_size=(\n kernel_height, kernel_width))\n', (71391, 71485), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((71572, 71593), 'keras.layers.core.SpatialDropout2D', 'SpatialDropout2D', (['(0.5)'], {}), '(0.5)\n', (71588, 71593), False, 'from keras.layers.core import SpatialDropout2D\n'), ((71613, 71622), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (71620, 71622), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((71642, 71659), 'keras.layers.Dense', 'Dense', (['hidden_dim'], {}), '(hidden_dim)\n', (71647, 71659), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((72064, 72126), 'keras.layers.Dense', 'Dense', (['hidden_dim'], {'input_shape': '(input_dim,)', 'activation': '"""tanh"""'}), "(hidden_dim, input_shape=(input_dim,), activation='tanh')\n", (72069, 72126), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((72666, 72784), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': 'input_shape', 'activation': '"""relu"""', 'filters': 'num_kernels', 'kernel_size': '(kernel_height, kernel_width)'}), "(input_shape=input_shape, activation='relu', filters=num_kernels,\n kernel_size=(kernel_height, kernel_width))\n", (72672, 72784), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((73617, 73728), 'keras.layers.LSTM', 'LSTM', (['num_channels'], {'input_shape': '(input_length, input_dim)', 'implementation': '(1)', 'recurrent_activation': '"""sigmoid"""'}), "(num_channels, input_shape=(input_length, input_dim), implementation=1,\n recurrent_activation='sigmoid')\n", (73621, 73728), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((74333, 74362), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', ([], {'padding': '(1, 1)'}), '(padding=(1, 1))\n', (74346, 74362), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((74381, 74407), 'keras.layers.BatchNormalization', 
'BatchNormalization', ([], {'axis': '(2)'}), '(axis=2)\n', (74399, 74407), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((74713, 74780), 'keras.layers.Embedding', 'Embedding', (['vocab_size', 'embed_channels'], {'input_length': 'sequence_length'}), '(vocab_size, embed_channels, input_length=sequence_length)\n', (74722, 74780), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((74800, 74809), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (74807, 74809), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((74829, 74847), 'keras.layers.Dense', 'Dense', (['dense_units'], {}), '(dense_units)\n', (74834, 74847), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((74867, 74876), 'keras.layers.Dense', 'Dense', (['(20)'], {}), '(20)\n', (74872, 74876), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((75115, 75155), 'keras.layers.AveragePooling1D', 'AveragePooling1D', (['(2)'], {'input_shape': '(64, 9)'}), '(2, input_shape=(64, 9))\n', (75131, 75155), False, 'from keras.layers import MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, GlobalMaxPooling1D\n'), ((75175, 75239), 'keras.layers.Conv1D', 'Conv1D', (['(16)', '(1)'], {'padding': '"""same"""', 'activation': '"""relu"""', 'use_bias': '(False)'}), "(16, 1, padding='same', activation='relu', use_bias=False)\n", (75181, 75239), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((75259, 75274), 'keras.layers.MaxPooling1D', 'MaxPooling1D', (['(2)'], {}), '(2)\n', (75271, 75274), False, 'from keras.layers import MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, GlobalMaxPooling1D\n'), ((75294, 75303), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (75301, 75303), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((75323, 75375), 'keras.layers.Dense', 'Dense', ([], {'units': '(7)', 'activation': '"""softmax"""', 'use_bias': '(False)'}), "(units=7, activation='softmax', use_bias=False)\n", (75328, 75375), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((75986, 76080), 'keras.layers.Embedding', 'Embedding', (['vocabulary_size', 'embedding_dimension'], {'input_length': 'input_length', 'trainable': '(True)'}), '(vocabulary_size, embedding_dimension, input_length=input_length,\n trainable=True)\n', (75995, 76080), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((76185, 76197), 'keras.layers.Conv1D', 'Conv1D', (['(5)', '(2)'], {}), '(5, 2)\n', (76191, 76197), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((76217, 76237), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (76235, 76237), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((76257, 76275), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (76267, 76275), False, 
'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((76296, 76311), 'keras.layers.MaxPooling1D', 'MaxPooling1D', (['(2)'], {}), '(2)\n', (76308, 76311), False, 'from keras.layers import MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, GlobalMaxPooling1D\n'), ((76671, 76760), 'keras.layers.SimpleRNN', 'SimpleRNN', (['num_channels'], {'return_sequences': '(True)', 'input_shape': '(input_length, input_dim)'}), '(num_channels, return_sequences=True, input_shape=(input_length,\n input_dim))\n', (76680, 76760), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((77343, 77441), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': 'input_shape', 'filters': 'num_channels', 'kernel_size': '(kernel_size, kernel_size)'}), '(input_shape=input_shape, filters=num_channels, kernel_size=(\n kernel_size, kernel_size))\n', (77349, 77441), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((78445, 78467), 'keras.layers.Dense', 'Dense', (['(6)'], {'name': '"""dense"""'}), "(6, name='dense')\n", (78450, 78467), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((78823, 78845), 'keras.layers.Dense', 'Dense', (['(6)'], {'name': '"""dense"""'}), "(6, name='dense')\n", (78828, 78845), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((79147, 79165), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {}), '(64, (3, 3))\n', (79153, 79165), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((79191, 79209), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {}), '(64, (3, 3))\n', (79197, 79209), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((79227, 79236), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (79234, 79236), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((79646, 79676), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (79651, 79676), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((80135, 80143), 'keras.layers.Dense', 'Dense', (['(4)'], {}), '(4)\n', (80140, 80143), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((80458, 80466), 'keras.layers.Dense', 'Dense', (['(4)'], {}), '(4)\n', (80463, 80466), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((80483, 80491), 'keras.layers.Dense', 'Dense', (['(5)'], {}), '(5)\n', (80488, 80491), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((80765, 80802), 'keras.layers.Dense', 'Dense', (['(4)'], {'name': '"""intermediate_dense_y"""'}), "(4, name='intermediate_dense_y')\n", (80770, 80802), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((80818, 80855), 
'keras.layers.Dense', 'Dense', (['(5)'], {'name': '"""intermediate_dense_z"""'}), "(5, name='intermediate_dense_z')\n", (80823, 80855), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((81133, 81179), 'keras.layers.Conv2D', 'Conv2D', (['(4)', '(3, 3)'], {'name': '"""intermdiate_conv2d_1"""'}), "(4, (3, 3), name='intermdiate_conv2d_1')\n", (81139, 81179), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((81195, 81241), 'keras.layers.Conv2D', 'Conv2D', (['(5)', '(3, 3)'], {'name': '"""intermdiate_conv2d_2"""'}), "(5, (3, 3), name='intermdiate_conv2d_2')\n", (81201, 81241), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((81529, 81600), 'keras.layers.Conv2D', 'Conv2D', (['(4)', '(3, 3)'], {'name': '"""intermdiate_conv2d_1_fused"""', 'activation': '"""relu"""'}), "(4, (3, 3), name='intermdiate_conv2d_1_fused', activation='relu')\n", (81535, 81600), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((81616, 81687), 'keras.layers.Conv2D', 'Conv2D', (['(5)', '(3, 3)'], {'name': '"""intermdiate_conv2d_2_fused"""', 'activation': '"""relu"""'}), "(5, (3, 3), name='intermdiate_conv2d_2_fused', activation='relu')\n", (81622, 81687), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((81969, 82010), 'keras.layers.Conv1D', 'Conv1D', (['(4)', '(3)'], {'name': '"""intermdiate_conv1d_1"""'}), "(4, 3, name='intermdiate_conv1d_1')\n", (81975, 82010), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((82026, 82067), 'keras.layers.Conv1D', 'Conv1D', (['(5)', '(3)'], {'name': '"""intermdiate_conv1d_2"""'}), "(5, 3, name='intermdiate_conv1d_2')\n", (82032, 82067), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((82352, 82418), 'keras.layers.Conv1D', 'Conv1D', (['(4)', '(3)'], {'name': '"""intermdiate_conv1d_1_fused"""', 'activation': '"""relu"""'}), "(4, 3, name='intermdiate_conv1d_1_fused', activation='relu')\n", (82358, 82418), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((82434, 82500), 'keras.layers.Conv1D', 'Conv1D', (['(5)', '(3)'], {'name': '"""intermdiate_conv1d_2_fused"""', 'activation': '"""relu"""'}), "(5, 3, name='intermdiate_conv1d_2_fused', activation='relu')\n", (82440, 82500), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((82801, 82855), 'keras.layers.Conv1D', 'Conv1D', (['(3)', '(3)'], {'padding': '"""same"""', 'name': '"""interm_rcnn_conv1"""'}), "(3, 3, padding='same', name='interm_rcnn_conv1')\n", (82807, 82855), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((82874, 82925), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)', 'name': '"""interm_rcnn_bn1"""'}), "(axis=-1, name='interm_rcnn_bn1')\n", (82892, 82925), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, 
BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((82941, 82958), 'keras.layers.Activation', 'Activation', (['"""elu"""'], {}), "('elu')\n", (82951, 82958), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((82974, 83025), 'keras.layers.MaxPooling1D', 'MaxPooling1D', ([], {'pool_size': '(2)', 'name': '"""interm_rcnn_pool1"""'}), "(pool_size=2, name='interm_rcnn_pool1')\n", (82986, 83025), False, 'from keras.layers import MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, GlobalMaxPooling1D\n'), ((83081, 83100), 'keras.layers.GRU', 'GRU', (['(6)'], {'name': '"""gru1"""'}), "(6, name='gru1')\n", (83084, 83100), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((83619, 83698), 'keras.layers.Conv2D', 'Conv2D', (['(4)', '(3, 3)'], {'padding': '"""same"""', 'use_bias': '(False)', 'strides': '(2, 2)', 'name': '"""conv1"""'}), "(4, (3, 3), padding='same', use_bias=False, strides=(2, 2), name='conv1')\n", (83625, 83698), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((83744, 83788), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)', 'name': '"""conv1_bn"""'}), "(axis=-1, name='conv1_bn')\n", (83762, 83788), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((83845, 83958), 'keras.applications.mobilenet.DepthwiseConv2D', 'DepthwiseConv2D', (['(3, 3)'], {'padding': '"""same"""', 'depth_multiplier': '(1)', 'strides': '(1, 1)', 'use_bias': '(False)', 'name': '"""conv_dw_1"""'}), "((3, 3), padding='same', depth_multiplier=1, strides=(1, 1),\n use_bias=False, name='conv_dw_1')\n", (83860, 83958), False, 'from keras.applications.mobilenet import DepthwiseConv2D, relu6\n'), ((84053, 84101), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)', 'name': '"""conv_dw_1_bn"""'}), "(axis=-1, name='conv_dw_1_bn')\n", (84071, 84101), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((84162, 84250), 'keras.layers.Conv2D', 'Conv2D', (['(8)', '(1, 1)'], {'padding': '"""same"""', 'use_bias': '(False)', 'strides': '(1, 1)', 'name': '"""conv_pw_1"""'}), "(8, (1, 1), padding='same', use_bias=False, strides=(1, 1), name=\n 'conv_pw_1')\n", (84168, 84250), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((84283, 84331), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)', 'name': '"""conv_pw_1_bn"""'}), "(axis=-1, name='conv_pw_1_bn')\n", (84301, 84331), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((84392, 84505), 'keras.applications.mobilenet.DepthwiseConv2D', 'DepthwiseConv2D', (['(3, 3)'], {'padding': '"""same"""', 'depth_multiplier': '(1)', 'strides': '(2, 2)', 'use_bias': '(False)', 'name': '"""conv_dw_2"""'}), "((3, 3), padding='same', depth_multiplier=1, strides=(2, 2),\n use_bias=False, name='conv_dw_2')\n", (84407, 84505), False, 'from keras.applications.mobilenet import DepthwiseConv2D, relu6\n'), ((84600, 84648), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)', 'name': '"""conv_dw_2_bn"""'}), "(axis=-1, name='conv_dw_2_bn')\n", (84618, 84648), False, 'from 
keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((84709, 84797), 'keras.layers.Conv2D', 'Conv2D', (['(8)', '(1, 1)'], {'padding': '"""same"""', 'use_bias': '(False)', 'strides': '(2, 2)', 'name': '"""conv_pw_2"""'}), "(8, (1, 1), padding='same', use_bias=False, strides=(2, 2), name=\n 'conv_pw_2')\n", (84715, 84797), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((84830, 84878), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)', 'name': '"""conv_pw_2_bn"""'}), "(axis=-1, name='conv_pw_2_bn')\n", (84848, 84878), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((85328, 85398), 'keras.layers.Conv2D', 'Conv2D', (['(2)', '(3, 3)'], {'strides': '(2, 2)', 'use_bias': '(False)', 'name': '"""block1_conv1"""'}), "(2, (3, 3), strides=(2, 2), use_bias=False, name='block1_conv1')\n", (85334, 85398), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((85444, 85486), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""block1_conv1_bn"""'}), "(name='block1_conv1_bn')\n", (85462, 85486), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((85502, 85545), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': '"""block1_conv1_act"""'}), "('relu', name='block1_conv1_act')\n", (85512, 85545), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((85561, 85615), 'keras.layers.Conv2D', 'Conv2D', (['(4)', '(3, 3)'], {'use_bias': '(False)', 'name': '"""block1_conv2"""'}), "(4, (3, 3), use_bias=False, name='block1_conv2')\n", (85567, 85615), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((85631, 85673), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""block1_conv2_bn"""'}), "(name='block1_conv2_bn')\n", (85649, 85673), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((85689, 85732), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': '"""block1_conv2_act"""'}), "('relu', name='block1_conv2_act')\n", (85699, 85732), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((85756, 85821), 'keras.layers.Conv2D', 'Conv2D', (['(8)', '(1, 1)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(False)'}), "(8, (1, 1), strides=(2, 2), padding='same', use_bias=False)\n", (85762, 85821), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((85844, 85864), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (85862, 85864), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((85888, 85975), 'keras.layers.SeparableConv2D', 'SeparableConv2D', (['(8)', '(3, 3)'], {'padding': '"""same"""', 'use_bias': '(False)', 'name': '"""block2_sepconv1"""'}), "(8, (3, 3), padding='same', 
use_bias=False, name=\n 'block2_sepconv1')\n", (85903, 85975), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((86008, 86053), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""block2_sepconv1_bn"""'}), "(name='block2_sepconv1_bn')\n", (86026, 86053), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((86069, 86115), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': '"""block2_sepconv2_act"""'}), "('relu', name='block2_sepconv2_act')\n", (86079, 86115), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((86131, 86218), 'keras.layers.SeparableConv2D', 'SeparableConv2D', (['(8)', '(3, 3)'], {'padding': '"""same"""', 'use_bias': '(False)', 'name': '"""block2_sepconv2"""'}), "(8, (3, 3), padding='same', use_bias=False, name=\n 'block2_sepconv2')\n", (86146, 86218), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((86251, 86296), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""block2_sepconv2_bn"""'}), "(name='block2_sepconv2_bn')\n", (86269, 86296), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((86313, 86385), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(3, 3)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'name': '"""block2_pool"""'}), "((3, 3), strides=(2, 2), padding='same', name='block2_pool')\n", (86325, 86385), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((86440, 86506), 'keras.layers.Conv2D', 'Conv2D', (['(16)', '(1, 1)'], {'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(False)'}), "(16, (1, 1), strides=(2, 2), padding='same', use_bias=False)\n", (86446, 86506), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((86529, 86549), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (86547, 86549), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((86936, 86977), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(1, 1)'], {'input_shape': '(4, 4, 3)'}), '(32, (1, 1), input_shape=(4, 4, 3))\n', (86942, 86977), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((87035, 87083), 'keras.layers.Flatten', 'Flatten', ([], {'input_shape': 'base_model.output_shape[1:]'}), '(input_shape=base_model.output_shape[1:])\n', (87042, 87083), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((87107, 87135), 'keras.layers.Dense', 'Dense', (['(16)'], {'activation': '"""relu"""'}), "(16, activation='relu')\n", (87112, 87135), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((87159, 87189), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (87164, 87189), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, 
BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((87993, 88005), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (88000, 88005), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((88025, 88070), 'keras.layers.LSTM', 'LSTM', (['(32)'], {'return_sequences': '(False)', 'dropout': '(0.5)'}), '(32, return_sequences=False, dropout=0.5)\n', (88029, 88070), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((88090, 88121), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""sigmoid"""'}), "(10, activation='sigmoid')\n", (88095, 88121), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((88837, 88855), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (88853, 88855), False, 'import tempfile\n'), ((90598, 90609), 'coremltools.models.utils._is_macos', '_is_macos', ([], {}), '()\n', (90607, 90609), False, 'from coremltools.models.utils import _macos_version, _is_macos\n'), ((92010, 92022), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (92020, 92022), False, 'from keras.models import Sequential, Model\n'), ((93184, 93196), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (93194, 93196), False, 'from keras.models import Sequential, Model\n'), ((94052, 94064), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (94062, 94064), False, 'from keras.models import Sequential, Model\n'), ((95203, 95215), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (95213, 95215), False, 'from keras.models import Sequential, Model\n'), ((96029, 96046), 'keras.layers.Input', 'Input', ([], {'shape': '(3,)'}), '(shape=(3,))\n', (96034, 96046), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((96064, 96081), 'keras.layers.Input', 'Input', ([], {'shape': '(3,)'}), '(shape=(3,))\n', (96069, 96081), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((96193, 96211), 'keras.models.Model', 'Model', (['[x1, x2]', 'z'], {}), '([x1, x2], z)\n', (96198, 96211), False, 'from keras.models import Sequential, Model\n'), ((96623, 96669), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {'input_shape': 'input_shape'}), '((1, 1), input_shape=input_shape)\n', (96636, 96669), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((96689, 96726), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (96695, 96726), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((96746, 96767), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (96759, 96767), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((96787, 96824), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (96793, 96824), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((96844, 96880), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (96856, 96880), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((96901, 96922), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', 
(['(1, 1)'], {}), '((1, 1))\n', (96914, 96922), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((96942, 96979), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (96948, 96979), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((96999, 97020), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (97012, 97020), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((97040, 97077), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (97046, 97077), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((97097, 97133), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (97109, 97133), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((97154, 97175), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (97167, 97175), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((97195, 97232), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (97201, 97232), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((97252, 97273), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (97265, 97273), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((97293, 97330), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (97299, 97330), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((97350, 97371), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (97363, 97371), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((97391, 97428), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (97397, 97428), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((97448, 97484), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (97460, 97484), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((97505, 97526), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (97518, 97526), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((97546, 97583), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (97552, 97583), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((97603, 97624), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (97616, 97624), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((97644, 97681), 
'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (97650, 97681), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((97701, 97722), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (97714, 97722), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((97742, 97779), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (97748, 97779), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((97799, 97835), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (97811, 97835), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((97856, 97877), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (97869, 97877), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((97897, 97934), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (97903, 97934), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((97954, 97975), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (97967, 97975), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((97995, 98032), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (98001, 98032), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((98052, 98073), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (98065, 98073), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((98093, 98130), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (98099, 98130), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((98150, 98186), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (98162, 98186), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((98207, 98216), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (98214, 98216), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((98236, 98264), 'keras.layers.Dense', 'Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (98241, 98264), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((98284, 98296), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (98291, 98296), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((98316, 98344), 'keras.layers.Dense', 'Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (98321, 98344), False, 'from keras.layers 
import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((98364, 98376), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (98371, 98376), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((98396, 98407), 'keras.layers.Dense', 'Dense', (['(1000)'], {}), '(1000)\n', (98401, 98407), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((98779, 98825), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {'input_shape': 'input_shape'}), '((1, 1), input_shape=input_shape)\n', (98792, 98825), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((98845, 98882), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (98851, 98882), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((98902, 98923), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (98915, 98923), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((98943, 98980), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (98949, 98980), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((99000, 99036), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (99012, 99036), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((99057, 99078), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (99070, 99078), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((99098, 99135), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (99104, 99135), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((99155, 99176), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (99168, 99176), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((99196, 99233), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (99202, 99233), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((99253, 99289), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (99265, 99289), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((99310, 99331), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (99323, 99331), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((99351, 99388), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (99357, 99388), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((99408, 99429), 'keras.layers.ZeroPadding2D', 
'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (99421, 99429), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((99449, 99486), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (99455, 99486), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((99506, 99527), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (99519, 99527), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((99547, 99584), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (99553, 99584), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((99604, 99640), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (99616, 99640), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((99661, 99682), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (99674, 99682), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((99702, 99739), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (99708, 99739), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((99759, 99780), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (99772, 99780), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((99800, 99837), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (99806, 99837), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((99857, 99878), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (99870, 99878), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((99898, 99935), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (99904, 99935), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((99955, 99991), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (99967, 99991), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((100012, 100033), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (100025, 100033), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((100053, 100090), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (100059, 100090), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((100110, 100131), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (100123, 100131), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), 
((100151, 100188), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (100157, 100188), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((100208, 100229), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(1, 1)'], {}), '((1, 1))\n', (100221, 100229), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((100249, 100286), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (100255, 100286), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((100306, 100342), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (100318, 100342), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((100363, 100372), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (100370, 100372), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((100392, 100420), 'keras.layers.Dense', 'Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (100397, 100420), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((100474, 100502), 'keras.layers.Dense', 'Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (100479, 100502), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((100556, 100567), 'keras.layers.Dense', 'Dense', (['(1000)'], {}), '(1000)\n', (100561, 100567), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((101005, 101067), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""', 'input_shape': 'input_shape'}), "(32, (3, 3), activation='relu', input_shape=input_shape)\n", (101011, 101067), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((101087, 101124), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (101093, 101124), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((101145, 101182), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (101151, 101182), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((101202, 101239), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (101208, 101239), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((101260, 101297), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (101266, 101297), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, 
SeparableConv2D\n'), ((101317, 101354), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (101323, 101354), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((101374, 101411), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (101380, 101411), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((101432, 101469), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (101438, 101469), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((101489, 101526), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (101495, 101526), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((101546, 101583), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (101552, 101583), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((101604, 101641), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (101610, 101641), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((101661, 101698), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (101667, 101698), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((101718, 101755), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (101724, 101755), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((101776, 101785), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (101783, 101785), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((101805, 101833), 'keras.layers.Dense', 'Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (101810, 101833), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((101853, 101865), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (101860, 101865), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((101885, 101913), 'keras.layers.Dense', 'Dense', (['(32)'], {'activation': '"""relu"""'}), "(32, activation='relu')\n", (101890, 101913), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((101933, 101945), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (101940, 101945), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((101965, 
101998), 'keras.layers.Dense', 'Dense', (['(1000)'], {'activation': '"""softmax"""'}), "(1000, activation='softmax')\n", (101970, 101998), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((102478, 102539), 'keras.layers.Embedding', 'Embedding', (['max_features', 'embedding_dims'], {'input_length': 'max_len'}), '(max_features, embedding_dims, input_length=max_len)\n', (102487, 102539), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((102669, 102708), 'keras.layers.AveragePooling1D', 'AveragePooling1D', ([], {'pool_size': 'pool_length'}), '(pool_size=pool_length)\n', (102685, 102708), False, 'from keras.layers import MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, GlobalMaxPooling1D\n'), ((102852, 102908), 'keras.layers.Conv2D', 'Conv2D', (['(3)', '(1, 1)'], {'input_shape': '(2, 4, 4)', 'padding': '"""same"""'}), "(3, (1, 1), input_shape=(2, 4, 4), padding='same')\n", (102858, 102908), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((102928, 102962), 'keras.layers.AveragePooling2D', 'AveragePooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (102944, 102962), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((102982, 102997), 'keras.layers.Reshape', 'Reshape', (['(2, 3)'], {}), '((2, 3))\n', (102989, 102997), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((103167, 103223), 'keras.layers.Conv2D', 'Conv2D', (['(3)', '(1, 1)'], {'input_shape': '(2, 4, 4)', 'padding': '"""same"""'}), "(3, (1, 1), input_shape=(2, 4, 4), padding='same')\n", (103173, 103223), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((103243, 103277), 'keras.layers.AveragePooling2D', 'AveragePooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (103259, 103277), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((103297, 103312), 'keras.layers.Reshape', 'Reshape', (['(2, 3)'], {}), '((2, 3))\n', (103304, 103312), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((103332, 103371), 'keras.layers.LSTM', 'LSTM', (['(5)'], {'recurrent_activation': '"""sigmoid"""'}), "(5, recurrent_activation='sigmoid')\n", (103336, 103371), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((103502, 103531), 'keras.layers.ZeroPadding2D', 'ZeroPadding2D', ([], {'padding': '(0, 1)'}), '(padding=(0, 1))\n', (103515, 103531), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((103550, 103594), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(2)', 'name': '"""bn_0_freq"""'}), "(axis=2, name='bn_0_freq')\n", (103568, 103594), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((103633, 103680), 'keras.layers.Conv2D', 'Conv2D', (['(2)', '(3, 3)'], {'padding': '"""same"""', 'name': '"""conv1"""'}), "(2, (3, 3), padding='same', name='conv1')\n", (103639, 103680), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((103696, 103734), 'keras.layers.BatchNormalization', 
'BatchNormalization', ([], {'axis': '(3)', 'name': '"""bn1"""'}), "(axis=3, name='bn1')\n", (103714, 103734), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((103750, 103767), 'keras.layers.Activation', 'Activation', (['"""elu"""'], {}), "('elu')\n", (103760, 103767), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((103783, 103843), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)', 'name': '"""pool1"""'}), "(pool_size=(2, 2), strides=(2, 2), name='pool1')\n", (103795, 103843), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((103882, 103929), 'keras.layers.Conv2D', 'Conv2D', (['(4)', '(3, 3)'], {'padding': '"""same"""', 'name': '"""conv2"""'}), "(4, (3, 3), padding='same', name='conv2')\n", (103888, 103929), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((103945, 103983), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(3)', 'name': '"""bn2"""'}), "(axis=3, name='bn2')\n", (103963, 103983), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((103999, 104016), 'keras.layers.Activation', 'Activation', (['"""elu"""'], {}), "('elu')\n", (104009, 104016), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((104032, 104092), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'strides': '(2, 2)', 'name': '"""pool2"""'}), "(pool_size=(2, 2), strides=(2, 2), name='pool2')\n", (104044, 104092), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((104144, 104159), 'keras.layers.Reshape', 'Reshape', (['(2, 4)'], {}), '((2, 4))\n', (104151, 104159), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((104175, 104218), 'keras.layers.GRU', 'GRU', (['(32)'], {'return_sequences': '(True)', 'name': '"""gru1"""'}), "(32, return_sequences=True, name='gru1')\n", (104178, 104218), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((104234, 104278), 'keras.layers.GRU', 'GRU', (['(32)'], {'return_sequences': '(False)', 'name': '"""gru2"""'}), "(32, return_sequences=False, name='gru2')\n", (104237, 104278), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((104576, 104635), 'keras.layers.LSTM', 'LSTM', (['(3)'], {'input_shape': '(4, 5)', 'recurrent_activation': '"""sigmoid"""'}), "(3, input_shape=(4, 5), recurrent_activation='sigmoid')\n", (104580, 104635), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((104655, 104663), 'keras.layers.Dense', 'Dense', (['(5)'], {}), '(5)\n', (104660, 104663), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((104683, 104704), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (104693, 104704), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((104854, 104871), 'keras.layers.Conv2D', 'Conv2D', (['(2)', '(3, 3)'], {}), '(2, (3, 3))\n', (104860, 104871), False, 'from 
keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((104897, 104906), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (104904, 104906), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((105062, 105088), 'keras.layers.Dense', 'Dense', (['(8)'], {'name': '"""cap_dense"""'}), "(8, name='cap_dense')\n", (105067, 105088), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((105104, 105139), 'keras.layers.Reshape', 'Reshape', (['(1, 8)'], {'name': '"""cap_reshape"""'}), "((1, 8), name='cap_reshape')\n", (105111, 105139), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((105361, 105378), 'keras.layers.Conv2D', 'Conv2D', (['(2)', '(3, 3)'], {}), '(2, (3, 3))\n', (105367, 105378), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((105404, 105413), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (105411, 105413), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((105554, 105580), 'keras.layers.Dense', 'Dense', (['(8)'], {'name': '"""cap_dense"""'}), "(8, name='cap_dense')\n", (105559, 105580), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((105596, 105631), 'keras.layers.Reshape', 'Reshape', (['(1, 8)'], {'name': '"""cap_reshape"""'}), "((1, 8), name='cap_reshape')\n", (105603, 105631), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((105709, 105746), 'keras.layers.Embedding', 'Embedding', (['(8)', '(8)'], {'name': '"""cap_embedding"""'}), "(8, 8, name='cap_embedding')\n", (105718, 105746), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((106131, 106148), 'keras.layers.Conv2D', 'Conv2D', (['(2)', '(3, 3)'], {}), '(2, (3, 3))\n', (106137, 106148), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((106174, 106183), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (106181, 106183), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((106339, 106365), 'keras.layers.Dense', 'Dense', (['(8)'], {'name': '"""cap_dense"""'}), "(8, name='cap_dense')\n", (106344, 106365), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((106381, 106416), 'keras.layers.Reshape', 'Reshape', (['(1, 8)'], {'name': '"""cap_reshape"""'}), "((1, 8), name='cap_reshape')\n", (106388, 106416), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((106494, 106531), 'keras.layers.Embedding', 'Embedding', (['(8)', '(8)'], {'name': '"""cap_embedding"""'}), "(8, 8, name='cap_embedding')\n", (106503, 106531), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((106618, 106665), 'keras.layers.LSTM', 'LSTM', (['(4)'], {'return_sequences': '(True)', 'name': '"""cap_lstm"""'}), "(4, return_sequences=True, name='cap_lstm')\n", (106622, 106665), False, 'from 
keras.layers import SimpleRNN, LSTM, GRU\n'), ((107100, 107140), 'keras.layers.Embedding', 'Embedding', (['vocab_size', 'embed_hidden_size'], {}), '(vocab_size, embed_hidden_size)\n', (107109, 107140), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((107170, 107182), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (107177, 107182), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((107255, 107295), 'keras.layers.Embedding', 'Embedding', (['vocab_size', 'embed_hidden_size'], {}), '(vocab_size, embed_hidden_size)\n', (107264, 107295), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((107325, 107337), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (107332, 107337), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((107355, 107402), 'keras.layers.LSTM', 'LSTM', (['embed_hidden_size'], {'return_sequences': '(False)'}), '(embed_hidden_size, return_sequences=False)\n', (107359, 107402), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((107420, 107446), 'keras.layers.RepeatVector', 'RepeatVector', (['story_maxlen'], {}), '(story_maxlen)\n', (107432, 107446), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((107492, 107539), 'keras.layers.LSTM', 'LSTM', (['embed_hidden_size'], {'return_sequences': '(False)'}), '(embed_hidden_size, return_sequences=False)\n', (107496, 107539), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((107557, 107569), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (107564, 107569), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((107587, 107626), 'keras.layers.Dense', 'Dense', (['vocab_size'], {'activation': '"""softmax"""'}), "(vocab_size, activation='softmax')\n", (107592, 107626), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((108070, 108164), 'keras.layers.Embedding', 'Embedding', (['vocabulary_size', 'embedding_dimension'], {'input_length': 'input_length', 'trainable': '(True)'}), '(vocabulary_size, embedding_dimension, input_length=input_length,\n trainable=True)\n', (108079, 108164), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((108269, 108282), 'keras.layers.Conv1D', 'Conv1D', (['(32)', '(2)'], {}), '(32, 2)\n', (108275, 108282), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((108302, 108322), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (108320, 108322), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((108342, 108360), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (108352, 108360), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((108381, 108394), 'keras.layers.Conv1D', 'Conv1D', (['(32)', '(2)'], {}), '(32, 2)\n', (108387, 108394), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((108414, 108434), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', 
(108432, 108434), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((108454, 108472), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (108464, 108472), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((108493, 108506), 'keras.layers.Conv1D', 'Conv1D', (['(32)', '(2)'], {}), '(32, 2)\n', (108499, 108506), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((108526, 108546), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (108544, 108546), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((108566, 108584), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (108576, 108584), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((108605, 108621), 'keras.layers.MaxPooling1D', 'MaxPooling1D', (['(17)'], {}), '(17)\n', (108617, 108621), False, 'from keras.layers import MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, GlobalMaxPooling1D\n'), ((108641, 108650), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (108648, 108650), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((108671, 108694), 'keras.layers.Dense', 'Dense', (['(1)'], {'use_bias': '(True)'}), '(1, use_bias=True)\n', (108676, 108694), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((108714, 108734), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (108732, 108734), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((108754, 108775), 'keras.layers.Activation', 'Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (108764, 108775), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((109162, 109180), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (109172, 109180), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((109208, 109235), 'keras.layers.Cropping1D', 'Cropping1D', ([], {'cropping': '(1, 1)'}), '(cropping=(1, 1))\n', (109218, 109235), False, 'from keras.layers import ZeroPadding1D, UpSampling1D, Cropping1D\n'), ((109265, 109295), 'keras.layers.Conv1D', 'Conv1D', (['(20)', '(3)'], {'padding': '"""valid"""'}), "(20, 3, padding='valid')\n", (109271, 109295), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((109325, 109330), 'keras.layers.Add', 'Add', ([], {}), '()\n', (109328, 109330), False, 'from keras.layers import Add, Concatenate\n'), ((109718, 109749), 'keras.layers.Dense', 'Dense', (['(1000)'], {'input_shape': '(100,)'}), '(1000, input_shape=(100,))\n', (109723, 109749), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((8720, 8745), 'os.path.exists', 'os.path.exists', 
(['model_dir'], {}), '(model_dir)\n', (8734, 8745), False, 'import os\n'), ((8763, 8787), 'shutil.rmtree', 'shutil.rmtree', (['model_dir'], {}), '(model_dir)\n', (8776, 8787), False, 'import shutil\n'), ((9213, 9237), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (9227, 9237), True, 'import numpy as np\n'), ((9400, 9416), 'numpy.ones', 'np.ones', (['w.shape'], {}), '(w.shape)\n', (9407, 9416), True, 'import numpy as np\n'), ((9576, 9600), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (9590, 9600), True, 'import numpy as np\n'), ((10110, 10134), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (10124, 10134), True, 'import numpy as np\n'), ((10651, 10675), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (10665, 10675), True, 'import numpy as np\n'), ((11014, 11038), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (11028, 11038), True, 'import numpy as np\n'), ((11379, 11403), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (11393, 11403), True, 'import numpy as np\n'), ((11881, 11905), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (11895, 11905), True, 'import numpy as np\n'), ((12554, 12570), 'numpy.ones', 'np.ones', (['w.shape'], {}), '(w.shape)\n', (12561, 12570), True, 'import numpy as np\n'), ((13379, 13403), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (13393, 13403), True, 'import numpy as np\n'), ((14236, 14260), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (14250, 14260), True, 'import numpy as np\n'), ((13583, 13599), 'coremltools.models.utils._macos_version', '_macos_version', ([], {}), '()\n', (13597, 13599), False, 'from coremltools.models.utils import _macos_version, _is_macos\n'), ((15204, 15228), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (15218, 15228), True, 'import numpy as np\n'), ((16094, 16118), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (16108, 16118), True, 'import numpy as np\n'), ((16990, 17006), 'numpy.ones', 'np.ones', (['w.shape'], {}), '(w.shape)\n', (16997, 17006), True, 'import numpy as np\n'), ((17747, 17771), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (17761, 17771), True, 'import numpy as np\n'), ((18359, 18383), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (18373, 18383), True, 'import numpy as np\n'), ((19118, 19142), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (19132, 19142), True, 'import numpy as np\n'), ((19956, 19980), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (19970, 19980), True, 'import numpy as np\n'), ((20592, 20616), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (20606, 20616), True, 'import numpy as np\n'), ((21281, 21305), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (21295, 21305), True, 'import numpy as np\n'), ((21971, 21995), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (21985, 21995), True, 'import numpy as np\n'), ((22703, 22727), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (22717, 22727), True, 'import numpy as np\n'), ((24269, 24293), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (24283, 24293), True, 'import numpy as np\n'), ((25213, 25237), 
'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (25227, 25237), True, 'import numpy as np\n'), ((26229, 26253), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (26243, 26253), True, 'import numpy as np\n'), ((26999, 27023), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (27013, 27023), True, 'import numpy as np\n'), ((27753, 27777), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (27767, 27777), True, 'import numpy as np\n'), ((28509, 28533), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (28523, 28533), True, 'import numpy as np\n'), ((29280, 29304), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (29294, 29304), True, 'import numpy as np\n'), ((30053, 30077), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (30067, 30077), True, 'import numpy as np\n'), ((30866, 30890), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (30880, 30890), True, 'import numpy as np\n'), ((31718, 31742), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (31732, 31742), True, 'import numpy as np\n'), ((32549, 32573), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (32563, 32573), True, 'import numpy as np\n'), ((33475, 33499), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (33489, 33499), True, 'import numpy as np\n'), ((34461, 34485), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (34475, 34485), True, 'import numpy as np\n'), ((35412, 35436), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (35426, 35436), True, 'import numpy as np\n'), ((40366, 40390), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (40380, 40390), True, 'import numpy as np\n'), ((41014, 41038), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (41028, 41038), True, 'import numpy as np\n'), ((41701, 41725), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (41715, 41725), True, 'import numpy as np\n'), ((42596, 42620), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (42610, 42620), True, 'import numpy as np\n'), ((43134, 43158), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (43148, 43158), True, 'import numpy as np\n'), ((43453, 43477), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (43467, 43477), True, 'import numpy as np\n'), ((43947, 43971), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (43961, 43971), True, 'import numpy as np\n'), ((44603, 44627), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (44617, 44627), True, 'import numpy as np\n'), ((54611, 54635), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (54625, 54635), True, 'import numpy as np\n'), ((56594, 56662), 'keras.layers.LSTM', 'LSTM', (['num_channels'], {'implementation': '(1)', 'recurrent_activation': '"""sigmoid"""'}), "(num_channels, implementation=1, recurrent_activation='sigmoid')\n", (56598, 56662), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((57528, 57596), 'keras.layers.LSTM', 'LSTM', (['num_channels'], {'implementation': '(2)', 'recurrent_activation': '"""sigmoid"""'}), "(num_channels, implementation=2, recurrent_activation='sigmoid')\n", (57532, 57596), False, 'from 
keras.layers import SimpleRNN, LSTM, GRU\n'), ((58389, 58457), 'keras.layers.LSTM', 'LSTM', (['num_channels'], {'implementation': '(2)', 'recurrent_activation': '"""sigmoid"""'}), "(num_channels, implementation=2, recurrent_activation='sigmoid')\n", (58393, 58457), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((59027, 59095), 'keras.layers.LSTM', 'LSTM', (['num_channels'], {'implementation': '(2)', 'recurrent_activation': '"""sigmoid"""'}), "(num_channels, implementation=2, recurrent_activation='sigmoid')\n", (59031, 59095), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((59669, 59765), 'keras.layers.LSTM', 'LSTM', (['num_channels'], {'return_sequences': '(False)', 'implementation': '(2)', 'recurrent_activation': '"""sigmoid"""'}), "(num_channels, return_sequences=False, implementation=2,\n recurrent_activation='sigmoid')\n", (59673, 59765), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((60433, 60528), 'keras.layers.LSTM', 'LSTM', (['num_channels'], {'return_sequences': '(True)', 'implementation': '(2)', 'recurrent_activation': '"""sigmoid"""'}), "(num_channels, return_sequences=True, implementation=2,\n recurrent_activation='sigmoid')\n", (60437, 60528), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((62477, 62501), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (62491, 62501), True, 'import numpy as np\n'), ((63050, 63074), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (63064, 63074), True, 'import numpy as np\n'), ((63767, 63791), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (63781, 63791), True, 'import numpy as np\n'), ((64321, 64345), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (64335, 64345), True, 'import numpy as np\n'), ((64948, 64972), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (64962, 64972), True, 'import numpy as np\n'), ((65611, 65635), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (65625, 65635), True, 'import numpy as np\n'), ((66516, 66540), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (66530, 66540), True, 'import numpy as np\n'), ((67164, 67188), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (67178, 67188), True, 'import numpy as np\n'), ((67785, 67809), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (67799, 67809), True, 'import numpy as np\n'), ((68426, 68450), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (68440, 68450), True, 'import numpy as np\n'), ((68774, 68798), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (68788, 68798), True, 'import numpy as np\n'), ((69131, 69155), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (69145, 69155), True, 'import numpy as np\n'), ((69492, 69516), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (69506, 69516), True, 'import numpy as np\n'), ((69950, 69995), 'keras.layers.Permute', 'Permute', (['permute_order'], {'input_shape': '(4, 3, 2)'}), '(permute_order, input_shape=(4, 3, 2))\n', (69957, 69995), False, 'from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout\n'), ((70912, 70936), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (70926, 70936), True, 'import numpy as np\n'), ((71723, 71747), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), 
'(*w.shape)\n', (71737, 71747), True, 'import numpy as np\n'), ((72190, 72214), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (72204, 72214), True, 'import numpy as np\n'), ((72932, 72956), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (72946, 72956), True, 'import numpy as np\n'), ((73198, 73206), 'keras.layers.Dense', 'Dense', (['(8)'], {}), '(8)\n', (73203, 73206), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((73259, 73283), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (73273, 73283), True, 'import numpy as np\n'), ((74906, 74930), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (74920, 74930), True, 'import numpy as np\n'), ((75405, 75429), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (75419, 75429), True, 'import numpy as np\n'), ((75642, 75672), 'keras.layers.Dense', 'Dense', (['(6)'], {'activation': '"""softmax"""'}), "(6, activation='softmax')\n", (75647, 75672), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((76341, 76365), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (76355, 76365), True, 'import numpy as np\n'), ((76864, 76872), 'keras.layers.Dense', 'Dense', (['(5)'], {}), '(5)\n', (76869, 76872), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((77906, 77925), 'keras.layers.Dense', 'Dense', (['num_channels'], {}), '(num_channels)\n', (77911, 77925), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((78586, 78610), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (78600, 78610), True, 'import numpy as np\n'), ((78964, 78988), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (78978, 78988), True, 'import numpy as np\n'), ((79780, 79804), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (79794, 79804), True, 'import numpy as np\n'), ((80257, 80281), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (80271, 80281), True, 'import numpy as np\n'), ((80560, 80584), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (80574, 80584), True, 'import numpy as np\n'), ((80922, 80946), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (80936, 80946), True, 'import numpy as np\n'), ((81308, 81332), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (81322, 81332), True, 'import numpy as np\n'), ((82133, 82157), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (82147, 82157), True, 'import numpy as np\n'), ((83416, 83439), 'distutils.version.StrictVersion', '_StrictVersion', (['"""2.2.1"""'], {}), "('2.2.1')\n", (83430, 83439), True, 'from distutils.version import StrictVersion as _StrictVersion\n'), ((87472, 87509), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (87478, 87509), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((87596, 87632), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(1, 
1)'}), '((2, 2), strides=(1, 1))\n', (87608, 87632), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((87669, 87706), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(4, 4)'], {'activation': '"""relu"""'}), "(32, (4, 4), activation='relu')\n", (87675, 87706), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((87743, 87779), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (87755, 87779), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((87816, 87853), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(4, 4)'], {'activation': '"""relu"""'}), "(32, (4, 4), activation='relu')\n", (87822, 87853), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((87890, 87926), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {'strides': '(2, 2)'}), '((2, 2), strides=(2, 2))\n', (87902, 87926), False, 'from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), ((87963, 87972), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (87970, 87972), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((90614, 90630), 'coremltools.models.utils._macos_version', '_macos_version', ([], {}), '()\n', (90628, 90630), False, 'from coremltools.models.utils import _macos_version, _is_macos\n'), ((90803, 90827), 'shutil.rmtree', 'shutil.rmtree', (['model_dir'], {}), '(model_dir)\n', (90816, 90827), False, 'import shutil\n'), ((92045, 92085), 'keras.layers.Dense', 'Dense', (['num_channels'], {'input_dim': 'input_dim'}), '(num_channels, input_dim=input_dim)\n', (92050, 92085), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((93219, 93273), 'keras.layers.Dense', 'Dense', (['num_channels'], {'input_shape': 'input_shape'}), '(num_channels, input_shape=input_shape, **kwargs)\n', (93224, 93273), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((94087, 94149), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(5)', 'kernel_size': '(7, 7)', 'input_shape': 'input_shape'}), '(filters=5, kernel_size=(7, 7), input_shape=input_shape)\n', (94093, 94149), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((94173, 94195), 'keras.layers.UpSampling2D', 'UpSampling2D', ([], {}), '(**kwargs)\n', (94185, 94195), False, 'from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D\n'), ((95238, 95279), 'keras.layers.Conv2D', 'Conv2D', ([], {'input_shape': 'input_shape'}), '(input_shape=input_shape, **kwargs)\n', (95244, 95279), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((96099, 96107), 'keras.layers.Dense', 'Dense', (['(4)'], {}), '(4)\n', (96104, 96107), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((96129, 96137), 'keras.layers.Dense', 'Dense', (['(4)'], {}), '(4)\n', (96134, 96137), False, 'from keras.layers import Dense, 
Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((103033, 103041), 'keras.layers.Dense', 'Dense', (['(5)'], {}), '(5)\n', (103038, 103041), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((104365, 104389), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (104379, 104389), True, 'import numpy as np\n'), ((106697, 106705), 'keras.layers.Dense', 'Dense', (['(8)'], {}), '(8)\n', (106702, 106705), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((109812, 109836), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (109826, 109836), True, 'import numpy as np\n'), ((3696, 3724), 'numpy.random.rand', 'np.random.rand', (['*input_shape'], {}), '(*input_shape)\n', (3710, 3724), True, 'import numpy as np\n'), ((7442, 7453), 'coremltools.models.utils._is_macos', '_is_macos', ([], {}), '()\n', (7451, 7453), False, 'from coremltools.models.utils import _macos_version, _is_macos\n'), ((61164, 61209), 'keras.layers.Dense', 'Dense', (['fc_size'], {'name': '"""fc1"""', 'activation': '"""relu"""'}), "(fc_size, name='fc1', activation='relu')\n", (61169, 61209), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((61301, 61394), 'keras.layers.LSTM', 'LSTM', (['rnn_size'], {'return_sequences': '(True)', 'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""'}), "(rnn_size, return_sequences=True, activation='relu', kernel_initializer\n ='he_normal')\n", (61305, 61394), False, 'from keras.layers import SimpleRNN, LSTM, GRU\n'), ((61600, 61654), 'keras.layers.Dense', 'Dense', (['output_dim'], {'name': '"""y_pred"""', 'activation': '"""softmax"""'}), "(output_dim, name='y_pred', activation='softmax')\n", (61605, 61654), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((81754, 81778), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (81768, 81778), True, 'import numpy as np\n'), ((82566, 82590), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (82580, 82590), True, 'import numpy as np\n'), ((83464, 83484), 'keras.layers.ReLU', 'ReLU', (['(6.0)'], {'name': 'name'}), '(6.0, name=name)\n', (83468, 83484), False, 'from keras.layers import DepthwiseConv2D, ReLU\n'), ((83529, 83557), 'keras.layers.Activation', 'Activation', (['relu6'], {'name': 'name'}), '(relu6, name=name)\n', (83539, 83557), False, 'from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D\n'), ((92502, 92526), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (92516, 92526), True, 'import numpy as np\n'), ((7458, 7474), 'coremltools.models.utils._macos_version', '_macos_version', ([], {}), '()\n', (7472, 7474), False, 'from coremltools.models.utils import _macos_version, _is_macos\n'), ((45089, 45113), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (45103, 45113), True, 'import numpy as np\n'), ((45696, 45720), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (45710, 45720), True, 'import numpy as np\n'), ((46191, 46215), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (46205, 46215), True, 
'import numpy as np\n'), ((46794, 46818), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (46808, 46818), True, 'import numpy as np\n'), ((47308, 47332), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (47322, 47332), True, 'import numpy as np\n'), ((47898, 47922), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (47912, 47922), True, 'import numpy as np\n'), ((48469, 48493), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (48483, 48493), True, 'import numpy as np\n'), ((49042, 49066), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (49056, 49066), True, 'import numpy as np\n'), ((49615, 49639), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (49629, 49639), True, 'import numpy as np\n'), ((50028, 50052), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (50042, 50052), True, 'import numpy as np\n'), ((50536, 50560), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (50550, 50560), True, 'import numpy as np\n'), ((51124, 51148), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (51138, 51148), True, 'import numpy as np\n'), ((51777, 51801), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (51791, 51801), True, 'import numpy as np\n'), ((52443, 52467), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (52457, 52467), True, 'import numpy as np\n'), ((53122, 53146), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (53136, 53146), True, 'import numpy as np\n'), ((53951, 53975), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (53965, 53975), True, 'import numpy as np\n'), ((55296, 55320), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (55310, 55320), True, 'import numpy as np\n'), ((55934, 55958), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (55948, 55958), True, 'import numpy as np\n'), ((56818, 56842), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (56832, 56842), True, 'import numpy as np\n'), ((57752, 57776), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (57766, 57776), True, 'import numpy as np\n'), ((58613, 58637), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (58627, 58637), True, 'import numpy as np\n'), ((59251, 59275), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (59265, 59275), True, 'import numpy as np\n'), ((60016, 60040), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (60030, 60040), True, 'import numpy as np\n'), ((60779, 60803), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (60793, 60803), True, 'import numpy as np\n'), ((73890, 73914), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (73904, 73914), True, 'import numpy as np\n'), ((76950, 76974), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (76964, 76974), True, 'import numpy as np\n'), ((77552, 77576), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (77566, 77576), True, 'import numpy as np\n'), ((78018, 78042), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (78032, 78042), True, 'import numpy as np\n'), ((98511, 98535), 'numpy.random.rand', 
'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (98525, 98535), True, 'import numpy as np\n'), ((100671, 100695), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (100685, 100695), True, 'import numpy as np\n'), ((3774, 3802), 'numpy.random.rand', 'np.random.rand', (['*input_shape'], {}), '(*input_shape)\n', (3788, 3802), True, 'import numpy as np\n'), ((61770, 61794), 'numpy.random.rand', 'np.random.rand', (['*w.shape'], {}), '(*w.shape)\n', (61784, 61794), True, 'import numpy as np\n'), ((3610, 3633), 'numpy.product', 'np.product', (['input_shape'], {}), '(input_shape)\n', (3620, 3633), True, 'import numpy as np\n')]
|
from __future__ import unicode_literals
import time
from netmiko.cisco_base_connection import CiscoSSHConnection
class AccedianSSH(CiscoSSHConnection):
def session_preparation(self):
self._test_channel_read()
self.set_base_prompt()
# Clear the read buffer
time.sleep(0.3 * self.global_delay_factor)
self.clear_buffer()
def check_enable_mode(self, *args, **kwargs):
raise AttributeError("Accedian devices do not support enable mode!")
def enable(self, *args, **kwargs):
raise AttributeError("Accedian devices do not support enable mode!")
def exit_enable_mode(self, *args, **kwargs):
raise AttributeError("Accedian devices do not support enable mode!")
def check_config_mode(self):
"""Accedian devices do not have a config mode."""
return False
def config_mode(self):
"""Accedian devices do not have a config mode."""
return ""
def exit_config_mode(self):
"""Accedian devices do not have a config mode."""
return ""
def set_base_prompt(
self, pri_prompt_terminator=":", alt_prompt_terminator="#", delay_factor=2
):
"""Sets self.base_prompt: used as delimiter for stripping of trailing prompt in output."""
super(AccedianSSH, self).set_base_prompt(
pri_prompt_terminator=pri_prompt_terminator,
alt_prompt_terminator=alt_prompt_terminator,
delay_factor=delay_factor,
)
return self.base_prompt
def save_config(self, cmd="", confirm=True, confirm_response=""):
"""Not Implemented"""
raise NotImplementedError
|
[
"time.sleep"
] |
[((294, 336), 'time.sleep', 'time.sleep', (['(0.3 * self.global_delay_factor)'], {}), '(0.3 * self.global_delay_factor)\n', (304, 336), False, 'import time\n')]
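The driver above mainly disables enable/config handling and tunes the prompt terminators; the connection plumbing itself comes from the netmiko base class. A minimal usage sketch, assuming netmiko's standard ConnectHandler dispatch — the host, credentials and command string below are placeholders, not values taken from the source:

# Hedged usage sketch; host, credentials and the command are placeholders.
from netmiko import ConnectHandler

device = {
    "device_type": "accedian",   # dispatches to the AccedianSSH driver above
    "host": "192.0.2.10",
    "username": "admin",
    "password": "secret",
}
conn = ConnectHandler(**device)
print(conn.base_prompt)                      # populated during session_preparation()
output = conn.send_command("show version")   # placeholder command
conn.disconnect()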
|
import importlib
import os
import sys
from contextlib import contextmanager, suppress
def symbol_by_name(name, imp=None):
imp = importlib.import_module if imp is None else imp
if ':' in name:
module_name, _, attr = name.rpartition(':')
else:
module_name, _, attr = name.rpartition('.')
try:
module = imp(module_name)
except ValueError as exc:
raise ValueError(
f'Cannot import {name!r}: {exc}',
).with_traceback(sys.exc_info()[2])
return getattr(module, attr) if attr else module
def import_from_cwd(module: str, *, imp=None, package=None):
"""
Import module, temporarily including modules in the current directory.
    Modules located in the current directory have
    precedence over modules located in `sys.path`.
"""
if imp is None:
imp = importlib.import_module
with cwd_in_path():
return imp(module, package=package)
@contextmanager
def cwd_in_path():
"""Context adding the current working directory to sys.path."""
cwd = os.getcwd()
if cwd in sys.path:
yield
else:
sys.path.insert(0, cwd)
try:
yield cwd
finally:
with suppress(ValueError):
sys.path.remove(cwd)
|
[
"sys.path.remove",
"os.getcwd",
"contextlib.suppress",
"sys.path.insert",
"sys.exc_info"
] |
[((1054, 1065), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1063, 1065), False, 'import os\n'), ((1122, 1145), 'sys.path.insert', 'sys.path.insert', (['(0)', 'cwd'], {}), '(0, cwd)\n', (1137, 1145), False, 'import sys\n'), ((1215, 1235), 'contextlib.suppress', 'suppress', (['ValueError'], {}), '(ValueError)\n', (1223, 1235), False, 'from contextlib import contextmanager, suppress\n'), ((1253, 1273), 'sys.path.remove', 'sys.path.remove', (['cwd'], {}), '(cwd)\n', (1268, 1273), False, 'import sys\n'), ((486, 500), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (498, 500), False, 'import sys\n')]
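Since `symbol_by_name` resolves both 'module:attr' and dotted paths, and `import_from_cwd` simply runs the import inside `cwd_in_path`, their behaviour can be illustrated directly from the code above (the 'settings' module below is hypothetical):

# Usage sketch for the helpers above; 'settings' is a hypothetical local module.
join = symbol_by_name('os.path:join')                 # -> the os.path.join function
ordered = symbol_by_name('collections.OrderedDict')   # dotted form: last part is the attribute

# A module sitting in the current working directory wins over sys.path entries:
local_settings = import_from_cwd('settings')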
|
from django.contrib import admin
from .models import Page
class PageAdmin(admin.ModelAdmin):
    # Make the created/updated date fields read-only
readonly_fields = ('created', 'updated',)
list_display = ('title', 'order', )
admin.site.register(Page, PageAdmin)
|
[
"django.contrib.admin.site.register"
] |
[((245, 281), 'django.contrib.admin.site.register', 'admin.site.register', (['Page', 'PageAdmin'], {}), '(Page, PageAdmin)\n', (264, 281), False, 'from django.contrib import admin\n')]
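For context, a model along these lines would pair with the admin class above; the only fields the admin actually references are `title`, `order`, `created`, and `updated`, so the rest are assumptions:

```python
from django.db import models

class Page(models.Model):
    title = models.CharField(max_length=200)              # assumed length
    content = models.TextField(blank=True)                 # assumed field
    order = models.SmallIntegerField(default=0)
    created = models.DateTimeField(auto_now_add=True)      # read-only in the admin
    updated = models.DateTimeField(auto_now=True)          # read-only in the admin

    def __str__(self):
        return self.title
```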
|
# -*- coding: utf-8 -*-
"""
Setup details for ANLffr
Created on Thu Oct 10 19:00:08 2013
@author: hari
"""
from setuptools import setup, find_packages
setup(
name='ANLffr',
version='0.3.0a1',
author='<NAME>',
author_email='<EMAIL>',
packages=find_packages(include=['anlffr', 'anlffr.*']),
python_requires='>=3',
requires=['joblib'],
package_data={'anlffr.helper': ['sysfiles/*']},
url='https://github.com/SNAPsoftware/ANLffr',
license='BSD (3 Clause)',
description=('Useful functions for processing and analysis of'
'mass-potentials and other electrophysiological data from'
'SNAPlab at Purdue University. Provides frequency, and'
'time-frequency analysis capabilities'),
long_description=open('README.rst').read(),
)
|
[
"setuptools.find_packages"
] |
[((265, 310), 'setuptools.find_packages', 'find_packages', ([], {'include': "['anlffr', 'anlffr.*']"}), "(include=['anlffr', 'anlffr.*'])\n", (278, 310), False, 'from setuptools import setup, find_packages\n')]
|
#!/usr/bin/env python3
from jinja2 import Environment, FileSystemLoader
import yaml
import os
import datetime
import numpy as np
# HARDCODE
LUMPTEMPLATE = 'LuMP_recorder.j2'
LCUTEMPLATE = 'beamctl.j2'
LUMPPROCESS = 'LuMP_processor.j2'
DATADIRROOT = '/local_data/ARTEMIS/'
SCRIPTDIR = '/data/Commissioning/PSRMonitor/Artemis3/'
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description='''Generate an LCU beamctl script and LuMP recorder scripts from ARTEMIS3 YAML config files''')
parser.add_argument('-o', '--obsConfigFile', help='YAML observation config file (required)')
parser.add_argument('-s', '--srcConfigFile', help='YAML source config file (required)')
parser.add_argument('-d', '--start_date', default=None, help='Start date and time to begin observation, if not used the observation begins when the script begins, which is not ideal when capturing all lanes, use the format: YYYY-MM-DDThh:mm:ssZ e.g. 2019-02-26T10:30:00Z')
parser.add_argument('--templateDir', help='jinja2 template dir', default='../templates')
parser.add_argument('-v', '--verbose', help='Verbose mode', action='store_true')
parser.add_argument('--dry_run', help='Do not write scripts, just test out the generator script, useful with the -v option', action='store_true')
parser.add_argument('--duration', help='Number of seconds to capture, default: 60', default=60)
args = parser.parse_args()
if args.obsConfigFile is None:
print('ERROR: Observation config file is not set but is required, exiting')
exit(1)
if args.srcConfigFile is None:
print('ERROR: Source config file is not set but is required, exiting')
exit(1)
    obsConfigDict = yaml.safe_load(open(args.obsConfigFile))
    srcConfigDict = yaml.safe_load(open(args.srcConfigFile))
# Useful variables
dt = datetime.datetime.now()
dataPath = dt.strftime('%Y%m%d_%H%M%S')
generatorStartTime = str(dt)
arrayMode = obsConfigDict['beamctl']['antennaset'].split('_')[0]
decRad = srcConfigDict['DECJD'] * np.pi / 180.
raRad = srcConfigDict['RAJD'] * np.pi / 180.
dirStr = '%0.6f,%0.6f,J2000'%(raRad,decRad)
# Generate LuMP scripts
lumpDict = obsConfigDict['LuMP']
if not (args.start_date is None):
lumpDict['opt_arg'] = '--start_date=%s'%args.start_date
year = int(args.start_date[0:4])
month = int(args.start_date[5:7])
day = int(args.start_date[8:10])
hour = int(args.start_date[11:13])
minute = int(args.start_date[14:16])
lumpDict['generator_script'] = os.path.basename(__file__)
lumpDict['generator_datetime'] = generatorStartTime
lumpDict['anadir'] = dirStr
lumpDict['digdir'] = dirStr
lumpDict['duration'] = args.duration
lumpDict['datadir'] = DATADIRROOT + '%s_%s'%(dataPath, srcConfigDict['NAME'])
lumpDict['sourcename'] = srcConfigDict['NAME']
lumpDict['ephemeris'] = srcConfigDict['NAME'][1:]+'.par'
lumpDict['source_RA'] = srcConfigDict['RAJ']
lumpDict['source_Dec'] = srcConfigDict['DecJ']
lumpDict['dspsr_out'] = '/oxford_data2/PSRMonitor/data/' + srcConfigDict['NAME'] +'/' + args.start_date[0:4] + args.start_date[5:7] + args.start_date[8:10]
    print(lumpDict['dspsr_out'])
# build a config dict for each lane
configBase = lumpDict.copy()
configBase.pop('lane', None)
configBase['sourcename_array'] = '[%s]*%i'%(srcConfigDict['NAME'], lumpDict['beamlets_per_lane'])
configBase['rightascension_array'] = '[%0.6f]*%i'%(raRad, lumpDict['beamlets_per_lane'])
configBase['declination_array'] = '[%0.6f]*%i'%(decRad, lumpDict['beamlets_per_lane'])
configBase['epoch_array'] = '[J2000]*%i'%(lumpDict['beamlets_per_lane'])
obstime = datetime.datetime(year,month,day, hour,minute)
attime = obstime - datetime.timedelta(minutes=1)
for lane in lumpDict['lane']:
configLane = {**configBase, **lane}
configLane['filename_base'] = '%s_lane%i'%(srcConfigDict['NAME'], lane['id'])
#Load Jinja2 template
env = Environment(loader = FileSystemLoader(args.templateDir), trim_blocks=True, lstrip_blocks=True)
templateLuMP = env.get_template(LUMPTEMPLATE)
renderText = templateLuMP.render(configLane)
templateLuMPProc = env.get_template(LUMPPROCESS)
renderTextProc = templateLuMPProc.render(configLane)
if args.verbose: print(renderText)
outputFn = SCRIPTDIR + '%s_%s_lane%i.sh'%(srcConfigDict['NAME'], arrayMode, lane['id'])
outputFnProc = SCRIPTDIR + '%s_%s_lane%i_Proc.sh'%(srcConfigDict['NAME'], arrayMode, lane['id'])
if lane['id'] < 3:
adagio = 'adagio1'
else:
adagio = 'adagio2'
startdate = attime.strftime('%H:%M %m/%d/%Y')
atstring = 'echo \"ssh %s \'bash -ix \' < %s \" |at %s \n' %(adagio,outputFn, startdate)
if os.path.exists('atscript.exe'):
append_write = 'a' # append if already exists
else:
append_write = 'w' # make a new file if not
fh2 = open('atscript.exe',append_write)
fh2.write(atstring)
fh2.close()
if not args.dry_run:
print('Writing ' + outputFn)
fh = open(outputFn,'w')
fh.write(renderText)
fh.close()
if not args.dry_run:
print('Writing ' + outputFnProc)
fh = open(outputFnProc,'w')
fh.write(renderTextProc)
fh.close()
# Generate LCU script
lcuDict = obsConfigDict['beamctl']
lcuDict['generator_script'] = os.path.basename(__file__)
lcuDict['generator_datetime'] = generatorStartTime
lcuDict['anadir'] = dirStr
lcuDict['digdir'] = dirStr
#Load Jinja2 template
templateLCU = env.get_template(LCUTEMPLATE)
renderText = templateLCU.render(lcuDict)
if args.verbose: print(renderText)
outputFn = SCRIPTDIR + '%s_%s_LCU.sh'%(srcConfigDict['NAME'], arrayMode)
atstring = 'echo \"ssh lcu \'bash -ix \' < %s \" |at %s \n' %(outputFn, startdate)
fh2 = open('atscript.exe',append_write)
fh2.write(atstring)
fh2.close()
if not args.dry_run:
print('Writing ' + outputFn)
fh = open(outputFn,'w')
fh.write(renderText)
fh.close()
|
[
"argparse.ArgumentParser",
"os.path.basename",
"os.path.exists",
"datetime.datetime",
"jinja2.FileSystemLoader",
"datetime.timedelta",
"datetime.datetime.now"
] |
[((389, 526), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate an LCU beamctl script and LuMP recorder scripts from ARTEMIS3 YAML config files"""'}), "(description=\n 'Generate an LCU beamctl script and LuMP recorder scripts from ARTEMIS3 YAML config files'\n )\n", (412, 526), False, 'import argparse\n'), ((1866, 1889), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1887, 1889), False, 'import datetime\n'), ((2579, 2605), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (2595, 2605), False, 'import os\n'), ((3741, 3790), 'datetime.datetime', 'datetime.datetime', (['year', 'month', 'day', 'hour', 'minute'], {}), '(year, month, day, hour, minute)\n', (3758, 3790), False, 'import datetime\n'), ((5607, 5633), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (5623, 5633), False, 'import os\n'), ((3811, 3840), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', (3829, 3840), False, 'import datetime\n'), ((4915, 4945), 'os.path.exists', 'os.path.exists', (['"""atscript.exe"""'], {}), "('atscript.exe')\n", (4929, 4945), False, 'import os\n'), ((4084, 4118), 'jinja2.FileSystemLoader', 'FileSystemLoader', (['args.templateDir'], {}), '(args.templateDir)\n', (4100, 4118), False, 'from jinja2 import Environment, FileSystemLoader\n')]
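The core templating pattern used above (a jinja2 `Environment` with a `FileSystemLoader`, rendering a dict into a shell script) can be exercised in isolation; the template directory and render variables here are placeholders:

```python
from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader("../templates"),
            trim_blocks=True, lstrip_blocks=True)
template = env.get_template("beamctl.j2")  # one of the templates referenced above
script_text = template.render({"anadir": "1.0,0.5,J2000", "digdir": "1.0,0.5,J2000"})
with open("/tmp/beamctl_test.sh", "w") as fh:
    fh.write(script_text)
```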
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Quantization API functions for tf.keras models."""
import tensorflow as tf
from tensorflow_model_optimization.python.core.quantization.keras import quantize_annotate as quantize_annotate_mod
from tensorflow_model_optimization.python.core.quantization.keras import quantize_aware_activation
from tensorflow_model_optimization.python.core.quantization.keras import quantize_config as quantize_config_mod
from tensorflow_model_optimization.python.core.quantization.keras import quantize_layer
from tensorflow_model_optimization.python.core.quantization.keras import quantize_wrapper
from tensorflow_model_optimization.python.core.quantization.keras import quantizers
from tensorflow_model_optimization.python.core.quantization.keras.default_8bit import default_8bit_quantize_layout_transform
from tensorflow_model_optimization.python.core.quantization.keras.default_8bit import default_8bit_quantize_registry
from tensorflow_model_optimization.python.core.quantization.keras.layers import conv_batchnorm
keras = tf.keras
def quantize_scope(*args):
"""Required scope to deserialize quantized models stored in tf.keras h5 format.
Args:
*args: Variable length list of dictionaries of name, class pairs to add to
the scope created by this method.
Returns:
Object of type `CustomObjectScope` with quantization objects included.
Example:
```python
tf.keras.models.save_model(quantized_model, keras_file)
with quantize_scope():
loaded_model = tf.keras.models.load_model(keras_file)
```
"""
quantization_objects = {
'QuantizeAnnotate': quantize_annotate_mod.QuantizeAnnotate,
'QuantizeAwareActivation':
quantize_aware_activation.QuantizeAwareActivation,
'NoOpActivation': quantize_aware_activation.NoOpActivation,
'QuantizeWrapper': quantize_wrapper.QuantizeWrapper,
'QuantizeLayer': quantize_layer.QuantizeLayer,
# TODO(tf-mot): add way for different quantization schemes to modify this.
'_DepthwiseConvBatchNorm2D': conv_batchnorm._DepthwiseConvBatchNorm2D, # pylint: disable=protected-access
'_ConvBatchNorm2D': conv_batchnorm._ConvBatchNorm2D # pylint: disable=protected-access
}
quantization_objects.update(default_8bit_quantize_registry._types_dict()) # pylint: disable=protected-access
quantization_objects.update(quantizers._types_dict()) # pylint: disable=protected-access
return tf.keras.utils.custom_object_scope(*(args + (quantization_objects,)))
# TODO(tfmot): link to docs to explain what quantization implementation means.
def quantize_model(to_quantize):
"""Quantize a whole tf.keras model with the default quantization implementation.
To be more precise, `quantize_model` creates a model that emulates
quantization during training and stores information that downstream
tools will use to produce actually quantized models.
Quantize a model:
```python
model = quantize_model(
keras.Sequential([
layers.Dense(10, activation='relu', input_shape=(100,)),
layers.Dense(2, activation='sigmoid')
]))
```
Note that this function removes the optimizer from the original model.
Additionally, training the model returned by `quantize_model` will not affect
the weights of the original model.
Args:
to_quantize: tf.keras model to be quantized. It can have pre-trained
weights.
Returns:
Returns a new tf.keras model prepared for quantization.
"""
if to_quantize is None:
raise ValueError('`to_quantize` cannot be None')
if not isinstance(to_quantize, keras.Model):
raise ValueError(
'`to_quantize` can only be a `tf.keras.Model` instance. Use '
'the `quantize_annotate_layer` API to handle individual layers.'
'You passed an instance of type: {input}.'.format(
input=to_quantize.__class__.__name__))
if not isinstance(to_quantize, keras.Sequential) \
and not to_quantize._is_graph_network: # pylint: disable=protected-access
raise ValueError(
'`to_quantize` can only either be a tf.keras Sequential or '
'Functional model.')
annotated_model = quantize_annotate_model(to_quantize)
return quantize_apply(annotated_model)
def quantize_annotate_model(to_annotate):
"""Annotate a model to be quantized.
This function does not actually quantize anything. It is merely to specify
that the model needs to be quantized.
This function is intended to be used in conjunction with the
`quantize_annotate_layer`
API. It's otherwise simpler to use `quantize_model`.
Annotate a model while overriding the default behavior for one layer:
```python
quantize_config = MyDenseQuantizeConfig()
model = quantize_annotate_model(keras.Sequential([
layers.Dense(10, activation='relu', input_shape=(100,)),
quantize_annotate_layer(layers.Dense(2, activation='sigmoid'),
quantize_config=quantize_config)
  ]))
```
Note that this function removes the optimizer from the original model.
Args:
to_annotate: tf.keras model to annotate to be quantized.
Returns:
New tf.keras model with each layer in the model wrapped with
`QuantizeAnnotate`.
"""
if to_annotate is None:
raise ValueError('`to_annotate` cannot be None')
if not isinstance(to_annotate, keras.Model):
raise ValueError(
'`to_annotate` can only be a `tf.keras.Model` instance. Use '
'the `quantize_annotate_layer` API to handle individual layers. '
'You passed an instance of type: {input}.'.format(
input=to_annotate.__class__.__name__))
if not isinstance(to_annotate, keras.Sequential) \
and not to_annotate._is_graph_network: # pylint: disable=protected-access
raise ValueError(
'`to_annotate` can only either be a tf.keras Sequential or '
'Functional model.')
def _add_quant_wrapper(layer):
"""Add annotation wrapper."""
# Already annotated layer. No need to wrap.
if isinstance(layer, quantize_annotate_mod.QuantizeAnnotate):
return layer
if isinstance(layer, tf.keras.Model):
raise ValueError(
'Quantizing a tf.keras Model inside another tf.keras Model is not supported.'
)
return quantize_annotate_mod.QuantizeAnnotate(layer)
return keras.models.clone_model(
to_annotate, input_tensors=None, clone_function=_add_quant_wrapper)
def quantize_annotate_layer(to_annotate, quantize_config=None):
"""Annotate a layer to be quantized.
This function does not actually quantize anything. It is merely to specify
that the layer needs to be quantized.
Annotate a layer:
```python
model = keras.Sequential([
layers.Dense(10, activation='relu', input_shape=(100,)),
quantize_annotate_layer(layers.Dense(2, activation='sigmoid'))
  ])
```
Note that this function removes the optimizer from the original model.
Args:
to_annotate: tf.keras layer to annotate to be quantized.
quantize_config: `QuantizeConfig` to quantize layer.
Returns:
tf.keras layer wrapped with `QuantizeAnnotate`.
"""
if to_annotate is None:
raise ValueError('`to_annotate` cannot be None')
# Check against keras.Model since it is an instance of keras.layers.Layer.
if not isinstance(to_annotate, keras.layers.Layer) or isinstance(
to_annotate, keras.Model):
raise ValueError(
'`to_annotate` can only be a `tf.keras.layers.Layer` instance. '
'You passed an instance of type: {input}.'.format(
input=to_annotate.__class__.__name__))
if quantize_config is not None and not isinstance(
quantize_config, quantize_config_mod.QuantizeConfig):
raise ValueError(
'`quantize_config` can only be a `tfmot.quantization.keras.QuantizeConfig` instance.'
'You passed an instance of type: {input}.'.format(
input=quantize_config.__class__.__name__))
return quantize_annotate_mod.QuantizeAnnotate(
layer=to_annotate, quantize_config=quantize_config)
def quantize_apply(model):
"""Introduce quantization operations to a tf.keras model.
This function takes a tf.keras model which has been annotated with
`quantize_annotate` and constructs a new model in which each of the
annotated layers will ultimately be quantized. The new quantization
  operations enable the model to *emulate* quantization during training
and store information that downstream tools will use to produce
an actually quantized model.
Apply quantization to a model:
```python
model = quantize_apply(annotated_model)
```
Note that this function removes the optimizer from the original model.
Additionally, training the model returned by `quantize_apply` will not affect
the weights of the original model.
Args:
model: A tf.keras Sequential or Functional model which has been annotated
with `quantize_annotate`. It can have pre-trained weights.
Returns:
Returns a new tf.keras model in which the annotated layers have been
prepared for quantization.
"""
if model is None:
raise ValueError('`model` cannot be None')
if not isinstance(model, keras.Model):
raise ValueError('`model` can only be a `tf.keras.Model` instance.'
'You passed an instance of type: {input}.'.format(
input=model.__class__.__name__))
if not isinstance(model, keras.Sequential) \
and not model._is_graph_network: # pylint: disable=protected-access
raise ValueError('`model` can only either be a tf.keras Sequential or '
'Functional model.')
# Have at least 1 layer annotated with QuantizeAnnotate
if not any(isinstance(layer, quantize_annotate_mod.QuantizeAnnotate)
for layer in model.layers):
raise ValueError('`model` must contain at least one layer which have been '
'annotated with `quantize_annotate*`. There are no layers '
'to quantize.')
if not model.built:
    raise ValueError('`model` must be a built model, but it has not '
                     'been built yet. Please call `model.build(input_shape)` '
                     'before quantizing your model.')
def _clone_model_with_weights(model_to_clone):
cloned_model = keras.models.clone_model(model_to_clone)
cloned_model.set_weights(model_to_clone.get_weights())
return cloned_model
def _extract_original_model(model_to_unwrap):
"""Extracts original model by removing wrappers."""
layer_quantize_map = {}
def _unwrap(layer):
if not isinstance(layer, quantize_annotate_mod.QuantizeAnnotate):
return layer
annotate_wrapper = layer
layer_quantize_map[annotate_wrapper.layer.name] = {
'quantize_config': annotate_wrapper.quantize_config
}
return annotate_wrapper.layer
unwrapped_model = keras.models.clone_model(
model_to_unwrap, input_tensors=None, clone_function=_unwrap)
return unwrapped_model, layer_quantize_map
def _quantize(layer): # pylint: disable=missing-docstring
if layer.name not in layer_quantize_map:
return layer
quantize_config = layer_quantize_map[layer.name].get('quantize_config')
if not quantize_config and quantize_registry.supports(layer):
quantize_config = quantize_registry.get_quantize_config(layer)
if not quantize_config:
error_msg = (
'Layer {}:{} is not supported. You can quantize this '
'layer by passing a `tfmot.quantization.keras.QuantizeConfig` '
'instance to the `quantize_annotate_layer` '
'API.')
raise RuntimeError(
error_msg.format(layer.name, layer.__class__,
quantize_registry.__class__))
# `QuantizeWrapper` does not copy any additional layer params from
# `QuantizeAnnotate`. This should generally be fine, but occasionally
# `QuantizeAnnotate` wrapper may contain `batch_input_shape` like params.
# TODO(pulkitb): Ensure this does not affect model cloning.
return quantize_wrapper.QuantizeWrapper(layer, quantize_config)
# 1. Create a copy of the model with the same weights. This ensures
# modifications don't affect the original model, or its weights.
model_copy = _clone_model_with_weights(model)
# 2. Remove QuantizeAnnotate wrappers from the layers in the model. This
# extracts the original model structure (easier to transform), and
# stores relevant quantization information in a map.
unwrapped_model, layer_quantize_map = _extract_original_model(model_copy)
# Model cloning excludes input layers. Add input layers into the map
# since they need to be matched for patterns as well.
# pylint: disable=protected-access
for input_layer in unwrapped_model._input_layers:
for outbound_node in input_layer._outbound_nodes:
if outbound_node.outbound_layer.name in layer_quantize_map:
layer_quantize_map[input_layer.name] = {}
# pylint: enable=protected-access
# 3. Apply the graph transformations required to match model passes on
# target device/dialect.
quantize_transform = \
default_8bit_quantize_layout_transform.QuantizeLayoutTransform()
# layer_quantize_map gets modified by the transformations.
transformed_model, layer_quantize_map = quantize_transform.apply(
unwrapped_model, layer_quantize_map)
# TODO(pulkitb): Think more about how to introduce Default specific code.
quantize_registry = default_8bit_quantize_registry.QuantizeRegistry(
)
# 4. Actually quantize all the relevant layers in the model. This is done by
# wrapping the layers with QuantizeWrapper, and passing the associated
# `QuantizeConfig`.
return keras.models.clone_model(
transformed_model, input_tensors=None, clone_function=_quantize)
|
[
"tensorflow_model_optimization.python.core.quantization.keras.quantize_annotate.QuantizeAnnotate",
"tensorflow_model_optimization.python.core.quantization.keras.default_8bit.default_8bit_quantize_registry.QuantizeRegistry",
"tensorflow_model_optimization.python.core.quantization.keras.default_8bit.default_8bit_quantize_layout_transform.QuantizeLayoutTransform",
"tensorflow.keras.utils.custom_object_scope",
"tensorflow_model_optimization.python.core.quantization.keras.quantizers._types_dict",
"tensorflow_model_optimization.python.core.quantization.keras.quantize_wrapper.QuantizeWrapper",
"tensorflow_model_optimization.python.core.quantization.keras.default_8bit.default_8bit_quantize_registry._types_dict"
] |
[((3089, 3158), 'tensorflow.keras.utils.custom_object_scope', 'tf.keras.utils.custom_object_scope', (['*(args + (quantization_objects,))'], {}), '(*(args + (quantization_objects,)))\n', (3123, 3158), True, 'import tensorflow as tf\n'), ((8564, 8659), 'tensorflow_model_optimization.python.core.quantization.keras.quantize_annotate.QuantizeAnnotate', 'quantize_annotate_mod.QuantizeAnnotate', ([], {'layer': 'to_annotate', 'quantize_config': 'quantize_config'}), '(layer=to_annotate, quantize_config=\n quantize_config)\n', (8602, 8659), True, 'from tensorflow_model_optimization.python.core.quantization.keras import quantize_annotate as quantize_annotate_mod\n'), ((13736, 13800), 'tensorflow_model_optimization.python.core.quantization.keras.default_8bit.default_8bit_quantize_layout_transform.QuantizeLayoutTransform', 'default_8bit_quantize_layout_transform.QuantizeLayoutTransform', ([], {}), '()\n', (13798, 13800), False, 'from tensorflow_model_optimization.python.core.quantization.keras.default_8bit import default_8bit_quantize_layout_transform\n'), ((14072, 14121), 'tensorflow_model_optimization.python.core.quantization.keras.default_8bit.default_8bit_quantize_registry.QuantizeRegistry', 'default_8bit_quantize_registry.QuantizeRegistry', ([], {}), '()\n', (14119, 14121), False, 'from tensorflow_model_optimization.python.core.quantization.keras.default_8bit import default_8bit_quantize_registry\n'), ((2905, 2949), 'tensorflow_model_optimization.python.core.quantization.keras.default_8bit.default_8bit_quantize_registry._types_dict', 'default_8bit_quantize_registry._types_dict', ([], {}), '()\n', (2947, 2949), False, 'from tensorflow_model_optimization.python.core.quantization.keras.default_8bit import default_8bit_quantize_registry\n'), ((3017, 3041), 'tensorflow_model_optimization.python.core.quantization.keras.quantizers._types_dict', 'quantizers._types_dict', ([], {}), '()\n', (3039, 3041), False, 'from tensorflow_model_optimization.python.core.quantization.keras import quantizers\n'), ((6890, 6935), 'tensorflow_model_optimization.python.core.quantization.keras.quantize_annotate.QuantizeAnnotate', 'quantize_annotate_mod.QuantizeAnnotate', (['layer'], {}), '(layer)\n', (6928, 6935), True, 'from tensorflow_model_optimization.python.core.quantization.keras import quantize_annotate as quantize_annotate_mod\n'), ((12665, 12721), 'tensorflow_model_optimization.python.core.quantization.keras.quantize_wrapper.QuantizeWrapper', 'quantize_wrapper.QuantizeWrapper', (['layer', 'quantize_config'], {}), '(layer, quantize_config)\n', (12697, 12721), False, 'from tensorflow_model_optimization.python.core.quantization.keras import quantize_wrapper\n')]
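Putting the three entry points above together, a hedged end-to-end sketch (the toy model and file path are arbitrary):

```python
import tensorflow as tf

# Toy model; layer sizes are arbitrary.
base = tf.keras.Sequential([
    tf.keras.layers.Dense(10, activation='relu', input_shape=(100,)),
    tf.keras.layers.Dense(2, activation='sigmoid'),
])

# Whole-model path: annotate every layer and apply the default 8-bit scheme.
q_model = quantize_model(base)
q_model.compile(optimizer='adam', loss='binary_crossentropy')

# Reloading a saved quantized model needs the custom objects in scope.
tf.keras.models.save_model(q_model, '/tmp/q_model.h5')
with quantize_scope():
    reloaded = tf.keras.models.load_model('/tmp/q_model.h5')
```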
|
import json
import os
import io
import http.client
import urllib.parse
import requests
# Imports for the REST API
from flask import Flask, request
# Imports for image procesing
from PIL import Image
#import scipy
#from scipy import misc
# Imports for prediction
from predict import initialize, predict_image, predict_url
app = Flask(__name__)
# Replace <Subscription Key> with your valid subscription key. Subscription Key is being passed as an Environment Variable.
subscription_key = os.getenv("SUBSCRIPTION_KEY")
pushover_token = os.getenv("PUSHOVER_TOKEN")
pushover_user = os.getenv("PUSHOVER_USER")
# assert subscription_key
# 4MB Max image size limit
app.config['MAX_CONTENT_LENGTH'] = 4 * 1024 * 1024
# Default route just shows simple text
@app.route('/')
def index():
return 'CustomVision.ai model host harness'
# Like the CustomVision.ai Prediction service /image route handles either
# - octet-stream image file
# - a multipart/form-data with files in the imageData parameter
@app.route('/image', methods=['POST'])
def predict_image_handler():
try:
imageData = None
if ('imageData' in request.files):
imageData = request.files['imageData']
else:
imageData = io.BytesIO(request.get_data())
#img = scipy.misc.imread(imageData)
img = Image.open(imageData)
results = predict_image(img)
# local model
highestProb = highestProbabilityTagMeetingThreshold(results, 0.3)
# cloud model
if highestProb < 0.6:
results = analyze_image_external(img)
print(results)
return json.dumps(results)
except Exception as e:
print('EXCEPTION:', str(e))
return 'Error processing image', 500
vision_base_url = "https://westcentralus.api.cognitive.microsoft.com/vision/v2.0/"
# get the Cogntive Services Computer Vision URL
def get_analysis_url(url):
vision_base_url = str(url)
vision_url = vision_base_url + "analyze"
return vision_url
#Returns the highest probablity tag in the json object (takes the output as json.loads as input)
def highestProbabilityTagMeetingThreshold(allTagsAndProbability, threshold):
highestProbabilityTag = 'none'
highestProbability = 0
for item in allTagsAndProbability:
if item['Probability'] > highestProbability and item['Probability'] > threshold:
highestProbability = item['Probability']
highestProbabilityTag = item['Tag']
return highestProbability
def analyze_image_external(image):
image_data = image
headers = {'Ocp-Apim-Subscription-Key': subscription_key,'Content-Type': 'application/octet-stream'}
params = {'visualFeatures': 'Categories,Description,Color'}
analyze_url = get_analysis_url(vision_base_url)
response = requests.post(analyze_url, headers=headers, params=params, data=image_data)
response.raise_for_status()
analysis = response.json()
print(analysis)
image_caption = analysis["description"]["captions"][0]["text"].capitalize()
return image_caption
def push_notification():
conn = http.client.HTTPSConnection("api.pushover.net:443")
conn.request("POST", "/1/messages.json",
urllib.parse.urlencode({
"token": pushover_token,
"user": pushover_user,
"message": "Bear Alert",
}), { "Content-type": "application/x-www-form-urlencoded" })
conn.getresponse()
# Like the CustomVision.ai Prediction service /url route handles url's
# in the body of the request of the form:
# { 'Url': '<http url>'}
@app.route('/url', methods=['POST'])
def predict_url_handler():
try:
image_url = json.loads(request.get_data())['Url']
results = predict_url(image_url)
return json.dumps(results)
except Exception as e:
print('EXCEPTION:', str(e))
return 'Error processing image'
if __name__ == '__main__':
# Load and intialize the model
initialize()
# Run the server
app.run(host='0.0.0.0', port=80)
|
[
"predict.predict_url",
"flask.Flask",
"predict.initialize",
"json.dumps",
"PIL.Image.open",
"flask.request.get_data",
"predict.predict_image",
"os.getenv"
] |
[((277, 292), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (282, 292), False, 'from flask import Flask, request\n'), ((437, 466), 'os.getenv', 'os.getenv', (['"""SUBSCRIPTION_KEY"""'], {}), "('SUBSCRIPTION_KEY')\n", (446, 466), False, 'import os\n'), ((484, 511), 'os.getenv', 'os.getenv', (['"""PUSHOVER_TOKEN"""'], {}), "('PUSHOVER_TOKEN')\n", (493, 511), False, 'import os\n'), ((528, 554), 'os.getenv', 'os.getenv', (['"""PUSHOVER_USER"""'], {}), "('PUSHOVER_USER')\n", (537, 554), False, 'import os\n'), ((3905, 3917), 'predict.initialize', 'initialize', ([], {}), '()\n', (3915, 3917), False, 'from predict import initialize, predict_image, predict_url\n'), ((1278, 1299), 'PIL.Image.open', 'Image.open', (['imageData'], {}), '(imageData)\n', (1288, 1299), False, 'from PIL import Image\n'), ((1318, 1336), 'predict.predict_image', 'predict_image', (['img'], {}), '(img)\n', (1331, 1336), False, 'from predict import initialize, predict_image, predict_url\n'), ((1580, 1599), 'json.dumps', 'json.dumps', (['results'], {}), '(results)\n', (1590, 1599), False, 'import json\n'), ((3677, 3699), 'predict.predict_url', 'predict_url', (['image_url'], {}), '(image_url)\n', (3688, 3699), False, 'from predict import initialize, predict_image, predict_url\n'), ((3715, 3734), 'json.dumps', 'json.dumps', (['results'], {}), '(results)\n', (3725, 3734), False, 'import json\n'), ((1199, 1217), 'flask.request.get_data', 'request.get_data', ([], {}), '()\n', (1215, 1217), False, 'from flask import Flask, request\n'), ((3632, 3650), 'flask.request.get_data', 'request.get_data', ([], {}), '()\n', (3648, 3650), False, 'from flask import Flask, request\n')]
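A small client-side sketch for exercising the two routes above; the host, port, and file path are placeholders:

```python
import requests

# POST a raw image body to /image (the octet-stream path in the handler above).
with open("test.jpg", "rb") as f:
    r = requests.post("http://localhost:80/image",
                  headers={"Content-Type": "application/octet-stream"},
                  data=f.read())
print(r.status_code, r.text)

# POST a URL to /url; the handler reads the 'Url' key from the JSON body.
r = requests.post("http://localhost:80/url",
              json={"Url": "https://example.com/bear.jpg"})
print(r.status_code, r.text)
```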
|
import os
from typing import Dict
from overrides import overrides
import json
from PIL import Image
import numpy as np
from torchvision import transforms
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import Field, ArrayField, LabelField, MetadataField
from allennlp.data.instance import Instance
@DatasetReader.register("docfigure")
class DocFigureDatasetReader(DatasetReader):
def __init__(self,
image_root: str,
lazy: bool = False) -> None:
super().__init__(lazy)
self.image_root = image_root
expected_img_size = 224
self.image_transform = transforms.Compose([
transforms.Resize(expected_img_size),
transforms.CenterCrop(expected_img_size),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
@overrides
def _read(self, file_path: str):
input_file = open(file_path)
lines = input_file.readlines()
if 'json' in file_path:
data = [json.loads(line) for line in lines]
for i, datum in enumerate(data):
image_id = datum['pdf_hash']+'_'+datum['fig_uri']
label = ""
image_root = self.image_root
if 'image_root' in datum and datum['image_root'] is not None:
image_root = datum['image_root']
instance = self.text_to_instance(image_id=image_id, label=label, image_root=image_root)
if instance is not None:
yield instance
else:
for index, line in enumerate(lines):
parts = line.split(', ')
image_id = parts[0].strip()
label = parts[1].strip()
yield self.text_to_instance(image_id, label)
def text_to_instance(self,
image_id: str,
label: str,
image_root: str = None) -> Instance:
if image_root is None:
image_root = self.image_root
try:
image = Image.open(os.path.join(image_root, image_id)).convert('RGB')
        except Exception:
return None
fields: Dict[str, Field] = {}
fields['image'] = ArrayField(self.image_transform(image).numpy())
if len(label) > 0:
fields['label'] = LabelField(label)
fields['image_id'] = MetadataField(image_id)
return Instance(fields)
|
[
"allennlp.data.fields.MetadataField",
"json.loads",
"allennlp.data.instance.Instance",
"allennlp.data.fields.LabelField",
"allennlp.data.dataset_readers.dataset_reader.DatasetReader.register",
"torchvision.transforms.ToTensor",
"torchvision.transforms.CenterCrop",
"torchvision.transforms.Normalize",
"os.path.join",
"torchvision.transforms.Resize"
] |
[((327, 362), 'allennlp.data.dataset_readers.dataset_reader.DatasetReader.register', 'DatasetReader.register', (['"""docfigure"""'], {}), "('docfigure')\n", (349, 362), False, 'from allennlp.data.dataset_readers.dataset_reader import DatasetReader\n'), ((2546, 2569), 'allennlp.data.fields.MetadataField', 'MetadataField', (['image_id'], {}), '(image_id)\n', (2559, 2569), False, 'from allennlp.data.fields import Field, ArrayField, LabelField, MetadataField\n'), ((2585, 2601), 'allennlp.data.instance.Instance', 'Instance', (['fields'], {}), '(fields)\n', (2593, 2601), False, 'from allennlp.data.instance import Instance\n'), ((2499, 2516), 'allennlp.data.fields.LabelField', 'LabelField', (['label'], {}), '(label)\n', (2509, 2516), False, 'from allennlp.data.fields import Field, ArrayField, LabelField, MetadataField\n'), ((699, 735), 'torchvision.transforms.Resize', 'transforms.Resize', (['expected_img_size'], {}), '(expected_img_size)\n', (716, 735), False, 'from torchvision import transforms\n'), ((773, 813), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['expected_img_size'], {}), '(expected_img_size)\n', (794, 813), False, 'from torchvision import transforms\n'), ((851, 872), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (870, 872), False, 'from torchvision import transforms\n'), ((910, 976), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (930, 976), False, 'from torchvision import transforms\n'), ((1160, 1176), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (1170, 1176), False, 'import json\n'), ((2239, 2273), 'os.path.join', 'os.path.join', (['image_root', 'image_id'], {}), '(image_root, image_id)\n', (2251, 2273), False, 'import os\n')]
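Assuming the reader is registered as above, it can also be driven directly; the image root and manifest path are placeholders:

```python
# The manifest format matches _read() above: JSON lines or "image_id, label" lines.
reader = DocFigureDatasetReader(image_root="/data/docfigure/images")
for instance in reader.read("/data/docfigure/train.txt"):
    print(instance["image_id"].metadata, instance["image"].array.shape)
```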
|
# -*- coding: utf-8 -*-
# Looks for scenes in arbitrary video
# python -W ignore video_to_scenes.py -in media/sample/LivingSt1958.mp4 -overwrite 1 -threshold 24 -fade 1 -plot 800
# python -W ignore video_to_scenes.py -in "media/downloads/ia_politicaladarchive/*.mp4" -threshold 24 -out "tmp/ia_politicaladarchive_scenes.csv"
import argparse
import csv
from lib.image_utils import *
from lib.io_utils import *
from lib.math_utils import *
from lib.video_utils import *
import matplotlib.pyplot as plt
import numpy as np
import os
from pprint import pprint
from scenedetect.video_manager import VideoManager
from scenedetect.scene_manager import SceneManager
from scenedetect.stats_manager import StatsManager
from scenedetect.detectors.content_detector import ContentDetector
import sys
# input
parser = argparse.ArgumentParser()
parser.add_argument('-in', dest="INPUT_FILES", default="media/sample/moonlight.mp4", help="Input file pattern")
parser.add_argument('-out', dest="OUTPUT_FILE", default="tmp/scenes.csv", help="CSV output file")
parser.add_argument('-threshold', dest="THRESHOLD", default=30.0, type=float, help="Threshold for scene detection; lower number = more scenes")
parser.add_argument('-min', dest="MIN_SCENE_DUR", default=500, type=int, help="Minimum scene duration in milliseconds")
parser.add_argument('-fade', dest="CHECK_FOR_FADE", default=0, type=int, help="Check for crossfades?")
parser.add_argument('-stats', dest="SAVE_STATS", default=0, type=int, help="Save statistics?")
parser.add_argument('-window', dest="WINDOW_SIZE", default=60, type=int, help="For fades, this is the window size in frames")
parser.add_argument('-fthreshold', dest="FADE_THRESHOLD", default=3.0, type=float, help="Threshold for crossfade detection; lower number = more scenes")
parser.add_argument('-overwrite', dest="OVERWRITE", default=0, type=int, help="Overwrite existing data?")
parser.add_argument('-plot', dest="PLOT", default="", help="Draw plot frames (e.g. 30:90)")
args = parser.parse_args()
# Parse arguments
INPUT_FILES = args.INPUT_FILES
OUTPUT_FILE = args.OUTPUT_FILE
THRESHOLD = args.THRESHOLD
MIN_SCENE_DUR = args.MIN_SCENE_DUR
CHECK_FOR_FADE = args.CHECK_FOR_FADE > 0
SAVE_STATS = args.SAVE_STATS > 0
WINDOW_SIZE = args.WINDOW_SIZE
FADE_THRESHOLD = args.FADE_THRESHOLD
OVERWRITE = args.OVERWRITE > 0
PLOT = args.PLOT.strip()
# Determine plot frames
if ":" in PLOT:
PLOT = tuple([int(p) for p in PLOT.split(":")])
elif len(PLOT) > 0:
PLOT = (0, int(PLOT))
else:
PLOT = False
# Check if file exists already
if os.path.isfile(OUTPUT_FILE) and not OVERWRITE:
print("%s already exists. Skipping." % OUTPUT_FILE)
sys.exit()
# Read files
files = getFilenames(INPUT_FILES)
fileCount = len(files)
# Make sure output dirs exist
makeDirectories(OUTPUT_FILE)
progress = 0
def getScenes(video_path, threshold=30.0, minSceneDur=500, windowSize=50, fadeThreshold=3.0):
global progress
global fileCount
basename = os.path.basename(video_path)
doStats = CHECK_FOR_FADE or PLOT or SAVE_STATS
# type: (str) -> List[Tuple[FrameTimecode, FrameTimecode]]
video_manager = VideoManager([video_path])
stats_manager = StatsManager()
# Construct our SceneManager and pass it our StatsManager.
scene_manager = SceneManager(stats_manager)
base_timecode = video_manager.get_base_timecode()
framerate = video_manager.get_framerate()
# Add ContentDetector algorithm (each detector's constructor
# takes detector options, e.g. threshold).
min_scene_len = roundInt(minSceneDur / 1000.0 * framerate)
scene_manager.add_detector(ContentDetector(threshold=threshold, min_scene_len=min_scene_len))
# We save our stats file to {VIDEO_PATH}.stats.csv.
stats_file_path = OUTPUT_FILE.replace(".csv", "%s.csv")
stats_file_path = stats_file_path % ("_" + basename + "_stats")
scene_list = []
print("Looking for scenes in %s" % video_path)
try:
# If stats file exists, load it.
if doStats and os.path.exists(stats_file_path):
# Read stats from CSV file opened in read mode:
with open(stats_file_path, 'r') as stats_file:
stats_manager.load_from_csv(stats_file, base_timecode)
# Set downscale factor to improve processing speed.
video_manager.set_downscale_factor()
# Start video_manager.
video_manager.start()
# Perform scene detection on video_manager.
scene_manager.detect_scenes(frame_source=video_manager)
# Obtain list of detected scenes.
scenes = scene_manager.get_scene_list(base_timecode)
# Each scene is a tuple of (start, end) FrameTimecodes.
for i, scene in enumerate(scenes):
start = roundInt(scene[0].get_seconds()*1000)
end = roundInt(scene[1].get_seconds()*1000)
scene_list.append({
"filename": basename,
"index": i,
"start": start,
"end": end,
"dur": end - start,
"frameStart": scene[0].get_frames(),
"frameEnd": scene[1].get_frames()
})
# We only write to the stats file if a save is required:
if doStats and stats_manager.is_save_required():
with open(stats_file_path, 'w') as stats_file:
stats_manager.save_to_csv(stats_file, base_timecode)
# Retrieve raw data for plotting and additional analysis
fieldNames, sceneData = readCsv(stats_file_path, skipLines=1)
dlen = len(sceneData)
# Add smoothed data
windowLeft = int(windowSize/2)
windowRight = windowSize - windowLeft
for i, d in enumerate(sceneData):
i0 = max(i - windowLeft, 0)
i1 = min(i + windowRight, dlen-1)
sceneData[i]["smoothed"] = np.mean([d["content_val"] for d in sceneData[i0:i1]])
sceneData[i]["ms"] = timecodeToMs(d["Timecode"])
# Add crossfade cuts
if CHECK_FOR_FADE:
for i, d in enumerate(sceneData):
ms = d["ms"]
value = d["smoothed"]
frame = d["Frame Number"]
neighboringCuts = [s for s in scene_list if abs(frame-s["frameStart"]) <= windowSize or abs(frame-s["frameEnd"]) <= windowSize]
# if there's no nearby cuts and we've reached the fade threshold
if len(neighboringCuts) <= 0 and value >= fadeThreshold:
# retrieve the scene right before this one
sortedList = sorted(scene_list, key=lambda k: k['frameStart'])
prev = [s for s in sortedList if s["frameStart"] < frame]
if len(prev) > 0:
prev = prev[-1]
else:
prev = sortedList[0]
# Find local minimums to determine fade start/end
leftWindow = sorted([d for d in sceneData if frame-windowSize < d["Frame Number"] < frame], key=lambda k: k['smoothed'])
rightWindow = sorted([d for d in sceneData if frame < d["Frame Number"] < frame+windowSize], key=lambda k: k['smoothed'])
fadeStart = leftWindow[0]
fadeEnd = rightWindow[0]
# Add new cut if we're not too close to the edges
if fadeStart["ms"]-prev["start"] >= minSceneDur and prev["end"] - fadeEnd["ms"] >= minSceneDur:
# Add the new scene
scene_list.append({
"filename": basename,
"index": prev["index"]+1,
"frameStart": fadeEnd["Frame Number"],
"frameEnd": prev["frameEnd"],
"start": fadeEnd["ms"],
"end": prev["end"],
"dur": prev["end"] - fadeEnd["ms"]
})
# Update the previous scene
scene_list[prev["index"]]["end"] = fadeStart["ms"]
scene_list[prev["index"]]["dur"] = fadeStart["ms"] - prev["start"]
scene_list[prev["index"]]["frameEnd"] = fadeStart["Frame Number"]
# Sort and update indices
scene_list = sorted(scene_list, key=lambda k: k['frameStart'])
for j, s in enumerate(scene_list):
scene_list[j]["index"] = j
if PLOT:
f0, f1 = PLOT
# add raw data
xs = [d["Frame Number"]-1 for d in sceneData if f0 <= d["Frame Number"] <= f1]
ys = [d["content_val"] for d in sceneData if f0 <= d["Frame Number"] <= f1]
plt.plot(xs, ys)
# add smoothed data
ys = [d["smoothed"] for d in sceneData if f0 <= d["Frame Number"] <= f1]
plt.plot(xs, ys, "c")
# add horizontal line for threshold
plt.plot([xs[0], xs[-1]], [threshold, threshold], "g--")
# add scenes as plot data
xs = [d["frameEnd"]-1 for d in scene_list if f0 <= d["frameEnd"] <= f1]
ys = [sceneData[d["frameEnd"]-1]["content_val"] for d in scene_list if f0 <= d["frameEnd"] <= f1]
plt.scatter(xs, ys, c="red")
plt.show()
if os.path.exists(stats_file_path) and not SAVE_STATS:
os.remove(stats_file_path)
finally:
video_manager.release()
progress += 1
sys.stdout.write('\r')
sys.stdout.write("%s%%" % round(1.0*progress/fileCount*100,1))
sys.stdout.flush()
return scene_list
scenes = []
for fn in files:
scenes += getScenes(fn, threshold=THRESHOLD, minSceneDur=MIN_SCENE_DUR, windowSize=WINDOW_SIZE, fadeThreshold=FADE_THRESHOLD)
headings = ["filename", "index", "start", "dur"]
writeCsv(OUTPUT_FILE, scenes, headings)
|
[
"sys.stdout.write",
"os.remove",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"scenedetect.stats_manager.StatsManager",
"os.path.basename",
"scenedetect.scene_manager.SceneManager",
"scenedetect.detectors.content_detector.ContentDetector",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.scatter",
"os.path.exists",
"os.path.isfile",
"numpy.mean",
"sys.stdout.flush",
"scenedetect.video_manager.VideoManager",
"sys.exit"
] |
[((807, 832), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (830, 832), False, 'import argparse\n'), ((2547, 2574), 'os.path.isfile', 'os.path.isfile', (['OUTPUT_FILE'], {}), '(OUTPUT_FILE)\n', (2561, 2574), False, 'import os\n'), ((2654, 2664), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2662, 2664), False, 'import sys\n'), ((2962, 2990), 'os.path.basename', 'os.path.basename', (['video_path'], {}), '(video_path)\n', (2978, 2990), False, 'import os\n'), ((3126, 3152), 'scenedetect.video_manager.VideoManager', 'VideoManager', (['[video_path]'], {}), '([video_path])\n', (3138, 3152), False, 'from scenedetect.video_manager import VideoManager\n'), ((3173, 3187), 'scenedetect.stats_manager.StatsManager', 'StatsManager', ([], {}), '()\n', (3185, 3187), False, 'from scenedetect.stats_manager import StatsManager\n'), ((3271, 3298), 'scenedetect.scene_manager.SceneManager', 'SceneManager', (['stats_manager'], {}), '(stats_manager)\n', (3283, 3298), False, 'from scenedetect.scene_manager import SceneManager\n'), ((9566, 9588), 'sys.stdout.write', 'sys.stdout.write', (["'\\r'"], {}), "('\\r')\n", (9582, 9588), False, 'import sys\n'), ((9660, 9678), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (9676, 9678), False, 'import sys\n'), ((3607, 3672), 'scenedetect.detectors.content_detector.ContentDetector', 'ContentDetector', ([], {'threshold': 'threshold', 'min_scene_len': 'min_scene_len'}), '(threshold=threshold, min_scene_len=min_scene_len)\n', (3622, 3672), False, 'from scenedetect.detectors.content_detector import ContentDetector\n'), ((4005, 4036), 'os.path.exists', 'os.path.exists', (['stats_file_path'], {}), '(stats_file_path)\n', (4019, 4036), False, 'import os\n'), ((5849, 5902), 'numpy.mean', 'np.mean', (["[d['content_val'] for d in sceneData[i0:i1]]"], {}), "([d['content_val'] for d in sceneData[i0:i1]])\n", (5856, 5902), True, 'import numpy as np\n'), ((8810, 8826), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'ys'], {}), '(xs, ys)\n', (8818, 8826), True, 'import matplotlib.pyplot as plt\n'), ((8957, 8978), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'ys', '"""c"""'], {}), "(xs, ys, 'c')\n", (8965, 8978), True, 'import matplotlib.pyplot as plt\n'), ((9040, 9096), 'matplotlib.pyplot.plot', 'plt.plot', (['[xs[0], xs[-1]]', '[threshold, threshold]', '"""g--"""'], {}), "([xs[0], xs[-1]], [threshold, threshold], 'g--')\n", (9048, 9096), True, 'import matplotlib.pyplot as plt\n'), ((9342, 9370), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xs', 'ys'], {'c': '"""red"""'}), "(xs, ys, c='red')\n", (9353, 9370), True, 'import matplotlib.pyplot as plt\n'), ((9383, 9393), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9391, 9393), True, 'import matplotlib.pyplot as plt\n'), ((9406, 9437), 'os.path.exists', 'os.path.exists', (['stats_file_path'], {}), '(stats_file_path)\n', (9420, 9437), False, 'import os\n'), ((9470, 9496), 'os.remove', 'os.remove', (['stats_file_path'], {}), '(stats_file_path)\n', (9479, 9496), False, 'import os\n')]
|
import json
from pyld import jsonld
from services.proto import ldnorm_pb2_grpc
from services.proto import ldnorm_pb2 as lpb2
from services.proto import general_pb2
class LDNormServicer(ldnorm_pb2_grpc.LDNormServicer):
def __init__(self, logger):
self._logger = logger
requests = jsonld.requests_document_loader(timeout=10)
jsonld.set_document_loader(requests)
def _norm(self, ld):
j = json.loads(ld)
flat = jsonld.flatten(j)
return json.dumps(flat)
def Normalise(self, request, context):
self._logger.debug('Got normalise request for: %s', request.json)
resp = lpb2.NormaliseResponse(
result_type=general_pb2.ResultType.OK
)
try:
resp.normalised = self._norm(request.json)
except (Exception, jsonld.JsonLdError) as e:
# For some reason JsonLdError doesn't inherit from Exception so it
            # has to be caught separately; it also has a super long message
# (~20 lines) so I truncate it.
self._logger.error(
"JSON-LD could not be normalised: %s", str(e)[:50])
resp.result_type = general_pb2.ResultType.ERROR
return resp
|
[
"pyld.jsonld.requests_document_loader",
"json.loads",
"pyld.jsonld.set_document_loader",
"json.dumps",
"pyld.jsonld.flatten",
"services.proto.ldnorm_pb2.NormaliseResponse"
] |
[((302, 345), 'pyld.jsonld.requests_document_loader', 'jsonld.requests_document_loader', ([], {'timeout': '(10)'}), '(timeout=10)\n', (333, 345), False, 'from pyld import jsonld\n'), ((354, 390), 'pyld.jsonld.set_document_loader', 'jsonld.set_document_loader', (['requests'], {}), '(requests)\n', (380, 390), False, 'from pyld import jsonld\n'), ((429, 443), 'json.loads', 'json.loads', (['ld'], {}), '(ld)\n', (439, 443), False, 'import json\n'), ((459, 476), 'pyld.jsonld.flatten', 'jsonld.flatten', (['j'], {}), '(j)\n', (473, 476), False, 'from pyld import jsonld\n'), ((492, 508), 'json.dumps', 'json.dumps', (['flat'], {}), '(flat)\n', (502, 508), False, 'import json\n'), ((642, 703), 'services.proto.ldnorm_pb2.NormaliseResponse', 'lpb2.NormaliseResponse', ([], {'result_type': 'general_pb2.ResultType.OK'}), '(result_type=general_pb2.ResultType.OK)\n', (664, 703), True, 'from services.proto import ldnorm_pb2 as lpb2\n')]
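What `_norm` does to a payload, shown in isolation; the example JSON-LD document is arbitrary:

```python
from pyld import jsonld
import json

doc = {
    "@context": {"name": "http://schema.org/name"},
    "name": "Jane Doe",
}
print(json.dumps(jsonld.flatten(doc), indent=2))
```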
|
from unittest.mock import patch
from django.core.management import call_command
from django.db.utils import OperationalError
from django.test import TestCase
class CommandTests(TestCase):
def test_wait_for_db_ready(self):
"""Test waiting for db when db is available"""
# We are mocking the behavior of getting the database and returning true
with patch("django.db.utils.ConnectionHandler.__getitem__") as gi:
gi.return_value = True
call_command("wait_for_db")
self.assertEqual(gi.call_count, 1)
    # ts plays the same role as gi in the previous test;
    # here we mock time.sleep so the test runs faster
@patch("time.sleep", return_value=True)
def test_wait_for_db(self, ts):
"""Test waiting for db"""
with patch("django.db.utils.ConnectionHandler.__getitem__") as gi:
gi.side_effect = [OperationalError] * 5 + [True]
call_command("wait_for_db")
self.assertEqual(gi.call_count, 6)
|
[
"unittest.mock.patch",
"django.core.management.call_command"
] |
[((683, 721), 'unittest.mock.patch', 'patch', (['"""time.sleep"""'], {'return_value': '(True)'}), "('time.sleep', return_value=True)\n", (688, 721), False, 'from unittest.mock import patch\n'), ((378, 432), 'unittest.mock.patch', 'patch', (['"""django.db.utils.ConnectionHandler.__getitem__"""'], {}), "('django.db.utils.ConnectionHandler.__getitem__')\n", (383, 432), False, 'from unittest.mock import patch\n'), ((487, 514), 'django.core.management.call_command', 'call_command', (['"""wait_for_db"""'], {}), "('wait_for_db')\n", (499, 514), False, 'from django.core.management import call_command\n'), ((805, 859), 'unittest.mock.patch', 'patch', (['"""django.db.utils.ConnectionHandler.__getitem__"""'], {}), "('django.db.utils.ConnectionHandler.__getitem__')\n", (810, 859), False, 'from unittest.mock import patch\n'), ((940, 967), 'django.core.management.call_command', 'call_command', (['"""wait_for_db"""'], {}), "('wait_for_db')\n", (952, 967), False, 'from django.core.management import call_command\n')]
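The command under test is not shown here; a minimal sketch of a `wait_for_db` management command consistent with these tests (module path and messages are assumptions):

```python
# e.g. app/core/management/commands/wait_for_db.py (path is an assumption)
import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand


class Command(BaseCommand):
    """Pause execution until the default database is available."""

    def handle(self, *args, **options):
        self.stdout.write('Waiting for database...')
        db_conn = None
        while not db_conn:
            try:
                # Goes through ConnectionHandler.__getitem__, which the tests mock.
                db_conn = connections['default']
            except OperationalError:
                self.stdout.write('Database unavailable, waiting 1 second...')
                time.sleep(1)
        self.stdout.write(self.style.SUCCESS('Database available!'))
```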
|
import socket
import json
import aiohttp
from pathlib import Path
from enum import Enum
from ..app import logger
from .emailer import send_mail
from .configuration import Configuration
from .queues import enqueue_notification
class Subject(Enum):
"""Provides the molior subject types"""
websocket = 1
eventwatch = 2
userrole = 3
user = 4
project = 5
projectversion = 6
build = 7
buildlog = 8
mirror = 9
node = 10
class Event(Enum):
"""Provides the molior event types"""
added = 1
changed = 2
removed = 3
connected = 4
done = 5
class Action(Enum):
"""Provides the molior action types"""
add = 1
change = 2
remove = 3
start = 4
stop = 5
async def trigger_hook(method, url, skip_ssl, body=None):
"""
Triggers a web hook.
Args:
method (str): The http method to be used. E.g. POST
url (str): The url to send the request to.
skip_ssl (bool): Set to True if ssl handshake should not be verified.
body (str): The request body, only pass if method is POST
"""
data = None
headers = {"content-type": "application/json"}
verify = not skip_ssl
try:
data = json.loads(body)
except Exception as exc:
logger.error("hook: error parsing json body: {}".format(exc))
return
connector = aiohttp.TCPConnector(verify_ssl=verify)
if method.lower() == "post":
async with aiohttp.ClientSession(connector=connector) as http:
async with http.post(url, headers=headers, data=json.dumps(data)) as resp:
if resp.status != 200:
logger.warning("trigger web hook '%s' to '%s' returned %d ", method, url, resp.status)
elif method.lower() == "get":
async with aiohttp.ClientSession() as http:
async with http.get(url) as resp:
if resp.status != 200:
logger.warning("trigger web hook '%s' to '%s' returned %d ", method, url, resp.status)
def send_mail_notification(build):
"""
    Sends a build-finished notification email to the maintainer
    of the given build.
    Args:
        build: Model of the finished build.
"""
cfg = Configuration()
email_cfg = cfg.email_notifications
if not email_cfg or not email_cfg.get("enabled"):
return
buildout_path = Path(cfg.working_dir) / "buildout"
log_file = buildout_path / str(build.id) / "build.log"
if not log_file.exists():
logger.warning(
"not sending notification: buildlog file '%s' does not exist!",
str(log_file),
)
return
template_file = Path("/etc/molior/email.template")
with template_file.open() as _file:
template = "".join(_file.readlines())
pkg_name = build.sourcename
receiver = build.maintainer.email
r_name = build.maintainer.fullname
version = build.version
arch = build.architecture
distrelease_version = build.projectversion.basemirror.name
distrelease = build.projectversion.basemirror.project.name
hostname = cfg.hostname if cfg.hostname else socket.getfqdn()
link = "http://{}/#!/build/{}".format(hostname, build.id)
if build.buildstate == "build_failed":
subject = "Build Failed: {} {} ({}-{})".format(pkg_name, version, distrelease, arch)
message = "Unfortunately the build failed for:"
elif build.buildstate == "successful":
subject = "Released: {} {} ({}-{})".format(pkg_name, version, distrelease, arch)
message = "I've just finished building the debian packages for:"
else:
logger.warning("not sending notification: build has state '%s'", str(build.buildstate))
return
content = template.format(
receiver_name=r_name,
message=message,
package_name=pkg_name,
build_version=version,
distrelease=distrelease,
distrelease_version=distrelease_version,
arch=arch,
build_log_link=link,
)
send_mail(receiver, subject, content, [str(log_file)])
async def notify(subject, event, data):
await enqueue_notification({"notify": {"subject": subject, "event": event, "data": data}})
async def run_hooks(build_id):
await enqueue_notification({"hooks": {"build_id": build_id}})
|
[
"json.loads",
"json.dumps",
"aiohttp.ClientSession",
"socket.getfqdn",
"pathlib.Path",
"aiohttp.TCPConnector"
] |
[((1371, 1410), 'aiohttp.TCPConnector', 'aiohttp.TCPConnector', ([], {'verify_ssl': 'verify'}), '(verify_ssl=verify)\n', (1391, 1410), False, 'import aiohttp\n'), ((2710, 2744), 'pathlib.Path', 'Path', (['"""/etc/molior/email.template"""'], {}), "('/etc/molior/email.template')\n", (2714, 2744), False, 'from pathlib import Path\n'), ((1223, 1239), 'json.loads', 'json.loads', (['body'], {}), '(body)\n', (1233, 1239), False, 'import json\n'), ((2413, 2434), 'pathlib.Path', 'Path', (['cfg.working_dir'], {}), '(cfg.working_dir)\n', (2417, 2434), False, 'from pathlib import Path\n'), ((3175, 3191), 'socket.getfqdn', 'socket.getfqdn', ([], {}), '()\n', (3189, 3191), False, 'import socket\n'), ((1464, 1506), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {'connector': 'connector'}), '(connector=connector)\n', (1485, 1506), False, 'import aiohttp\n'), ((1803, 1826), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (1824, 1826), False, 'import aiohttp\n'), ((1576, 1592), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (1586, 1592), False, 'import json\n')]
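`trigger_hook` is a coroutine, so it has to be driven by an event loop; a minimal call with an assumed URL and payload:

```python
import asyncio

# Hypothetical hook endpoint and body.
asyncio.run(trigger_hook(
    "post",
    "https://hooks.example.com/molior",
    skip_ssl=False,
    body='{"build_id": 42, "state": "successful"}',
))
```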
|
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
import time
s = Service(executable_path='/Users/dorcy/Developer/chromedriver')
driver = webdriver.Chrome(service=s)
driver.maximize_window()
def send_messages(phone_number, message_text):
driver.get("https://web.whatsapp.com/")
wait = WebDriverWait(driver, 600)
# Enter the target number in the target variable. Example 1234567890
string = "Testing whatsapp automation python"
target = phone_number
x_arg = '//*[@id="side"]/div[1]/div/label/div/div[2]'
group_title = wait.until(EC.presence_of_element_located((By.XPATH, x_arg)))
time.sleep(10)
group_title.click()
group_title.send_keys(target + Keys.ENTER)
inp_xpath = '//div[@class="_13NKt copyable-text selectable-text"][@data-tab="9"]'
input_box = wait.until(EC.presence_of_element_located((By.XPATH, inp_xpath)))
input_box.send_keys(message_text + Keys.ENTER)
time.sleep(4)
|
[
"selenium.webdriver.support.expected_conditions.presence_of_element_located",
"selenium.webdriver.chrome.service.Service",
"time.sleep",
"selenium.webdriver.Chrome",
"selenium.webdriver.support.ui.WebDriverWait"
] |
[((315, 377), 'selenium.webdriver.chrome.service.Service', 'Service', ([], {'executable_path': '"""/Users/dorcy/Developer/chromedriver"""'}), "(executable_path='/Users/dorcy/Developer/chromedriver')\n", (322, 377), False, 'from selenium.webdriver.chrome.service import Service\n'), ((387, 414), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'service': 's'}), '(service=s)\n', (403, 414), False, 'from selenium import webdriver\n'), ((544, 570), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['driver', '(600)'], {}), '(driver, 600)\n', (557, 570), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((862, 876), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (872, 876), False, 'import time\n'), ((1171, 1184), 'time.sleep', 'time.sleep', (['(4)'], {}), '(4)\n', (1181, 1184), False, 'import time\n'), ((807, 856), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'EC.presence_of_element_located', (['(By.XPATH, x_arg)'], {}), '((By.XPATH, x_arg))\n', (837, 856), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((1061, 1114), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'EC.presence_of_element_located', (['(By.XPATH, inp_xpath)'], {}), '((By.XPATH, inp_xpath))\n', (1091, 1114), True, 'from selenium.webdriver.support import expected_conditions as EC\n')]
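Calling the helper above; the number and message are placeholders, and an active WhatsApp Web session is required for the waits to succeed:

```python
if __name__ == "__main__":
    send_messages("+15551234567", "Hello from the automation script!")
    driver.quit()
```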
|
import socket
# No debugging by default; this is overridden in dev config
DEBUG = False
# Override this; best to make it different for each environment
SECRET_KEY = ''
# Email address that emails originate from. Make sure it's real, you own it,
# and SPF allows you to send from it.
DEFAULT_MAIL_SENDER = 'vagrant@%s' % socket.getfqdn()
# General email address for admins and errors
ADMIN_RECIPIENTS = ['vagrant<EMAIL>']
ERROR_EMAIL = None
# Database connection string
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://@/app'
|
[
"socket.getfqdn"
] |
[((319, 335), 'socket.getfqdn', 'socket.getfqdn', ([], {}), '()\n', (333, 335), False, 'import socket\n')]
|
# -*- encoding: utf-8 -*-
from django.shortcuts import render, redirect
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.core import serializers
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponseRedirect, HttpResponse
from django.utils.text import slugify
from django.contrib.auth import login, authenticate, logout
from django.http import Http404
from datetime import datetime
from .forms import *
from .models import *
import json
from tecnoservicio.users.models import *
from django.db.models import Q
import reversion
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from tecnoservicio.tareas.tasks import *
def comprobar_orden(request, orden):
if request.user.perfil == 'Tecnico local' or request.user.perfil == 'Tecnico foraneo':
if not orden.tecnico == request.user:
raise Http404
elif request.user.perfil == 'Vendedor':
if not orden.servicio == 'Icon':
raise Http404
elif request.user.perfil == 'Tienda':
if not orden.tienda.empresa.usuario == request.user:
raise Http404
return True
def filtrar_orden(request, ordenes_list):
if request.user.perfil == 'Tecnico local' or request.user.perfil == 'Tecnico foraneo':
ordenes_list = ordenes_list.filter(tecnico = request.user)
elif request.user.perfil == 'Vendedor':
ordenes_list = ordenes_list.filter(servicio = 'Icon')
elif request.user.perfil == 'Tienda':
ordenes_list = ordenes_list.filter( tienda__empresa__usuario = request.user )
return ordenes_list
def lista_orden(request):
if request.method == 'POST':
busqueda = request.POST.get("buscar", "")
if not busqueda == '':
ordenes_list = Orden.objects.filter(Q(folio__contains=busqueda) | Q(cliente__icontains=busqueda)| Q(telefono_casa__contains=busqueda) | Q(telefono_oficina__contains=busqueda) | Q(telefono_movil__contains=busqueda) | Q(tecnico__username__icontains=busqueda) | Q(icon_os__contains=busqueda) | Q(icon_ics__contains=busqueda) | Q(icon_on__contains=busqueda) | Q(icon_cn__contains=busqueda) | Q(no_serie__contains=busqueda)).order_by('-id')
else:
ordenes_list = []
else:
ordenes_list = Orden.objects.all()
ordenes_list = filtrar_orden(request, ordenes_list)
paginator = Paginator(ordenes_list, 35)
page = request.GET.get('page')
try:
ordenes = paginator.page(page)
except PageNotAnInteger:
ordenes = paginator.page(1)
except EmptyPage:
ordenes = paginator.page(paginator.num_pages)
return render(request, 'ordenes/orden_lista.html', locals())
def alta_orden(request):
form = OrdenForm()
conceptoform = ConceptoForm()
tecnicos_locales = serializers.serialize("json", User.objects.filter(perfil = 'Tecnico local', is_active = True), fields=('username',))
tecnicos_foraneos = serializers.serialize("json", User.objects.filter(perfil = 'Tecnico foraneo', is_active = True), fields=('username',))
if request.method == "POST":
form = OrdenForm(request.POST)
if form.is_valid():
o = form.save(commit = False)
o.operador = request.user
o.estatus = 'Nueva'
o.save()
conceptoform = ConceptoForm(request.POST)
if conceptoform.is_valid():
nombre = conceptoform.cleaned_data.get("nombre", "")
cantidad = conceptoform.cleaned_data.get("cantidad", "")
if nombre and cantidad:
concepto = conceptoform.save(commit = False)
concepto.orden = o
concepto.usuario = request.user
corte = Corte()
concepto.corte = corte.asignar()
concepto.save()
return HttpResponseRedirect(reverse( 'editar_orden', args=[o.folio]))
else:
messages.warning(request, 'Algo salió mal, intenta de nuevo.')
return render(request, 'ordenes/orden_form.html', locals())
def editar_orden(request, folio):
orden = Orden.objects.filter(folio = folio)[0]
comprobar_orden(request, orden)
mensajes_orden = Mensaje.objects.filter(orden = orden)
form = OrdenForm(instance = orden)
conceptoform = ConceptoForm()
mensaje = MensajeForm()
orden_estatus = Orden_estatusForm(instance = orden)
tecnicos_locales = serializers.serialize("json", User.objects.filter(perfil = 'Tecnico local', is_active = True), fields=('username',))
tecnicos_foraneos = serializers.serialize("json", User.objects.filter(perfil = 'Tecnico foraneo', is_active = True), fields=('username',))
if request.method == "POST":
if 'guardar' in request.POST:
form = OrdenForm(request.POST, instance = orden)
orden_estatus = Orden_estatusForm(request.POST, instance = orden)
if form.is_valid() and orden_estatus.is_valid():
o = form.save()
orden_estatus.save()
conceptoform = ConceptoForm(request.POST)
if conceptoform.is_valid():
nombre = conceptoform.cleaned_data.get("nombre", "")
cantidad = conceptoform.cleaned_data.get("cantidad", "")
if nombre and cantidad:
concepto = conceptoform.save(commit = False)
concepto.orden = o
concepto.usuario = request.user
corte = Corte()
concepto.corte = corte.asignar()
concepto.save()
else:
messages.warning(request, 'Algo salió mal, intenta de nuevo.')
if 'enviar_comentarios' in request.POST:
mensaje = MensajeForm(request.POST)
if mensaje.is_valid():
o = mensaje.save(commit=False)
o.usuario = request.user
o.orden = orden
o.save()
conceptoform = ConceptoForm(request.POST)
conceptos = Concepto.objects.filter(orden = orden)
version_list = reversion.get_for_object(orden)
mensaje = MensajeForm()
if request.user.perfil == 'Tecnico local' or request.user.perfil == 'Tecnico foraneo':
return render(request, 'ordenes/orden_info.html', locals())
return render(request, 'ordenes/orden_form.html', locals())
def imprimir_orden(request, folio):
orden = Orden.objects.filter(folio=folio)[0]
comprobar_orden(request, orden)
hoy = datetime.datetime.now()
publicidad = Publicidad.objects.all()
return render(request, 'ordenes/imprimir_orden.html', locals())
def calendario(request,tipo):
if tipo == 'locales':
tipo='Local'
elif tipo == 'foraneas':
tipo='Foraneo'
fecha = datetime.datetime.now()
ordenes = Orden.objects.filter(fecha_programada__year = fecha.year, fecha_programada__month = fecha.month, zona=tipo)
ordenes = filtrar_orden(request, ordenes)
return render(request, 'ordenes/calendario.html', locals())
def publicidad(request):
form = PublicidadForm()
if request.method == 'POST':
form = PublicidadForm(request.POST, request.FILES)
if form.is_valid():
Publicidad.objects.all().delete()
form.save()
publicidad = Publicidad.objects.all()
return render(request, 'ordenes/publicidad_form.html', locals())
def lista_cortes(request):
objects = Corte.objects.all()
return render(request, 'ordenes/corte_lista.html', locals())
def generar_corte(request, id):
corte = Corte.objects.filter(id = id)[0]
corte.cortado = True
corte.save()
return HttpResponseRedirect(reverse('lista_cortes'))
def corte(request, id):
ordenes = []
corte = Corte.objects.filter(id = id)[0]
conceptos = Concepto.objects.filter(corte=corte)
total = 0
for concepto in conceptos:
total += concepto.cantidad
if not concepto.orden in ordenes:
ordenes.append(concepto.orden)
return render(request, 'ordenes/corte.html', locals())
def reportes(request):
hoy = datetime.datetime.now()
return render(request, 'ordenes/reportes.html', locals())
def ordenes_icon(request,inicio,fin):
inicio = datetime.datetime.strptime(inicio, '%d-%m-%Y')
fin = datetime.datetime.strptime(fin, '%d-%m-%Y')
objects = Orden.objects.filter(fecha_alta__range=(inicio, fin), servicio='Icon', concepto='Armado')
return render(request, 'ordenes/reporte_ordenes.html', locals())
def ordenes_tecno(request,inicio,fin):
inicio = datetime.datetime.strptime(inicio, '%d-%m-%Y')
fin = datetime.datetime.strptime(fin, '%d-%m-%Y')
objects = Orden.objects.filter(fecha_alta__range=(inicio, fin), servicio='Tecnoservicio', concepto='Armado')
return render(request, 'ordenes/reporte_ordenes.html', locals())
def armados_locales(request,inicio,fin):
inicio = datetime.datetime.strptime(inicio, '%d-%m-%Y')
fin = datetime.datetime.strptime(fin, '%d-%m-%Y')
objects = Orden.objects.filter(fecha_alta__range=(inicio, fin), servicio='Icon', concepto='Armado', zona='Local')
return render(request, 'ordenes/reporte_ordenes.html', locals())
def armados_foraneos(request,inicio,fin):
inicio = datetime.datetime.strptime(inicio, '%d-%m-%Y')
fin = datetime.datetime.strptime(fin, '%d-%m-%Y')
objects = Orden.objects.filter(fecha_alta__range=(inicio, fin), servicio='Icon', concepto='Armado', zona='Foraneo')
return render(request, 'ordenes/reporte_ordenes.html', locals())
@csrf_exempt
def actualizar_marca(request):
if request.method == 'POST':
tienda = request.POST.get('tienda')
marca = serializers.serialize("json", Marca.objects.filter(tienda__id = tienda), fields=('nombre',))
return HttpResponse(marca,content_type="application/json")
else:
return HttpResponse(json.dumps({"actualizar_marca": "ok"}),content_type="application/json")
@csrf_exempt
def actualizar_modelo(request):
if request.method == 'POST':
marca = request.POST.get('marca')
modelo = serializers.serialize("json", Modelo.objects.filter(marca__id = marca), fields=('nombre',))
return HttpResponse(modelo,content_type="application/json")
else:
return HttpResponse(json.dumps({"actualizar_modelo": "ok"}),content_type="application/json")
@csrf_exempt
def calendario_ordenes(request):
from lib import JsonPropertySerializer
if request.method == 'POST':
fecha = request.POST.get('fecha').split('/')
tipo = request.POST.get('tipo')
fecha = datetime.datetime(year=int(fecha[2]), month=int(fecha[1]), day=int(fecha[0]))
ordenes = JsonPropertySerializer().serialize(Orden.objects.filter(fecha_programada__year = fecha.year, fecha_programada__month = fecha.month, zona=tipo))
return HttpResponse(ordenes,content_type="application/json")
else:
return HttpResponse(json.dumps({"calendario_ordenes": "ok"}),content_type="application/json")
|
[
"django.core.urlresolvers.reverse",
"django.http.HttpResponse",
"datetime.datetime.datetime.now",
"reversion.get_for_object",
"django.db.models.Q",
"json.dumps",
"lib.JsonPropertySerializer",
"django.core.paginator.Paginator",
"datetime.datetime.datetime.strptime",
"django.contrib.messages.warning"
] |
[((2254, 2281), 'django.core.paginator.Paginator', 'Paginator', (['ordenes_list', '(35)'], {}), '(ordenes_list, 35)\n', (2263, 2281), False, 'from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n'), ((5388, 5419), 'reversion.get_for_object', 'reversion.get_for_object', (['orden'], {}), '(orden)\n', (5412, 5419), False, 'import reversion\n'), ((5779, 5802), 'datetime.datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5800, 5802), False, 'from datetime import datetime\n'), ((6028, 6051), 'datetime.datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6049, 6051), False, 'from datetime import datetime\n'), ((7230, 7253), 'datetime.datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7251, 7253), False, 'from datetime import datetime\n'), ((7362, 7408), 'datetime.datetime.datetime.strptime', 'datetime.datetime.strptime', (['inicio', '"""%d-%m-%Y"""'], {}), "(inicio, '%d-%m-%Y')\n", (7388, 7408), False, 'from datetime import datetime\n'), ((7416, 7459), 'datetime.datetime.datetime.strptime', 'datetime.datetime.strptime', (['fin', '"""%d-%m-%Y"""'], {}), "(fin, '%d-%m-%Y')\n", (7442, 7459), False, 'from datetime import datetime\n'), ((7677, 7723), 'datetime.datetime.datetime.strptime', 'datetime.datetime.strptime', (['inicio', '"""%d-%m-%Y"""'], {}), "(inicio, '%d-%m-%Y')\n", (7703, 7723), False, 'from datetime import datetime\n'), ((7731, 7774), 'datetime.datetime.datetime.strptime', 'datetime.datetime.strptime', (['fin', '"""%d-%m-%Y"""'], {}), "(fin, '%d-%m-%Y')\n", (7757, 7774), False, 'from datetime import datetime\n'), ((8003, 8049), 'datetime.datetime.datetime.strptime', 'datetime.datetime.strptime', (['inicio', '"""%d-%m-%Y"""'], {}), "(inicio, '%d-%m-%Y')\n", (8029, 8049), False, 'from datetime import datetime\n'), ((8057, 8100), 'datetime.datetime.datetime.strptime', 'datetime.datetime.strptime', (['fin', '"""%d-%m-%Y"""'], {}), "(fin, '%d-%m-%Y')\n", (8083, 8100), False, 'from datetime import datetime\n'), ((8335, 8381), 'datetime.datetime.datetime.strptime', 'datetime.datetime.strptime', (['inicio', '"""%d-%m-%Y"""'], {}), "(inicio, '%d-%m-%Y')\n", (8361, 8381), False, 'from datetime import datetime\n'), ((8389, 8432), 'datetime.datetime.datetime.strptime', 'datetime.datetime.strptime', (['fin', '"""%d-%m-%Y"""'], {}), "(fin, '%d-%m-%Y')\n", (8415, 8432), False, 'from datetime import datetime\n'), ((6849, 6872), 'django.core.urlresolvers.reverse', 'reverse', (['"""lista_cortes"""'], {}), "('lista_cortes')\n", (6856, 6872), False, 'from django.core.urlresolvers import reverse\n'), ((8841, 8893), 'django.http.HttpResponse', 'HttpResponse', (['marca'], {'content_type': '"""application/json"""'}), "(marca, content_type='application/json')\n", (8853, 8893), False, 'from django.http import HttpResponseRedirect, HttpResponse\n'), ((9220, 9273), 'django.http.HttpResponse', 'HttpResponse', (['modelo'], {'content_type': '"""application/json"""'}), "(modelo, content_type='application/json')\n", (9232, 9273), False, 'from django.http import HttpResponseRedirect, HttpResponse\n'), ((9826, 9880), 'django.http.HttpResponse', 'HttpResponse', (['ordenes'], {'content_type': '"""application/json"""'}), "(ordenes, content_type='application/json')\n", (9838, 9880), False, 'from django.http import HttpResponseRedirect, HttpResponse\n'), ((3571, 3633), 'django.contrib.messages.warning', 'messages.warning', (['request', '"""Algo salió mal, intenta de nuevo."""'], {}), "(request, 'Algo salió mal, intenta de nuevo.')\n", (3587, 3633), False, 
'from django.contrib import messages\n'), ((8922, 8960), 'json.dumps', 'json.dumps', (["{'actualizar_marca': 'ok'}"], {}), "({'actualizar_marca': 'ok'})\n", (8932, 8960), False, 'import json\n'), ((9302, 9341), 'json.dumps', 'json.dumps', (["{'actualizar_modelo': 'ok'}"], {}), "({'actualizar_modelo': 'ok'})\n", (9312, 9341), False, 'import json\n'), ((9909, 9949), 'json.dumps', 'json.dumps', (["{'calendario_ordenes': 'ok'}"], {}), "({'calendario_ordenes': 'ok'})\n", (9919, 9949), False, 'import json\n'), ((3518, 3557), 'django.core.urlresolvers.reverse', 'reverse', (['"""editar_orden"""'], {'args': '[o.folio]'}), "('editar_orden', args=[o.folio])\n", (3525, 3557), False, 'from django.core.urlresolvers import reverse\n'), ((5006, 5068), 'django.contrib.messages.warning', 'messages.warning', (['request', '"""Algo salió mal, intenta de nuevo."""'], {}), "(request, 'Algo salió mal, intenta de nuevo.')\n", (5022, 5068), False, 'from django.contrib import messages\n'), ((9673, 9697), 'lib.JsonPropertySerializer', 'JsonPropertySerializer', ([], {}), '()\n', (9695, 9697), False, 'from lib import JsonPropertySerializer\n'), ((2067, 2097), 'django.db.models.Q', 'Q', ([], {'no_serie__contains': 'busqueda'}), '(no_serie__contains=busqueda)\n', (2068, 2097), False, 'from django.db.models import Q\n'), ((2035, 2064), 'django.db.models.Q', 'Q', ([], {'icon_cn__contains': 'busqueda'}), '(icon_cn__contains=busqueda)\n', (2036, 2064), False, 'from django.db.models import Q\n'), ((2003, 2032), 'django.db.models.Q', 'Q', ([], {'icon_on__contains': 'busqueda'}), '(icon_on__contains=busqueda)\n', (2004, 2032), False, 'from django.db.models import Q\n'), ((1970, 2000), 'django.db.models.Q', 'Q', ([], {'icon_ics__contains': 'busqueda'}), '(icon_ics__contains=busqueda)\n', (1971, 2000), False, 'from django.db.models import Q\n'), ((1938, 1967), 'django.db.models.Q', 'Q', ([], {'icon_os__contains': 'busqueda'}), '(icon_os__contains=busqueda)\n', (1939, 1967), False, 'from django.db.models import Q\n'), ((1894, 1934), 'django.db.models.Q', 'Q', ([], {'tecnico__username__icontains': 'busqueda'}), '(tecnico__username__icontains=busqueda)\n', (1895, 1934), False, 'from django.db.models import Q\n'), ((1855, 1891), 'django.db.models.Q', 'Q', ([], {'telefono_movil__contains': 'busqueda'}), '(telefono_movil__contains=busqueda)\n', (1856, 1891), False, 'from django.db.models import Q\n'), ((1814, 1852), 'django.db.models.Q', 'Q', ([], {'telefono_oficina__contains': 'busqueda'}), '(telefono_oficina__contains=busqueda)\n', (1815, 1852), False, 'from django.db.models import Q\n'), ((1776, 1811), 'django.db.models.Q', 'Q', ([], {'telefono_casa__contains': 'busqueda'}), '(telefono_casa__contains=busqueda)\n', (1777, 1811), False, 'from django.db.models import Q\n'), ((1714, 1741), 'django.db.models.Q', 'Q', ([], {'folio__contains': 'busqueda'}), '(folio__contains=busqueda)\n', (1715, 1741), False, 'from django.db.models import Q\n'), ((1744, 1774), 'django.db.models.Q', 'Q', ([], {'cliente__icontains': 'busqueda'}), '(cliente__icontains=busqueda)\n', (1745, 1774), False, 'from django.db.models import Q\n')]
|
import cv2
import argparse
import pickle
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--calibrated", help = "turn on calibration", action = 'store_true')
args = parser.parse_args()
vid = cv2.VideoCapture(0)
if args.calibrated:
f_name = "cam_vars.pkl"
with open( f_name, 'rb' ) as file_object:
raw_data = file_object.read( )
raw_data = pickle.loads( raw_data ) # deserialization
mtx, dist, optimal_camera_matrix, roi = raw_data
while( True ):
# Capture the video frame-by-frame
ret, frame = vid.read()
if args.calibrated:
# Undistort the image
undistorted_image = cv2.undistort( frame, mtx, dist, None, optimal_camera_matrix )
        # Crop the image. These two lines remove the black borders
        # on the edge of the undistorted image.
x, y, w, h = roi
frame = undistorted_image[y:y+h, x:x+w] # Rewriting the frame
cv2.imshow('frame', frame) # Display the resulting frame
k = cv2.waitKey( 1 ) # Wait for 1ms and get the key input
# [BACKUP] ord('q'):
if k%256 == 27: # If (ESC) key is given, stop the video
print( "ESC inputted, Close Camera!" )
break
vid.release()
cv2.destroyAllWindows()
|
[
"pickle.loads",
"argparse.ArgumentParser",
"cv2.waitKey",
"cv2.imshow",
"cv2.VideoCapture",
"cv2.destroyAllWindows",
"cv2.undistort"
] |
[((52, 77), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (75, 77), False, 'import argparse\n'), ((210, 229), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (226, 229), False, 'import cv2\n'), ((1376, 1399), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1397, 1399), False, 'import cv2\n'), ((380, 402), 'pickle.loads', 'pickle.loads', (['raw_data'], {}), '(raw_data)\n', (392, 402), False, 'import pickle\n'), ((962, 988), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (972, 988), False, 'import cv2\n'), ((1077, 1091), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1088, 1091), False, 'import cv2\n'), ((643, 703), 'cv2.undistort', 'cv2.undistort', (['frame', 'mtx', 'dist', 'None', 'optimal_camera_matrix'], {}), '(frame, mtx, dist, None, optimal_camera_matrix)\n', (656, 703), False, 'import cv2\n')]
|
import Core.chrome as chrome
operators = ['=', '+', '-', '*', '/', '%',
'+=', '-=', '*=', '/=', '%=',
'==', '>', '<', '>=', '<=', '!=',
'&&', '||']
# parser
def parse(lex):
print(lex)
print() #empty line
func = ''
parameters = []
varSetting = ['NAME', 'TYPE', 'VALUE']
variables = {}
for pos, tok in enumerate(lex):
if tok == '+':
pass
#i = 0
#while(i < len(lex)):
for pos, tok in enumerate(lex):
if tok[:5] == 'FUNC ':
parameters = []
# get function without the colons
func = tok[6:-1]
elif tok == 'FUNC_END':
chrome.execute(func, parameters)
func = ''
elif func:
parameters.append(tok)
elif tok[:4] == 'VAR ':
if lex[pos+1] in ['='] + operators[6:11]:
varSetting[0] = tok[5:-1] # set name
|
[
"Core.chrome.execute"
] |
[((681, 713), 'Core.chrome.execute', 'chrome.execute', (['func', 'parameters'], {}), '(func, parameters)\n', (695, 713), True, 'import Core.chrome as chrome\n')]
|
#!/usr/bin/env python
import rasterio as rio
import numpy as np
import click, json, os
import tiledelta, mercantile
@click.group()
def cli():
pass
@click.command(short_help="HELP")
@click.argument('bounds', default='-', required=False)
@click.option('--stride', default=1)
def loaddata(bounds, stride):
"""Does something"""
try:
inBounds = click.open_file(bounds).readlines()
except IOError:
inBounds = [bounds]
bounds = json.loads(inBounds[0])
click.echo(bounds['bbox'])
# with rio.drivers():
# with rio.open('src_path', 'r') as src:
cli.add_command(loaddata)
@click.command()
@click.argument('filedir', type=click.Path(exists=True))
@click.argument('comparedir', type=click.Path(exists=True))
@click.option('--sampling', '-s', type=(int), default=0)
@click.option('--filetype', '-f', type=(str), default='png')
@click.option('--plotdir', '-p', type=click.Path(exists=True))
def comptiles(filedir, comparedir, sampling, filetype, plotdir):
# plotdir = '/Users/dnomadb/Documents/pcomp'
files = os.listdir(filedir)
cfiles = os.listdir(comparedir)
if plotdir:
import matplotlib.pyplot as plot
for f in files:
fileinfo = f.split('-')
if len(fileinfo[-1].split('.')) != 0 and fileinfo[-1].split('.')[-1] == filetype:
x, y, z = tiledelta.getXYZ(fileinfo)
bbox = mercantile.bounds(x, y, z)
with rio.drivers():
with rio.open(os.path.join(filedir, f), 'r') as src:
greyimage_before = (src.read(1).astype(np.uint16) + src.read(2).astype(np.uint16) + src.read(3).astype(np.uint16))
with rio.open(os.path.join(comparedir, f), 'r') as src:
greyimage_after = (src.read(1).astype(np.uint16) + src.read(2).astype(np.uint16) + src.read(3).astype(np.uint16))
pcplo = tiledelta.compareGreys(greyimage_after, greyimage_before, 10, 20)
pcplo = pcplo[::sampling,::sampling]
if plotdir:
fig = plot.figure(figsize=(20,10))
before = fig.add_subplot(131)
before.imshow(greyimage_after,cmap='Greys_r')
after = fig.add_subplot(132)
after.imshow(greyimage_before, cmap='Greys_r')
pc2 = fig.add_subplot(133)
pc2.imshow(pcplo, cmap='YlGnBu')
fig.savefig(os.path.join(plotdir, f))
else:
tiledelta.makeVectors(pcplo, tiledelta.makeAffine(pcplo.shape, bbox))
cli.add_command(comptiles)
if __name__ == '__main__':
cli()
|
[
"tiledelta.makeAffine",
"json.loads",
"click.argument",
"rasterio.drivers",
"click.option",
"click.echo",
"click.command",
"click.open_file",
"matplotlib.pyplot.figure",
"tiledelta.compareGreys",
"click.Path",
"mercantile.bounds",
"click.group",
"os.path.join",
"os.listdir",
"tiledelta.getXYZ"
] |
[((119, 132), 'click.group', 'click.group', ([], {}), '()\n', (130, 132), False, 'import click, json, os\n'), ((155, 187), 'click.command', 'click.command', ([], {'short_help': '"""HELP"""'}), "(short_help='HELP')\n", (168, 187), False, 'import click, json, os\n'), ((189, 242), 'click.argument', 'click.argument', (['"""bounds"""'], {'default': '"""-"""', 'required': '(False)'}), "('bounds', default='-', required=False)\n", (203, 242), False, 'import click, json, os\n'), ((244, 279), 'click.option', 'click.option', (['"""--stride"""'], {'default': '(1)'}), "('--stride', default=1)\n", (256, 279), False, 'import click, json, os\n'), ((620, 635), 'click.command', 'click.command', ([], {}), '()\n', (633, 635), False, 'import click, json, os\n'), ((754, 807), 'click.option', 'click.option', (['"""--sampling"""', '"""-s"""'], {'type': 'int', 'default': '(0)'}), "('--sampling', '-s', type=int, default=0)\n", (766, 807), False, 'import click, json, os\n'), ((811, 868), 'click.option', 'click.option', (['"""--filetype"""', '"""-f"""'], {'type': 'str', 'default': '"""png"""'}), "('--filetype', '-f', type=str, default='png')\n", (823, 868), False, 'import click, json, os\n'), ((460, 483), 'json.loads', 'json.loads', (['inBounds[0]'], {}), '(inBounds[0])\n', (470, 483), False, 'import click, json, os\n'), ((488, 514), 'click.echo', 'click.echo', (["bounds['bbox']"], {}), "(bounds['bbox'])\n", (498, 514), False, 'import click, json, os\n'), ((1053, 1072), 'os.listdir', 'os.listdir', (['filedir'], {}), '(filedir)\n', (1063, 1072), False, 'import click, json, os\n'), ((1086, 1108), 'os.listdir', 'os.listdir', (['comparedir'], {}), '(comparedir)\n', (1096, 1108), False, 'import click, json, os\n'), ((668, 691), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (678, 691), False, 'import click, json, os\n'), ((728, 751), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (738, 751), False, 'import click, json, os\n'), ((909, 932), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (919, 932), False, 'import click, json, os\n'), ((1332, 1358), 'tiledelta.getXYZ', 'tiledelta.getXYZ', (['fileinfo'], {}), '(fileinfo)\n', (1348, 1358), False, 'import tiledelta, mercantile\n'), ((1378, 1404), 'mercantile.bounds', 'mercantile.bounds', (['x', 'y', 'z'], {}), '(x, y, z)\n', (1395, 1404), False, 'import tiledelta, mercantile\n'), ((363, 386), 'click.open_file', 'click.open_file', (['bounds'], {}), '(bounds)\n', (378, 386), False, 'import click, json, os\n'), ((1422, 1435), 'rasterio.drivers', 'rio.drivers', ([], {}), '()\n', (1433, 1435), True, 'import rasterio as rio\n'), ((1888, 1953), 'tiledelta.compareGreys', 'tiledelta.compareGreys', (['greyimage_after', 'greyimage_before', '(10)', '(20)'], {}), '(greyimage_after, greyimage_before, 10, 20)\n', (1910, 1953), False, 'import tiledelta, mercantile\n'), ((2078, 2107), 'matplotlib.pyplot.figure', 'plot.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (2089, 2107), True, 'import matplotlib.pyplot as plot\n'), ((1467, 1491), 'os.path.join', 'os.path.join', (['filedir', 'f'], {}), '(filedir, f)\n', (1479, 1491), False, 'import click, json, os\n'), ((1671, 1698), 'os.path.join', 'os.path.join', (['comparedir', 'f'], {}), '(comparedir, f)\n', (1683, 1698), False, 'import click, json, os\n'), ((2471, 2495), 'os.path.join', 'os.path.join', (['plotdir', 'f'], {}), '(plotdir, f)\n', (2483, 2495), False, 'import click, json, os\n'), ((2568, 2607), 'tiledelta.makeAffine', 
'tiledelta.makeAffine', (['pcplo.shape', 'bbox'], {}), '(pcplo.shape, bbox)\n', (2588, 2607), False, 'import tiledelta, mercantile\n')]
|
from builtins import object
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search
from pandas.io.json import json_normalize
import pandas as pd
from datetime import datetime, timedelta
class ElasticDF(object):
'''
The ElasticDF() class searches Elastic and returns results as a Pandas
DataFrame. This makes it easier to work with the search results with
standard data analysis techniques.
Example usage:
# Create a plaintext connection to the Elastic server, no authentication
e = ElasticDF(url="http://localhost:9200")
# The same, but with SSL and authentication
e = ElasticDF(url="https://localhost:9200", ssl=True, username="myuser",
password="<PASSWORD>")
# Fetch search results from an index or index pattern for the previous day
df = e.search_df(lucene="item:5282 AND color:red", index="myindex-*", days=1)
# The same, but do not flatten structures into individual columns.
# This will result in each structure having a single column with a
# JSON string describing the structure.
df = e.search_df(lucene="item:5282 AND color:red", index="myindex-*", days=1,
normalize=False)
# A more complex example, showing how to set the Elastic document type,
# use Python-style datetime objects to constrain the search to a certain
# time period, and a user-defined field against which to do the time
# comparisons.
df = e.search_df(lucene="item:5285 AND color:red", index="myindex-*",
doctype="doc", date_field="mydate",
start_time=datetime.now() - timedelta(days=8),
end_time=datetime.now() - timedelta(days=6))
'''
es_conn = None # The connection to the ES server
def __init__(self, url=None, timeout=250, ssl=False, username="", password="", verify_certs=True, ca_certs=None):
'''
Create the ElasticDF object and log into the Elastic server.
'''
self.es_conn = Elasticsearch(
url,
timeout=timeout,
use_ssl=ssl,
verify_certs=verify_certs,
ca_certs=ca_certs,
http_auth=(username, password)
)
def search(self, lucene, index="*", doctype="doc", fields=None,
date_field="@timestamp", days=None, start_time=None,
end_time=None):
'''
Search Elastic and return the results as a list of dicts.
lucene: A string containing the Elastic search (e.g., 'item:5282 AND color:red')
index: A string containing the index name to search, or an index name pattern
if you want to search multiple indices (e.g., 'myindex' or 'myindex-*')
doctype: The document type you are interested in.
fields: A string containing a comma-separated list of field names to return.
The default is to return all fields, but using this list you can
select only certain fields, which may make things a bit faster.
date_field: The name of the field used for date/time comparison.
        days: Search the past X days. If provided, this supersedes both start_time
and end_time.
start_time: A datetime() object representing the start of the search
window. If used without end_time, the end of the search
window is the current time.
end_time: A datetime() object representing the end of the search window.
If used without start_time, the search start will be the earliest
time in the index.
'''
s = Search(using=self.es_conn, index=index, doc_type=doctype)
s = s.query("query_string", query=lucene)
if fields:
s = s.source(fields.split(','))
        # Add timestamp filters, if provided. Days takes precedence over
# use of either/both of start_time and end_time.
# Note the weird unpacked dictionary syntax in the call to s.filter().
# We have to do it this way because Python has an issue naming things
# with "@" in them, but the default timestamp field in many ES servers is
# "@timestamp".
# ref: https://github.com/elastic/elasticsearch-dsl-py/blob/master/docs/search_dsl.rst
if days:
end = datetime.now()
start = end - timedelta(days=days)
s = s.filter('range', ** {date_field: {"gte": start, "lte": end}})
elif start_time and not end_time:
s = s.filter('range', ** {date_field: {"gte": start_time}})
elif end_time and not start_time:
s = s.filter('range', ** {date_field: {"lte": end_time}})
elif start_time and end_time:
s = s.filter('range', ** {date_field: {"gte": start_time, "lte": end_time}})
# execute the search
results = s.scan()
for hit in results:
yield hit.to_dict()
def search_df(self, lucene, index="*", doctype="doc", fields=None, date_field="@timestamp", days=None, start_time=None, end_time=None, normalize=True):
'''
Search Elastic and return the results as a Pandas DataFrame.
lucene: A string containing the Elastic search (e.g., 'item:5282 AND color:red')
index: A string containing the index name to search, or an index name pattern
if you want to search multiple indices (e.g., 'myindex' or 'myindex-*')
doctype: The document type you are interested in.
fields: A string containing a comma-separated list of field names to return.
The default is to return all fields, but using this list you can
select only certain fields, which may make things a bit faster.
date_field: The name of the field used for date/time comparison.
        days: Search the past X days. If provided, this supersedes both start_time
and end_time.
start_time: A datetime() object representing the start of the search
window. If used without end_time, the end of the search
window is the current time.
end_time: A datetime() object representing the end of the search window.
If used without start_time, the search start will be the earliest
time in the index.
normalize: If set to True, fields containing structures (i.e. subfields)
                   will be flattened such that each field has its own column in
the dataframe. If False, there will be a single column for the
structure, with a JSON string encoding all the contents.
'''
results = list()
for hit in self.search(lucene=lucene, index=index, doctype=doctype,
fields=fields, date_field=date_field, days=days,
start_time=start_time, end_time=end_time):
results.append(hit)
if normalize:
df = json_normalize(results)
else:
df = pd.DataFrame(results)
return df
|
[
"elasticsearch.Elasticsearch",
"pandas.DataFrame",
"pandas.io.json.json_normalize",
"elasticsearch_dsl.Search",
"datetime.timedelta",
"datetime.datetime.now"
] |
[((2101, 2231), 'elasticsearch.Elasticsearch', 'Elasticsearch', (['url'], {'timeout': 'timeout', 'use_ssl': 'ssl', 'verify_certs': 'verify_certs', 'ca_certs': 'ca_certs', 'http_auth': '(username, password)'}), '(url, timeout=timeout, use_ssl=ssl, verify_certs=verify_certs,\n ca_certs=ca_certs, http_auth=(username, password))\n', (2114, 2231), False, 'from elasticsearch import Elasticsearch\n'), ((3735, 3792), 'elasticsearch_dsl.Search', 'Search', ([], {'using': 'self.es_conn', 'index': 'index', 'doc_type': 'doctype'}), '(using=self.es_conn, index=index, doc_type=doctype)\n', (3741, 3792), False, 'from elasticsearch_dsl import Search\n'), ((4435, 4449), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4447, 4449), False, 'from datetime import datetime, timedelta\n'), ((7099, 7122), 'pandas.io.json.json_normalize', 'json_normalize', (['results'], {}), '(results)\n', (7113, 7122), False, 'from pandas.io.json import json_normalize\n'), ((7154, 7175), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (7166, 7175), True, 'import pandas as pd\n'), ((4476, 4496), 'datetime.timedelta', 'timedelta', ([], {'days': 'days'}), '(days=days)\n', (4485, 4496), False, 'from datetime import datetime, timedelta\n')]
|
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
ACL2016 Multimodal Machine Translation. Please see this website for more
details: http://www.statmt.org/wmt16/multimodal-task.html#task1
If you use the dataset created for your task, please cite the following paper:
Multi30K: Multilingual English-German Image Descriptions.
@article{elliott-EtAl:2016:VL16,
author = {{<NAME>. and {<NAME>. and {<NAME>. and {<NAME>.},
title = {Multi30K: Multilingual English-German Image Descriptions},
booktitle = {Proceedings of the 6th Workshop on Vision and Language},
year = {2016},
pages = {70--74},
year = 2016
}
"""
from __future__ import print_function
import os
import six
import tarfile
import gzip
from collections import defaultdict
import paddle.dataset.common
import paddle.compat as cpt
__all__ = [
"train",
"test",
"validation",
"convert",
"fetch",
"get_dict",
]
DATA_URL = ("http://cloud.dlnel.org/filepub/"
"?uuid=46a0808e-ddd8-427c-bacd-0dbc6d045fed")
DATA_MD5 = "0c38be43600334966403524a40dcd81e"
TOTAL_EN_WORDS = 11250
TOTAL_DE_WORDS = 19220
START_MARK = "<s>"
END_MARK = "<e>"
UNK_MARK = "<unk>"
def __build_dict(tar_file, dict_size, save_path, lang):
word_dict = defaultdict(int)
with tarfile.open(tar_file, mode="r") as f:
for line in f.extractfile("wmt16/train"):
line = cpt.to_text(line)
line_split = line.strip().split("\t")
if len(line_split) != 2: continue
sen = line_split[0] if lang == "en" else line_split[1]
for w in sen.split():
word_dict[w] += 1
with open(save_path, "w") as fout:
fout.write("%s\n%s\n%s\n" % (START_MARK, END_MARK, UNK_MARK))
for idx, word in enumerate(
sorted(
six.iteritems(word_dict), key=lambda x: x[1],
reverse=True)):
if idx + 3 == dict_size: break
fout.write("%s\n" % (word[0]))
def __load_dict(tar_file, dict_size, lang, reverse=False):
dict_path = os.path.join(paddle.dataset.common.DATA_HOME,
"wmt16/%s_%d.dict" % (lang, dict_size))
if not os.path.exists(dict_path) or (
len(open(dict_path, "rb").readlines()) != dict_size):
__build_dict(tar_file, dict_size, dict_path, lang)
word_dict = {}
with open(dict_path, "rb") as fdict:
for idx, line in enumerate(fdict):
if reverse:
word_dict[idx] = cpt.to_text(line.strip())
else:
word_dict[cpt.to_text(line.strip())] = idx
return word_dict
def __get_dict_size(src_dict_size, trg_dict_size, src_lang):
src_dict_size = min(src_dict_size, (TOTAL_EN_WORDS if src_lang == "en" else
TOTAL_DE_WORDS))
trg_dict_size = min(trg_dict_size, (TOTAL_DE_WORDS if src_lang == "en" else
TOTAL_EN_WORDS))
return src_dict_size, trg_dict_size
def reader_creator(tar_file, file_name, src_dict_size, trg_dict_size, src_lang):
def reader():
src_dict = __load_dict(tar_file, src_dict_size, src_lang)
trg_dict = __load_dict(tar_file, trg_dict_size,
("de" if src_lang == "en" else "en"))
# the indice for start mark, end mark, and unk are the same in source
# language and target language. Here uses the source language
# dictionary to determine their indices.
start_id = src_dict[START_MARK]
end_id = src_dict[END_MARK]
unk_id = src_dict[UNK_MARK]
src_col = 0 if src_lang == "en" else 1
trg_col = 1 - src_col
with tarfile.open(tar_file, mode="r") as f:
for line in f.extractfile(file_name):
line = cpt.to_text(line)
line_split = line.strip().split("\t")
if len(line_split) != 2:
continue
src_words = line_split[src_col].split()
src_ids = [start_id] + [
src_dict.get(w, unk_id) for w in src_words
] + [end_id]
trg_words = line_split[trg_col].split()
trg_ids = [trg_dict.get(w, unk_id) for w in trg_words]
trg_ids_next = trg_ids + [end_id]
trg_ids = [start_id] + trg_ids
yield src_ids, trg_ids, trg_ids_next
return reader
def train(src_dict_size, trg_dict_size, src_lang="en"):
"""
WMT16 train set reader.
This function returns the reader for train data. Each sample the reader
returns is made up of three fields: the source language word index sequence,
target language word index sequence and next word index sequence.
NOTE:
    The original link for training data is:
http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/training.tar.gz
paddle.dataset.wmt16 provides a tokenized version of the original dataset by
using moses's tokenization script:
https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/tokenizer.perl
Args:
src_dict_size(int): Size of the source language dictionary. Three
special tokens will be added into the dictionary:
<s> for start mark, <e> for end mark, and <unk> for
unknown word.
trg_dict_size(int): Size of the target language dictionary. Three
special tokens will be added into the dictionary:
<s> for start mark, <e> for end mark, and <unk> for
unknown word.
src_lang(string): A string indicating which language is the source
language. Available options are: "en" for English
and "de" for Germany.
Returns:
callable: The train reader.
"""
if src_lang not in ["en", "de"]:
raise ValueError("An error language type. Only support: "
"en (for English); de(for Germany).")
src_dict_size, trg_dict_size = __get_dict_size(src_dict_size, trg_dict_size,
src_lang)
return reader_creator(
tar_file=paddle.dataset.common.download(DATA_URL, "wmt16", DATA_MD5,
"wmt16.tar.gz"),
file_name="wmt16/train",
src_dict_size=src_dict_size,
trg_dict_size=trg_dict_size,
src_lang=src_lang)
def test(src_dict_size, trg_dict_size, src_lang="en"):
"""
WMT16 test set reader.
This function returns the reader for test data. Each sample the reader
returns is made up of three fields: the source language word index sequence,
target language word index sequence and next word index sequence.
NOTE:
    The original link for test data is:
http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/mmt16_task1_test.tar.gz
paddle.dataset.wmt16 provides a tokenized version of the original dataset by
using moses's tokenization script:
https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/tokenizer.perl
Args:
src_dict_size(int): Size of the source language dictionary. Three
special tokens will be added into the dictionary:
<s> for start mark, <e> for end mark, and <unk> for
unknown word.
trg_dict_size(int): Size of the target language dictionary. Three
special tokens will be added into the dictionary:
<s> for start mark, <e> for end mark, and <unk> for
unknown word.
src_lang(string): A string indicating which language is the source
language. Available options are: "en" for English
and "de" for Germany.
Returns:
callable: The test reader.
"""
if src_lang not in ["en", "de"]:
raise ValueError("An error language type. "
"Only support: en (for English); de(for Germany).")
src_dict_size, trg_dict_size = __get_dict_size(src_dict_size, trg_dict_size,
src_lang)
return reader_creator(
tar_file=paddle.dataset.common.download(DATA_URL, "wmt16", DATA_MD5,
"wmt16.tar.gz"),
file_name="wmt16/test",
src_dict_size=src_dict_size,
trg_dict_size=trg_dict_size,
src_lang=src_lang)
def validation(src_dict_size, trg_dict_size, src_lang="en"):
"""
WMT16 validation set reader.
This function returns the reader for validation data. Each sample the reader
returns is made up of three fields: the source language word index sequence,
target language word index sequence and next word index sequence.
NOTE:
    The original link for validation data is:
http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/validation.tar.gz
paddle.dataset.wmt16 provides a tokenized version of the original dataset by
using moses's tokenization script:
https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/tokenizer.perl
Args:
src_dict_size(int): Size of the source language dictionary. Three
special tokens will be added into the dictionary:
<s> for start mark, <e> for end mark, and <unk> for
unknown word.
trg_dict_size(int): Size of the target language dictionary. Three
special tokens will be added into the dictionary:
<s> for start mark, <e> for end mark, and <unk> for
unknown word.
src_lang(string): A string indicating which language is the source
language. Available options are: "en" for English
and "de" for Germany.
Returns:
callable: The validation reader.
"""
if src_lang not in ["en", "de"]:
raise ValueError("An error language type. "
"Only support: en (for English); de(for Germany).")
src_dict_size, trg_dict_size = __get_dict_size(src_dict_size, trg_dict_size,
src_lang)
return reader_creator(
tar_file=paddle.dataset.common.download(DATA_URL, "wmt16", DATA_MD5,
"wmt16.tar.gz"),
file_name="wmt16/val",
src_dict_size=src_dict_size,
trg_dict_size=trg_dict_size,
src_lang=src_lang)
def get_dict(lang, dict_size, reverse=False):
"""
return the word dictionary for the specified language.
Args:
lang(string): A string indicating which language is the source
language. Available options are: "en" for English
and "de" for Germany.
dict_size(int): Size of the specified language dictionary.
reverse(bool): If reverse is set to False, the returned python
dictionary will use word as key and use index as value.
If reverse is set to True, the returned python
dictionary will use index as key and word as value.
Returns:
dict: The word dictionary for the specific language.
"""
if lang == "en": dict_size = min(dict_size, TOTAL_EN_WORDS)
else: dict_size = min(dict_size, TOTAL_DE_WORDS)
dict_path = os.path.join(paddle.dataset.common.DATA_HOME,
"wmt16/%s_%d.dict" % (lang, dict_size))
assert os.path.exists(dict_path), "Word dictionary does not exist. "
"Please invoke paddle.dataset.wmt16.train/test/validation first "
"to build the dictionary."
tar_file = os.path.join(paddle.dataset.common.DATA_HOME, "wmt16.tar.gz")
return __load_dict(tar_file, dict_size, lang, reverse)
def fetch():
"""download the entire dataset.
"""
    paddle.dataset.common.download(DATA_URL, "wmt16", DATA_MD5,
"wmt16.tar.gz")
def convert(path, src_dict_size, trg_dict_size, src_lang):
"""Converts dataset to recordio format.
"""
paddle.dataset.common.convert(
path,
train(
src_dict_size=src_dict_size,
trg_dict_size=trg_dict_size,
src_lang=src_lang),
1000,
"wmt16_train")
paddle.dataset.common.convert(
path,
test(
src_dict_size=src_dict_size,
trg_dict_size=trg_dict_size,
src_lang=src_lang),
1000,
"wmt16_test")
paddle.dataset.common.convert(
path,
validation(
src_dict_size=src_dict_size,
trg_dict_size=trg_dict_size,
src_lang=src_lang),
1000,
"wmt16_validation")
|
[
"os.path.exists",
"collections.defaultdict",
"tarfile.open",
"six.iteritems",
"os.path.join",
"paddle.compat.to_text"
] |
[((1810, 1826), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (1821, 1826), False, 'from collections import defaultdict\n'), ((2628, 2717), 'os.path.join', 'os.path.join', (['paddle.dataset.common.DATA_HOME', "('wmt16/%s_%d.dict' % (lang, dict_size))"], {}), "(paddle.dataset.common.DATA_HOME, 'wmt16/%s_%d.dict' % (lang,\n dict_size))\n", (2640, 2717), False, 'import os\n'), ((12182, 12271), 'os.path.join', 'os.path.join', (['paddle.dataset.common.DATA_HOME', "('wmt16/%s_%d.dict' % (lang, dict_size))"], {}), "(paddle.dataset.common.DATA_HOME, 'wmt16/%s_%d.dict' % (lang,\n dict_size))\n", (12194, 12271), False, 'import os\n'), ((12308, 12333), 'os.path.exists', 'os.path.exists', (['dict_path'], {}), '(dict_path)\n', (12322, 12333), False, 'import os\n'), ((12486, 12547), 'os.path.join', 'os.path.join', (['paddle.dataset.common.DATA_HOME', '"""wmt16.tar.gz"""'], {}), "(paddle.dataset.common.DATA_HOME, 'wmt16.tar.gz')\n", (12498, 12547), False, 'import os\n'), ((1836, 1868), 'tarfile.open', 'tarfile.open', (['tar_file'], {'mode': '"""r"""'}), "(tar_file, mode='r')\n", (1848, 1868), False, 'import tarfile\n'), ((1944, 1961), 'paddle.compat.to_text', 'cpt.to_text', (['line'], {}), '(line)\n', (1955, 1961), True, 'import paddle.compat as cpt\n'), ((2754, 2779), 'os.path.exists', 'os.path.exists', (['dict_path'], {}), '(dict_path)\n', (2768, 2779), False, 'import os\n'), ((4266, 4298), 'tarfile.open', 'tarfile.open', (['tar_file'], {'mode': '"""r"""'}), "(tar_file, mode='r')\n", (4278, 4298), False, 'import tarfile\n'), ((2383, 2407), 'six.iteritems', 'six.iteritems', (['word_dict'], {}), '(word_dict)\n', (2396, 2407), False, 'import six\n'), ((4378, 4395), 'paddle.compat.to_text', 'cpt.to_text', (['line'], {}), '(line)\n', (4389, 4395), True, 'import paddle.compat as cpt\n')]
|
from typing import List
from triple_agent.reports.generation.generic_query import query
from triple_agent.classes.game import Game
from triple_agent.classes.missions import Missions
from triple_agent.classes.timeline import TimelineCategory
from triple_agent.reports.generation.plot_specs import (
AxisProperties,
DataQueryProperties,
initialize_properties,
)
TAKE_ORDER = ["purloin", "fingerprint", "take", "gave up", "reject"]
def _drink_takes(games, data_dictionary):
for game in games:
tracking_drink = False
waiting_for_fp = False
for timeline_event in game.timeline:
if timeline_event.event == "waiter offered drink.":
tracking_drink = True
waiting_for_fp = False
continue
if tracking_drink:
# look for next thing
# could be problematic if this is the absolute last item? seems unlikely.
if (timeline_event.category & TimelineCategory.ActionTest) and (
timeline_event.mission & Missions.Purloin
):
data_dictionary["purloin"] += 1
tracking_drink = False
waiting_for_fp = False
continue
if timeline_event.event == "rejected drink from waiter.":
data_dictionary["reject"] += 1
tracking_drink = False
waiting_for_fp = False
continue
if timeline_event.event == "got drink from waiter.":
# need to wait for possible fingerprint
# this is sketchy because missed AT on drink are not different from any other object
waiting_for_fp = True
continue
if timeline_event.event == "fingerprinted drink.":
data_dictionary["fingerprint"] += 1
tracking_drink = False
waiting_for_fp = False
continue
if timeline_event.event in (
"waiter offered drink.",
"took last sip of drink.",
"took last bite of cupcake.",
"gulped drink.",
"chomped cupcake.",
):
if waiting_for_fp:
data_dictionary["take"] += 1
tracking_drink = False
waiting_for_fp = False
continue
if timeline_event.event == "waiter gave up.":
data_dictionary["gave up"] += 1
tracking_drink = False
waiting_for_fp = False
continue
if tracking_drink:
# timeline ran out of events, no FP, just call it a take.
data_dictionary["take"] += 1
def drink_takes(
games: List[Game],
data_query: DataQueryProperties = DataQueryProperties(),
axis_properties: AxisProperties = AxisProperties(),
): # pragma: no cover
axis_properties, data_query = initialize_properties(
axis_properties=axis_properties,
data_query=data_query,
suggested_data_query=DataQueryProperties(
query_function=_drink_takes, primary_order=TAKE_ORDER
),
)
return query(games, data_query, axis_properties)
|
[
"triple_agent.reports.generation.plot_specs.AxisProperties",
"triple_agent.reports.generation.plot_specs.DataQueryProperties",
"triple_agent.reports.generation.generic_query.query"
] |
[((2999, 3020), 'triple_agent.reports.generation.plot_specs.DataQueryProperties', 'DataQueryProperties', ([], {}), '()\n', (3018, 3020), False, 'from triple_agent.reports.generation.plot_specs import AxisProperties, DataQueryProperties, initialize_properties\n'), ((3060, 3076), 'triple_agent.reports.generation.plot_specs.AxisProperties', 'AxisProperties', ([], {}), '()\n', (3074, 3076), False, 'from triple_agent.reports.generation.plot_specs import AxisProperties, DataQueryProperties, initialize_properties\n'), ((3375, 3416), 'triple_agent.reports.generation.generic_query.query', 'query', (['games', 'data_query', 'axis_properties'], {}), '(games, data_query, axis_properties)\n', (3380, 3416), False, 'from triple_agent.reports.generation.generic_query import query\n'), ((3259, 3333), 'triple_agent.reports.generation.plot_specs.DataQueryProperties', 'DataQueryProperties', ([], {'query_function': '_drink_takes', 'primary_order': 'TAKE_ORDER'}), '(query_function=_drink_takes, primary_order=TAKE_ORDER)\n', (3278, 3333), False, 'from triple_agent.reports.generation.plot_specs import AxisProperties, DataQueryProperties, initialize_properties\n')]
|
import discord
from discord.ext import commands
import time
import datetime
from datetime import datetime
from datetime import timedelta
from core.utils import send
startTime = time.time()
class Link(discord.ui.View):
def __init__(self, link, label):
super().__init__()
self.add_item(discord.ui.Button(label=label, url=link))
class Information(commands.Cog):
# Constructor
def __init__(self, bot):
self.bot = bot
# Checks the ping of the bot
@commands.command(name="ping")
async def ping(self, ctx):
ping = f"🏓 Pong! My ping is {round(self.bot.latency * 1000)}ms"
await send(ctx, ping, False)
@commands.command(name="help")
async def help(self, ctx):
embed = discord.Embed(title="Help", color=0x3083e3)
embed.set_author(name="EinsteinBot", icon_url=self.bot.user.avatar.url)
embed.add_field(name="=help", value="Display this message.", inline=False)
embed.add_field(name="=ping", value="Displays the ping.", inline=False)
embed.add_field(name="=source", value="Displays the bot's GitHub repository.", inline=False)
embed.add_field(name="=botinfo", value="Displays the bot's information.", inline=False)
embed.add_field(name="=search `url`", value="Searches for the answer within a Chegg link.", inline=False)
await send(ctx, embed, True)
@commands.command(name="source")
async def source(self, ctx):
label = "GitHub"
link = "https://github.com/DouglasTaylorSupportGroup/EinsteinBot"
embed = discord.Embed(title="Source Code", color=0x3083e3, description="EinsteinBot is open source and can be found on GitHub. Any issues or suggestions can be raised there.")
embed.set_author(name="EinsteinBot", icon_url=self.bot.user.avatar.url)
embed.set_footer(text="If you like the bot, consider leaving a star ⭐ on the repository, it helps a ton :D.")
await send(ctx, embed, True, Link(link, label))
@commands.command(name="botinfo")
async def botinfo(self, ctx):
# Get all users in all servers the bot is in.
activeServers = self.bot.guilds
botUsers = 0
for i in activeServers:
botUsers += i.member_count
# Get the current uptime.
currentTime = time.time()
differenceUptime = int(round(currentTime - startTime))
uptime = str(timedelta(seconds = differenceUptime))
# Make the embed for the message.
botinfo = discord.Embed(
title="Bot info",
color=0x3083e3,
timestamp=datetime.now(),
description=f"**Server Count:** {len(self.bot.guilds)}\n**Bot Users:** {botUsers}\n**Bot Uptime:** {uptime}"
)
botinfo.set_author(name="MangaUpdates", icon_url=self.bot.user.avatar.url)
await send(ctx, botinfo, True)
def setup(bot):
bot.add_cog(Information(bot))
|
[
"discord.ext.commands.command",
"discord.Embed",
"time.time",
"datetime.timedelta",
"discord.ui.Button",
"datetime.datetime.now",
"core.utils.send"
] |
[((180, 191), 'time.time', 'time.time', ([], {}), '()\n', (189, 191), False, 'import time\n'), ((502, 531), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""ping"""'}), "(name='ping')\n", (518, 531), False, 'from discord.ext import commands\n'), ((678, 707), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""help"""'}), "(name='help')\n", (694, 707), False, 'from discord.ext import commands\n'), ((1396, 1427), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""source"""'}), "(name='source')\n", (1412, 1427), False, 'from discord.ext import commands\n'), ((2004, 2036), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""botinfo"""'}), "(name='botinfo')\n", (2020, 2036), False, 'from discord.ext import commands\n'), ((755, 797), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Help"""', 'color': '(3179491)'}), "(title='Help', color=3179491)\n", (768, 797), False, 'import discord\n'), ((1576, 1752), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Source Code"""', 'color': '(3179491)', 'description': '"""EinsteinBot is open source and can be found on GitHub. Any issues or suggestions can be raised there."""'}), "(title='Source Code', color=3179491, description=\n 'EinsteinBot is open source and can be found on GitHub. Any issues or suggestions can be raised there.'\n )\n", (1589, 1752), False, 'import discord\n'), ((2313, 2324), 'time.time', 'time.time', ([], {}), '()\n', (2322, 2324), False, 'import time\n'), ((308, 348), 'discord.ui.Button', 'discord.ui.Button', ([], {'label': 'label', 'url': 'link'}), '(label=label, url=link)\n', (325, 348), False, 'import discord\n'), ((649, 671), 'core.utils.send', 'send', (['ctx', 'ping', '(False)'], {}), '(ctx, ping, False)\n', (653, 671), False, 'from core.utils import send\n'), ((1367, 1389), 'core.utils.send', 'send', (['ctx', 'embed', '(True)'], {}), '(ctx, embed, True)\n', (1371, 1389), False, 'from core.utils import send\n'), ((2409, 2444), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'differenceUptime'}), '(seconds=differenceUptime)\n', (2418, 2444), False, 'from datetime import timedelta\n'), ((2847, 2871), 'core.utils.send', 'send', (['ctx', 'botinfo', '(True)'], {}), '(ctx, botinfo, True)\n', (2851, 2871), False, 'from core.utils import send\n'), ((2603, 2617), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2615, 2617), False, 'from datetime import datetime\n')]
|
import itertools
from typing import Dict, TextIO
def compute_points(line: str) -> Dict[complex, int]:
points: Dict[complex, int] = {}
steps = itertools.count(1)
pos = complex(0)
directions = {
'U': 1j,
'R': 1,
'D': -1j,
'L': -1,
}
for move in line.strip().split(','):
direction = directions[move[0]]
for _ in range(int(move[1:])):
pos += direction
points.setdefault(pos, next(steps))
return points
def part1(data: TextIO) -> int:
points_a = compute_points(next(data))
points_b = compute_points(next(data))
in_common = set(points_a.keys()) & set(points_b.keys())
return int(min(abs(c.imag) + abs(c.real) for c in in_common))
def part2(data: TextIO) -> int:
points_a = compute_points(next(data))
points_b = compute_points(next(data))
in_common = set(points_a.keys()) & set(points_b.keys())
return min(points_a[pos] + points_b[pos] for pos in in_common)
|
[
"itertools.count"
] |
[((152, 170), 'itertools.count', 'itertools.count', (['(1)'], {}), '(1)\n', (167, 170), False, 'import itertools\n')]
|
import requests
from requests.compat import quote_plus
from django.shortcuts import render
from . import models
from bs4 import BeautifulSoup
# Create your views here.
base_url='http://gen.lib.rus.ec'
main_url=base_url+'/search.php?req={}&open=0&res=50&view=simple&phrase=1&column={}'
def home(request):
return(render(request, 'book_app/home.html'))
def new_search(request):
try:
search= request.POST.get('search')
search_type = request.POST.get('Search_type')
models.Search.objects.create(search=search,type=search_type)
url_f=main_url.format(quote_plus(search),search_type)
for_front_data,check=get_data_on_book(url_f)
except:
for_front_data = []
check = -1
search=''
checks=str(check)
data_to_send={
'for_front_data':for_front_data,
'checks':checks,
'search':search,
}
return render(request,"book_app/new_search.html",data_to_send)
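# Scrape one Library Genesis results page. Each matching row is collected as a tuple of
# (title, authors, language, year, download link, format, picture URL, size, book id);
# `check` counts the result rows encountered.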
def get_data_on_book(url_f):
for_front_data = []
check = 0
try:
response = requests.get(url_f)
page_html = response.text
page_data = BeautifulSoup(page_html, features='html.parser')
post_data = page_data.find_all('tr', {'valign': 'top', 'bgcolor': ('#C6DEFF', '')})
except:
return
for post in post_data:
try:
check += 1
## all the required data from the page in text form ##
book = post.find_all('td')
book_id = book[0].text
book_data = book[2].find('a', {'id': book_id})
book_author = book[1].find_all('a')
book_year = book[4].text
book_lng = book[6].text
book_size = book[7].text
book_link = book[9].find('a').get('href', "")
book_formate = book[8].text
picture_url=""
            ## get at most three author names of the book ##
author = ""
i = 0
for name in book_author:
if author != "":
author += ", "
author = author + name.text
i += 1
if (i == 3):
break
            ## to get the title of the book ##
ext = book_data.find_all('i')
ex = ""
p = 0
for i in ext:
p = 1
ex = ex + i.text
book_title = book_data.text
if p == 1:
book_name = book_title[:len(book_title) - len(ex) - 1]
else:
book_name = book_title
for_front_data.append(
(book_name,
author,
book_lng,
book_year,
book_link,
book_formate,
picture_url,
book_size,
book_id))
except :
pass
return((for_front_data,check))
|
[
"django.shortcuts.render",
"requests.compat.quote_plus",
"requests.get",
"bs4.BeautifulSoup"
] |
[((316, 353), 'django.shortcuts.render', 'render', (['request', '"""book_app/home.html"""'], {}), "(request, 'book_app/home.html')\n", (322, 353), False, 'from django.shortcuts import render\n'), ((902, 959), 'django.shortcuts.render', 'render', (['request', '"""book_app/new_search.html"""', 'data_to_send'], {}), "(request, 'book_app/new_search.html', data_to_send)\n", (908, 959), False, 'from django.shortcuts import render\n'), ((1055, 1074), 'requests.get', 'requests.get', (['url_f'], {}), '(url_f)\n', (1067, 1074), False, 'import requests\n'), ((1130, 1178), 'bs4.BeautifulSoup', 'BeautifulSoup', (['page_html'], {'features': '"""html.parser"""'}), "(page_html, features='html.parser')\n", (1143, 1178), False, 'from bs4 import BeautifulSoup\n'), ((589, 607), 'requests.compat.quote_plus', 'quote_plus', (['search'], {}), '(search)\n', (599, 607), False, 'from requests.compat import quote_plus\n')]
|
# Generated by Django 3.1.2 on 2020-10-25 19:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Project',
fields=[
('name', models.CharField(max_length=32, primary_key=True, serialize=False)),
('description', models.CharField(max_length=256)),
],
),
migrations.CreateModel(
name='Entry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('start_at', models.DateTimeField(blank=True, null=True)),
('end_at', models.DateTimeField(blank=True, null=True)),
('duration_in_minutes', models.IntegerField(blank=True, null=True, verbose_name='Duration in Minutes')),
('project', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='time_tracker.project')),
],
),
]
|
[
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.models.IntegerField",
"django.db.models.DateTimeField"
] |
[((338, 404), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(32)', 'primary_key': '(True)', 'serialize': '(False)'}), '(max_length=32, primary_key=True, serialize=False)\n', (354, 404), False, 'from django.db import migrations, models\n'), ((439, 471), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)'}), '(max_length=256)\n', (455, 471), False, 'from django.db import migrations, models\n'), ((602, 695), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (618, 695), False, 'from django.db import migrations, models\n'), ((723, 766), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (743, 766), False, 'from django.db import migrations, models\n'), ((796, 839), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (816, 839), False, 'from django.db import migrations, models\n'), ((882, 960), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""Duration in Minutes"""'}), "(blank=True, null=True, verbose_name='Duration in Minutes')\n", (901, 960), False, 'from django.db import migrations, models\n'), ((991, 1085), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.PROTECT', 'to': '"""time_tracker.project"""'}), "(on_delete=django.db.models.deletion.PROTECT, to=\n 'time_tracker.project')\n", (1008, 1085), False, 'from django.db import migrations, models\n')]
|
from util.SPARQL_Query_Wiki import SPARQL_Query_for_Wiki
import json, re, time, random
SPARQL_template = '''
#All properties with descriptions and aliases and types
SELECT ?item ?type ?itemLabel ?itemAltLabel WHERE {
wd:%s ?property ?x .
?item wikibase:directClaim ?property ;
wikibase:propertyType ?type .
SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE],en". }
}
GROUP BY ?item ?type ?itemLabel ?itemAltLabel
'''
query_wiki = SPARQL_Query_for_Wiki()
valuable_instances = []
property_names = []
list_of_properties = []
counter = 0
property_record = []
instance_property_mapping = {}
errors = []
# during the upgrade, the properties should be connected to instances
# a dictionary {'Qxxxxx': label, alt_labels, [p1: label, alt_labels, p2, p3, p4]}
# later merge this dictionary with the instance-class dictionary
# which will serve as the ultimate corpus for training the NLU
# get all the properties connected to the first 2000 high-value instances
# then randomly choose another 3000 instances
# create the instance-property mapping
def get_property(instance):
id = re.search(r'Q[0-9]+', instance)[0]
SPARQL_query = SPARQL_template % id
try:
single_list_property = []
results = query_wiki.get_results(SPARQL_query)
bindings = results['results']['bindings']
for b in bindings:
item = b['item']['value']
if item in property_record: # it already exists
# then do nothing, it is repeated
pass
else:
# print('item', item) # item is the URI of the property, itemLabel to be its label, and
property_record.append(item) # it is new, add it to the list
if b not in list_of_properties:
list_of_properties.append(b)
print('collected', len(list_of_properties))
print(item)
tmp = {}
if 'item' in b:
tmp['uri'] = b['item']['value']
if 'itemLabel' in b:
tmp['label'] = b['itemLabel']['value']
if 'itemAltLabel' in b:
altlabel = b['itemAltLabel']['value']
tmp['alt_label'] = altlabel.split(',')
if 'type' in b:
tmp['type'] = b['type']['value']
single_list_property.append(tmp)
if id not in instance_property_mapping:
instance_property_mapping[id] = {'properties': single_list_property}
except:
errors.append(id)
with open('property_log', 'w') as f:
f.write(json.dumps(errors) + '\n')
f.close()
pass
time.sleep(0.5)
with open('distinct_properties') as f:
list_of_properties = json.loads(f.read())
f.close()
random_counter = 0
with open('WIKI_URI_LIST') as f:
instances = json.loads(f.read())
print('number of instances', len(instances))
f.close()
# random_instance = random.sample(instances[2000:], 3000)
# get the properties of the first 2000 instances (ranked by their length, from short to long )
# random_instance = random.sample(instances[2000:], 200) + random.sample(instances[:2000], 200)
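# NOTE: FAILED_CASES is not defined in this snippet; it is presumably a list of instance
# URIs to retry (for example, re-loaded from the error ids written to 'property_log' above).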
for instance in FAILED_CASES:
counter = counter + 1
print('iterated', counter, 'out of', len(FAILED_CASES))
get_property(instance)
if counter == 10:
with open('ipm_test', 'w') as f:
f.write(json.dumps(instance_property_mapping))
f.close()
with open('distinct_properties', 'w') as f:
f.write(json.dumps(list_of_properties))
f.close()
# with open('instance_property_mapping_first_2000', 'w') as f:
# f.write(json.dumps(instance_property_mapping))
# f.close()
# instance_property_mapping = {}
# for instance in random_instance:
# random_counter = random_counter + 1
# print('iterated', random_counter, 'out of 3000')
#
# get_property(instance)
#
# with open('instance_property_mapping_random_3000', 'w') as f:
# f.write(json.dumps(instance_property_mapping))
# f.close()
#
##################################################
# randomly choose another
|
[
"util.SPARQL_Query_Wiki.SPARQL_Query_for_Wiki",
"re.search",
"json.dumps",
"time.sleep"
] |
[((475, 498), 'util.SPARQL_Query_Wiki.SPARQL_Query_for_Wiki', 'SPARQL_Query_for_Wiki', ([], {}), '()\n', (496, 498), False, 'from util.SPARQL_Query_Wiki import SPARQL_Query_for_Wiki\n'), ((1132, 1162), 're.search', 're.search', (['"""Q[0-9]+"""', 'instance'], {}), "('Q[0-9]+', instance)\n", (1141, 1162), False, 'import json, re, time, random\n'), ((2706, 2721), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (2716, 2721), False, 'import json, re, time, random\n'), ((3582, 3612), 'json.dumps', 'json.dumps', (['list_of_properties'], {}), '(list_of_properties)\n', (3592, 3612), False, 'import json, re, time, random\n'), ((3457, 3494), 'json.dumps', 'json.dumps', (['instance_property_mapping'], {}), '(instance_property_mapping)\n', (3467, 3494), False, 'import json, re, time, random\n'), ((2636, 2654), 'json.dumps', 'json.dumps', (['errors'], {}), '(errors)\n', (2646, 2654), False, 'import json, re, time, random\n')]
|
import sys
import random
from sprite.mario import *
from level.level import *
from tool.character import *
from . scene import *
from gui.gui import *
from gui.menuitem import *
from gui.menu import *
from gui.menubar import *
from gui.slider import *
from gui.widget import *
from gui.label import *
@Singleton
class GameScene(Scene):
def __init__(self):
print("init game scene")
Scene.__init__(self)
self.self_scene = GAME_SCENE
self.sound_volume = 0
self.player_group = pygame.sprite.Group()
self.coin_group = pygame.sprite.Group()
self.mushroom_group = pygame.sprite.Group()
self.mario = Mario()
self.player_group.add(self.mario)
self.init_gui()
self.globalData = GlobalData()
def set_level(self, level):
self.level = Level(level)
def init_gui(self):
self.gui = GUI()
menubar = MenuBar()
setting_menu = Menu(text="设置")
menubar.add_menu(setting_menu)
sound_menuitem = MenuItem(text="声音")
sound_menuitem.bind_active(self.set_volumn, sound_menuitem)
setting_menu.add_menuitem(sound_menuitem)
system_menu = Menu(text="系统")
menubar.add_menu(system_menu)
return_menu_menuitem = MenuItem(text='返回菜单')
return_menu_menuitem.bind_active(self.enter_gamemenu_sceen)
system_menu.add_menuitem(return_menu_menuitem)
exit_menuitem = MenuItem(text="退出")
exit_menuitem.bind_active(
lambda: sys.exit(0))
system_menu.add_menuitem(exit_menuitem)
self.gui.add_menubar(menubar, pos=(0, 0))
def set_volumn(self, button):
button.status = HOVER
widget = Widget()
music_label = Label(text='声音', text_size=48)
music_slider = Slider(value=pygame.mixer.music.get_volume())
widget.add_label(music_label, pos=(0, 100))
widget.add_slider(music_slider, pos=(music_label.rect.width, 100))
sound_label = Label(text='音效', text_size=48)
sound_slider = Slider(value=pygame.mixer.music.get_volume())
widget.add_label(sound_label, pos=(0, 200))
widget.add_slider(sound_slider, pos=(sound_label.rect.width, 200))
self.gui.add_widget(widget, pos=(100, 200))
clock = pygame.time.Clock()
while widget.alive():
for event in pygame.event.get():
self.gui.process_event(event)
self.update()
self.gui.update(self.screen)
pygame.display.update()
pygame.mixer.music.set_volume(music_slider.value)
self.sound_volume = sound_slider.value
clock.tick(60)
def enter_gamemenu_sceen(self):
self.globalData.scene = GAME_MENU_SCENE
def update(self):
self.screen.fill(SKYBLUE, (0, 0, 800, 600))
self.level.update(self.screen)
self.coin_group.update()
self.coin_group.draw(self.screen)
self.mushroom_group.update()
self.mushroom_group.draw(self.screen)
self.player_group.update()
self.player_group.draw(self.screen)
def show(self):
self.update()
self.move_mario()
self.move_item(self.level.enemy_group)
self.move_item(self.mushroom_group)
self.show_info()
self.gui.update(self.screen)
pygame.display.update()
if not self.player_group.has(self.mario):
self.globalData.scene = DEATH_SCENE
self.player_group.add(self.mario)
    # Move Mario according to his x and y speed.
    # After moving along each axis, detect and handle any collisions.
def move_mario(self):
self.mario.rect.x += self.mario.speed_x
self.check_move_scene()
self.check_mario_border()
self.check_mario_collision_x()
if self.mario.speed_y != 0:
self.mario.rect.y += self.mario.speed_y
self.check_mario_border()
self.check_mario_collision_y()
    # Check whether the scene should scroll:
    # once Mario passes the middle of the screen, scroll the level instead of moving him.
def check_move_scene(self):
if self.mario.rect.x > 400 and self.mario.speed_x > 0 \
and self.level.start_x + self.mario.speed_x + 800 < self.level.length:
self.level.start_x += self.mario.speed_x
self.mario.rect.x -= self.mario.speed_x
for enemy in self.level.enemy_group:
enemy.rect.x -= self.mario.speed_x
for coin in self.coin_group:
coin.rect.x -= self.mario.speed_x
for mushroom in self.mushroom_group:
mushroom.rect.x -= self.mario.speed_x
    # Detect Mario's collisions along the x axis.
def check_mario_collision_x(self):
if not self.mario.is_collider:
return
brick = pygame.sprite.spritecollideany(self.mario, self.level.brick_group)
pipe = pygame.sprite.spritecollideany(self.mario, self.level.pipe_group)
mushroom = pygame.sprite.spritecollideany(self.mario, self.mushroom_group)
enemy = pygame.sprite.spritecollideany(self.mario, self.level.enemy_group)
piranha = pygame.sprite.spritecollideany(self.mario, self.level.plant_enemy)
checkpoint = pygame.sprite.spritecollideany(self.mario, self.level.checkpoint_group)
win = pygame.sprite.spritecollideany(self.mario, self.level.castle_group)
if brick:
self.process_mario_collision_x(brick)
if pipe:
self.process_mario_collision_x(pipe)
if win:
self.process_win()
if mushroom:
self.process_mario_mushroom_collision(mushroom)
if enemy:
if self.mario.rect.x > enemy.rect.x and self.mario.rect.left + 10 < enemy.rect.right or \
self.mario.rect.x < enemy.rect.x and self.mario.rect.right - 10 > enemy.rect.left:
self.process_mario_enemy_collision_x()
if checkpoint:
self.process_mario_checkpoint_collision(checkpoint)
if piranha:
self.process_mario_piranha_collision()
def process_win(self):
self.globalData.scene = WIN_SCENE
def process_mario_piranha_collision(self):
self.mario.set_status(DEATH)
def process_mario_checkpoint_collision(self, checkpoint):
self.mario.save_data()
checkpoint.kill()
def process_mario_enemy_collision_x(self):
self.mario.set_status(DEATH)
def process_mario_mushroom_collision(self, mushroom):
mushroom.bump(self.mario)
    # Handle Mario's collision along the x axis.
def process_mario_collision_x(self, collider):
if self.mario.rect.x < collider.rect.x:
self.mario.rect.right = collider.rect.left
else:
self.mario.rect.left = collider.rect.right
        # Mario collided on the x axis, so zero his horizontal speed.
self.mario.speed_x = 0
    # Detect Mario's collisions along the y axis.
def check_mario_collision_y(self):
if not self.mario.is_collider:
return
brick = pygame.sprite.spritecollideany(self.mario, self.level.brick_group)
pipe = pygame.sprite.spritecollideany(self.mario, self.level.pipe_group)
mushroom = pygame.sprite.spritecollideany(self.mario, self.mushroom_group)
checkpoint = pygame.sprite.spritecollideany(self.mario, self.level.checkpoint_group)
if brick:
self.process_mario_collision_y(brick)
if pipe:
self.process_mario_collision_y(pipe)
if mushroom:
self.process_mario_mushroom_collision(mushroom)
enemy = pygame.sprite.spritecollideany(self.mario, self.level.enemy_group)
if enemy:
self.process_mario_enemy_collision_y(enemy)
if checkpoint:
self.process_mario_checkpoint_collision(checkpoint)
piranha = pygame.sprite.spritecollideany(self.mario, self.level.plant_enemy)
if piranha:
self.process_mario_piranha_collision()
def process_mario_enemy_collision_y(self, enemy):
if self.mario.rect.y + self.mario.rect.height * 0.5 < enemy.rect.y:
enemy.set_status(DEATH)
self.level.death_enemy_group.add(enemy)
self.level.enemy_group.remove(enemy)
    # Handle Mario's collision along the y axis.
def process_mario_collision_y(self, collider):
if self.mario.rect.y < collider.rect.y:
self.mario.rect.bottom = collider.rect.top
elif self.mario.rect.y > collider.rect.y:
self.mario.rect.top = collider.rect.bottom
        # When Mario bumps a block from below, hand out the reward.
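        # Blocks with type 2100 pay out coins when bumped; other bumpable blocks
        # spawn a mushroom instead (inferred from the sprite groups passed to bump()).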
if collider.type == 2100:
collider.bump(self.coin_group, self.mario)
else:
collider.bump(self.mushroom_group, self.mario)
        # Mario collided on the y axis, so zero his vertical speed.
self.mario.speed_y = 0
        # If Mario was jumping when the vertical collision happened,
        # also zero his horizontal speed and return him to the standing state.
if self.mario.status == JUMP:
self.mario.speed_x = 0
self.mario.status = STAND
    # Move every item in the group.
    # After moving, detect and handle its collisions.
def move_item(self, item_group):
for item in item_group:
item.rect.x += item.speed_x
self.check_item_collision_x(item)
if item.speed_y != 0:
item.rect.y += item.speed_y
self.check_item_collision_y(item)
    # Detect item collisions along the x axis:
    # reverse direction when hitting a brick or a pipe.
def check_item_collision_x(self, item):
brick = pygame.sprite.spritecollideany(item, self.level.brick_group)
pipe = pygame.sprite.spritecollideany(item, self.level.pipe_group)
if brick:
item.rotate_direction()
if pipe:
item.rotate_direction()
    # Detect item collisions along the y axis.
def check_item_collision_y(self, item):
brick = pygame.sprite.spritecollideany(item, self.level.brick_group)
pipe = pygame.sprite.spritecollideany(item, self.level.pipe_group)
if brick:
if item.rect.y < brick.rect.y:
item.rect.bottom = brick.rect.top
item.speed_y = 0
if pipe:
if item.rect.y < pipe.rect.y:
item.rect.bottom = pipe.rect.top
item.speed_y = 0
def check_mario_border(self):
if self.mario.rect.left < 0:
self.mario.rect.left = 0
if self.mario.rect.right > 800:
self.mario.rect.right = 800
if self.mario.rect.top < 0:
self.mario.rect.top = 0
if self.mario.rect.bottom > 600:
self.mario.rect.bottom = 600
    # Display the score, coin and life counters.
def show_info(self):
write_chars(self.screen, "分数: " + str(self.mario.score), 32, WHITE, (650, 0))
write_chars(self.screen, "硬币: " + str(self.mario.coin_num), 32, WHITE, (650, 32))
write_chars(self.screen, "生命: " + str(self.mario.life), 32, WHITE, (650, 64))
def process_event(self, event):
self.gui.process_event(event)
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
self.mario.set_status(WALK)
self.mario.set_direction(LEFT)
elif event.key == pygame.K_RIGHT:
self.mario.set_status(WALK)
self.mario.set_direction(RIGHT)
elif event.key == pygame.K_a:
self.mario.set_status(JUMP)
|
[
"sys.exit"
] |
[((1525, 1536), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1533, 1536), False, 'import sys\n')]
|
# MIT License
#
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Calculate average latent variables (here called attribute vectors)
for the different attributes in CelebA
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import importlib
import math
import os
import sys
import time
import facenet
import h5py
import numpy as np
import tensorflow as tf
from six import iteritems
def main(args):
img_mean = np.array([134.10714722, 102.52040863, 87.15436554])
img_stddev = np.sqrt(np.array([3941.30175781, 2856.94287109, 2519.35791016]))
vae_checkpoint = os.path.expanduser(args.vae_checkpoint)
fields, attribs_dict = read_annotations(args.annotations_filename)
vae_def = importlib.import_module(args.vae_def)
vae = vae_def.Vae(args.latent_var_size)
gen_image_size = vae.get_image_size()
with tf.Graph().as_default():
tf.set_random_seed(args.seed)
image_list = facenet.get_image_paths(os.path.expanduser(args.data_dir))
# Get attributes for images
nrof_attributes = len(fields)
attribs_list = []
for img in image_list:
key = os.path.split(img)[1].split('.')[0]
attr = attribs_dict[key]
assert len(attr) == nrof_attributes
attribs_list.append(attr)
# Create the input queue
index_list = range(len(image_list))
input_queue = tf.train.slice_input_producer([image_list, attribs_list, index_list], num_epochs=1, shuffle=False)
nrof_preprocess_threads = 4
image_per_thread = []
for _ in range(nrof_preprocess_threads):
filename = input_queue[0]
file_contents = tf.read_file(filename)
image = tf.image.decode_image(file_contents, channels=3)
image = tf.image.resize_image_with_crop_or_pad(image, 160, 160)
# image = tf.image.resize_images(image, (64,64))
image.set_shape((args.image_size, args.image_size, 3))
attrib = input_queue[1]
attrib.set_shape((nrof_attributes,))
image = tf.cast(image, tf.float32)
image_per_thread.append([image, attrib, input_queue[2]])
images, attribs, indices = tf.train.batch_join(
image_per_thread, batch_size=args.batch_size,
shapes=[(args.image_size, args.image_size, 3), (nrof_attributes,), ()], enqueue_many=False,
capacity=4 * nrof_preprocess_threads * args.batch_size,
allow_smaller_final_batch=True)
# Normalize
images_norm = (images - img_mean) / img_stddev
# Resize to appropriate size for the encoder
images_norm_resize = tf.image.resize_images(images_norm, (gen_image_size, gen_image_size))
# Create encoder network
mean, log_variance = vae.encoder(images_norm_resize, True)
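        # Sample the latent variable with the reparameterization trick:
        # z = mean + std * epsilon, where std = exp(log_variance / 2) and epsilon ~ N(0, 1).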
epsilon = tf.random_normal((tf.shape(mean)[0], args.latent_var_size))
std = tf.exp(log_variance / 2)
latent_var = mean + epsilon * std
# Create a saver
saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)
# Start running operations on the Graph
gpu_memory_fraction = 1.0
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
coord = tf.train.Coordinator()
tf.train.start_queue_runners(coord=coord, sess=sess)
with sess.as_default():
if vae_checkpoint:
print('Restoring VAE checkpoint: %s' % vae_checkpoint)
saver.restore(sess, vae_checkpoint)
nrof_images = len(image_list)
nrof_batches = int(math.ceil(len(image_list) / args.batch_size))
latent_vars = np.zeros((nrof_images, args.latent_var_size))
attributes = np.zeros((nrof_images, nrof_attributes))
for i in range(nrof_batches):
start_time = time.time()
latent_var_, attribs_, indices_ = sess.run([latent_var, attribs, indices])
latent_vars[indices_, :] = latent_var_
attributes[indices_, :] = attribs_
duration = time.time() - start_time
print('Batch %d/%d: %.3f seconds' % (i + 1, nrof_batches, duration))
# NOTE: This will print the 'Out of range' warning if the last batch is not full,
# as described by https://github.com/tensorflow/tensorflow/issues/8330
# Calculate average change in the latent variable when each attribute changes
attribute_vectors = np.zeros((nrof_attributes, args.latent_var_size), np.float32)
for i in range(nrof_attributes):
pos_idx = np.argwhere(attributes[:, i] == 1)[:, 0]
neg_idx = np.argwhere(attributes[:, i] == -1)[:, 0]
pos_avg = np.mean(latent_vars[pos_idx, :], 0)
neg_avg = np.mean(latent_vars[neg_idx, :], 0)
attribute_vectors[i, :] = pos_avg - neg_avg
filename = os.path.expanduser(args.output_filename)
print('Writing attribute vectors, latent variables and attributes to %s' % filename)
mdict = {'latent_vars': latent_vars, 'attributes': attributes,
'fields': fields, 'attribute_vectors': attribute_vectors}
with h5py.File(filename, 'w') as f:
for key, value in iteritems(mdict):
f.create_dataset(key, data=value)
def read_annotations(filename):
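    # The CelebA attribute file starts with the number of images, then a line of
    # attribute names, then one line per image of +1/-1 flags for each attribute.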
attribs = {}
with open(filename, 'r') as f:
for i, line in enumerate(f.readlines()):
if i == 0:
continue # First line is the number of entries in the file
elif i == 1:
fields = line.strip().split() # Second line is the field names
else:
line = line.split()
img_name = line[0].split('.')[0]
img_attribs = map(int, line[1:])
attribs[img_name] = img_attribs
return fields, attribs
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('vae_def', type=str,
help='Model definition for the variational autoencoder. Points to a module containing the definition.',
default='src.generative.models.dfc_vae')
parser.add_argument('vae_checkpoint', type=str,
help='Checkpoint file of a pre-trained variational autoencoder.')
parser.add_argument('data_dir', type=str,
help='Path to the directory containing aligned face patches for the CelebA dataset.')
parser.add_argument('annotations_filename', type=str,
help='Path to the annotations file',
default='/media/deep/datasets/CelebA/Anno/list_attr_celeba.txt')
parser.add_argument('output_filename', type=str,
help='Filename to use for the file containing the attribute vectors.')
parser.add_argument('--batch_size', type=int,
help='Number of images to process in a batch.', default=128)
parser.add_argument('--image_size', type=int,
help='Image size (height, width) in pixels.', default=64)
parser.add_argument('--latent_var_size', type=int,
help='Dimensionality of the latent variable.', default=100)
parser.add_argument('--seed', type=int,
help='Random seed.', default=666)
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
|
[
"tensorflow.train.Coordinator",
"argparse.ArgumentParser",
"tensorflow.trainable_variables",
"tensorflow.train.batch_join",
"tensorflow.local_variables_initializer",
"tensorflow.ConfigProto",
"numpy.mean",
"tensorflow.image.resize_image_with_crop_or_pad",
"tensorflow.GPUOptions",
"six.iteritems",
"tensorflow.train.slice_input_producer",
"tensorflow.set_random_seed",
"tensorflow.train.start_queue_runners",
"tensorflow.cast",
"tensorflow.exp",
"tensorflow.image.resize_images",
"h5py.File",
"importlib.import_module",
"tensorflow.global_variables_initializer",
"tensorflow.Graph",
"numpy.argwhere",
"tensorflow.read_file",
"numpy.zeros",
"time.time",
"tensorflow.shape",
"tensorflow.image.decode_image",
"numpy.array",
"os.path.split",
"os.path.expanduser"
] |
[((1538, 1589), 'numpy.array', 'np.array', (['[134.10714722, 102.52040863, 87.15436554]'], {}), '([134.10714722, 102.52040863, 87.15436554])\n', (1546, 1589), True, 'import numpy as np\n'), ((1694, 1733), 'os.path.expanduser', 'os.path.expanduser', (['args.vae_checkpoint'], {}), '(args.vae_checkpoint)\n', (1712, 1733), False, 'import os\n'), ((1821, 1858), 'importlib.import_module', 'importlib.import_module', (['args.vae_def'], {}), '(args.vae_def)\n', (1844, 1858), False, 'import importlib\n'), ((7351, 7376), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7374, 7376), False, 'import argparse\n'), ((1615, 1670), 'numpy.array', 'np.array', (['[3941.30175781, 2856.94287109, 2519.35791016]'], {}), '([3941.30175781, 2856.94287109, 2519.35791016])\n', (1623, 1670), True, 'import numpy as np\n'), ((1988, 2017), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['args.seed'], {}), '(args.seed)\n', (2006, 2017), True, 'import tensorflow as tf\n'), ((2508, 2610), 'tensorflow.train.slice_input_producer', 'tf.train.slice_input_producer', (['[image_list, attribs_list, index_list]'], {'num_epochs': '(1)', 'shuffle': '(False)'}), '([image_list, attribs_list, index_list],\n num_epochs=1, shuffle=False)\n', (2537, 2610), True, 'import tensorflow as tf\n'), ((3322, 3581), 'tensorflow.train.batch_join', 'tf.train.batch_join', (['image_per_thread'], {'batch_size': 'args.batch_size', 'shapes': '[(args.image_size, args.image_size, 3), (nrof_attributes,), ()]', 'enqueue_many': '(False)', 'capacity': '(4 * nrof_preprocess_threads * args.batch_size)', 'allow_smaller_final_batch': '(True)'}), '(image_per_thread, batch_size=args.batch_size, shapes=[(\n args.image_size, args.image_size, 3), (nrof_attributes,), ()],\n enqueue_many=False, capacity=4 * nrof_preprocess_threads * args.\n batch_size, allow_smaller_final_batch=True)\n', (3341, 3581), True, 'import tensorflow as tf\n'), ((3777, 3846), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['images_norm', '(gen_image_size, gen_image_size)'], {}), '(images_norm, (gen_image_size, gen_image_size))\n', (3799, 3846), True, 'import tensorflow as tf\n'), ((4041, 4065), 'tensorflow.exp', 'tf.exp', (['(log_variance / 2)'], {}), '(log_variance / 2)\n', (4047, 4065), True, 'import tensorflow as tf\n'), ((4311, 4377), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'per_process_gpu_memory_fraction': 'gpu_memory_fraction'}), '(per_process_gpu_memory_fraction=gpu_memory_fraction)\n', (4324, 4377), True, 'import tensorflow as tf\n'), ((4599, 4621), 'tensorflow.train.Coordinator', 'tf.train.Coordinator', ([], {}), '()\n', (4619, 4621), True, 'import tensorflow as tf\n'), ((4630, 4682), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'coord': 'coord', 'sess': 'sess'}), '(coord=coord, sess=sess)\n', (4658, 4682), True, 'import tensorflow as tf\n'), ((2064, 2097), 'os.path.expanduser', 'os.path.expanduser', (['args.data_dir'], {}), '(args.data_dir)\n', (2082, 2097), False, 'import os\n'), ((2789, 2811), 'tensorflow.read_file', 'tf.read_file', (['filename'], {}), '(filename)\n', (2801, 2811), True, 'import tensorflow as tf\n'), ((2832, 2880), 'tensorflow.image.decode_image', 'tf.image.decode_image', (['file_contents'], {'channels': '(3)'}), '(file_contents, channels=3)\n', (2853, 2880), True, 'import tensorflow as tf\n'), ((2901, 2956), 'tensorflow.image.resize_image_with_crop_or_pad', 'tf.image.resize_image_with_crop_or_pad', (['image', '(160)', '(160)'], {}), '(image, 160, 160)\n', (2939, 2956), True, 'import tensorflow as tf\n'), ((3190, 3216), 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), '(image, tf.float32)\n', (3197, 3216), True, 'import tensorflow as tf\n'), ((4165, 4189), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (4187, 4189), True, 'import tensorflow as tf\n'), ((4497, 4530), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4528, 4530), True, 'import tensorflow as tf\n'), ((4549, 4581), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (4579, 4581), True, 'import tensorflow as tf\n'), ((5017, 5062), 'numpy.zeros', 'np.zeros', (['(nrof_images, args.latent_var_size)'], {}), '((nrof_images, args.latent_var_size))\n', (5025, 5062), True, 'import numpy as np\n'), ((5088, 5128), 'numpy.zeros', 'np.zeros', (['(nrof_images, nrof_attributes)'], {}), '((nrof_images, nrof_attributes))\n', (5096, 5128), True, 'import numpy as np\n'), ((5847, 5908), 'numpy.zeros', 'np.zeros', (['(nrof_attributes, args.latent_var_size)', 'np.float32'], {}), '((nrof_attributes, args.latent_var_size), np.float32)\n', (5855, 5908), True, 'import numpy as np\n'), ((6297, 6337), 'os.path.expanduser', 'os.path.expanduser', (['args.output_filename'], {}), '(args.output_filename)\n', (6315, 6337), False, 'import os\n'), ((1955, 1965), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1963, 1965), True, 'import tensorflow as tf\n'), ((4411, 4478), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options', 'log_device_placement': '(False)'}), '(gpu_options=gpu_options, log_device_placement=False)\n', (4425, 4478), True, 'import tensorflow as tf\n'), ((5200, 5211), 'time.time', 'time.time', ([], {}), '()\n', (5209, 5211), False, 'import time\n'), ((6115, 6150), 'numpy.mean', 'np.mean', (['latent_vars[pos_idx, :]', '(0)'], {}), '(latent_vars[pos_idx, :], 0)\n', (6122, 6150), True, 'import numpy as np\n'), ((6177, 6212), 'numpy.mean', 'np.mean', (['latent_vars[neg_idx, :]', '(0)'], {}), '(latent_vars[neg_idx, :], 0)\n', (6184, 6212), True, 'import numpy as np\n'), ((6606, 6630), 'h5py.File', 'h5py.File', (['filename', '"""w"""'], {}), "(filename, 'w')\n", (6615, 6630), False, 'import h5py\n'), ((6671, 6687), 'six.iteritems', 'iteritems', (['mdict'], {}), '(mdict)\n', (6680, 6687), False, 'from six import iteritems\n'), ((3985, 3999), 'tensorflow.shape', 'tf.shape', (['mean'], {}), '(mean)\n', (3993, 3999), True, 'import tensorflow as tf\n'), ((5436, 5447), 'time.time', 'time.time', ([], {}), '()\n', (5445, 5447), False, 'import time\n'), ((5980, 6014), 'numpy.argwhere', 'np.argwhere', (['(attributes[:, i] == 1)'], {}), '(attributes[:, i] == 1)\n', (5991, 6014), True, 'import numpy as np\n'), ((6047, 6082), 'numpy.argwhere', 'np.argwhere', (['(attributes[:, i] == -1)'], {}), '(attributes[:, i] == -1)\n', (6058, 6082), True, 'import numpy as np\n'), ((2249, 2267), 'os.path.split', 'os.path.split', (['img'], {}), '(img)\n', (2262, 2267), False, 'import os\n')]
|
#!/usr/bin/env python3
"""
Model
"""
class Model:
table = 'model'
def __init__(self):
super().__init__()
def save(self):
pass
def json(self):
pass
def main(args):
pass
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('-a', '--arg1',
help="An argument.",
type=str,
default='default')
args = parser.parse_args()
main(args)
|
[
"argparse.ArgumentParser"
] |
[((303, 319), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (317, 319), False, 'from argparse import ArgumentParser\n')]
|
"""
Functions for manipulating or otherwise processing strings
"""
import base64
import difflib
import errno
import fnmatch
import logging
import os
import re
import shlex
import time
import unicodedata
from salt.utils.decorators.jinja import jinja_filter
log = logging.getLogger(__name__)
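# NOTE: __salt_system_encoding__ used below is not defined in this module; salt injects it
# as a builtin at startup with the detected system encoding, used as a fallback to utf-8.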
@jinja_filter("to_bytes")
def to_bytes(s, encoding=None, errors="strict"):
"""
Given bytes, bytearray, str, or unicode (python 2), return bytes (str for
python 2)
"""
if encoding is None:
# Try utf-8 first, and fall back to detected encoding
encoding = ("utf-8", __salt_system_encoding__)
if not isinstance(encoding, (tuple, list)):
encoding = (encoding,)
if not encoding:
raise ValueError("encoding cannot be empty")
exc = None
if isinstance(s, bytes):
return s
if isinstance(s, bytearray):
return bytes(s)
if isinstance(s, str):
for enc in encoding:
try:
return s.encode(enc, errors)
except UnicodeEncodeError as err:
exc = err
continue
# The only way we get this far is if a UnicodeEncodeError was
# raised, otherwise we would have already returned (or raised some
# other exception).
raise exc # pylint: disable=raising-bad-type
raise TypeError("expected str, bytes, or bytearray not {}".format(type(s)))
def to_str(s, encoding=None, errors="strict", normalize=False):
"""
Given str, bytes, bytearray, or unicode (py2), return str
"""
def _normalize(s):
try:
return unicodedata.normalize("NFC", s) if normalize else s
except TypeError:
return s
if encoding is None:
# Try utf-8 first, and fall back to detected encoding
encoding = ("utf-8", __salt_system_encoding__)
if not isinstance(encoding, (tuple, list)):
encoding = (encoding,)
if not encoding:
raise ValueError("encoding cannot be empty")
if isinstance(s, str):
return _normalize(s)
exc = None
if isinstance(s, (bytes, bytearray)):
for enc in encoding:
try:
return _normalize(s.decode(enc, errors))
except UnicodeDecodeError as err:
exc = err
continue
# The only way we get this far is if a UnicodeDecodeError was
# raised, otherwise we would have already returned (or raised some
# other exception).
raise exc # pylint: disable=raising-bad-type
raise TypeError("expected str, bytes, or bytearray not {}".format(type(s)))
def to_unicode(s, encoding=None, errors="strict", normalize=False):
"""
Given str or unicode, return unicode (str for python 3)
"""
def _normalize(s):
return unicodedata.normalize("NFC", s) if normalize else s
if encoding is None:
# Try utf-8 first, and fall back to detected encoding
encoding = ("utf-8", __salt_system_encoding__)
if not isinstance(encoding, (tuple, list)):
encoding = (encoding,)
if not encoding:
raise ValueError("encoding cannot be empty")
if isinstance(s, str):
return _normalize(s)
elif isinstance(s, (bytes, bytearray)):
return _normalize(to_str(s, encoding, errors))
raise TypeError("expected str, bytes, or bytearray not {}".format(type(s)))
@jinja_filter("str_to_num")
@jinja_filter("to_num")
def to_num(text):
"""
Convert a string to a number.
Returns an integer if the string represents an integer, a floating
point number if the string is a real number, or the string unchanged
otherwise.
"""
try:
return int(text)
except ValueError:
try:
return float(text)
except ValueError:
return text
def to_none(text):
"""
Convert a string to None if the string is empty or contains only spaces.
"""
if str(text).strip():
return text
return None
def is_quoted(value):
"""
Return a single or double quote, if a string is wrapped in extra quotes.
Otherwise return an empty string.
"""
ret = ""
if (
isinstance(value, str)
and value[0] == value[-1]
and value.startswith(("'", '"'))
):
ret = value[0]
return ret
def dequote(value):
"""
Remove extra quotes around a string.
"""
if is_quoted(value):
return value[1:-1]
return value
@jinja_filter("is_hex")
def is_hex(value):
"""
Returns True if value is a hexadecimal string, otherwise returns False
"""
try:
int(value, 16)
return True
except (TypeError, ValueError):
return False
def is_binary(data):
"""
Detects if the passed string of data is binary or text
"""
if not data or not isinstance(data, ((str,), bytes)):
return False
if isinstance(data, bytes):
if b"\0" in data:
return True
elif "\0" in data:
return True
text_characters = "".join([chr(x) for x in range(32, 127)] + list("\n\r\t\b"))
# Get the non-text characters (map each character to itself then use the
# 'remove' option to get rid of the text characters.)
if isinstance(data, bytes):
import salt.utils.data
nontext = data.translate(None, salt.utils.data.encode(text_characters))
else:
trans = "".maketrans("", "", text_characters)
nontext = data.translate(trans)
# If more than 30% non-text characters, then
# this is considered binary data
if float(len(nontext)) / len(data) > 0.30:
return True
return False
@jinja_filter("random_str")
def random(size=32):
key = os.urandom(size)
return to_unicode(base64.b64encode(key).replace(b"\n", b"")[:size])
@jinja_filter("contains_whitespace")
def contains_whitespace(text):
"""
Returns True if there are any whitespace characters in the string
"""
return any(x.isspace() for x in text)
def human_to_bytes(size):
"""
Given a human-readable byte string (e.g. 2G, 30M),
return the number of bytes. Will return 0 if the argument has
unexpected form.
.. versionadded:: 2018.3.0
"""
sbytes = size[:-1]
unit = size[-1]
if sbytes.isdigit():
sbytes = int(sbytes)
if unit == "P":
sbytes *= 1125899906842624
elif unit == "T":
sbytes *= 1099511627776
elif unit == "G":
sbytes *= 1073741824
elif unit == "M":
sbytes *= 1048576
else:
sbytes = 0
else:
sbytes = 0
return sbytes
def build_whitespace_split_regex(text):
'''
Create a regular expression at runtime which should match ignoring the
addition or deletion of white space or line breaks, unless between commas
Example:
.. code-block:: python
>>> import re
>>> import salt.utils.stringutils
>>> regex = salt.utils.stringutils.build_whitespace_split_regex(
... """if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then"""
... )
>>> regex
'(?:[\\s]+)?if(?:[\\s]+)?\\[(?:[\\s]+)?\\-z(?:[\\s]+)?\\"\\$debian'
'\\_chroot\\"(?:[\\s]+)?\\](?:[\\s]+)?\\&\\&(?:[\\s]+)?\\[(?:[\\s]+)?'
'\\-r(?:[\\s]+)?\\/etc\\/debian\\_chroot(?:[\\s]+)?\\]\\;(?:[\\s]+)?'
'then(?:[\\s]+)?'
>>> re.search(
... regex,
... """if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then"""
... )
<_sre.SRE_Match object at 0xb70639c0>
>>>
'''
def __build_parts(text):
lexer = shlex.shlex(text)
lexer.whitespace_split = True
lexer.commenters = ""
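        # Choose quote characters that do not themselves occur in the text, so that
        # quote characters appearing in the input are kept as literal tokens.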
if r"'\"" in text:
lexer.quotes = ""
elif "'" in text:
lexer.quotes = '"'
elif '"' in text:
lexer.quotes = "'"
return list(lexer)
regex = r""
for line in text.splitlines():
parts = [re.escape(s) for s in __build_parts(line)]
regex += r"(?:[\s]+)?{}(?:[\s]+)?".format(r"(?:[\s]+)?".join(parts))
return r"(?m)^{}$".format(regex)
def expr_match(line, expr):
"""
Checks whether or not the passed value matches the specified expression.
Tries to match expr first as a glob using fnmatch.fnmatch(), and then tries
to match expr as a regular expression. Originally designed to match minion
IDs for whitelists/blacklists.
Note that this also does exact matches, as fnmatch.fnmatch() will return
``True`` when no glob characters are used and the string is an exact match:
.. code-block:: python
>>> fnmatch.fnmatch('foo', 'foo')
True
"""
try:
if fnmatch.fnmatch(line, expr):
return True
try:
if re.match(r"\A{}\Z".format(expr), line):
return True
except re.error:
pass
except TypeError:
log.exception("Value %r or expression %r is not a string", line, expr)
return False
@jinja_filter("check_whitelist_blacklist")
def check_whitelist_blacklist(value, whitelist=None, blacklist=None):
"""
Check a whitelist and/or blacklist to see if the value matches it.
value
The item to check the whitelist and/or blacklist against.
whitelist
The list of items that are white-listed. If ``value`` is found
in the whitelist, then the function returns ``True``. Otherwise,
it returns ``False``.
blacklist
The list of items that are black-listed. If ``value`` is found
in the blacklist, then the function returns ``False``. Otherwise,
it returns ``True``.
If both a whitelist and a blacklist are provided, value membership
in the blacklist will be examined first. If the value is not found
in the blacklist, then the whitelist is checked. If the value isn't
found in the whitelist, the function returns ``False``.
"""
# Normalize the input so that we have a list
if blacklist:
if isinstance(blacklist, str):
blacklist = [blacklist]
if not hasattr(blacklist, "__iter__"):
raise TypeError(
"Expecting iterable blacklist, but got {} ({})".format(
type(blacklist).__name__, blacklist
)
)
else:
blacklist = []
if whitelist:
if isinstance(whitelist, str):
whitelist = [whitelist]
if not hasattr(whitelist, "__iter__"):
raise TypeError(
"Expecting iterable whitelist, but got {} ({})".format(
type(whitelist).__name__, whitelist
)
)
else:
whitelist = []
_blacklist_match = any(expr_match(value, expr) for expr in blacklist)
_whitelist_match = any(expr_match(value, expr) for expr in whitelist)
if blacklist and not whitelist:
# Blacklist but no whitelist
return not _blacklist_match
elif whitelist and not blacklist:
# Whitelist but no blacklist
return _whitelist_match
elif blacklist and whitelist:
# Both whitelist and blacklist
return not _blacklist_match and _whitelist_match
else:
# No blacklist or whitelist passed
return True
def check_include_exclude(path_str, include_pat=None, exclude_pat=None):
"""
Check for glob or regexp patterns for include_pat and exclude_pat in the
'path_str' string and return True/False conditions as follows.
- Default: return 'True' if no include_pat or exclude_pat patterns are
supplied
- If only include_pat or exclude_pat is supplied: return 'True' if string
passes the include_pat test or fails exclude_pat test respectively
- If both include_pat and exclude_pat are supplied: return 'True' if
include_pat matches AND exclude_pat does not match
"""
def _pat_check(path_str, check_pat):
if re.match("E@", check_pat):
return True if re.search(check_pat[2:], path_str) else False
else:
return True if fnmatch.fnmatch(path_str, check_pat) else False
ret = True # -- default true
# Before pattern match, check if it is regexp (E@'') or glob(default)
if include_pat:
if isinstance(include_pat, list):
for include_line in include_pat:
retchk_include = _pat_check(path_str, include_line)
if retchk_include:
break
else:
retchk_include = _pat_check(path_str, include_pat)
if exclude_pat:
if isinstance(exclude_pat, list):
for exclude_line in exclude_pat:
retchk_exclude = not _pat_check(path_str, exclude_line)
if not retchk_exclude:
break
else:
retchk_exclude = not _pat_check(path_str, exclude_pat)
# Now apply include/exclude conditions
if include_pat and not exclude_pat:
ret = retchk_include
elif exclude_pat and not include_pat:
ret = retchk_exclude
elif include_pat and exclude_pat:
ret = retchk_include and retchk_exclude
else:
ret = True
return ret
def print_cli(msg, retries=10, step=0.01):
"""
Wrapper around print() that suppresses tracebacks on broken pipes (i.e.
when salt output is piped to less and less is stopped prematurely).
"""
while retries:
try:
try:
print(msg)
except UnicodeEncodeError:
print(msg.encode("utf-8"))
except OSError as exc:
err = "{}".format(exc)
if exc.errno != errno.EPIPE:
if (
"temporarily unavailable" in err or exc.errno in (errno.EAGAIN,)
) and retries:
time.sleep(step)
retries -= 1
continue
else:
raise
break
def get_context(template, line, num_lines=5, marker=None):
"""
Returns debugging context around a line in a given string
Returns:: string
"""
template_lines = template.splitlines()
num_template_lines = len(template_lines)
    # In test mode, a single-line template would return a crazy line number like
# 357. Do this sanity check and if the given line is obviously wrong, just
# return the entire template
if line > num_template_lines:
return template
context_start = max(0, line - num_lines - 1) # subt 1 for 0-based indexing
context_end = min(num_template_lines, line + num_lines)
error_line_in_context = line - context_start - 1 # subtr 1 for 0-based idx
buf = []
if context_start > 0:
buf.append("[...]")
error_line_in_context += 1
buf.extend(template_lines[context_start:context_end])
if context_end < num_template_lines:
buf.append("[...]")
if marker:
buf[error_line_in_context] += marker
return "---\n{}\n---".format("\n".join(buf))
def get_diff(a, b, *args, **kwargs):
"""
Perform diff on two iterables containing lines from two files, and return
    the diff as a string. Lines are normalized to str types to avoid issues
with unicode on PY2.
"""
encoding = ("utf-8", "latin-1", __salt_system_encoding__)
# Late import to avoid circular import
import salt.utils.data
return "".join(
difflib.unified_diff(
salt.utils.data.decode_list(a, encoding=encoding),
salt.utils.data.decode_list(b, encoding=encoding),
*args,
**kwargs
)
)
@jinja_filter("to_snake_case")
def camel_to_snake_case(camel_input):
"""
Converts camelCase (or CamelCase) to snake_case.
From https://codereview.stackexchange.com/questions/185966/functions-to-convert-camelcase-strings-to-snake-case
:param str camel_input: The camelcase or CamelCase string to convert to snake_case
:return str
"""
res = camel_input[0].lower()
for i, letter in enumerate(camel_input[1:], 1):
if letter.isupper():
if camel_input[i - 1].islower() or (
i != len(camel_input) - 1 and camel_input[i + 1].islower()
):
res += "_"
res += letter.lower()
return res
@jinja_filter("to_camelcase")
def snake_to_camel_case(snake_input, uppercamel=False):
"""
Converts snake_case to camelCase (or CamelCase if uppercamel is ``True``).
Inspired by https://codereview.stackexchange.com/questions/85311/transform-snake-case-to-camelcase
:param str snake_input: The input snake_case string to convert to camelCase
:param bool uppercamel: Whether or not to convert to CamelCase instead
:return str
"""
words = snake_input.split("_")
if uppercamel:
words[0] = words[0].capitalize()
return words[0] + "".join(word.capitalize() for word in words[1:])
|
[
"unicodedata.normalize",
"re.match",
"re.escape",
"time.sleep",
"base64.b64encode",
"salt.utils.decorators.jinja.jinja_filter",
"shlex.shlex",
"os.urandom",
"re.search",
"fnmatch.fnmatch",
"logging.getLogger"
] |
[((266, 293), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (283, 293), False, 'import logging\n'), ((297, 321), 'salt.utils.decorators.jinja.jinja_filter', 'jinja_filter', (['"""to_bytes"""'], {}), "('to_bytes')\n", (309, 321), False, 'from salt.utils.decorators.jinja import jinja_filter\n'), ((3407, 3433), 'salt.utils.decorators.jinja.jinja_filter', 'jinja_filter', (['"""str_to_num"""'], {}), "('str_to_num')\n", (3419, 3433), False, 'from salt.utils.decorators.jinja import jinja_filter\n'), ((3435, 3457), 'salt.utils.decorators.jinja.jinja_filter', 'jinja_filter', (['"""to_num"""'], {}), "('to_num')\n", (3447, 3457), False, 'from salt.utils.decorators.jinja import jinja_filter\n'), ((4492, 4514), 'salt.utils.decorators.jinja.jinja_filter', 'jinja_filter', (['"""is_hex"""'], {}), "('is_hex')\n", (4504, 4514), False, 'from salt.utils.decorators.jinja import jinja_filter\n'), ((5678, 5704), 'salt.utils.decorators.jinja.jinja_filter', 'jinja_filter', (['"""random_str"""'], {}), "('random_str')\n", (5690, 5704), False, 'from salt.utils.decorators.jinja import jinja_filter\n'), ((5828, 5863), 'salt.utils.decorators.jinja.jinja_filter', 'jinja_filter', (['"""contains_whitespace"""'], {}), "('contains_whitespace')\n", (5840, 5863), False, 'from salt.utils.decorators.jinja import jinja_filter\n'), ((9081, 9122), 'salt.utils.decorators.jinja.jinja_filter', 'jinja_filter', (['"""check_whitelist_blacklist"""'], {}), "('check_whitelist_blacklist')\n", (9093, 9122), False, 'from salt.utils.decorators.jinja import jinja_filter\n'), ((15717, 15746), 'salt.utils.decorators.jinja.jinja_filter', 'jinja_filter', (['"""to_snake_case"""'], {}), "('to_snake_case')\n", (15729, 15746), False, 'from salt.utils.decorators.jinja import jinja_filter\n'), ((16403, 16431), 'salt.utils.decorators.jinja.jinja_filter', 'jinja_filter', (['"""to_camelcase"""'], {}), "('to_camelcase')\n", (16415, 16431), False, 'from salt.utils.decorators.jinja import jinja_filter\n'), ((5736, 5752), 'os.urandom', 'os.urandom', (['size'], {}), '(size)\n', (5746, 5752), False, 'import os\n'), ((7680, 7697), 'shlex.shlex', 'shlex.shlex', (['text'], {}), '(text)\n', (7691, 7697), False, 'import shlex\n'), ((8769, 8796), 'fnmatch.fnmatch', 'fnmatch.fnmatch', (['line', 'expr'], {}), '(line, expr)\n', (8784, 8796), False, 'import fnmatch\n'), ((12024, 12049), 're.match', 're.match', (['"""E@"""', 'check_pat'], {}), "('E@', check_pat)\n", (12032, 12049), False, 'import re\n'), ((2819, 2850), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFC"""', 's'], {}), "('NFC', s)\n", (2840, 2850), False, 'import unicodedata\n'), ((8033, 8045), 're.escape', 're.escape', (['s'], {}), '(s)\n', (8042, 8045), False, 'import re\n'), ((1616, 1647), 'unicodedata.normalize', 'unicodedata.normalize', (['"""NFC"""', 's'], {}), "('NFC', s)\n", (1637, 1647), False, 'import unicodedata\n'), ((12078, 12112), 're.search', 're.search', (['check_pat[2:]', 'path_str'], {}), '(check_pat[2:], path_str)\n', (12087, 12112), False, 'import re\n'), ((12165, 12201), 'fnmatch.fnmatch', 'fnmatch.fnmatch', (['path_str', 'check_pat'], {}), '(path_str, check_pat)\n', (12180, 12201), False, 'import fnmatch\n'), ((5775, 5796), 'base64.b64encode', 'base64.b64encode', (['key'], {}), '(key)\n', (5791, 5796), False, 'import base64\n'), ((13907, 13923), 'time.sleep', 'time.sleep', (['step'], {}), '(step)\n', (13917, 13923), False, 'import time\n')]
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tacker.common import log
LOG = logging.getLogger(__name__)
class HOTUpdater(object):
"""Update HOT template."""
def __init__(self, heatclient):
self.heatclient = heatclient
self.template = {}
self.nested_templates = dict()
@log.log
def get_templates_from_stack(self, stack_id):
"""Get template information from the stack.
        Get the template from the stack specified by stack_id;
        if the stack has a scalable resource, also get its child
        template.
"""
def _get_resource(name, resources):
for resource in resources:
if resource.resource_name == name:
return resource
self.template = self.heatclient.stacks.template(stack_id)
LOG.debug('got main template for stack({}). template={}'.format(
stack_id, self.template))
stack_resources = self.heatclient.resource_get_list(stack_id,
nested_depth=2)
for resource in stack_resources:
if resource.resource_type == 'OS::Heat::AutoScalingGroup':
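                # A scaling group has an intermediate template whose resources are the
                # scaled members; fetch each member's nested template and index it by
                # resource type for later property updates.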
intermediate_template = self.heatclient.stacks.template(
resource.physical_resource_id)
for resource_id in intermediate_template['resources'].keys():
corresponding_resource = _get_resource(resource_id,
stack_resources)
nested_template = self.heatclient.stacks.template(
corresponding_resource.physical_resource_id)
LOG.debug('got nested template for stack({}). template={}'
.format(corresponding_resource.physical_resource_id,
nested_template))
if nested_template:
self.nested_templates[
corresponding_resource.resource_type] = nested_template
@log.log
def update_resource_property(self,
resource_id,
resource_types=[],
**kwargs):
"""Update attributes of resource properties.
Get the resource information from template's resources section,
and update properties using kwargs information.
        If the resource type is not included in resource_types, nothing is done.
"""
def _update(template, resource_id, resource_types, kwargs):
resource = template.get('resources', {}).get(resource_id)
if not resource:
return
if resource.get('type', {}) not in resource_types:
return
resource_properties = resource.get('properties', {})
if not resource_properties:
return
for key, value in kwargs.items():
if value is not None:
resource_properties.update({key: value})
elif resource_properties.get(key):
del resource_properties[key]
_update(self.template, resource_id, resource_types, kwargs)
for value in self.nested_templates.values():
nested_template = value
_update(nested_template, resource_id, resource_types, kwargs)
|
[
"oslo_log.log.getLogger"
] |
[((620, 647), 'oslo_log.log.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (637, 647), True, 'from oslo_log import log as logging\n')]
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
""" Set of interfaces that allow interaction with data. Currently
available interfaces are:
DataSource: Generic nifti to named Nifti interface
DataSink: Generic named output from interfaces to data store
XNATSource: preliminary interface to XNAT
To come :
XNATSink
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data'))
>>> os.chdir(datadir)
"""
from copy import deepcopy
import glob
import os
import shutil
from warnings import warn
from enthought.traits.trait_errors import TraitError
try:
from xnatlib import Interface as XNATInterface
except:
pass
from nipype.interfaces.base import (Interface, CommandLine, Bunch,
InterfaceResult, Interface,
TraitedSpec, traits, File, Directory,
BaseInterface, InputMultiPath,
OutputMultiPath, DynamicTraitedSpec,
BaseTraitedSpec, Undefined)
from nipype.utils.misc import isdefined
from nipype.utils.filemanip import (copyfile, list_to_filename,
filename_to_list, FileNotFoundError)
import logging
iflogger = logging.getLogger('interface')
def add_traits(base, names, trait_type=None):
""" Add traits to a traited class.
All traits are set to Undefined by default
"""
if trait_type is None:
trait_type = traits.Any
undefined_traits = {}
for key in names:
base.add_trait(key, trait_type)
undefined_traits[key] = Undefined
base.trait_set(trait_change_notify=False, **undefined_traits)
# access each trait
for key in names:
value = getattr(base, key)
return base
class IOBase(BaseInterface):
def _run_interface(self, runtime):
runtime.returncode = 0
return runtime
def _list_outputs(self):
raise NotImplementedError
def _outputs(self):
return self._add_output_traits(super(IOBase, self)._outputs())
def _add_output_traits(self, base):
return base
class DataSinkInputSpec(DynamicTraitedSpec):
base_directory = Directory(
desc='Path to the base directory for storing data.')
container = traits.Str(desc = 'Folder within base directory in which to store output')
parameterization = traits.Bool(True, usedefault=True,
desc='store output in parameterized structure')
strip_dir = Directory(desc='path to strip out of filename')
substitutions = InputMultiPath(traits.Tuple(traits.Str,traits.Str),
                                   desc=('List of 2-tuples reflecting string '
                                         'to substitute and string to replace '
'it with'))
_outputs = traits.Dict(traits.Str, value={}, usedefault=True)
def __setattr__(self, key, value):
if key not in self.copyable_trait_names():
self._outputs[key] = value
else:
super(DataSinkInputSpec, self).__setattr__(key, value)
class DataSink(IOBase):
""" Generic datasink module to store structured outputs
    Primarily for use within a workflow. This interface allows arbitrary
creation of input attributes. The names of these attributes define the
directory structure to create for storage of the files or directories.
The attributes take the following form:
string[[@|.]string[[@|.]string]] ...
An attribute such as contrasts@con will create a contrasts directory to
store the results linked to the attribute. If the @ is replaced with a
'.', such as 'contrasts.con' a subdirectory 'con' will be created under
contrasts.
Examples
--------
>>> ds = DataSink()
>>> ds.inputs.base_directory = 'results_dir'
>>> ds.inputs.container = 'subject'
>>> ds.inputs.structural = 'structural.nii'
>>> setattr(ds.inputs, 'contrasts@con', ['cont1.nii', 'cont2.nii'])
>>> setattr(ds.inputs, 'contrasts.alt', ['cont1a.nii', 'cont2a.nii'])
>>> ds.run() # doctest: +SKIP
"""
input_spec = DataSinkInputSpec
def _get_dst(self, src):
path, fname = os.path.split(src)
if self.inputs.parameterization:
dst = path
if isdefined(self.inputs.strip_dir):
dst = dst.replace(self.inputs.strip_dir,'')
folders = [folder for folder in dst.split(os.path.sep) if folder.startswith('_')]
dst = os.path.sep.join(folders)
if fname:
dst = os.path.join(dst,fname)
else:
if fname:
dst = fname
else:
dst = path.split(os.path.sep)[-1]
if dst[0] == os.path.sep:
dst = dst[1:]
return dst
def _substitute(self, pathstr):
if isdefined(self.inputs.substitutions):
for key, val in self.inputs.substitutions:
iflogger.debug(str((pathstr, key, val)))
pathstr = pathstr.replace(key, val)
iflogger.debug('new: ' + pathstr)
return pathstr
def _list_outputs(self):
"""Execute this module.
"""
outdir = self.inputs.base_directory
if not isdefined(outdir):
outdir = '.'
outdir = os.path.abspath(outdir)
if isdefined(self.inputs.container):
outdir = os.path.join(outdir, self.inputs.container)
if not os.path.exists(outdir):
os.makedirs(outdir)
for key,files in self.inputs._outputs.items():
iflogger.debug("key: %s files: %s"%(key, str(files)))
files = filename_to_list(files)
outfiles = []
tempoutdir = outdir
for d in key.split('.'):
if d[0] == '@':
continue
tempoutdir = os.path.join(tempoutdir,d)
for src in filename_to_list(files):
src = os.path.abspath(src)
if os.path.isfile(src):
dst = self._get_dst(src)
dst = os.path.join(tempoutdir, dst)
dst = self._substitute(dst)
path,_ = os.path.split(dst)
if not os.path.exists(path):
os.makedirs(path)
iflogger.debug("copyfile: %s %s"%(src, dst))
copyfile(src, dst, copy=True)
elif os.path.isdir(src):
dst = self._get_dst(os.path.join(src,''))
dst = os.path.join(tempoutdir, dst)
dst = self._substitute(dst)
path,_ = os.path.split(dst)
if not os.path.exists(path):
os.makedirs(path)
if os.path.exists(dst):
iflogger.debug("removing: %s"%dst)
shutil.rmtree(dst)
iflogger.debug("copydir: %s %s"%(src, dst))
shutil.copytree(src, dst)
return None
class DataGrabberInputSpec(DynamicTraitedSpec): #InterfaceInputSpec):
base_directory = Directory(exists=True,
desc='Path to the base directory consisting of subject data.')
template = traits.Str(mandatory=True,
desc='Layout used to get files. relative to base directory if defined')
template_args = traits.Dict(traits.Str,
traits.List(traits.List),
value=dict(outfiles=[]), usedefault=True,
desc='Information to plug into template')
class DataGrabber(IOBase):
""" Generic datagrabber module that wraps around glob in an
intelligent way for neuroimaging tasks to grab files
.. note::
Doesn't support directories currently
Examples
--------
>>> from nipype.interfaces.io import DataGrabber
Pick all files from current directory
>>> dg = DataGrabber()
>>> dg.inputs.template = '*'
    Pick the file dicomdir/123456-1-1.dcm from the current directory
>>> dg.inputs.template = '%s/%s.dcm'
>>> dg.inputs.template_args['outfiles']=[['dicomdir','123456-1-1.dcm']]
Same thing but with dynamically created fields
>>> dg = DataGrabber(infields=['arg1','arg2'])
>>> dg.inputs.template = '%s/%s.nii'
>>> dg.inputs.arg1 = 'foo'
>>> dg.inputs.arg2 = 'foo'
however this latter form can be used with iterables and iterfield in a
pipeline.
Dynamically created, user-defined input and output fields
>>> dg = DataGrabber(infields=['sid'], outfields=['func','struct','ref'])
>>> dg.inputs.base_directory = '.'
>>> dg.inputs.template = '%s/%s.nii'
>>> dg.inputs.template_args['func'] = [['sid',['f3','f5']]]
>>> dg.inputs.template_args['struct'] = [['sid',['struct']]]
>>> dg.inputs.template_args['ref'] = [['sid','ref']]
>>> dg.inputs.sid = 's1'
Change the template only for output field struct. The rest use the
general template
>>> dg.inputs.field_template = dict(struct='%s/struct.nii')
>>> dg.inputs.template_args['struct'] = [['sid']]
"""
input_spec = DataGrabberInputSpec
output_spec = DynamicTraitedSpec
def __init__(self, infields=None, outfields=None, **kwargs):
"""
Parameters
----------
infields : list of str
Indicates the input fields to be dynamically created
outfields: list of str
Indicates output fields to be dynamically created
See class examples for usage
"""
super(DataGrabber, self).__init__(**kwargs)
undefined_traits = {}
# used for mandatory inputs check
self._infields = infields
if infields:
for key in infields:
self.inputs.add_trait(key, traits.Any)
undefined_traits[key] = Undefined
self.inputs.template_args['outfiles'] = [infields]
if outfields:
# add ability to insert field specific templates
self.inputs.add_trait('field_template',
traits.Dict(traits.Enum(outfields),
desc="arguments that fit into template"))
undefined_traits['field_template'] = Undefined
#self.inputs.remove_trait('template_args')
outdict = {}
for key in outfields:
outdict[key] = []
self.inputs.template_args = outdict
self.inputs.trait_set(trait_change_notify=False, **undefined_traits)
def _add_output_traits(self, base):
"""
        Using traits.Any instead of OutputMultiPath until the add_trait bug
is fixed.
"""
return add_traits(base, self.inputs.template_args.keys())
def _list_outputs(self):
# infields are mandatory, however I could not figure out how to set 'mandatory' flag dynamically
# hence manual check
if self._infields:
for key in self._infields:
value = getattr(self.inputs,key)
if not isdefined(value):
msg = "%s requires a value for input '%s' because it was listed in 'infields'" % \
(self.__class__.__name__, key)
raise ValueError(msg)
outputs = {}
for key, args in self.inputs.template_args.items():
outputs[key] = []
template = self.inputs.template
if hasattr(self.inputs, 'field_template') and \
isdefined(self.inputs.field_template) and \
self.inputs.field_template.has_key(key):
template = self.inputs.field_template[key]
if isdefined(self.inputs.base_directory):
template = os.path.join(os.path.abspath(self.inputs.base_directory), template)
else:
template = os.path.abspath(template)
if not args:
filelist = glob.glob(template)
if len(filelist) == 0:
warn('Output key: %s Template: %s returned no files'%(key, template))
else:
outputs[key] = list_to_filename(filelist)
for argnum, arglist in enumerate(args):
maxlen = 1
for arg in arglist:
if isinstance(arg, str) and hasattr(self.inputs, arg):
arg = getattr(self.inputs, arg)
if isinstance(arg, list):
if (maxlen > 1) and (len(arg) != maxlen):
raise ValueError('incompatible number of arguments for %s' % key)
if len(arg)>maxlen:
maxlen = len(arg)
outfiles = []
for i in range(maxlen):
argtuple = []
for arg in arglist:
if isinstance(arg, str) and hasattr(self.inputs, arg):
arg = getattr(self.inputs, arg)
if isinstance(arg, list):
argtuple.append(arg[i])
else:
argtuple.append(arg)
filledtemplate = template
if argtuple:
filledtemplate = template%tuple(argtuple)
outfiles = glob.glob(filledtemplate)
if len(outfiles) == 0:
warn('Output key: %s Template: %s returned no files'%(key, filledtemplate))
outputs[key].insert(i, None)
else:
outputs[key].insert(i,list_to_filename(outfiles))
if any([val==None for val in outputs[key]]):
outputs[key] = []
if len(outputs[key]) == 0:
outputs[key] = None
elif len(outputs[key]) == 1:
outputs[key] = outputs[key][0]
return outputs
class FSSourceInputSpec(TraitedSpec):
subjects_dir = Directory(mandatory=True,
desc='Freesurfer subjects directory.')
subject_id = traits.Str(mandatory=True,
desc='Subject name for whom to retrieve data')
hemi = traits.Enum('both', 'lh', 'rh', usedefault=True,
desc='Selects hemisphere specific outputs')
class FSSourceOutputSpec(TraitedSpec):
T1 = File(exists=True, desc='T1 image', loc='mri')
aseg = File(exists=True, desc='Auto-seg image', loc='mri')
brain = File(exists=True, desc='brain only image', loc='mri')
brainmask = File(exists=True, desc='brain binary mask', loc='mri')
filled = File(exists=True, desc='?', loc='mri')
norm = File(exists=True, desc='intensity normalized image', loc='mri')
nu = File(exists=True, desc='?', loc='mri')
orig = File(exists=True, desc='original image conformed to FS space',
loc='mri')
rawavg = File(exists=True, desc='averaged input images to recon-all',
loc='mri')
ribbon = OutputMultiPath(File(exists=True), desc='cortical ribbon', loc='mri',
altkey='*ribbon')
wm = File(exists=True, desc='white matter image', loc='mri')
wmparc = File(exists=True, desc='white matter parcellation', loc='mri')
curv = OutputMultiPath(File(exists=True), desc='surface curvature files',
loc='surf')
inflated = OutputMultiPath(File(exists=True), desc='inflated surface meshes',
loc='surf')
pial = OutputMultiPath(File(exists=True), desc='pial surface meshes', loc='surf')
smoothwm = OutputMultiPath(File(exists=True), loc='surf',
desc='smooth white-matter surface meshes')
sphere = OutputMultiPath(File(exists=True), desc='spherical surface meshes',
loc='surf')
sulc = OutputMultiPath(File(exists=True), desc='surface sulci files', loc='surf')
thickness = OutputMultiPath(File(exists=True), loc='surf',
desc='surface thickness files')
volume = OutputMultiPath(File(exists=True), desc='surface volume files', loc='surf')
white = OutputMultiPath(File(exists=True), desc='white matter surface meshes',
loc='surf')
label = OutputMultiPath(File(exists=True), desc='volume and surface label files',
loc='label', altkey='*label')
annot = OutputMultiPath(File(exists=True), desc='surface annotation files',
loc='label', altkey='*annot')
aparc_aseg = OutputMultiPath(File(exists=True), loc='mri', altkey='aparc*aseg',
desc='aparc+aseg file')
sphere_reg = OutputMultiPath(File(exists=True), loc='surf', altkey='sphere.reg',
desc='spherical registration file')
class FreeSurferSource(IOBase):
"""Generates freesurfer subject info from their directories
Examples
--------
>>> from nipype.interfaces.io import FreeSurferSource
>>> fs = FreeSurferSource()
>>> #fs.inputs.subjects_dir = '.'
>>> fs.inputs.subject_id = 'PWS04'
>>> res = fs.run() # doctest: +SKIP
>>> fs.inputs.hemi = 'lh'
>>> res = fs.run() # doctest: +SKIP
"""
input_spec = FSSourceInputSpec
output_spec = FSSourceOutputSpec
def _get_files(self, path, key, dirval, altkey=None):
globsuffix = ''
if dirval == 'mri':
globsuffix = '.mgz'
globprefix = ''
if key == 'ribbon' or dirval in ['surf', 'label']:
if self.inputs.hemi != 'both':
globprefix = self.inputs.hemi+'.'
else:
globprefix = '*'
keydir = os.path.join(path,dirval)
if altkey:
key = altkey
globpattern = os.path.join(keydir,''.join((globprefix,key,globsuffix)))
return glob.glob(globpattern)
def _list_outputs(self):
subjects_dir = self.inputs.subjects_dir
subject_path = os.path.join(subjects_dir, self.inputs.subject_id)
output_traits = self._outputs()
outputs = output_traits.get()
for k in outputs.keys():
val = self._get_files(subject_path, k,
output_traits.traits()[k].loc,
output_traits.traits()[k].altkey)
if val:
outputs[k] = list_to_filename(val)
return outputs
class XNATSourceInputSpec(DynamicTraitedSpec): #InterfaceInputSpec):
config_file = File(exists=True, mandatory=True,
desc='a json config file containing xnat access info: url, username and password')
query_template = traits.Str(mandatory=True,
desc='Layout used to get files. relative to base directory if defined')
query_template_args = traits.Dict(traits.Str,
traits.List(traits.List),
value=dict(outfiles=[]), usedefault=True,
desc='Information to plug into template')
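# A hedged example of the JSON config file expected by XNATSourceInputSpec
# above (the values are placeholders, not real credentials):
#
#     {"url": "https://central.xnat.org",
#      "username": "someuser",
#      "password": "somepass"}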
class XNATSource(IOBase):
""" Generic XNATSource module that wraps around glob in an
intelligent way for neuroimaging tasks to grab files
Examples
--------
>>> from nipype.interfaces.io import XNATSource
Pick all files from current directory
>>> dg = XNATSource()
    >>> dg.inputs.query_template = '*'
>>> dg = XNATSource(infields=['project','subject','experiment','assessor','inout'])
>>> dg.inputs.query_template = '/projects/%s/subjects/%s/experiments/%s' \
'/assessors/%s/%s_resources/files'
>>> dg.inputs.project = 'IMAGEN'
>>> dg.inputs.subject = 'IMAGEN_000000001274'
>>> dg.inputs.experiment = '*SessionA*'
>>> dg.inputs.assessor = '*ADNI_MPRAGE_nii'
>>> dg.inputs.inout = 'out'
>>> dg = XNATSource(infields=['sid'],outfields=['struct','func'])
>>> dg.inputs.query_template = '/projects/IMAGEN/subjects/%s/experiments/*SessionA*' \
'/assessors/*%s_nii/out_resources/files'
>>> dg.inputs.query_template_args['struct'] = [['sid','ADNI_MPRAGE']]
>>> dg.inputs.query_template_args['func'] = [['sid','EPI_faces']]
>>> dg.inputs.sid = 'IMAGEN_000000001274'
"""
input_spec = XNATSourceInputSpec
output_spec = DynamicTraitedSpec
def __init__(self, infields=None, outfields=None, **kwargs):
"""
Parameters
----------
infields : list of str
Indicates the input fields to be dynamically created
outfields: list of str
Indicates output fields to be dynamically created
See class examples for usage
"""
super(XNATSource, self).__init__(**kwargs)
undefined_traits = {}
# used for mandatory inputs check
self._infields = infields
if infields:
for key in infields:
self.inputs.add_trait(key, traits.Any)
undefined_traits[key] = Undefined
self.inputs.query_template_args['outfiles'] = [infields]
if outfields:
# add ability to insert field specific templates
self.inputs.add_trait('field_template',
traits.Dict(traits.Enum(outfields),
desc="arguments that fit into query_template"))
undefined_traits['field_template'] = Undefined
#self.inputs.remove_trait('query_template_args')
outdict = {}
for key in outfields:
outdict[key] = []
self.inputs.query_template_args = outdict
self.inputs.trait_set(trait_change_notify=False, **undefined_traits)
def _add_output_traits(self, base):
"""
        Using traits.Any instead of OutputMultiPath until the add_trait bug
is fixed.
"""
return add_traits(base, self.inputs.query_template_args.keys())
def _list_outputs(self):
# infields are mandatory, however I could not figure out how to set 'mandatory' flag dynamically
# hence manual check
config_info = load_json(self.inputs.config_file)
cwd = os.getcwd()
xnat = XNATInterface(config_info['url'], config_info['username'], config_info['password'], cachedir=cwd)
if self._infields:
for key in self._infields:
value = getattr(self.inputs,key)
if not isdefined(value):
msg = "%s requires a value for input '%s' because it was listed in 'infields'" % \
(self.__class__.__name__, key)
raise ValueError(msg)
outputs = {}
for key, args in self.inputs.query_template_args.items():
outputs[key] = []
template = self.inputs.query_template
if hasattr(self.inputs, 'field_template') and \
isdefined(self.inputs.field_template) and \
self.inputs.field_template.has_key(key):
template = self.inputs.field_template[key]
if not args:
file_objects = xnat.select(template).request_objects()
if file_objects == []:
raise IOError('Template %s returned no files'%template)
outputs[key] = list_to_filename([str(file_object.get()) for file_object in file_objects])
for argnum, arglist in enumerate(args):
maxlen = 1
for arg in arglist:
if isinstance(arg, str) and hasattr(self.inputs, arg):
arg = getattr(self.inputs, arg)
if isinstance(arg, list):
if (maxlen > 1) and (len(arg) != maxlen):
raise ValueError('incompatible number of arguments for %s' % key)
if len(arg)>maxlen:
maxlen = len(arg)
outfiles = []
for i in range(maxlen):
argtuple = []
for arg in arglist:
if isinstance(arg, str) and hasattr(self.inputs, arg):
arg = getattr(self.inputs, arg)
if isinstance(arg, list):
argtuple.append(arg[i])
else:
argtuple.append(arg)
if argtuple:
file_objects = xnat.select(template%tuple(argtuple)).request_objects()
if file_objects == []:
raise IOError('Template %s returned no files'%(template%tuple(argtuple)))
outfiles = list_to_filename([str(file_object.get()) for file_object in file_objects])
else:
file_objects = xnat.select(template).request_objects()
if file_objects == []:
raise IOError('Template %s returned no files'%template)
outfiles = list_to_filename([str(file_object.get()) for file_object in file_objects])
outputs[key].insert(i,outfiles)
if len(outputs[key]) == 0:
outputs[key] = None
elif len(outputs[key]) == 1:
outputs[key] = outputs[key][0]
return outputs
|
[
"nipype.utils.filemanip.list_to_filename",
"os.path.isfile",
"xnatlib.Interface",
"glob.glob",
"shutil.rmtree",
"os.path.join",
"os.path.sep.join",
"os.path.abspath",
"nipype.utils.filemanip.filename_to_list",
"os.path.exists",
"nipype.interfaces.base.Directory",
"nipype.interfaces.base.traits.Bool",
"nipype.interfaces.base.traits.List",
"nipype.interfaces.base.traits.Tuple",
"nipype.interfaces.base.traits.Enum",
"nipype.interfaces.base.traits.Dict",
"nipype.interfaces.base.traits.Str",
"nipype.utils.misc.isdefined",
"os.makedirs",
"shutil.copytree",
"os.getcwd",
"os.path.isdir",
"nipype.utils.filemanip.copyfile",
"nipype.interfaces.base.File",
"warnings.warn",
"os.path.split",
"logging.getLogger"
] |
[((1493, 1523), 'logging.getLogger', 'logging.getLogger', (['"""interface"""'], {}), "('interface')\n", (1510, 1523), False, 'import logging\n'), ((2438, 2500), 'nipype.interfaces.base.Directory', 'Directory', ([], {'desc': '"""Path to the base directory for storing data."""'}), "(desc='Path to the base directory for storing data.')\n", (2447, 2500), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((2527, 2599), 'nipype.interfaces.base.traits.Str', 'traits.Str', ([], {'desc': '"""Folder within base directory in which to store output"""'}), "(desc='Folder within base directory in which to store output')\n", (2537, 2599), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((2625, 2712), 'nipype.interfaces.base.traits.Bool', 'traits.Bool', (['(True)'], {'usedefault': '(True)', 'desc': '"""store output in parameterized structure"""'}), "(True, usedefault=True, desc=\n 'store output in parameterized structure')\n", (2636, 2712), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((2759, 2806), 'nipype.interfaces.base.Directory', 'Directory', ([], {'desc': '"""path to strip out of filename"""'}), "(desc='path to strip out of filename')\n", (2768, 2806), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((3104, 3154), 'nipype.interfaces.base.traits.Dict', 'traits.Dict', (['traits.Str'], {'value': '{}', 'usedefault': '(True)'}), '(traits.Str, value={}, usedefault=True)\n', (3115, 3154), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((7521, 7611), 'nipype.interfaces.base.Directory', 'Directory', ([], {'exists': '(True)', 'desc': '"""Path to the base directory consisting of subject data."""'}), "(exists=True, desc=\n 'Path to the base directory consisting of subject data.')\n", (7530, 7611), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((7634, 7737), 'nipype.interfaces.base.traits.Str', 'traits.Str', ([], {'mandatory': '(True)', 'desc': '"""Layout used to get files. relative to base directory if defined"""'}), "(mandatory=True, desc=\n 'Layout used to get files. 
relative to base directory if defined')\n", (7644, 7737), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((14621, 14685), 'nipype.interfaces.base.Directory', 'Directory', ([], {'mandatory': '(True)', 'desc': '"""Freesurfer subjects directory."""'}), "(mandatory=True, desc='Freesurfer subjects directory.')\n", (14630, 14685), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((14732, 14805), 'nipype.interfaces.base.traits.Str', 'traits.Str', ([], {'mandatory': '(True)', 'desc': '"""Subject name for whom to retrieve data"""'}), "(mandatory=True, desc='Subject name for whom to retrieve data')\n", (14742, 14805), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((14845, 14942), 'nipype.interfaces.base.traits.Enum', 'traits.Enum', (['"""both"""', '"""lh"""', '"""rh"""'], {'usedefault': '(True)', 'desc': '"""Selects hemisphere specific outputs"""'}), "('both', 'lh', 'rh', usedefault=True, desc=\n 'Selects hemisphere specific outputs')\n", (14856, 14942), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((15010, 15055), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)', 'desc': '"""T1 image"""', 'loc': '"""mri"""'}), "(exists=True, desc='T1 image', loc='mri')\n", (15014, 15055), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((15067, 15118), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)', 'desc': '"""Auto-seg image"""', 'loc': '"""mri"""'}), "(exists=True, desc='Auto-seg image', loc='mri')\n", (15071, 15118), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((15131, 15184), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)', 'desc': '"""brain only image"""', 'loc': '"""mri"""'}), "(exists=True, desc='brain only image', loc='mri')\n", (15135, 15184), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((15201, 15255), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)', 'desc': '"""brain binary mask"""', 'loc': '"""mri"""'}), "(exists=True, desc='brain binary mask', loc='mri')\n", (15205, 15255), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), 
((15269, 15307), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)', 'desc': '"""?"""', 'loc': '"""mri"""'}), "(exists=True, desc='?', loc='mri')\n", (15273, 15307), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((15319, 15382), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)', 'desc': '"""intensity normalized image"""', 'loc': '"""mri"""'}), "(exists=True, desc='intensity normalized image', loc='mri')\n", (15323, 15382), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((15392, 15430), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)', 'desc': '"""?"""', 'loc': '"""mri"""'}), "(exists=True, desc='?', loc='mri')\n", (15396, 15430), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((15442, 15515), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)', 'desc': '"""original image conformed to FS space"""', 'loc': '"""mri"""'}), "(exists=True, desc='original image conformed to FS space', loc='mri')\n", (15446, 15515), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((15545, 15616), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)', 'desc': '"""averaged input images to recon-all"""', 'loc': '"""mri"""'}), "(exists=True, desc='averaged input images to recon-all', loc='mri')\n", (15549, 15616), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((15768, 15823), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)', 'desc': '"""white matter image"""', 'loc': '"""mri"""'}), "(exists=True, desc='white matter image', loc='mri')\n", (15772, 15823), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((15837, 15899), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)', 'desc': '"""white matter parcellation"""', 'loc': '"""mri"""'}), "(exists=True, desc='white matter parcellation', loc='mri')\n", (15841, 15899), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((19147, 19273), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)', 'mandatory': '(True)', 'desc': '"""a json config file containing xnat access info: url, username and password"""'}), "(exists=True, mandatory=True, desc=\n 'a json config file containing xnat access info: url, username and password'\n )\n", (19151, 19273), False, 'from 
nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((19309, 19412), 'nipype.interfaces.base.traits.Str', 'traits.Str', ([], {'mandatory': '(True)', 'desc': '"""Layout used to get files. relative to base directory if defined"""'}), "(mandatory=True, desc=\n 'Layout used to get files. relative to base directory if defined')\n", (19319, 19412), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((2842, 2878), 'nipype.interfaces.base.traits.Tuple', 'traits.Tuple', (['traits.Str', 'traits.Str'], {}), '(traits.Str, traits.Str)\n', (2854, 2878), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((4556, 4574), 'os.path.split', 'os.path.split', (['src'], {}), '(src)\n', (4569, 4574), False, 'import os\n'), ((5213, 5249), 'nipype.utils.misc.isdefined', 'isdefined', (['self.inputs.substitutions'], {}), '(self.inputs.substitutions)\n', (5222, 5249), False, 'from nipype.utils.misc import isdefined\n'), ((5690, 5713), 'os.path.abspath', 'os.path.abspath', (['outdir'], {}), '(outdir)\n', (5705, 5713), False, 'import os\n'), ((5725, 5757), 'nipype.utils.misc.isdefined', 'isdefined', (['self.inputs.container'], {}), '(self.inputs.container)\n', (5734, 5757), False, 'from nipype.utils.misc import isdefined\n'), ((7822, 7846), 'nipype.interfaces.base.traits.List', 'traits.List', (['traits.List'], {}), '(traits.List)\n', (7833, 7846), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((15664, 15681), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)'}), '(exists=True)\n', (15668, 15681), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((15927, 15944), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)'}), '(exists=True)\n', (15931, 15944), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((16042, 16059), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)'}), '(exists=True)\n', (16046, 16059), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((16157, 16174), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)'}), '(exists=True)\n', (16161, 16174), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, 
Undefined\n'), ((16247, 16264), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)'}), '(exists=True)\n', (16251, 16264), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((16375, 16392), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)'}), '(exists=True)\n', (16379, 16392), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((16489, 16506), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)'}), '(exists=True)\n', (16493, 16506), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((16580, 16597), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)'}), '(exists=True)\n', (16584, 16597), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((16698, 16715), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)'}), '(exists=True)\n', (16702, 16715), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((16786, 16803), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)'}), '(exists=True)\n', (16790, 16803), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((16903, 16920), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)'}), '(exists=True)\n', (16907, 16920), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((17041, 17058), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)'}), '(exists=True)\n', (17045, 17058), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((17178, 17195), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)'}), '(exists=True)\n', (17182, 17195), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((17313, 17330), 'nipype.interfaces.base.File', 'File', ([], {'exists': '(True)'}), '(exists=True)\n', (17317, 17330), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, 
DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((18299, 18325), 'os.path.join', 'os.path.join', (['path', 'dirval'], {}), '(path, dirval)\n', (18311, 18325), False, 'import os\n'), ((18464, 18486), 'glob.glob', 'glob.glob', (['globpattern'], {}), '(globpattern)\n', (18473, 18486), False, 'import glob\n'), ((18592, 18642), 'os.path.join', 'os.path.join', (['subjects_dir', 'self.inputs.subject_id'], {}), '(subjects_dir, self.inputs.subject_id)\n', (18604, 18642), False, 'import os\n'), ((19503, 19527), 'nipype.interfaces.base.traits.List', 'traits.List', (['traits.List'], {}), '(traits.List)\n', (19514, 19527), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((22891, 22902), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (22900, 22902), False, 'import os\n'), ((22918, 23020), 'xnatlib.Interface', 'XNATInterface', (["config_info['url']", "config_info['username']", "config_info['password']"], {'cachedir': 'cwd'}), "(config_info['url'], config_info['username'], config_info[\n 'password'], cachedir=cwd)\n", (22931, 23020), True, 'from xnatlib import Interface as XNATInterface\n'), ((4654, 4686), 'nipype.utils.misc.isdefined', 'isdefined', (['self.inputs.strip_dir'], {}), '(self.inputs.strip_dir)\n', (4663, 4686), False, 'from nipype.utils.misc import isdefined\n'), ((4860, 4885), 'os.path.sep.join', 'os.path.sep.join', (['folders'], {}), '(folders)\n', (4876, 4885), False, 'import os\n'), ((5629, 5646), 'nipype.utils.misc.isdefined', 'isdefined', (['outdir'], {}), '(outdir)\n', (5638, 5646), False, 'from nipype.utils.misc import isdefined\n'), ((5780, 5823), 'os.path.join', 'os.path.join', (['outdir', 'self.inputs.container'], {}), '(outdir, self.inputs.container)\n', (5792, 5823), False, 'import os\n'), ((5839, 5861), 'os.path.exists', 'os.path.exists', (['outdir'], {}), '(outdir)\n', (5853, 5861), False, 'import os\n'), ((5875, 5894), 'os.makedirs', 'os.makedirs', (['outdir'], {}), '(outdir)\n', (5886, 5894), False, 'import os\n'), ((6036, 6059), 'nipype.utils.filemanip.filename_to_list', 'filename_to_list', (['files'], {}), '(files)\n', (6052, 6059), False, 'from nipype.utils.filemanip import copyfile, list_to_filename, filename_to_list, FileNotFoundError\n'), ((6295, 6318), 'nipype.utils.filemanip.filename_to_list', 'filename_to_list', (['files'], {}), '(files)\n', (6311, 6318), False, 'from nipype.utils.filemanip import copyfile, list_to_filename, filename_to_list, FileNotFoundError\n'), ((12291, 12328), 'nipype.utils.misc.isdefined', 'isdefined', (['self.inputs.base_directory'], {}), '(self.inputs.base_directory)\n', (12300, 12328), False, 'from nipype.utils.misc import isdefined\n'), ((4930, 4954), 'os.path.join', 'os.path.join', (['dst', 'fname'], {}), '(dst, fname)\n', (4942, 4954), False, 'import os\n'), ((6245, 6272), 'os.path.join', 'os.path.join', (['tempoutdir', 'd'], {}), '(tempoutdir, d)\n', (6257, 6272), False, 'import os\n'), ((6342, 6362), 'os.path.abspath', 'os.path.abspath', (['src'], {}), '(src)\n', (6357, 6362), False, 'import os\n'), ((6382, 6401), 'os.path.isfile', 'os.path.isfile', (['src'], {}), '(src)\n', (6396, 6401), False, 'import os\n'), ((12112, 12149), 'nipype.utils.misc.isdefined', 'isdefined', (['self.inputs.field_template'], {}), '(self.inputs.field_template)\n', (12121, 12149), False, 'from nipype.utils.misc import isdefined\n'), ((12470, 12495), 'os.path.abspath', 
'os.path.abspath', (['template'], {}), '(template)\n', (12485, 12495), False, 'import os\n'), ((12548, 12567), 'glob.glob', 'glob.glob', (['template'], {}), '(template)\n', (12557, 12567), False, 'import glob\n'), ((18987, 19008), 'nipype.utils.filemanip.list_to_filename', 'list_to_filename', (['val'], {}), '(val)\n', (19003, 19008), False, 'from nipype.utils.filemanip import copyfile, list_to_filename, filename_to_list, FileNotFoundError\n'), ((23632, 23669), 'nipype.utils.misc.isdefined', 'isdefined', (['self.inputs.field_template'], {}), '(self.inputs.field_template)\n', (23641, 23669), False, 'from nipype.utils.misc import isdefined\n'), ((6474, 6503), 'os.path.join', 'os.path.join', (['tempoutdir', 'dst'], {}), '(tempoutdir, dst)\n', (6486, 6503), False, 'import os\n'), ((6581, 6599), 'os.path.split', 'os.path.split', (['dst'], {}), '(dst)\n', (6594, 6599), False, 'import os\n'), ((6776, 6805), 'nipype.utils.filemanip.copyfile', 'copyfile', (['src', 'dst'], {'copy': '(True)'}), '(src, dst, copy=True)\n', (6784, 6805), False, 'from nipype.utils.filemanip import copyfile, list_to_filename, filename_to_list, FileNotFoundError\n'), ((6827, 6845), 'os.path.isdir', 'os.path.isdir', (['src'], {}), '(src)\n', (6840, 6845), False, 'import os\n'), ((10687, 10709), 'nipype.interfaces.base.traits.Enum', 'traits.Enum', (['outfields'], {}), '(outfields)\n', (10698, 10709), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((11646, 11662), 'nipype.utils.misc.isdefined', 'isdefined', (['value'], {}), '(value)\n', (11655, 11662), False, 'from nipype.utils.misc import isdefined\n'), ((12370, 12413), 'os.path.abspath', 'os.path.abspath', (['self.inputs.base_directory'], {}), '(self.inputs.base_directory)\n', (12385, 12413), False, 'import os\n'), ((12627, 12698), 'warnings.warn', 'warn', (["('Output key: %s Template: %s returned no files' % (key, template))"], {}), "('Output key: %s Template: %s returned no files' % (key, template))\n", (12631, 12698), False, 'from warnings import warn\n'), ((12754, 12780), 'nipype.utils.filemanip.list_to_filename', 'list_to_filename', (['filelist'], {}), '(filelist)\n', (12770, 12780), False, 'from nipype.utils.filemanip import copyfile, list_to_filename, filename_to_list, FileNotFoundError\n'), ((13963, 13988), 'glob.glob', 'glob.glob', (['filledtemplate'], {}), '(filledtemplate)\n', (13972, 13988), False, 'import glob\n'), ((21975, 21997), 'nipype.interfaces.base.traits.Enum', 'traits.Enum', (['outfields'], {}), '(outfields)\n', (21986, 21997), False, 'from nipype.interfaces.base import Interface, CommandLine, Bunch, InterfaceResult, Interface, TraitedSpec, traits, File, Directory, BaseInterface, InputMultiPath, OutputMultiPath, DynamicTraitedSpec, BaseTraitedSpec, Undefined\n'), ((23154, 23170), 'nipype.utils.misc.isdefined', 'isdefined', (['value'], {}), '(value)\n', (23163, 23170), False, 'from nipype.utils.misc import isdefined\n'), ((6627, 6647), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (6641, 6647), False, 'import os\n'), ((6673, 6690), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (6684, 6690), False, 'import os\n'), ((6935, 6964), 'os.path.join', 'os.path.join', (['tempoutdir', 'dst'], {}), '(tempoutdir, dst)\n', (6947, 6964), False, 'import os\n'), ((7042, 7060), 'os.path.split', 'os.path.split', (['dst'], {}), '(dst)\n', (7055, 7060), 
False, 'import os\n'), ((7175, 7194), 'os.path.exists', 'os.path.exists', (['dst'], {}), '(dst)\n', (7189, 7194), False, 'import os\n'), ((7382, 7407), 'shutil.copytree', 'shutil.copytree', (['src', 'dst'], {}), '(src, dst)\n', (7397, 7407), False, 'import shutil\n'), ((14056, 14133), 'warnings.warn', 'warn', (["('Output key: %s Template: %s returned no files' % (key, filledtemplate))"], {}), "('Output key: %s Template: %s returned no files' % (key, filledtemplate))\n", (14060, 14133), False, 'from warnings import warn\n'), ((6887, 6908), 'os.path.join', 'os.path.join', (['src', '""""""'], {}), "(src, '')\n", (6899, 6908), False, 'import os\n'), ((7088, 7108), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (7102, 7108), False, 'import os\n'), ((7134, 7151), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (7145, 7151), False, 'import os\n'), ((7279, 7297), 'shutil.rmtree', 'shutil.rmtree', (['dst'], {}), '(dst)\n', (7292, 7297), False, 'import shutil\n'), ((14257, 14283), 'nipype.utils.filemanip.list_to_filename', 'list_to_filename', (['outfiles'], {}), '(outfiles)\n', (14273, 14283), False, 'from nipype.utils.filemanip import copyfile, list_to_filename, filename_to_list, FileNotFoundError\n')]
|
#!/usr/bin/env python3
# This is a script that analyses the simulation results from
# the script `PICMI_inputs_2d`.
import sys
import matplotlib
matplotlib.use('Agg')
import yt
yt.funcs.mylog.setLevel(50)
import numpy as np
sys.path.insert(1, '../../../../warpx/Regression/Checksum/')
import checksum
# this will be the name of the first plot file
fn1 = "Python_pass_mpi_comm_plt1_00010"
# second plot file
fn2 = "Python_pass_mpi_comm_plt2_00010"
test_name1 = fn1[:-9]
test_name2 = fn2[:-9]
checksum1 = checksum.Checksum(test_name1, fn1, do_fields=True,
do_particles=True)
checksum2 = checksum.Checksum(test_name2, fn2, do_fields=True,
do_particles=True)
rtol=1.e-9
atol=1.e-40
# Evaluate checksums against each other, adapted from
# Checksum.evaluate() method
# Dictionaries have same outer keys (levels, species)?
if (checksum1.data.keys() != checksum2.data.keys()):
print("ERROR: plotfile 1 and plotfile 2 checksums "
"have different outer keys:")
print("Plot1: %s" % checksum1.data.keys())
print("Plot2: %s" % checksum2.data.keys())
sys.exit(1)
# Dictionaries have same inner keys (field and particle quantities)?
for key1 in checksum1.data.keys():
if (checksum1.data[key1].keys() != checksum2.data[key1].keys()):
print("ERROR: plotfile 1 and plotfile 2 checksums have "
"different inner keys:")
print("Common outer keys: %s" % checksum2.data.keys())
print("Plotfile 1 inner keys in %s: %s"
% (key1, checksum1.data[key1].keys()))
print("Plotfile 2 inner keys in %s: %s"
% (key1, checksum2.data[key1].keys()))
sys.exit(1)
# Dictionaries have same values?
checksums_same = False
for key1 in checksum1.data.keys():
for key2 in checksum1.data[key1].keys():
passed = np.isclose(checksum2.data[key1][key2],
checksum1.data[key1][key2],
rtol=rtol, atol=atol)
# skip over these, since they will be the same if communicators
# have same number of procs
if key2 in ["particle_cpu", "particle_id", "particle_position_y"]:
continue
if passed:
print("ERROR: plotfile 1 and plotfile 2 checksums have "
"same values for key [%s,%s]" % (key1, key2))
print("Plotfile 1: [%s,%s] %.15e"
% (key1, key2, checksum1.data[key1][key2]))
print("Plotfile 2: [%s,%s] %.15e"
% (key1, key2, checksum2.data[key1][key2]))
checksums_same = True
if checksums_same:
sys.exit(1)
|
[
"yt.funcs.mylog.setLevel",
"sys.path.insert",
"numpy.isclose",
"matplotlib.use",
"checksum.Checksum",
"sys.exit"
] |
[((147, 168), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (161, 168), False, 'import matplotlib\n'), ((179, 206), 'yt.funcs.mylog.setLevel', 'yt.funcs.mylog.setLevel', (['(50)'], {}), '(50)\n', (202, 206), False, 'import yt\n'), ((226, 286), 'sys.path.insert', 'sys.path.insert', (['(1)', '"""../../../../warpx/Regression/Checksum/"""'], {}), "(1, '../../../../warpx/Regression/Checksum/')\n", (241, 286), False, 'import sys\n'), ((510, 579), 'checksum.Checksum', 'checksum.Checksum', (['test_name1', 'fn1'], {'do_fields': '(True)', 'do_particles': '(True)'}), '(test_name1, fn1, do_fields=True, do_particles=True)\n', (527, 579), False, 'import checksum\n'), ((622, 691), 'checksum.Checksum', 'checksum.Checksum', (['test_name2', 'fn2'], {'do_fields': '(True)', 'do_particles': '(True)'}), '(test_name2, fn2, do_fields=True, do_particles=True)\n', (639, 691), False, 'import checksum\n'), ((1134, 1145), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1142, 1145), False, 'import sys\n'), ((2649, 2660), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2657, 2660), False, 'import sys\n'), ((1703, 1714), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1711, 1714), False, 'import sys\n'), ((1869, 1962), 'numpy.isclose', 'np.isclose', (['checksum2.data[key1][key2]', 'checksum1.data[key1][key2]'], {'rtol': 'rtol', 'atol': 'atol'}), '(checksum2.data[key1][key2], checksum1.data[key1][key2], rtol=\n rtol, atol=atol)\n', (1879, 1962), True, 'import numpy as np\n')]
|
# coding: utf-8
"""
Inputhook management for GUI event loop integration.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2009 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import ctypes
import sys
import warnings
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
# Constants for identifying the GUI toolkits.
GUI_WX = 'wx'
GUI_QT = 'qt'
GUI_QT4 = 'qt4'
GUI_GTK = 'gtk'
GUI_TK = 'tk'
GUI_OSX = 'osx'
#-----------------------------------------------------------------------------
# Utility classes
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Main InputHookManager class
#-----------------------------------------------------------------------------
class InputHookManager(object):
"""Manage PyOS_InputHook for different GUI toolkits.
    This class installs various hooks under ``PyOS_InputHook`` to handle
GUI event loop integration.
"""
def __init__(self):
self.PYFUNC = ctypes.PYFUNCTYPE(ctypes.c_int)
self._apps = {}
self._reset()
def _reset(self):
self._callback_pyfunctype = None
self._callback = None
self._installed = False
self._current_gui = None
def get_pyos_inputhook(self):
"""Return the current PyOS_InputHook as a ctypes.c_void_p."""
return ctypes.c_void_p.in_dll(ctypes.pythonapi,"PyOS_InputHook")
def get_pyos_inputhook_as_func(self):
"""Return the current PyOS_InputHook as a ctypes.PYFUNCYPE."""
return self.PYFUNC.in_dll(ctypes.pythonapi,"PyOS_InputHook")
def set_inputhook(self, callback):
"""Set PyOS_InputHook to callback and return the previous one."""
self._callback = callback
self._callback_pyfunctype = self.PYFUNC(callback)
pyos_inputhook_ptr = self.get_pyos_inputhook()
original = self.get_pyos_inputhook_as_func()
pyos_inputhook_ptr.value = \
ctypes.cast(self._callback_pyfunctype, ctypes.c_void_p).value
self._installed = True
return original
def clear_inputhook(self, app=None):
"""Set PyOS_InputHook to NULL and return the previous one.
Parameters
----------
app : optional, ignored
This parameter is allowed only so that clear_inputhook() can be
            called with a similar interface to all the ``enable_*`` methods. But
the actual value of the parameter is ignored. This uniform interface
makes it easier to have user-level entry points in the main IPython
app like :meth:`enable_gui`."""
pyos_inputhook_ptr = self.get_pyos_inputhook()
original = self.get_pyos_inputhook_as_func()
pyos_inputhook_ptr.value = ctypes.c_void_p(None).value
self._reset()
return original
def clear_app_refs(self, gui=None):
"""Clear IPython's internal reference to an application instance.
Whenever we create an app for a user on qt4 or wx, we hold a
reference to the app. This is needed because in some cases bad things
can happen if a user doesn't hold a reference themselves. This
method is provided to clear the references we are holding.
Parameters
----------
gui : None or str
            If None, clear all app references. If 'wx' or 'qt4', clear
the app for that toolkit. References are not held for gtk or tk
as those toolkits don't have the notion of an app.
"""
if gui is None:
self._apps = {}
elif self._apps.has_key(gui):
del self._apps[gui]
def enable_wx(self, app=None):
"""Enable event loop integration with wxPython.
Parameters
----------
app : WX Application, optional.
Running application to use. If not given, we probe WX for an
existing application object, and create a new one if none is found.
Notes
-----
        This method sets the ``PyOS_InputHook`` for wxPython, which allows
        wxPython to integrate with terminal-based applications like
IPython.
If ``app`` is not given we probe for an existing one, and return it if
found. If no existing app is found, we create an :class:`wx.App` as
follows::
import wx
app = wx.App(redirect=False, clearSigInt=False)
"""
from IPython.lib.inputhookwx import inputhook_wx
self.set_inputhook(inputhook_wx)
self._current_gui = GUI_WX
import wx
if app is None:
app = wx.GetApp()
if app is None:
app = wx.App(redirect=False, clearSigInt=False)
app._in_event_loop = True
self._apps[GUI_WX] = app
return app
def disable_wx(self):
"""Disable event loop integration with wxPython.
This merely sets PyOS_InputHook to NULL.
"""
if self._apps.has_key(GUI_WX):
self._apps[GUI_WX]._in_event_loop = False
self.clear_inputhook()
def enable_qt4(self, app=None):
"""Enable event loop integration with PyQt4.
Parameters
----------
app : Qt Application, optional.
Running application to use. If not given, we probe Qt for an
existing application object, and create a new one if none is found.
Notes
-----
        This method sets the PyOS_InputHook for PyQt4, which allows
        PyQt4 to integrate with terminal-based applications like
IPython.
If ``app`` is not given we probe for an existing one, and return it if
found. If no existing app is found, we create an :class:`QApplication`
as follows::
            from PyQt4 import QtGui
app = QtGui.QApplication(sys.argv)
"""
from IPython.external.qt_for_kernel import QtCore, QtGui
if 'pyreadline' in sys.modules:
# see IPython GitHub Issue #281 for more info on this issue
# Similar intermittent behavior has been reported on OSX,
# but not consistently reproducible
warnings.warn("""PyReadline's inputhook can conflict with Qt, causing delays
in interactive input. If you do see this issue, we recommend using another GUI
toolkit if you can, or disable readline with the configuration option
'TerminalInteractiveShell.readline_use=False', specified in a config file or
at the command-line""",
RuntimeWarning)
# PyQt4 has had this since 4.3.1. In version 4.2, PyOS_InputHook
# was set when QtCore was imported, but if it ever got removed,
# you couldn't reset it. For earlier versions we can
# probably implement a ctypes version.
try:
QtCore.pyqtRestoreInputHook()
except AttributeError:
pass
self._current_gui = GUI_QT4
if app is None:
app = QtCore.QCoreApplication.instance()
if app is None:
app = QtGui.QApplication([" "])
app._in_event_loop = True
self._apps[GUI_QT4] = app
return app
def disable_qt4(self):
"""Disable event loop integration with PyQt4.
This merely sets PyOS_InputHook to NULL.
"""
if self._apps.has_key(GUI_QT4):
self._apps[GUI_QT4]._in_event_loop = False
self.clear_inputhook()
def enable_gtk(self, app=None):
"""Enable event loop integration with PyGTK.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
        This method sets the PyOS_InputHook for PyGTK, which allows
        PyGTK to integrate with terminal-based applications like
IPython.
"""
import gtk
try:
gtk.set_interactive(True)
self._current_gui = GUI_GTK
except AttributeError:
# For older versions of gtk, use our own ctypes version
from IPython.lib.inputhookgtk import inputhook_gtk
self.set_inputhook(inputhook_gtk)
self._current_gui = GUI_GTK
def disable_gtk(self):
"""Disable event loop integration with PyGTK.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_tk(self, app=None):
"""Enable event loop integration with Tk.
Parameters
----------
app : toplevel :class:`Tkinter.Tk` widget, optional.
Running toplevel widget to use. If not given, we probe Tk for an
existing one, and create a new one if none is found.
Notes
-----
If you have already created a :class:`Tkinter.Tk` object, the only
thing done by this method is to register with the
:class:`InputHookManager`, since creating that object automatically
sets ``PyOS_InputHook``.
"""
self._current_gui = GUI_TK
if app is None:
import Tkinter
app = Tkinter.Tk()
app.withdraw()
self._apps[GUI_TK] = app
return app
def disable_tk(self):
"""Disable event loop integration with Tkinter.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def current_gui(self):
"""Return a string indicating the currently active GUI or None."""
return self._current_gui
inputhook_manager = InputHookManager()
enable_wx = inputhook_manager.enable_wx
disable_wx = inputhook_manager.disable_wx
enable_qt4 = inputhook_manager.enable_qt4
disable_qt4 = inputhook_manager.disable_qt4
enable_gtk = inputhook_manager.enable_gtk
disable_gtk = inputhook_manager.disable_gtk
enable_tk = inputhook_manager.enable_tk
disable_tk = inputhook_manager.disable_tk
clear_inputhook = inputhook_manager.clear_inputhook
set_inputhook = inputhook_manager.set_inputhook
current_gui = inputhook_manager.current_gui
clear_app_refs = inputhook_manager.clear_app_refs
# Convenience function to switch amongst them
def enable_gui(gui=None, app=None):
"""Switch amongst GUI input hooks by name.
This is just a utility wrapper around the methods of the InputHookManager
object.
Parameters
----------
gui : optional, string or None
If None, clears input hook, otherwise it must be one of the recognized
GUI names (see ``GUI_*`` constants in module).
app : optional, existing application object.
For toolkits that have the concept of a global app, you can supply an
existing one. If not given, the toolkit will be probed for one, and if
none is found, a new one will be created. Note that GTK does not have
this concept, and passing an app if `gui`=="GTK" will raise an error.
Returns
-------
The output of the underlying gui switch routine, typically the actual
PyOS_InputHook wrapper object or the GUI toolkit app created, if there was
one.
"""
guis = {None: clear_inputhook,
GUI_OSX: lambda app=False: None,
GUI_TK: enable_tk,
GUI_GTK: enable_gtk,
GUI_WX: enable_wx,
GUI_QT: enable_qt4, # qt3 not supported
GUI_QT4: enable_qt4 }
try:
gui_hook = guis[gui]
except KeyError:
e = "Invalid GUI request %r, valid ones are:%s" % (gui, guis.keys())
raise ValueError(e)
return gui_hook(app)
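# --- Editor's usage sketch (not part of the original IPython module) ---
# A hedged illustration of the convenience wrapper defined above. It assumes the
# GUI_TK constant declared earlier in this module is the string 'tk' and that a
# Tk-capable display is available.
#   app = enable_gui('tk')   # installs PyOS_InputHook for Tk and returns the Tk root
#   current_gui()            # reports the toolkit recorded by the InputHookManager
#   enable_gui(None)         # maps to clear_inputhook and removes the hook again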
|
[
"Tkinter.Tk",
"ctypes.PYFUNCTYPE",
"IPython.external.qt_for_kernel.QtCore.QCoreApplication.instance",
"gtk.set_interactive",
"ctypes.cast",
"IPython.external.qt_for_kernel.QtCore.pyqtRestoreInputHook",
"ctypes.c_void_p",
"wx.App",
"IPython.external.qt_for_kernel.QtGui.QApplication",
"ctypes.c_void_p.in_dll",
"warnings.warn",
"wx.GetApp"
] |
[((1570, 1601), 'ctypes.PYFUNCTYPE', 'ctypes.PYFUNCTYPE', (['ctypes.c_int'], {}), '(ctypes.c_int)\n', (1587, 1601), False, 'import ctypes\n'), ((1927, 1985), 'ctypes.c_void_p.in_dll', 'ctypes.c_void_p.in_dll', (['ctypes.pythonapi', '"""PyOS_InputHook"""'], {}), "(ctypes.pythonapi, 'PyOS_InputHook')\n", (1949, 1985), False, 'import ctypes\n'), ((2531, 2586), 'ctypes.cast', 'ctypes.cast', (['self._callback_pyfunctype', 'ctypes.c_void_p'], {}), '(self._callback_pyfunctype, ctypes.c_void_p)\n', (2542, 2586), False, 'import ctypes\n'), ((3325, 3346), 'ctypes.c_void_p', 'ctypes.c_void_p', (['None'], {}), '(None)\n', (3340, 3346), False, 'import ctypes\n'), ((5194, 5205), 'wx.GetApp', 'wx.GetApp', ([], {}), '()\n', (5203, 5205), False, 'import wx\n'), ((5248, 5289), 'wx.App', 'wx.App', ([], {'redirect': '(False)', 'clearSigInt': '(False)'}), '(redirect=False, clearSigInt=False)\n', (5254, 5289), False, 'import wx\n'), ((6747, 7147), 'warnings.warn', 'warnings.warn', (['"""PyReadline\'s inputhook can conflict with Qt, causing delays\n in interactive input. If you do see this issue, we recommend using another GUI\n toolkit if you can, or disable readline with the configuration option\n \'TerminalInteractiveShell.readline_use=False\', specified in a config file or\n at the command-line"""', 'RuntimeWarning'], {}), '(\n """PyReadline\'s inputhook can conflict with Qt, causing delays\n in interactive input. If you do see this issue, we recommend using another GUI\n toolkit if you can, or disable readline with the configuration option\n \'TerminalInteractiveShell.readline_use=False\', specified in a config file or\n at the command-line"""\n , RuntimeWarning)\n', (6760, 7147), False, 'import warnings\n'), ((7439, 7468), 'IPython.external.qt_for_kernel.QtCore.pyqtRestoreInputHook', 'QtCore.pyqtRestoreInputHook', ([], {}), '()\n', (7466, 7468), False, 'from IPython.external.qt_for_kernel import QtCore, QtGui\n'), ((7596, 7630), 'IPython.external.qt_for_kernel.QtCore.QCoreApplication.instance', 'QtCore.QCoreApplication.instance', ([], {}), '()\n', (7628, 7630), False, 'from IPython.external.qt_for_kernel import QtCore, QtGui\n'), ((7673, 7698), 'IPython.external.qt_for_kernel.QtGui.QApplication', 'QtGui.QApplication', (["[' ']"], {}), "([' '])\n", (7691, 7698), False, 'from IPython.external.qt_for_kernel import QtCore, QtGui\n'), ((8631, 8656), 'gtk.set_interactive', 'gtk.set_interactive', (['(True)'], {}), '(True)\n', (8650, 8656), False, 'import gtk\n'), ((9844, 9856), 'Tkinter.Tk', 'Tkinter.Tk', ([], {}), '()\n', (9854, 9856), False, 'import Tkinter\n')]
|
# -*- coding: utf-8 -*-
###############################################################################
#
# RestoreThread
# Restore a thread.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class RestoreThread(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the RestoreThread Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(RestoreThread, self).__init__(temboo_session, '/Library/Disqus/Threads/RestoreThread')
def new_input_set(self):
return RestoreThreadInputSet()
def _make_result_set(self, result, path):
return RestoreThreadResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return RestoreThreadChoreographyExecution(session, exec_id, path)
class RestoreThreadInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the RestoreThread
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) A valid OAuth 2.0 access token.)
"""
super(RestoreThreadInputSet, self)._set_input('AccessToken', value)
def set_Forum(self, value):
"""
Set the value of the Forum input for this Choreo. ((optional, string) Forum Short Name (i.e., the subdomain of the Disqus Site URL) of a thread that is to be restored. Required if setting either ThreadByIdentification, or ThreadByLink.)
"""
super(RestoreThreadInputSet, self)._set_input('Forum', value)
def set_PublicKey(self, value):
"""
Set the value of the PublicKey input for this Choreo. ((required, string) The Public Key provided by Disqus (AKA the API Key).)
"""
super(RestoreThreadInputSet, self)._set_input('PublicKey', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: json (the default) and jsonp.)
"""
super(RestoreThreadInputSet, self)._set_input('ResponseFormat', value)
def set_ThreadID(self, value):
"""
Set the value of the ThreadID input for this Choreo. ((conditional, integer) The ID of a thread that is to be restored. Required unless specifying ThreadIdentifier or ThreadLink. If using this parameter, ThreadIdentifier cannot be set.)
"""
super(RestoreThreadInputSet, self)._set_input('ThreadID', value)
def set_ThreadIdentifier(self, value):
"""
Set the value of the ThreadIdentifier input for this Choreo. ((conditional, string) The identifier for the thread that is to be restored. Note that a Forum must also be provided when using this parameter. If set, ThreadID and ThreadLink cannot be used.)
"""
super(RestoreThreadInputSet, self)._set_input('ThreadIdentifier', value)
def set_ThreadLink(self, value):
"""
Set the value of the ThreadLink input for this Choreo. ((conditional, string) A link pointing to the thread that is to be restored. Note that a Forum must also be provided when using this parameter. If set, ThreadID and ThreadIdentifier cannot be set.)
"""
super(RestoreThreadInputSet, self)._set_input('ThreadLink', value)
class RestoreThreadResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the RestoreThread Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Disqus.)
"""
return self._output.get('Response', None)
class RestoreThreadChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return RestoreThreadResultSet(response, path)
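# --- Editor's usage sketch (not part of the original Temboo-generated file) ---
# A hedged illustration of how the classes above fit together; "temboo_session"
# and the input values are placeholders, and actually executing the Choreo is
# outside the scope of this snippet.
#   choreo = RestoreThread(temboo_session)
#   inputs = choreo.new_input_set()
#   inputs.set_AccessToken("<access token>")
#   inputs.set_Forum("example-forum")
#   inputs.set_ThreadIdentifier("example-thread")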
|
[
"json.loads"
] |
[((4635, 4650), 'json.loads', 'json.loads', (['str'], {}), '(str)\n', (4645, 4650), False, 'import json\n')]
|
# Authors: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
from collections import defaultdict
import pytest
import numpy as np
import mne
import mne_nirs
from mne.datasets import testing
from mne.utils import catch_logging, check_version
from mne_nirs.experimental_design.tests.test_experimental_design import \
_load_dataset
from mne_nirs.experimental_design import make_first_level_design_matrix
from mne_nirs.statistics import run_glm
from mne_nirs.visualisation import plot_glm_topo, plot_glm_surface_projection
from mne_nirs.utils import glm_to_tidy
testing_path = testing.data_path(download=False)
raw_path = testing_path + '/NIRx/nirscout/nirx_15_2_recording_w_short'
subjects_dir = testing_path + '/subjects'
requires_mne_1 = pytest.mark.skipif(not check_version('mne', '1.0'),
reason='Needs MNE-Python 1.0')
def test_plot_nirs_source_detector_pyvista(requires_pyvista):
raw = mne.io.read_raw_nirx(raw_path)
mne_nirs.visualisation.plot_nirs_source_detector(
np.random.randn(len(raw.ch_names)),
raw.info, show_axes=True,
subject='fsaverage',
trans='fsaverage',
surfaces=['white'],
fnirs=False,
subjects_dir=subjects_dir,
verbose=True)
mne_nirs.visualisation.plot_nirs_source_detector(
np.abs(np.random.randn(len(raw.ch_names))) + 5,
raw.info, show_axes=True,
subject='fsaverage',
trans='fsaverage',
surfaces=['white'],
fnirs=False,
subjects_dir=subjects_dir,
verbose=True)
@pytest.mark.filterwarnings('ignore:"plot_glm_topo" has been deprecated.*:')
def test_run_plot_GLM_topo():
raw_intensity = _load_dataset()
raw_intensity.crop(450, 600) # Keep the test fast
design_matrix = make_first_level_design_matrix(raw_intensity,
drift_order=1,
drift_model='polynomial')
raw_od = mne.preprocessing.nirs.optical_density(raw_intensity)
raw_haemo = mne.preprocessing.nirs.beer_lambert_law(raw_od, ppf=0.1)
glm_estimates = run_glm(raw_haemo, design_matrix)
fig = plot_glm_topo(raw_haemo, glm_estimates.data, design_matrix)
# 5 conditions (A,B,C,Drift,Constant) * two chroma + 2xcolorbar
assert len(fig.axes) == 12
# Two conditions * two chroma + 2 x colorbar
fig = plot_glm_topo(raw_haemo, glm_estimates.data, design_matrix,
requested_conditions=['A', 'B'])
assert len(fig.axes) == 6
# Two conditions * one chroma + 1 x colorbar
with pytest.warns(RuntimeWarning, match='Reducing GLM results'):
fig = plot_glm_topo(raw_haemo.copy().pick(picks="hbo"),
glm_estimates.data, design_matrix,
requested_conditions=['A', 'B'])
assert len(fig.axes) == 3
# One conditions * two chroma + 2 x colorbar
fig = plot_glm_topo(raw_haemo, glm_estimates.data, design_matrix,
requested_conditions=['A'])
assert len(fig.axes) == 4
# One conditions * one chroma + 1 x colorbar
with pytest.warns(RuntimeWarning, match='Reducing GLM results'):
fig = plot_glm_topo(raw_haemo.copy().pick(picks="hbo"),
glm_estimates.data,
design_matrix, requested_conditions=['A'])
assert len(fig.axes) == 2
# One conditions * one chroma + 0 x colorbar
with pytest.warns(RuntimeWarning, match='Reducing GLM results'):
fig = plot_glm_topo(raw_haemo.copy().pick(picks="hbo"),
glm_estimates.data, design_matrix,
colorbar=False, requested_conditions=['A'])
assert len(fig.axes) == 1
# Ensure warning thrown if glm estimates is missing channels from raw
glm_estimates_subset = {a: glm_estimates.data[a]
for a in raw_haemo.ch_names[0:3]}
with pytest.raises(RuntimeError, match="does not match regression"):
plot_glm_topo(raw_haemo, glm_estimates_subset, design_matrix)
def test_run_plot_GLM_contrast_topo():
raw_intensity = _load_dataset()
raw_intensity.crop(450, 600) # Keep the test fast
design_matrix = make_first_level_design_matrix(raw_intensity,
drift_order=1,
drift_model='polynomial')
raw_od = mne.preprocessing.nirs.optical_density(raw_intensity)
raw_haemo = mne.preprocessing.nirs.beer_lambert_law(raw_od, ppf=0.1)
glm_est = run_glm(raw_haemo, design_matrix)
contrast_matrix = np.eye(design_matrix.shape[1])
basic_conts = dict([(column, contrast_matrix[i])
for i, column in enumerate(design_matrix.columns)])
contrast_LvR = basic_conts['A'] - basic_conts['B']
with pytest.deprecated_call(match='comprehensive GLM'):
contrast = mne_nirs.statistics.compute_contrast(
glm_est.data, contrast_LvR)
with pytest.deprecated_call(match='comprehensive GLM'):
fig = mne_nirs.visualisation.plot_glm_contrast_topo(
raw_haemo, contrast)
assert len(fig.axes) == 3
def test_run_plot_GLM_contrast_topo_single_chroma():
raw_intensity = _load_dataset()
raw_intensity.crop(450, 600) # Keep the test fast
design_matrix = make_first_level_design_matrix(raw_intensity,
drift_order=1,
drift_model='polynomial')
raw_od = mne.preprocessing.nirs.optical_density(raw_intensity)
raw_haemo = mne.preprocessing.nirs.beer_lambert_law(raw_od, ppf=0.1)
raw_haemo = raw_haemo.pick(picks='hbo')
glm_est = run_glm(raw_haemo, design_matrix)
contrast_matrix = np.eye(design_matrix.shape[1])
basic_conts = dict([(column, contrast_matrix[i])
for i, column in enumerate(design_matrix.columns)])
contrast_LvR = basic_conts['A'] - basic_conts['B']
with pytest.deprecated_call(match='comprehensive GLM'):
contrast = mne_nirs.statistics.compute_contrast(
glm_est.data, contrast_LvR)
with pytest.deprecated_call(match='comprehensive GLM'):
fig = mne_nirs.visualisation.plot_glm_contrast_topo(
raw_haemo, contrast)
assert len(fig.axes) == 2
def test_fig_from_axes():
from mne_nirs.visualisation._plot_GLM_topo import _get_fig_from_axes
with pytest.raises(RuntimeError, match="Unable to extract figure"):
_get_fig_from_axes([1, 2, 3])
# surface arg
@requires_mne_1
def test_run_plot_GLM_projection(requires_pyvista):
raw_intensity = _load_dataset()
raw_intensity.crop(450, 600) # Keep the test fast
design_matrix = make_first_level_design_matrix(raw_intensity,
drift_order=1,
drift_model='polynomial')
raw_od = mne.preprocessing.nirs.optical_density(raw_intensity)
raw_haemo = mne.preprocessing.nirs.beer_lambert_law(raw_od, ppf=0.1)
glm_estimates = run_glm(raw_haemo, design_matrix)
df = glm_to_tidy(raw_haemo, glm_estimates.data, design_matrix)
df = df.query("Chroma in 'hbo'")
df = df.query("Condition in 'A'")
brain = plot_glm_surface_projection(raw_haemo.copy().pick("hbo"),
df, clim='auto', view='dorsal',
colorbar=True, size=(800, 700),
value="theta", surface='white',
subjects_dir=subjects_dir)
assert type(brain) == mne.viz._brain.Brain
@requires_mne_1
@pytest.mark.parametrize('fname_raw, to_1020, ch_names', [
(raw_path, False, None),
(raw_path, True, 'numbered'),
(raw_path, True, defaultdict(lambda: '')),
])
def test_plot_3d_montage(requires_pyvista, fname_raw, to_1020, ch_names):
import pyvista
pyvista.close_all()
assert len(pyvista.plotting._ALL_PLOTTERS) == 0
raw = mne.io.read_raw_nirx(fname_raw)
if to_1020:
need = set(sum(
(ch_name.split()[0].split('_') for ch_name in raw.ch_names),
list()))
mon = mne.channels.make_standard_montage('standard_1020')
mon.rename_channels({h: n for h, n in zip(mon.ch_names, need)})
raw.set_montage(mon)
n_labels = len(raw.ch_names) // 2
view_map = {'left-lat': np.arange(1, n_labels // 2),
'caudal': np.arange(n_labels // 2, n_labels + 1)}
# We use "sample" here even though it's wrong so that we can have a head
# surface
with catch_logging() as log:
mne_nirs.viz.plot_3d_montage(
raw.info, view_map, subject='sample', surface='white',
subjects_dir=subjects_dir, ch_names=ch_names, verbose=True)
assert len(pyvista.plotting._ALL_PLOTTERS) == 0
log = log.getvalue().lower()
if to_1020:
assert 'automatically mapped' in log
else:
assert 'could not' in log
|
[
"mne.channels.make_standard_montage",
"mne_nirs.visualisation.plot_glm_contrast_topo",
"mne.preprocessing.nirs.beer_lambert_law",
"collections.defaultdict",
"pyvista.close_all",
"numpy.arange",
"mne_nirs.visualisation.plot_glm_topo",
"mne.utils.catch_logging",
"pytest.warns",
"mne_nirs.viz.plot_3d_montage",
"mne_nirs.utils.glm_to_tidy",
"mne_nirs.statistics.compute_contrast",
"mne_nirs.experimental_design.tests.test_experimental_design._load_dataset",
"pytest.raises",
"mne.datasets.testing.data_path",
"pytest.mark.filterwarnings",
"mne.io.read_raw_nirx",
"mne_nirs.experimental_design.make_first_level_design_matrix",
"mne.preprocessing.nirs.optical_density",
"mne_nirs.visualisation._plot_GLM_topo._get_fig_from_axes",
"mne_nirs.statistics.run_glm",
"mne.utils.check_version",
"numpy.eye",
"pytest.deprecated_call"
] |
[((578, 611), 'mne.datasets.testing.data_path', 'testing.data_path', ([], {'download': '(False)'}), '(download=False)\n', (595, 611), False, 'from mne.datasets import testing\n'), ((1572, 1647), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:"plot_glm_topo" has been deprecated.*:"""'], {}), '(\'ignore:"plot_glm_topo" has been deprecated.*:\')\n', (1598, 1647), False, 'import pytest\n'), ((936, 966), 'mne.io.read_raw_nirx', 'mne.io.read_raw_nirx', (['raw_path'], {}), '(raw_path)\n', (956, 966), False, 'import mne\n'), ((1698, 1713), 'mne_nirs.experimental_design.tests.test_experimental_design._load_dataset', '_load_dataset', ([], {}), '()\n', (1711, 1713), False, 'from mne_nirs.experimental_design.tests.test_experimental_design import _load_dataset\n'), ((1790, 1881), 'mne_nirs.experimental_design.make_first_level_design_matrix', 'make_first_level_design_matrix', (['raw_intensity'], {'drift_order': '(1)', 'drift_model': '"""polynomial"""'}), "(raw_intensity, drift_order=1, drift_model=\n 'polynomial')\n", (1820, 1881), False, 'from mne_nirs.experimental_design import make_first_level_design_matrix\n'), ((1992, 2045), 'mne.preprocessing.nirs.optical_density', 'mne.preprocessing.nirs.optical_density', (['raw_intensity'], {}), '(raw_intensity)\n', (2030, 2045), False, 'import mne\n'), ((2062, 2118), 'mne.preprocessing.nirs.beer_lambert_law', 'mne.preprocessing.nirs.beer_lambert_law', (['raw_od'], {'ppf': '(0.1)'}), '(raw_od, ppf=0.1)\n', (2101, 2118), False, 'import mne\n'), ((2139, 2172), 'mne_nirs.statistics.run_glm', 'run_glm', (['raw_haemo', 'design_matrix'], {}), '(raw_haemo, design_matrix)\n', (2146, 2172), False, 'from mne_nirs.statistics import run_glm\n'), ((2183, 2242), 'mne_nirs.visualisation.plot_glm_topo', 'plot_glm_topo', (['raw_haemo', 'glm_estimates.data', 'design_matrix'], {}), '(raw_haemo, glm_estimates.data, design_matrix)\n', (2196, 2242), False, 'from mne_nirs.visualisation import plot_glm_topo, plot_glm_surface_projection\n'), ((2402, 2498), 'mne_nirs.visualisation.plot_glm_topo', 'plot_glm_topo', (['raw_haemo', 'glm_estimates.data', 'design_matrix'], {'requested_conditions': "['A', 'B']"}), "(raw_haemo, glm_estimates.data, design_matrix,\n requested_conditions=['A', 'B'])\n", (2415, 2498), False, 'from mne_nirs.visualisation import plot_glm_topo, plot_glm_surface_projection\n'), ((2946, 3037), 'mne_nirs.visualisation.plot_glm_topo', 'plot_glm_topo', (['raw_haemo', 'glm_estimates.data', 'design_matrix'], {'requested_conditions': "['A']"}), "(raw_haemo, glm_estimates.data, design_matrix,\n requested_conditions=['A'])\n", (2959, 3037), False, 'from mne_nirs.visualisation import plot_glm_topo, plot_glm_surface_projection\n'), ((4162, 4177), 'mne_nirs.experimental_design.tests.test_experimental_design._load_dataset', '_load_dataset', ([], {}), '()\n', (4175, 4177), False, 'from mne_nirs.experimental_design.tests.test_experimental_design import _load_dataset\n'), ((4254, 4345), 'mne_nirs.experimental_design.make_first_level_design_matrix', 'make_first_level_design_matrix', (['raw_intensity'], {'drift_order': '(1)', 'drift_model': '"""polynomial"""'}), "(raw_intensity, drift_order=1, drift_model=\n 'polynomial')\n", (4284, 4345), False, 'from mne_nirs.experimental_design import make_first_level_design_matrix\n'), ((4456, 4509), 'mne.preprocessing.nirs.optical_density', 'mne.preprocessing.nirs.optical_density', (['raw_intensity'], {}), '(raw_intensity)\n', (4494, 4509), False, 'import mne\n'), ((4526, 4582), 'mne.preprocessing.nirs.beer_lambert_law', 'mne.preprocessing.nirs.beer_lambert_law', (['raw_od'], {'ppf': '(0.1)'}), '(raw_od, ppf=0.1)\n', (4565, 4582), False, 'import mne\n'), ((4597, 4630), 'mne_nirs.statistics.run_glm', 'run_glm', (['raw_haemo', 'design_matrix'], {}), '(raw_haemo, design_matrix)\n', (4604, 4630), False, 'from mne_nirs.statistics import run_glm\n'), ((4653, 4683), 'numpy.eye', 'np.eye', (['design_matrix.shape[1]'], {}), '(design_matrix.shape[1])\n', (4659, 4683), True, 'import numpy as np\n'), ((5284, 5299), 'mne_nirs.experimental_design.tests.test_experimental_design._load_dataset', '_load_dataset', ([], {}), '()\n', (5297, 5299), False, 'from mne_nirs.experimental_design.tests.test_experimental_design import _load_dataset\n'), ((5376, 5467), 'mne_nirs.experimental_design.make_first_level_design_matrix', 'make_first_level_design_matrix', (['raw_intensity'], {'drift_order': '(1)', 'drift_model': '"""polynomial"""'}), "(raw_intensity, drift_order=1, drift_model=\n 'polynomial')\n", (5406, 5467), False, 'from mne_nirs.experimental_design import make_first_level_design_matrix\n'), ((5578, 5631), 'mne.preprocessing.nirs.optical_density', 'mne.preprocessing.nirs.optical_density', (['raw_intensity'], {}), '(raw_intensity)\n', (5616, 5631), False, 'import mne\n'), ((5648, 5704), 'mne.preprocessing.nirs.beer_lambert_law', 'mne.preprocessing.nirs.beer_lambert_law', (['raw_od'], {'ppf': '(0.1)'}), '(raw_od, ppf=0.1)\n', (5687, 5704), False, 'import mne\n'), ((5763, 5796), 'mne_nirs.statistics.run_glm', 'run_glm', (['raw_haemo', 'design_matrix'], {}), '(raw_haemo, design_matrix)\n', (5770, 5796), False, 'from mne_nirs.statistics import run_glm\n'), ((5819, 5849), 'numpy.eye', 'np.eye', (['design_matrix.shape[1]'], {}), '(design_matrix.shape[1])\n', (5825, 5849), True, 'import numpy as np\n'), ((6690, 6705), 'mne_nirs.experimental_design.tests.test_experimental_design._load_dataset', '_load_dataset', ([], {}), '()\n', (6703, 6705), False, 'from mne_nirs.experimental_design.tests.test_experimental_design import _load_dataset\n'), ((6782, 6873), 'mne_nirs.experimental_design.make_first_level_design_matrix', 'make_first_level_design_matrix', (['raw_intensity'], {'drift_order': '(1)', 'drift_model': '"""polynomial"""'}), "(raw_intensity, drift_order=1, drift_model=\n 'polynomial')\n", (6812, 6873), False, 'from mne_nirs.experimental_design import make_first_level_design_matrix\n'), ((6984, 7037), 'mne.preprocessing.nirs.optical_density', 'mne.preprocessing.nirs.optical_density', (['raw_intensity'], {}), '(raw_intensity)\n', (7022, 7037), False, 'import mne\n'), ((7054, 7110), 'mne.preprocessing.nirs.beer_lambert_law', 'mne.preprocessing.nirs.beer_lambert_law', (['raw_od'], {'ppf': '(0.1)'}), '(raw_od, ppf=0.1)\n', (7093, 7110), False, 'import mne\n'), ((7131, 7164), 'mne_nirs.statistics.run_glm', 'run_glm', (['raw_haemo', 'design_matrix'], {}), '(raw_haemo, design_matrix)\n', (7138, 7164), False, 'from mne_nirs.statistics import run_glm\n'), ((7174, 7231), 'mne_nirs.utils.glm_to_tidy', 'glm_to_tidy', (['raw_haemo', 'glm_estimates.data', 'design_matrix'], {}), '(raw_haemo, glm_estimates.data, design_matrix)\n', (7185, 7231), False, 'from mne_nirs.utils import glm_to_tidy\n'), ((7995, 8014), 'pyvista.close_all', 'pyvista.close_all', ([], {}), '()\n', (8012, 8014), False, 'import pyvista\n'), ((8077, 8108), 'mne.io.read_raw_nirx', 'mne.io.read_raw_nirx', (['fname_raw'], {}), '(fname_raw)\n', (8097, 8108), False, 'import mne\n'), ((766, 793), 'mne.utils.check_version', 'check_version', (['"""mne"""', '"""1.0"""'], {}), "('mne', '1.0')\n", (779, 793), False, 'from mne.utils import catch_logging, check_version\n'), ((2608, 2666), 'pytest.warns', 'pytest.warns', (['RuntimeWarning'], {'match': '"""Reducing GLM results"""'}), "(RuntimeWarning, match='Reducing GLM results')\n", (2620, 2666), False, 'import pytest\n'), ((3147, 3205), 'pytest.warns', 'pytest.warns', (['RuntimeWarning'], {'match': '"""Reducing GLM results"""'}), "(RuntimeWarning, match='Reducing GLM results')\n", (3159, 3205), False, 'import pytest\n'), ((3479, 3537), 'pytest.warns', 'pytest.warns', (['RuntimeWarning'], {'match': '"""Reducing GLM results"""'}), "(RuntimeWarning, match='Reducing GLM results')\n", (3491, 3537), False, 'import pytest\n'), ((3967, 4029), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""does not match regression"""'}), "(RuntimeError, match='does not match regression')\n", (3980, 4029), False, 'import pytest\n'), ((4039, 4100), 'mne_nirs.visualisation.plot_glm_topo', 'plot_glm_topo', (['raw_haemo', 'glm_estimates_subset', 'design_matrix'], {}), '(raw_haemo, glm_estimates_subset, design_matrix)\n', (4052, 4100), False, 'from mne_nirs.visualisation import plot_glm_topo, plot_glm_surface_projection\n'), ((4877, 4926), 'pytest.deprecated_call', 'pytest.deprecated_call', ([], {'match': '"""comprehensive GLM"""'}), "(match='comprehensive GLM')\n", (4899, 4926), False, 'import pytest\n'), ((4947, 5011), 'mne_nirs.statistics.compute_contrast', 'mne_nirs.statistics.compute_contrast', (['glm_est.data', 'contrast_LvR'], {}), '(glm_est.data, contrast_LvR)\n', (4983, 5011), False, 'import mne_nirs\n'), ((5034, 5083), 'pytest.deprecated_call', 'pytest.deprecated_call', ([], {'match': '"""comprehensive GLM"""'}), "(match='comprehensive GLM')\n", (5056, 5083), False, 'import pytest\n'), ((5099, 5165), 'mne_nirs.visualisation.plot_glm_contrast_topo', 'mne_nirs.visualisation.plot_glm_contrast_topo', (['raw_haemo', 'contrast'], {}), '(raw_haemo, contrast)\n', (5144, 5165), False, 'import mne_nirs\n'), ((6043, 6092), 'pytest.deprecated_call', 'pytest.deprecated_call', ([], {'match': '"""comprehensive GLM"""'}), "(match='comprehensive GLM')\n", (6065, 6092), False, 'import pytest\n'), ((6113, 6177), 'mne_nirs.statistics.compute_contrast', 'mne_nirs.statistics.compute_contrast', (['glm_est.data', 'contrast_LvR'], {}), '(glm_est.data, contrast_LvR)\n', (6149, 6177), False, 'import mne_nirs\n'), ((6200, 6249), 'pytest.deprecated_call', 'pytest.deprecated_call', ([], {'match': '"""comprehensive GLM"""'}), "(match='comprehensive GLM')\n", (6222, 6249), False, 'import pytest\n'), ((6265, 6331), 'mne_nirs.visualisation.plot_glm_contrast_topo', 'mne_nirs.visualisation.plot_glm_contrast_topo', (['raw_haemo', 'contrast'], {}), '(raw_haemo, contrast)\n', (6310, 6331), False, 'import mne_nirs\n'), ((6485, 6546), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {'match': '"""Unable to extract figure"""'}), "(RuntimeError, match='Unable to extract figure')\n", (6498, 6546), False, 'import pytest\n'), ((6556, 6585), 'mne_nirs.visualisation._plot_GLM_topo._get_fig_from_axes', '_get_fig_from_axes', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (6574, 6585), False, 'from mne_nirs.visualisation._plot_GLM_topo import _get_fig_from_axes\n'), ((8257, 8308), 'mne.channels.make_standard_montage', 'mne.channels.make_standard_montage', (['"""standard_1020"""'], {}), "('standard_1020')\n", (8291, 8308), False, 'import mne\n'), ((8476, 8503), 'numpy.arange', 'np.arange', (['(1)', '(n_labels // 2)'], {}), '(1, n_labels // 2)\n', (8485, 8503), True, 'import numpy as np\n'), ((8531, 8569), 'numpy.arange', 'np.arange', (['(n_labels // 2)', '(n_labels + 1)'], {}), '(n_labels // 2, n_labels + 1)\n', (8540, 8569), True, 'import numpy as np\n'), ((8671, 8686), 'mne.utils.catch_logging', 'catch_logging', ([], {}), '()\n', (8684, 8686), False, 'from mne.utils import catch_logging, check_version\n'), ((8703, 8851), 'mne_nirs.viz.plot_3d_montage', 'mne_nirs.viz.plot_3d_montage', (['raw.info', 'view_map'], {'subject': '"""sample"""', 'surface': '"""white"""', 'subjects_dir': 'subjects_dir', 'ch_names': 'ch_names', 'verbose': '(True)'}), "(raw.info, view_map, subject='sample', surface=\n 'white', subjects_dir=subjects_dir, ch_names=ch_names, verbose=True)\n", (8731, 8851), False, 'import mne_nirs\n'), ((7869, 7893), 'collections.defaultdict', 'defaultdict', (["(lambda : '')"], {}), "(lambda : '')\n", (7880, 7893), False, 'from collections import defaultdict\n')]
|
from django.conf.urls import patterns, url, include
from django.conf import settings
urlpatterns = patterns(
'api.base.views',
url(r'^task/', include('api.task.urls')),
url(r'^accounts/', include('api.accounts.urls')),
url(r'^vm/', include('api.vm.urls')),
url(r'^node/', include('api.node.urls')),
url(r'^network/', include('api.network.urls')),
url(r'^image/', include('api.image.urls')),
url(r'^imagestore/', include('api.imagestore.urls')),
url(r'^template/', include('api.template.urls')),
url(r'^iso/', include('api.iso.urls')),
url(r'^dc/', include('api.dc.urls')),
url(r'^system/', include('api.system.urls')),
url(r'ping/$', 'api_ping', name='api_ping'),
)
if settings.SMS_ENABLED:
urlpatterns += patterns('', url(r'^sms/', include('api.sms.urls')))
if settings.MON_ZABBIX_ENABLED:
urlpatterns += patterns('', url(r'^mon/', include('api.mon.urls')))
if settings.DNS_ENABLED:
urlpatterns += patterns('', url(r'^dns/', include('api.dns.urls')))
|
[
"django.conf.urls.include",
"django.conf.urls.url"
] |
[((673, 715), 'django.conf.urls.url', 'url', (['"""ping/$"""', '"""api_ping"""'], {'name': '"""api_ping"""'}), "('ping/$', 'api_ping', name='api_ping')\n", (676, 715), False, 'from django.conf.urls import patterns, url, include\n'), ((152, 176), 'django.conf.urls.include', 'include', (['"""api.task.urls"""'], {}), "('api.task.urls')\n", (159, 176), False, 'from django.conf.urls import patterns, url, include\n'), ((202, 230), 'django.conf.urls.include', 'include', (['"""api.accounts.urls"""'], {}), "('api.accounts.urls')\n", (209, 230), False, 'from django.conf.urls import patterns, url, include\n'), ((250, 272), 'django.conf.urls.include', 'include', (['"""api.vm.urls"""'], {}), "('api.vm.urls')\n", (257, 272), False, 'from django.conf.urls import patterns, url, include\n'), ((294, 318), 'django.conf.urls.include', 'include', (['"""api.node.urls"""'], {}), "('api.node.urls')\n", (301, 318), False, 'from django.conf.urls import patterns, url, include\n'), ((343, 370), 'django.conf.urls.include', 'include', (['"""api.network.urls"""'], {}), "('api.network.urls')\n", (350, 370), False, 'from django.conf.urls import patterns, url, include\n'), ((393, 418), 'django.conf.urls.include', 'include', (['"""api.image.urls"""'], {}), "('api.image.urls')\n", (400, 418), False, 'from django.conf.urls import patterns, url, include\n'), ((446, 476), 'django.conf.urls.include', 'include', (['"""api.imagestore.urls"""'], {}), "('api.imagestore.urls')\n", (453, 476), False, 'from django.conf.urls import patterns, url, include\n'), ((502, 530), 'django.conf.urls.include', 'include', (['"""api.template.urls"""'], {}), "('api.template.urls')\n", (509, 530), False, 'from django.conf.urls import patterns, url, include\n'), ((551, 574), 'django.conf.urls.include', 'include', (['"""api.iso.urls"""'], {}), "('api.iso.urls')\n", (558, 574), False, 'from django.conf.urls import patterns, url, include\n'), ((594, 616), 'django.conf.urls.include', 'include', (['"""api.dc.urls"""'], {}), "('api.dc.urls')\n", (601, 616), False, 'from django.conf.urls import patterns, url, include\n'), ((640, 666), 'django.conf.urls.include', 'include', (['"""api.system.urls"""'], {}), "('api.system.urls')\n", (647, 666), False, 'from django.conf.urls import patterns, url, include\n'), ((792, 815), 'django.conf.urls.include', 'include', (['"""api.sms.urls"""'], {}), "('api.sms.urls')\n", (799, 815), False, 'from django.conf.urls import patterns, url, include\n'), ((897, 920), 'django.conf.urls.include', 'include', (['"""api.mon.urls"""'], {}), "('api.mon.urls')\n", (904, 920), False, 'from django.conf.urls import patterns, url, include\n'), ((995, 1018), 'django.conf.urls.include', 'include', (['"""api.dns.urls"""'], {}), "('api.dns.urls')\n", (1002, 1018), False, 'from django.conf.urls import patterns, url, include\n')]
|
import itertools
import pytest
TYPE_WEIGHTS = [5e17, 1e19]
GAUGE_WEIGHTS = [1e19, 1e18, 5e17]
GAUGE_TYPES = [0, 0, 1]
MONTH = 86400 * 30
WEEK = 7 * 86400
@pytest.fixture(scope="module", autouse=True)
def setup(
accounts, mock_lp_token, token, minter, gauge_controller, liquidity_gauge, gauge_wrapper,
):
token.set_minter(minter, {"from": accounts[0]})
gauge_controller.add_type(b"Liquidity", 10 ** 10, {"from": accounts[0]})
gauge_controller.add_gauge(liquidity_gauge, 0, 10 ** 18, {"from": accounts[0]})
# transfer tokens
for acct in accounts[1:4]:
mock_lp_token.transfer(acct, 1e18, {"from": accounts[0]})
# approve gauge and wrapper
for gauge, acct in itertools.product([liquidity_gauge, gauge_wrapper], accounts[1:4]):
mock_lp_token.approve(gauge, 1e18, {"from": acct})
def test_claim(accounts, chain, liquidity_gauge, gauge_wrapper, token):
gauge_wrapper.deposit(1e18, {"from": accounts[1]})
chain.sleep(MONTH)
gauge_wrapper.claim_tokens({"from": accounts[1]})
expected = liquidity_gauge.integrate_fraction(gauge_wrapper)
assert expected > 0
assert token.balanceOf(accounts[1]) == expected
def test_multiple_depositors(accounts, chain, liquidity_gauge, gauge_wrapper, token):
gauge_wrapper.deposit(1e18, {"from": accounts[1]})
chain.sleep(MONTH)
gauge_wrapper.deposit(1e18, {"from": accounts[2]})
chain.sleep(MONTH)
gauge_wrapper.withdraw(1e18, {"from": accounts[1]})
gauge_wrapper.withdraw(1e18, {"from": accounts[2]})
chain.sleep(10)
gauge_wrapper.claim_tokens({"from": accounts[1]})
gauge_wrapper.claim_tokens({"from": accounts[2]})
expected = liquidity_gauge.integrate_fraction(gauge_wrapper)
actual = token.balanceOf(accounts[1]) + token.balanceOf(accounts[2])
assert expected > 0
assert 0 <= expected - actual <= 1
def test_multiple_claims(accounts, chain, liquidity_gauge, gauge_wrapper, token):
gauge_wrapper.deposit(1e18, {"from": accounts[1]})
chain.sleep(MONTH)
gauge_wrapper.claim_tokens({"from": accounts[1]})
balance = token.balanceOf(accounts[1])
chain.sleep(MONTH)
gauge_wrapper.claim_tokens({"from": accounts[1]})
expected = liquidity_gauge.integrate_fraction(gauge_wrapper)
final_balance = token.balanceOf(accounts[1])
assert final_balance > balance
assert final_balance == expected
def test_claim_without_deposit(accounts, chain, gauge_wrapper, token):
gauge_wrapper.claim_tokens({"from": accounts[1]})
assert token.balanceOf(accounts[1]) == 0
def test_claimable_no_deposit(accounts, gauge_wrapper):
assert gauge_wrapper.claimable_tokens.call(accounts[1]) == 0
def test_claimable_tokens(accounts, chain, gauge_wrapper, token):
gauge_wrapper.deposit(1e18, {"from": accounts[1]})
chain.sleep(MONTH)
chain.mine()
claimable = gauge_wrapper.claimable_tokens.call(accounts[1])
gauge_wrapper.claim_tokens({"from": accounts[1]})
assert token.balanceOf(accounts[1]) >= claimable > 0
|
[
"pytest.fixture",
"itertools.product"
] |
[((160, 204), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""', 'autouse': '(True)'}), "(scope='module', autouse=True)\n", (174, 204), False, 'import pytest\n'), ((703, 769), 'itertools.product', 'itertools.product', (['[liquidity_gauge, gauge_wrapper]', 'accounts[1:4]'], {}), '([liquidity_gauge, gauge_wrapper], accounts[1:4])\n', (720, 769), False, 'import itertools\n')]
|
import pandas as pd
from inmationhttpclient import Client
baseURL = "http://localhost:8002"
options = {
"auth": {
"username": '%USERNAME%',
"password": '%PASSWORD%',
"authority": 'inmation',
"grant_type": "password"
}
}
start_time = "2018-01-01T00:00:00.000Z"
end_time = "2018-02-01T00:00:00.000Z"
intervals_no = 31
items = [
{
"p": "%PATH%",
"aggregate": "AGG_TYPE_INTERPOLATIVE"
},
]
client = Client(baseURL, options)
def azureml_main(dataframe1 = None, dataframe2 = None):
r = client.readHistoricalData(items, start_time, end_time, intervals_no)
print("JSON")
jsonRes = r.json()
print(type(jsonRes))
dataFrameData = {}
if isinstance(jsonRes, dict):
print("r.json is an dictionary!!")
data = jsonRes['data']
if isinstance(data, dict):
print("data is an dictionary!!")
items = data['items']
if isinstance(items, list):
print("items is a list!!")
for item in items:
objPath = item['p']
itempath = objPath.split('/')
objName = itempath[-1]
intervals = item['intervals']
v = []
q = []
t = []
print(type(v))
dataFrameData[objName + '_v'] = v
dataFrameData[objName + '_q'] = q
dataFrameData[objName + '_t'] = t
print(type(v))
if isinstance(intervals, list):
print("intervals is a list!!")
for interval in intervals:
v.append(interval['V'])
q.append(interval['Q'])
t.append(interval['T'])
print(v, q, t)
dataframe = pd.DataFrame(data = dataFrameData)
return dataframe
|
[
"pandas.DataFrame",
"inmationhttpclient.Client"
] |
[((462, 486), 'inmationhttpclient.Client', 'Client', (['baseURL', 'options'], {}), '(baseURL, options)\n', (468, 486), False, 'from inmationhttpclient import Client\n'), ((1897, 1929), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'dataFrameData'}), '(data=dataFrameData)\n', (1909, 1929), True, 'import pandas as pd\n')]
|
import setuptools
def read_file(name):
with open(name, "r", encoding="UTF-8") as f:
return f.read().strip()
install_requires = read_file("./config/install_requires.txt").split('\n')
project_name = read_file("./config/project_name.txt")
version = __import__(project_name).__version__
long_description = read_file('./README.md')
setuptools.setup(
name=project_name,
version=version,
author="xsthunder",
author_email="<EMAIL>",
description="personal lib",
long_description=long_description,
long_description_content_type="text/markdown",
url=f"https://github.com/xsthunder/{project_name}",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
entry_points = {
"console_scripts": [
"nb2py = xs_lib.common:fire_main",
]
},
install_requires=install_requires,
python_requires='>=3.4',
)
|
[
"setuptools.find_packages"
] |
[((660, 686), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (684, 686), False, 'import setuptools\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import re
import json
# //////////////// #
# /// JSON I/O /// #
# //////////////// #
def filepath(term, base, schema, service="api"):
"""
generate file path for given parameters
"""
main = "-".join([term, base, service, schema])
main = re.sub("^-", "", main)
main = re.sub("-*$", "", main)
main = re.sub("--", "-", main)
return main + ".json"
def writer(data, file, path="."):
"""
write given dict data to json file at path
"""
if path and not os.path.exists(path):
os.makedirs(path)
out = os.path.join(path, file)
with open(out, 'w', encoding="utf-8") as f:
s = json.dumps(data, ensure_ascii=False, indent=2)
f.write(s)
def reader(path):
"""
read json file from given path
"""
result = {}
with open(path, 'r', encoding='utf-8') as f:
result = json.load(f)
return result
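# --- Editor's usage sketch (not part of the original module) ---
# A minimal round trip through the helpers above; the term/base/schema values and
# the "output" directory are made-up examples.
if __name__ == "__main__":
    name = filepath("water", "chebi", "compound")  # -> "water-chebi-api-compound.json"
    writer({"term": "water", "id": 1}, name, path="output")
    print(reader(os.path.join("output", name)))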
|
[
"json.load",
"os.makedirs",
"os.path.join",
"os.path.exists",
"json.dumps",
"re.sub"
] |
[((318, 340), 're.sub', 're.sub', (['"""^-"""', '""""""', 'main'], {}), "('^-', '', main)\n", (324, 340), False, 'import re\n'), ((352, 375), 're.sub', 're.sub', (['"""-*$"""', '""""""', 'main'], {}), "('-*$', '', main)\n", (358, 375), False, 'import re\n'), ((387, 410), 're.sub', 're.sub', (['"""--"""', '"""-"""', 'main'], {}), "('--', '-', main)\n", (393, 410), False, 'import re\n'), ((614, 638), 'os.path.join', 'os.path.join', (['path', 'file'], {}), '(path, file)\n', (626, 638), False, 'import os\n'), ((586, 603), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (597, 603), False, 'import os\n'), ((699, 745), 'json.dumps', 'json.dumps', (['data'], {'ensure_ascii': '(False)', 'indent': '(2)'}), '(data, ensure_ascii=False, indent=2)\n', (709, 745), False, 'import json\n'), ((918, 930), 'json.load', 'json.load', (['f'], {}), '(f)\n', (927, 930), False, 'import json\n'), ((556, 576), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (570, 576), False, 'import os\n')]
|
import os
import unittest
from datetime import datetime
from collections import OrderedDict
from sqlalchemy import TEXT, BIGINT
from sqlalchemy.exc import IntegrityError, SQLAlchemyError, ArgumentError
from dataset import connect, chunked
from .sample_data import TEST_DATA, TEST_CITY_1
class DatabaseTestCase(unittest.TestCase):
def setUp(self):
self.db = connect()
self.tbl = self.db["weather"]
self.tbl.insert_many(TEST_DATA)
def tearDown(self):
for table in self.db.tables:
self.db[table].drop()
def test_valid_database_url(self):
assert self.db.url, os.environ["DATABASE_URL"]
def test_database_url_query_string(self):
db = connect("sqlite:///:memory:/?cached_statements=1")
assert "cached_statements" in db.url, db.url
def test_tables(self):
assert self.db.tables == ["weather"], self.db.tables
def test_contains(self):
assert "weather" in self.db, self.db.tables
def test_create_table(self):
table = self.db["foo"]
assert table.table.exists()
assert len(table.table.columns) == 1, table.table.columns
assert "id" in table.table.c, table.table.c
def test_create_table_no_ids(self):
if "mysql" in self.db.engine.dialect.dbapi.__name__:
return
if "sqlite" in self.db.engine.dialect.dbapi.__name__:
return
table = self.db.create_table("foo_no_id", primary_id=False)
assert table.table.exists()
assert len(table.table.columns) == 0, table.table.columns
def test_create_table_custom_id1(self):
pid = "string_id"
table = self.db.create_table("foo2", pid, self.db.types.string(255))
assert table.table.exists()
assert len(table.table.columns) == 1, table.table.columns
assert pid in table.table.c, table.table.c
table.insert({pid: "foobar"})
assert table.find_one(string_id="foobar")[pid] == "foobar"
def test_create_table_custom_id2(self):
pid = "string_id"
table = self.db.create_table("foo3", pid, self.db.types.string(50))
assert table.table.exists()
assert len(table.table.columns) == 1, table.table.columns
assert pid in table.table.c, table.table.c
table.insert({pid: "foobar"})
assert table.find_one(string_id="foobar")[pid] == "foobar"
def test_create_table_custom_id3(self):
pid = "int_id"
table = self.db.create_table("foo4", primary_id=pid)
assert table.table.exists()
assert len(table.table.columns) == 1, table.table.columns
assert pid in table.table.c, table.table.c
table.insert({pid: 123})
table.insert({pid: 124})
assert table.find_one(int_id=123)[pid] == 123
assert table.find_one(int_id=124)[pid] == 124
self.assertRaises(IntegrityError, lambda: table.insert({pid: 123}))
def test_create_table_shorthand1(self):
pid = "int_id"
table = self.db.get_table("foo5", pid)
assert table.table.exists
assert len(table.table.columns) == 1, table.table.columns
assert pid in table.table.c, table.table.c
table.insert({"int_id": 123})
table.insert({"int_id": 124})
assert table.find_one(int_id=123)["int_id"] == 123
assert table.find_one(int_id=124)["int_id"] == 124
self.assertRaises(IntegrityError, lambda: table.insert({"int_id": 123}))
def test_create_table_shorthand2(self):
pid = "string_id"
table = self.db.get_table(
"foo6", primary_id=pid, primary_type=self.db.types.string(255)
)
assert table.table.exists
assert len(table.table.columns) == 1, table.table.columns
assert pid in table.table.c, table.table.c
table.insert({"string_id": "foobar"})
assert table.find_one(string_id="foobar")["string_id"] == "foobar"
def test_with(self):
init_length = len(self.db["weather"])
with self.assertRaises(ValueError):
with self.db as tx:
tx["weather"].insert(
{
"date": datetime(2011, 1, 1),
"temperature": 1,
"place": "tmp_place",
}
)
raise ValueError()
assert len(self.db["weather"]) == init_length
def test_invalid_values(self):
if "mysql" in self.db.engine.dialect.dbapi.__name__:
# WARNING: mysql seems to be doing some weird type casting
# upon insert. The mysql-python driver is not affected but
# it isn't compatible with Python 3
# Conclusion: use postgresql.
return
with self.assertRaises(SQLAlchemyError):
tbl = self.db["weather"]
tbl.insert(
{"date": True, "temperature": "wrong_value", "place": "tmp_place"}
)
def test_load_table(self):
tbl = self.db.load_table("weather")
assert tbl.table.name == self.tbl.table.name
def test_query(self):
r = self.db.query("SELECT COUNT(*) AS num FROM weather").next()
assert r["num"] == len(TEST_DATA), r
def test_table_cache_updates(self):
tbl1 = self.db.get_table("people")
data = OrderedDict([("first_name", "John"), ("last_name", "Smith")])
tbl1.insert(data)
data["id"] = 1
tbl2 = self.db.get_table("people")
assert dict(tbl2.all().next()) == dict(data), (tbl2.all().next(), data)
class TableTestCase(unittest.TestCase):
def setUp(self):
self.db = connect()
self.tbl = self.db["weather"]
for row in TEST_DATA:
self.tbl.insert(row)
def tearDown(self):
self.tbl.drop()
def test_insert(self):
assert len(self.tbl) == len(TEST_DATA), len(self.tbl)
last_id = self.tbl.insert(
{"date": datetime(2011, 1, 2), "temperature": -10, "place": "Berlin"}
)
assert len(self.tbl) == len(TEST_DATA) + 1, len(self.tbl)
assert self.tbl.find_one(id=last_id)["place"] == "Berlin"
def test_insert_ignore(self):
self.tbl.insert_ignore(
{"date": datetime(2011, 1, 2), "temperature": -10, "place": "Berlin"},
["place"],
)
assert len(self.tbl) == len(TEST_DATA) + 1, len(self.tbl)
self.tbl.insert_ignore(
{"date": datetime(2011, 1, 2), "temperature": -10, "place": "Berlin"},
["place"],
)
assert len(self.tbl) == len(TEST_DATA) + 1, len(self.tbl)
def test_insert_ignore_all_key(self):
for i in range(0, 4):
self.tbl.insert_ignore(
{"date": datetime(2011, 1, 2), "temperature": -10, "place": "Berlin"},
["date", "temperature", "place"],
)
assert len(self.tbl) == len(TEST_DATA) + 1, len(self.tbl)
def test_insert_json(self):
last_id = self.tbl.insert(
{
"date": datetime(2011, 1, 2),
"temperature": -10,
"place": "Berlin",
"info": {
"currency": "EUR",
"language": "German",
"population": 3292365,
},
}
)
assert len(self.tbl) == len(TEST_DATA) + 1, len(self.tbl)
assert self.tbl.find_one(id=last_id)["place"] == "Berlin"
def test_upsert(self):
self.tbl.upsert(
{"date": datetime(2011, 1, 2), "temperature": -10, "place": "Berlin"},
["place"],
)
assert len(self.tbl) == len(TEST_DATA) + 1, len(self.tbl)
self.tbl.upsert(
{"date": datetime(2011, 1, 2), "temperature": -10, "place": "Berlin"},
["place"],
)
assert len(self.tbl) == len(TEST_DATA) + 1, len(self.tbl)
def test_upsert_single_column(self):
table = self.db["banana_single_col"]
table.upsert({"color": "Yellow"}, ["color"])
assert len(table) == 1, len(table)
table.upsert({"color": "Yellow"}, ["color"])
assert len(table) == 1, len(table)
def test_upsert_all_key(self):
assert len(self.tbl) == len(TEST_DATA), len(self.tbl)
for i in range(0, 2):
self.tbl.upsert(
{"date": datetime(2011, 1, 2), "temperature": -10, "place": "Berlin"},
["date", "temperature", "place"],
)
assert len(self.tbl) == len(TEST_DATA) + 1, len(self.tbl)
def test_upsert_id(self):
table = self.db["banana_with_id"]
data = dict(id=10, title="I am a banana!")
table.upsert(data, ["id"])
assert len(table) == 1, len(table)
def test_update_while_iter(self):
for row in self.tbl:
row["foo"] = "bar"
self.tbl.update(row, ["place", "date"])
assert len(self.tbl) == len(TEST_DATA), len(self.tbl)
def test_weird_column_names(self):
with self.assertRaises(ValueError):
self.tbl.insert(
{
"date": datetime(2011, 1, 2),
"temperature": -10,
"foo.bar": "Berlin",
"qux.bar": "Huhu",
}
)
def test_cased_column_names(self):
tbl = self.db["cased_column_names"]
tbl.insert({"place": "Berlin"})
tbl.insert({"Place": "Berlin"})
tbl.insert({"PLACE ": "Berlin"})
assert len(tbl.columns) == 2, tbl.columns
assert len(list(tbl.find(Place="Berlin"))) == 3
assert len(list(tbl.find(place="Berlin"))) == 3
assert len(list(tbl.find(PLACE="Berlin"))) == 3
def test_invalid_column_names(self):
tbl = self.db["weather"]
with self.assertRaises(ValueError):
tbl.insert({None: "banana"})
with self.assertRaises(ValueError):
tbl.insert({"": "banana"})
with self.assertRaises(ValueError):
tbl.insert({"-": "banana"})
def test_delete(self):
self.tbl.insert(
{"date": datetime(2011, 1, 2), "temperature": -10, "place": "Berlin"}
)
original_count = len(self.tbl)
assert len(self.tbl) == len(TEST_DATA) + 1, len(self.tbl)
# Test bad use of API
with self.assertRaises(ArgumentError):
self.tbl.delete({"place": "Berlin"})
assert len(self.tbl) == original_count, len(self.tbl)
assert self.tbl.delete(place="Berlin") is True, "should return 1"
assert len(self.tbl) == len(TEST_DATA), len(self.tbl)
assert self.tbl.delete() is True, "should return non zero"
assert len(self.tbl) == 0, len(self.tbl)
def test_repr(self):
assert (
repr(self.tbl) == "<Table(weather)>"
), "the representation should be <Table(weather)>"
def test_delete_nonexist_entry(self):
assert (
self.tbl.delete(place="Berlin") is False
), "entry not exist, should fail to delete"
def test_find_one(self):
self.tbl.insert(
{"date": datetime(2011, 1, 2), "temperature": -10, "place": "Berlin"}
)
d = self.tbl.find_one(place="Berlin")
assert d["temperature"] == -10, d
d = self.tbl.find_one(place="Atlantis")
assert d is None, d
def test_count(self):
assert len(self.tbl) == 6, len(self.tbl)
length = self.tbl.count(place=TEST_CITY_1)
assert length == 3, length
def test_find(self):
ds = list(self.tbl.find(place=TEST_CITY_1))
assert len(ds) == 3, ds
ds = list(self.tbl.find(place=TEST_CITY_1, _limit=2))
assert len(ds) == 2, ds
ds = list(self.tbl.find(place=TEST_CITY_1, _limit=2, _step=1))
assert len(ds) == 2, ds
ds = list(self.tbl.find(place=TEST_CITY_1, _limit=1, _step=2))
assert len(ds) == 1, ds
ds = list(self.tbl.find(_step=2))
assert len(ds) == len(TEST_DATA), ds
ds = list(self.tbl.find(order_by=["temperature"]))
assert ds[0]["temperature"] == -1, ds
ds = list(self.tbl.find(order_by=["-temperature"]))
assert ds[0]["temperature"] == 8, ds
ds = list(self.tbl.find(self.tbl.table.columns.temperature > 4))
assert len(ds) == 3, ds
def test_find_dsl(self):
ds = list(self.tbl.find(place={"like": "%lw%"}))
assert len(ds) == 3, ds
ds = list(self.tbl.find(temperature={">": 5}))
assert len(ds) == 2, ds
ds = list(self.tbl.find(temperature={">=": 5}))
assert len(ds) == 3, ds
ds = list(self.tbl.find(temperature={"<": 0}))
assert len(ds) == 1, ds
ds = list(self.tbl.find(temperature={"<=": 0}))
assert len(ds) == 2, ds
ds = list(self.tbl.find(temperature={"!=": -1}))
assert len(ds) == 5, ds
ds = list(self.tbl.find(temperature={"between": [5, 8]}))
assert len(ds) == 3, ds
ds = list(self.tbl.find(place={"=": "G€lway"}))
assert len(ds) == 3, ds
ds = list(self.tbl.find(place={"ilike": "%LwAy"}))
assert len(ds) == 3, ds
def test_offset(self):
ds = list(self.tbl.find(place=TEST_CITY_1, _offset=1))
assert len(ds) == 2, ds
ds = list(self.tbl.find(place=TEST_CITY_1, _limit=2, _offset=2))
assert len(ds) == 1, ds
def test_streamed(self):
ds = list(self.tbl.find(place=TEST_CITY_1, _streamed=True, _step=1))
assert len(ds) == 3, len(ds)
for row in self.tbl.find(place=TEST_CITY_1, _streamed=True, _step=1):
row["temperature"] = -1
self.tbl.update(row, ["id"])
def test_distinct(self):
x = list(self.tbl.distinct("place"))
assert len(x) == 2, x
x = list(self.tbl.distinct("place", "date"))
assert len(x) == 6, x
x = list(
self.tbl.distinct(
"place",
"date",
self.tbl.table.columns.date >= datetime(2011, 1, 2, 0, 0),
)
)
assert len(x) == 4, x
x = list(self.tbl.distinct("temperature", place="B€rkeley"))
assert len(x) == 3, x
x = list(self.tbl.distinct("temperature", place=["B€rkeley", "G€lway"]))
assert len(x) == 6, x
def test_insert_many(self):
data = TEST_DATA * 100
self.tbl.insert_many(data, chunk_size=13)
assert len(self.tbl) == len(data) + 6, (len(self.tbl), len(data))
def test_chunked_insert(self):
data = TEST_DATA * 100
with chunked.ChunkedInsert(self.tbl) as chunk_tbl:
for item in data:
chunk_tbl.insert(item)
assert len(self.tbl) == len(data) + 6, (len(self.tbl), len(data))
def test_chunked_insert_callback(self):
data = TEST_DATA * 100
N = 0
def callback(queue):
nonlocal N
N += len(queue)
with chunked.ChunkedInsert(self.tbl, callback=callback) as chunk_tbl:
for item in data:
chunk_tbl.insert(item)
assert len(data) == N
assert len(self.tbl) == len(data) + 6
def test_update_many(self):
tbl = self.db["update_many_test"]
tbl.insert_many([dict(temp=10), dict(temp=20), dict(temp=30)])
tbl.update_many([dict(id=1, temp=50), dict(id=3, temp=50)], "id")
# Ensure data has been updated.
assert tbl.find_one(id=1)["temp"] == tbl.find_one(id=3)["temp"]
def test_chunked_update(self):
tbl = self.db["update_many_test"]
tbl.insert_many(
[
dict(temp=10, location="asdf"),
dict(temp=20, location="qwer"),
dict(temp=30, location="asdf"),
]
)
chunked_tbl = chunked.ChunkedUpdate(tbl, "id")
chunked_tbl.update(dict(id=1, temp=50))
chunked_tbl.update(dict(id=2, location="asdf"))
chunked_tbl.update(dict(id=3, temp=50))
chunked_tbl.flush()
# Ensure data has been updated.
assert tbl.find_one(id=1)["temp"] == tbl.find_one(id=3)["temp"] == 50
assert (
tbl.find_one(id=2)["location"] == tbl.find_one(id=3)["location"] == "asdf"
) # noqa
def test_upsert_many(self):
# Also tests updating on records with different attributes
tbl = self.db["upsert_many_test"]
W = 100
tbl.upsert_many([dict(age=10), dict(weight=W)], "id")
assert tbl.find_one(id=1)["age"] == 10
tbl.upsert_many([dict(id=1, age=70), dict(id=2, weight=W / 2)], "id")
assert tbl.find_one(id=2)["weight"] == W / 2
def test_drop_operations(self):
assert self.tbl._table is not None, "table shouldn't be dropped yet"
self.tbl.drop()
assert self.tbl._table is None, "table should be dropped now"
assert list(self.tbl.all()) == [], self.tbl.all()
assert self.tbl.count() == 0, self.tbl.count()
def test_table_drop(self):
assert "weather" in self.db
self.db["weather"].drop()
assert "weather" not in self.db
def test_table_drop_then_create(self):
assert "weather" in self.db
self.db["weather"].drop()
assert "weather" not in self.db
self.db["weather"].insert({"foo": "bar"})
def test_columns(self):
cols = self.tbl.columns
assert len(list(cols)) == 4, "column count mismatch"
assert "date" in cols and "temperature" in cols and "place" in cols
def test_drop_column(self):
try:
self.tbl.drop_column("date")
assert "date" not in self.tbl.columns
except RuntimeError:
pass
def test_iter(self):
c = 0
for row in self.tbl:
c += 1
assert c == len(self.tbl)
def test_update(self):
date = datetime(2011, 1, 2)
res = self.tbl.update(
{"date": date, "temperature": -10, "place": TEST_CITY_1}, ["place", "date"]
)
assert res, "update should return True"
m = self.tbl.find_one(place=TEST_CITY_1, date=date)
assert m["temperature"] == -10, (
"new temp. should be -10 but is %d" % m["temperature"]
)
def test_create_column(self):
tbl = self.tbl
flt = self.db.types.float
tbl.create_column("foo", flt)
assert "foo" in tbl.table.c, tbl.table.c
assert isinstance(tbl.table.c["foo"].type, flt), tbl.table.c["foo"].type
assert "foo" in tbl.columns, tbl.columns
def test_ensure_column(self):
tbl = self.tbl
flt = self.db.types.float
tbl.create_column_by_example("foo", 0.1)
assert "foo" in tbl.table.c, tbl.table.c
assert isinstance(tbl.table.c["foo"].type, flt), tbl.table.c["bar"].type
tbl.create_column_by_example("bar", 1)
assert "bar" in tbl.table.c, tbl.table.c
assert isinstance(tbl.table.c["bar"].type, BIGINT), tbl.table.c["bar"].type
tbl.create_column_by_example("pippo", "test")
assert "pippo" in tbl.table.c, tbl.table.c
assert isinstance(tbl.table.c["pippo"].type, TEXT), tbl.table.c["pippo"].type
tbl.create_column_by_example("bigbar", 11111111111)
assert "bigbar" in tbl.table.c, tbl.table.c
assert isinstance(tbl.table.c["bigbar"].type, BIGINT), tbl.table.c[
"bigbar"
].type
tbl.create_column_by_example("littlebar", -11111111111)
assert "littlebar" in tbl.table.c, tbl.table.c
assert isinstance(tbl.table.c["littlebar"].type, BIGINT), tbl.table.c[
"littlebar"
].type
def test_key_order(self):
res = self.db.query("SELECT temperature, place FROM weather LIMIT 1")
keys = list(res.next().keys())
assert keys[0] == "temperature"
assert keys[1] == "place"
def test_empty_query(self):
empty = list(self.tbl.find(place="not in data"))
assert len(empty) == 0, empty
class Constructor(dict):
"""Very simple low-functionality extension to ``dict`` to
provide attribute access to dictionary contents"""
def __getattr__(self, name):
return self[name]
class RowTypeTestCase(unittest.TestCase):
def setUp(self):
self.db = connect(row_type=Constructor)
self.tbl = self.db["weather"]
for row in TEST_DATA:
self.tbl.insert(row)
def tearDown(self):
for table in self.db.tables:
self.db[table].drop()
def test_find_one(self):
self.tbl.insert(
{"date": datetime(2011, 1, 2), "temperature": -10, "place": "Berlin"}
)
d = self.tbl.find_one(place="Berlin")
assert d["temperature"] == -10, d
assert d.temperature == -10, d
d = self.tbl.find_one(place="Atlantis")
assert d is None, d
def test_find(self):
ds = list(self.tbl.find(place=TEST_CITY_1))
assert len(ds) == 3, ds
for item in ds:
assert isinstance(item, Constructor), item
ds = list(self.tbl.find(place=TEST_CITY_1, _limit=2))
assert len(ds) == 2, ds
for item in ds:
assert isinstance(item, Constructor), item
def test_distinct(self):
x = list(self.tbl.distinct("place"))
assert len(x) == 2, x
for item in x:
assert isinstance(item, Constructor), item
x = list(self.tbl.distinct("place", "date"))
assert len(x) == 6, x
for item in x:
assert isinstance(item, Constructor), item
def test_iter(self):
c = 0
for row in self.tbl:
c += 1
assert isinstance(row, Constructor), row
assert c == len(self.tbl)
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"dataset.chunked.ChunkedInsert",
"dataset.chunked.ChunkedUpdate",
"dataset.connect",
"datetime.datetime",
"collections.OrderedDict"
] |
[((21824, 21839), 'unittest.main', 'unittest.main', ([], {}), '()\n', (21837, 21839), False, 'import unittest\n'), ((373, 382), 'dataset.connect', 'connect', ([], {}), '()\n', (380, 382), False, 'from dataset import connect, chunked\n'), ((712, 762), 'dataset.connect', 'connect', (['"""sqlite:///:memory:/?cached_statements=1"""'], {}), "('sqlite:///:memory:/?cached_statements=1')\n", (719, 762), False, 'from dataset import connect, chunked\n'), ((5335, 5396), 'collections.OrderedDict', 'OrderedDict', (["[('first_name', 'John'), ('last_name', 'Smith')]"], {}), "([('first_name', 'John'), ('last_name', 'Smith')])\n", (5346, 5396), False, 'from collections import OrderedDict\n'), ((5650, 5659), 'dataset.connect', 'connect', ([], {}), '()\n', (5657, 5659), False, 'from dataset import connect, chunked\n'), ((15840, 15872), 'dataset.chunked.ChunkedUpdate', 'chunked.ChunkedUpdate', (['tbl', '"""id"""'], {}), "(tbl, 'id')\n", (15861, 15872), False, 'from dataset import connect, chunked\n'), ((17907, 17927), 'datetime.datetime', 'datetime', (['(2011)', '(1)', '(2)'], {}), '(2011, 1, 2)\n', (17915, 17927), False, 'from datetime import datetime\n'), ((20333, 20362), 'dataset.connect', 'connect', ([], {'row_type': 'Constructor'}), '(row_type=Constructor)\n', (20340, 20362), False, 'from dataset import connect, chunked\n'), ((14615, 14646), 'dataset.chunked.ChunkedInsert', 'chunked.ChunkedInsert', (['self.tbl'], {}), '(self.tbl)\n', (14636, 14646), False, 'from dataset import connect, chunked\n'), ((14989, 15039), 'dataset.chunked.ChunkedInsert', 'chunked.ChunkedInsert', (['self.tbl'], {'callback': 'callback'}), '(self.tbl, callback=callback)\n', (15010, 15039), False, 'from dataset import connect, chunked\n'), ((5956, 5976), 'datetime.datetime', 'datetime', (['(2011)', '(1)', '(2)'], {}), '(2011, 1, 2)\n', (5964, 5976), False, 'from datetime import datetime\n'), ((6247, 6267), 'datetime.datetime', 'datetime', (['(2011)', '(1)', '(2)'], {}), '(2011, 1, 2)\n', (6255, 6267), False, 'from datetime import datetime\n'), ((6461, 6481), 'datetime.datetime', 'datetime', (['(2011)', '(1)', '(2)'], {}), '(2011, 1, 2)\n', (6469, 6481), False, 'from datetime import datetime\n'), ((7054, 7074), 'datetime.datetime', 'datetime', (['(2011)', '(1)', '(2)'], {}), '(2011, 1, 2)\n', (7062, 7074), False, 'from datetime import datetime\n'), ((7546, 7566), 'datetime.datetime', 'datetime', (['(2011)', '(1)', '(2)'], {}), '(2011, 1, 2)\n', (7554, 7566), False, 'from datetime import datetime\n'), ((7753, 7773), 'datetime.datetime', 'datetime', (['(2011)', '(1)', '(2)'], {}), '(2011, 1, 2)\n', (7761, 7773), False, 'from datetime import datetime\n'), ((10141, 10161), 'datetime.datetime', 'datetime', (['(2011)', '(1)', '(2)'], {}), '(2011, 1, 2)\n', (10149, 10161), False, 'from datetime import datetime\n'), ((11150, 11170), 'datetime.datetime', 'datetime', (['(2011)', '(1)', '(2)'], {}), '(2011, 1, 2)\n', (11158, 11170), False, 'from datetime import datetime\n'), ((20636, 20656), 'datetime.datetime', 'datetime', (['(2011)', '(1)', '(2)'], {}), '(2011, 1, 2)\n', (20644, 20656), False, 'from datetime import datetime\n'), ((6756, 6776), 'datetime.datetime', 'datetime', (['(2011)', '(1)', '(2)'], {}), '(2011, 1, 2)\n', (6764, 6776), False, 'from datetime import datetime\n'), ((8375, 8395), 'datetime.datetime', 'datetime', (['(2011)', '(1)', '(2)'], {}), '(2011, 1, 2)\n', (8383, 8395), False, 'from datetime import datetime\n'), ((9141, 9161), 'datetime.datetime', 'datetime', (['(2011)', '(1)', '(2)'], {}), '(2011, 1, 2)\n', (9149, 
9161), False, 'from datetime import datetime\n'), ((14054, 14080), 'datetime.datetime', 'datetime', (['(2011)', '(1)', '(2)', '(0)', '(0)'], {}), '(2011, 1, 2, 0, 0)\n', (14062, 14080), False, 'from datetime import datetime\n'), ((4169, 4189), 'datetime.datetime', 'datetime', (['(2011)', '(1)', '(1)'], {}), '(2011, 1, 1)\n', (4177, 4189), False, 'from datetime import datetime\n')]
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='simplecsv',
    version='0.2',
    author='<NAME>',
    author_email='<EMAIL>',
    description='A tool for students working with CSVs',
    long_description=long_description,
long_description_content_type="text/markdown",
url="http://stanford.edu/~cpiech/",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
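# Illustrative usage sketch (not part of the original file): source and wheel
# distributions for this package would typically be built with, e.g.:
#   python setup.py sdist bdist_wheel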
|
[
"setuptools.find_packages"
] |
[((406, 432), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (430, 432), False, 'import setuptools\n')]
|
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2017 Vector Creations Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for interacting with Identity Servers"""
import logging
import urllib.parse
from typing import TYPE_CHECKING, Awaitable, Callable, Dict, List, Optional, Tuple
from synapse.api.errors import (
CodeMessageException,
Codes,
HttpResponseException,
SynapseError,
)
from synapse.api.ratelimiting import Ratelimiter
from synapse.config.emailconfig import ThreepidBehaviour
from synapse.http import RequestTimedOutError
from synapse.http.client import SimpleHttpClient
from synapse.http.site import SynapseRequest
from synapse.types import JsonDict, Requester
from synapse.util import json_decoder
from synapse.util.hash import sha256_and_url_safe_base64
from synapse.util.stringutils import (
assert_valid_client_secret,
random_string,
valid_id_server_location,
)
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
id_server_scheme = "https://"
class IdentityHandler:
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastore()
# An HTTP client for contacting trusted URLs.
self.http_client = SimpleHttpClient(hs)
# An HTTP client for contacting identity servers specified by clients.
self.blacklisting_http_client = SimpleHttpClient(
hs, ip_blacklist=hs.config.server.federation_ip_range_blacklist
)
self.federation_http_client = hs.get_federation_http_client()
self.hs = hs
self._web_client_location = hs.config.email.invite_client_location
# Ratelimiters for `/requestToken` endpoints.
self._3pid_validation_ratelimiter_ip = Ratelimiter(
store=self.store,
clock=hs.get_clock(),
rate_hz=hs.config.ratelimiting.rc_3pid_validation.per_second,
burst_count=hs.config.ratelimiting.rc_3pid_validation.burst_count,
)
self._3pid_validation_ratelimiter_address = Ratelimiter(
store=self.store,
clock=hs.get_clock(),
rate_hz=hs.config.ratelimiting.rc_3pid_validation.per_second,
burst_count=hs.config.ratelimiting.rc_3pid_validation.burst_count,
)
async def ratelimit_request_token_requests(
self,
request: SynapseRequest,
medium: str,
address: str,
) -> None:
"""Used to ratelimit requests to `/requestToken` by IP and address.
Args:
request: The associated request
medium: The type of threepid, e.g. "msisdn" or "email"
address: The actual threepid ID, e.g. the phone number or email address
"""
await self._3pid_validation_ratelimiter_ip.ratelimit(
None, (medium, request.getClientIP())
)
await self._3pid_validation_ratelimiter_address.ratelimit(
None, (medium, address)
)
async def threepid_from_creds(
self, id_server: str, creds: Dict[str, str]
) -> Optional[JsonDict]:
"""
Retrieve and validate a threepid identifier from a "credentials" dictionary against a
given identity server
Args:
id_server: The identity server to validate 3PIDs against. Must be a
complete URL including the protocol (http(s)://)
creds: Dictionary containing the following keys:
* client_secret|clientSecret: A unique secret str provided by the client
* sid: The ID of the validation session
Returns:
A dictionary consisting of response params to the /getValidated3pid
endpoint of the Identity Service API, or None if the threepid was not found
"""
client_secret = creds.get("client_secret") or creds.get("clientSecret")
if not client_secret:
raise SynapseError(
400, "Missing param client_secret in creds", errcode=Codes.MISSING_PARAM
)
assert_valid_client_secret(client_secret)
session_id = creds.get("sid")
if not session_id:
raise SynapseError(
400, "Missing param session_id in creds", errcode=Codes.MISSING_PARAM
)
query_params = {"sid": session_id, "client_secret": client_secret}
url = id_server + "/_matrix/identity/api/v1/3pid/getValidated3pid"
try:
data = await self.http_client.get_json(url, query_params)
except RequestTimedOutError:
raise SynapseError(500, "Timed out contacting identity server")
except HttpResponseException as e:
logger.info(
"%s returned %i for threepid validation for: %s",
id_server,
e.code,
creds,
)
return None
# Old versions of Sydent return a 200 http code even on a failed validation
# check. Thus, in addition to the HttpResponseException check above (which
# checks for non-200 errors), we need to make sure validation_session isn't
# actually an error, identified by the absence of a "medium" key
# See https://github.com/matrix-org/sydent/issues/215 for details
if "medium" in data:
return data
logger.info("%s reported non-validated threepid: %s", id_server, creds)
return None
async def bind_threepid(
self,
client_secret: str,
sid: str,
mxid: str,
id_server: str,
id_access_token: Optional[str] = None,
use_v2: bool = True,
) -> JsonDict:
"""Bind a 3PID to an identity server
Args:
client_secret: A unique secret provided by the client
sid: The ID of the validation session
mxid: The MXID to bind the 3PID to
id_server: The domain of the identity server to query
id_access_token: The access token to authenticate to the identity
server with, if necessary. Required if use_v2 is true
use_v2: Whether to use v2 Identity Service API endpoints. Defaults to True
Raises:
SynapseError: On any of the following conditions
- the supplied id_server is not a valid identity server name
- we failed to contact the supplied identity server
Returns:
The response from the identity server
"""
logger.debug("Proxying threepid bind request for %s to %s", mxid, id_server)
# If an id_access_token is not supplied, force usage of v1
if id_access_token is None:
use_v2 = False
if not valid_id_server_location(id_server):
raise SynapseError(
400,
"id_server must be a valid hostname with optional port and path components",
)
# Decide which API endpoint URLs to use
headers = {}
bind_data = {"sid": sid, "client_secret": client_secret, "mxid": mxid}
if use_v2:
bind_url = "https://%s/_matrix/identity/v2/3pid/bind" % (id_server,)
headers["Authorization"] = create_id_access_token_header(id_access_token) # type: ignore
else:
bind_url = "https://%s/_matrix/identity/api/v1/3pid/bind" % (id_server,)
try:
# Use the blacklisting http client as this call is only to identity servers
# provided by a client
data = await self.blacklisting_http_client.post_json_get_json(
bind_url, bind_data, headers=headers
)
# Remember where we bound the threepid
await self.store.add_user_bound_threepid(
user_id=mxid,
medium=data["medium"],
address=data["address"],
id_server=id_server,
)
return data
except HttpResponseException as e:
if e.code != 404 or not use_v2:
logger.error("3PID bind failed with Matrix error: %r", e)
raise e.to_synapse_error()
except RequestTimedOutError:
raise SynapseError(500, "Timed out contacting identity server")
except CodeMessageException as e:
data = json_decoder.decode(e.msg) # XXX WAT?
return data
logger.info("Got 404 when POSTing JSON %s, falling back to v1 URL", bind_url)
res = await self.bind_threepid(
client_secret, sid, mxid, id_server, id_access_token, use_v2=False
)
return res
async def try_unbind_threepid(self, mxid: str, threepid: dict) -> bool:
"""Attempt to remove a 3PID from an identity server, or if one is not provided, all
identity servers we're aware the binding is present on
Args:
mxid: Matrix user ID of binding to be removed
threepid: Dict with medium & address of binding to be
removed, and an optional id_server.
Raises:
SynapseError: If we failed to contact the identity server
Returns:
True on success, otherwise False if the identity
server doesn't support unbinding (or no identity server found to
contact).
"""
if threepid.get("id_server"):
id_servers = [threepid["id_server"]]
else:
id_servers = await self.store.get_id_servers_user_bound(
user_id=mxid, medium=threepid["medium"], address=threepid["address"]
)
# We don't know where to unbind, so we don't have a choice but to return
if not id_servers:
return False
changed = True
for id_server in id_servers:
changed &= await self.try_unbind_threepid_with_id_server(
mxid, threepid, id_server
)
return changed
async def try_unbind_threepid_with_id_server(
self, mxid: str, threepid: dict, id_server: str
) -> bool:
"""Removes a binding from an identity server
Args:
mxid: Matrix user ID of binding to be removed
threepid: Dict with medium & address of binding to be removed
id_server: Identity server to unbind from
Raises:
SynapseError: On any of the following conditions
- the supplied id_server is not a valid identity server name
- we failed to contact the supplied identity server
Returns:
True on success, otherwise False if the identity
server doesn't support unbinding
"""
if not valid_id_server_location(id_server):
raise SynapseError(
400,
"id_server must be a valid hostname with optional port and path components",
)
url = "https://%s/_matrix/identity/api/v1/3pid/unbind" % (id_server,)
url_bytes = b"/_matrix/identity/api/v1/3pid/unbind"
content = {
"mxid": mxid,
"threepid": {"medium": threepid["medium"], "address": threepid["address"]},
}
# we abuse the federation http client to sign the request, but we have to send it
# using the normal http client since we don't want the SRV lookup and want normal
# 'browser-like' HTTPS.
auth_headers = self.federation_http_client.build_auth_headers(
destination=None,
method=b"POST",
url_bytes=url_bytes,
content=content,
destination_is=id_server.encode("ascii"),
)
headers = {b"Authorization": auth_headers}
try:
# Use the blacklisting http client as this call is only to identity servers
# provided by a client
await self.blacklisting_http_client.post_json_get_json(
url, content, headers
)
changed = True
except HttpResponseException as e:
changed = False
if e.code in (400, 404, 501):
# The remote server probably doesn't support unbinding (yet)
logger.warning("Received %d response while unbinding threepid", e.code)
else:
logger.error("Failed to unbind threepid on identity server: %s", e)
raise SynapseError(500, "Failed to contact identity server")
except RequestTimedOutError:
raise SynapseError(500, "Timed out contacting identity server")
await self.store.remove_user_bound_threepid(
user_id=mxid,
medium=threepid["medium"],
address=threepid["address"],
id_server=id_server,
)
return changed
async def send_threepid_validation(
self,
email_address: str,
client_secret: str,
send_attempt: int,
send_email_func: Callable[[str, str, str, str], Awaitable],
next_link: Optional[str] = None,
) -> str:
"""Send a threepid validation email for password reset or
registration purposes
Args:
email_address: The user's email address
client_secret: The provided client secret
send_attempt: Which send attempt this is
send_email_func: A function that takes an email address, token,
client_secret and session_id, sends an email
and returns an Awaitable.
next_link: The URL to redirect the user to after validation
Returns:
The new session_id upon success
Raises:
            SynapseError: if an error occurred when sending the email
"""
# Check that this email/client_secret/send_attempt combo is new or
# greater than what we've seen previously
session = await self.store.get_threepid_validation_session(
"email", client_secret, address=email_address, validated=False
)
# Check to see if a session already exists and that it is not yet
# marked as validated
if session and session.get("validated_at") is None:
session_id = session["session_id"]
last_send_attempt = session["last_send_attempt"]
# Check that the send_attempt is higher than previous attempts
if send_attempt <= last_send_attempt:
# If not, just return a success without sending an email
return session_id
else:
            # A non-validated session does not exist yet.
# Generate a session id
session_id = random_string(16)
if next_link:
# Manipulate the next_link to add the sid, because the caller won't get
# it until we send a response, by which time we've sent the mail.
if "?" in next_link:
next_link += "&"
else:
next_link += "?"
next_link += "sid=" + urllib.parse.quote(session_id)
# Generate a new validation token
token = random_string(32)
# Send the mail with the link containing the token, client_secret
# and session_id
try:
await send_email_func(email_address, token, client_secret, session_id)
except Exception:
logger.exception(
"Error sending threepid validation email to %s", email_address
)
raise SynapseError(500, "An error was encountered when sending the email")
token_expires = (
self.hs.get_clock().time_msec()
+ self.hs.config.email.email_validation_token_lifetime
)
await self.store.start_or_continue_validation_session(
"email",
email_address,
session_id,
client_secret,
send_attempt,
next_link,
token,
token_expires,
)
return session_id
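    # Illustrative sketch of the next_link handling above (hypothetical URL): a
    # caller-supplied next_link of "https://client.example/verify?flow=1" would be
    # rewritten to "https://client.example/verify?flow=1&sid=<session_id>" (with
    # the session id percent-encoded) before the validation email is sent.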
async def requestEmailToken(
self,
id_server: str,
email: str,
client_secret: str,
send_attempt: int,
next_link: Optional[str] = None,
) -> JsonDict:
"""
Request an external server send an email on our behalf for the purposes of threepid
validation.
Args:
id_server: The identity server to proxy to
email: The email to send the message to
            client_secret: The unique client_secret sent by the user
send_attempt: Which attempt this is
next_link: A link to redirect the user to once they submit the token
Returns:
The json response body from the server
"""
params = {
"email": email,
"client_secret": client_secret,
"send_attempt": send_attempt,
}
if next_link:
params["next_link"] = next_link
if self.hs.config.email.using_identity_server_from_trusted_list:
# Warn that a deprecated config option is in use
logger.warning(
'The config option "trust_identity_server_for_password_resets" '
'has been replaced by "account_threepid_delegate". '
"Please consult the sample config at docs/sample_config.yaml for "
"details and update your config file."
)
try:
data = await self.http_client.post_json_get_json(
id_server + "/_matrix/identity/api/v1/validate/email/requestToken",
params,
)
return data
except HttpResponseException as e:
logger.info("Proxied requestToken failed: %r", e)
raise e.to_synapse_error()
except RequestTimedOutError:
raise SynapseError(500, "Timed out contacting identity server")
async def requestMsisdnToken(
self,
id_server: str,
country: str,
phone_number: str,
client_secret: str,
send_attempt: int,
next_link: Optional[str] = None,
) -> JsonDict:
"""
Request an external server send an SMS message on our behalf for the purposes of
threepid validation.
Args:
id_server: The identity server to proxy to
country: The country code of the phone number
phone_number: The number to send the message to
            client_secret: The unique client_secret sent by the user
send_attempt: Which attempt this is
next_link: A link to redirect the user to once they submit the token
Returns:
The json response body from the server
"""
params = {
"country": country,
"phone_number": phone_number,
"client_secret": client_secret,
"send_attempt": send_attempt,
}
if next_link:
params["next_link"] = next_link
if self.hs.config.email.using_identity_server_from_trusted_list:
# Warn that a deprecated config option is in use
logger.warning(
'The config option "trust_identity_server_for_password_resets" '
'has been replaced by "account_threepid_delegate". '
"Please consult the sample config at docs/sample_config.yaml for "
"details and update your config file."
)
try:
data = await self.http_client.post_json_get_json(
id_server + "/_matrix/identity/api/v1/validate/msisdn/requestToken",
params,
)
except HttpResponseException as e:
logger.info("Proxied requestToken failed: %r", e)
raise e.to_synapse_error()
except RequestTimedOutError:
raise SynapseError(500, "Timed out contacting identity server")
# It is already checked that public_baseurl is configured since this code
# should only be used if account_threepid_delegate_msisdn is true.
assert self.hs.config.server.public_baseurl
# we need to tell the client to send the token back to us, since it doesn't
# otherwise know where to send it, so add submit_url response parameter
# (see also MSC2078)
data["submit_url"] = (
self.hs.config.server.public_baseurl
+ "_matrix/client/unstable/add_threepid/msisdn/submit_token"
)
return data
async def validate_threepid_session(
self, client_secret: str, sid: str
) -> Optional[JsonDict]:
"""Validates a threepid session with only the client secret and session ID
Tries validating against any configured account_threepid_delegates as well as locally.
Args:
client_secret: A secret provided by the client
sid: The ID of the session
Returns:
The json response if validation was successful, otherwise None
"""
# XXX: We shouldn't need to keep wrapping and unwrapping this value
threepid_creds = {"client_secret": client_secret, "sid": sid}
# We don't actually know which medium this 3PID is. Thus we first assume it's email,
# and if validation fails we try msisdn
validation_session = None
# Try to validate as email
if self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.REMOTE:
# Remote emails will only be used if a valid identity server is provided.
assert (
self.hs.config.registration.account_threepid_delegate_email is not None
)
# Ask our delegated email identity server
validation_session = await self.threepid_from_creds(
self.hs.config.registration.account_threepid_delegate_email,
threepid_creds,
)
elif self.hs.config.email.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
# Get a validated session matching these details
validation_session = await self.store.get_threepid_validation_session(
"email", client_secret, sid=sid, validated=True
)
if validation_session:
return validation_session
# Try to validate as msisdn
if self.hs.config.registration.account_threepid_delegate_msisdn:
# Ask our delegated msisdn identity server
validation_session = await self.threepid_from_creds(
self.hs.config.registration.account_threepid_delegate_msisdn,
threepid_creds,
)
return validation_session
async def proxy_msisdn_submit_token(
self, id_server: str, client_secret: str, sid: str, token: str
) -> JsonDict:
"""Proxy a POST submitToken request to an identity server for verification purposes
Args:
id_server: The identity server URL to contact
client_secret: Secret provided by the client
sid: The ID of the session
token: The verification token
Raises:
SynapseError: If we failed to contact the identity server
Returns:
The response dict from the identity server
"""
body = {"client_secret": client_secret, "sid": sid, "token": token}
try:
return await self.http_client.post_json_get_json(
id_server + "/_matrix/identity/api/v1/validate/msisdn/submitToken",
body,
)
except RequestTimedOutError:
raise SynapseError(500, "Timed out contacting identity server")
except HttpResponseException as e:
logger.warning("Error contacting msisdn account_threepid_delegate: %s", e)
raise SynapseError(400, "Error contacting the identity server")
async def lookup_3pid(
self,
id_server: str,
medium: str,
address: str,
id_access_token: Optional[str] = None,
) -> Optional[str]:
"""Looks up a 3pid in the passed identity server.
Args:
id_server: The server name (including port, if required)
of the identity server to use.
medium: The type of the third party identifier (e.g. "email").
address: The third party identifier (e.g. "<EMAIL>").
id_access_token: The access token to authenticate to the identity
server with
Returns:
the matrix ID of the 3pid, or None if it is not recognized.
"""
if id_access_token is not None:
try:
results = await self._lookup_3pid_v2(
id_server, id_access_token, medium, address
)
return results
except Exception as e:
# Catch HttpResponseExcept for a non-200 response code
# Check if this identity server does not know about v2 lookups
if isinstance(e, HttpResponseException) and e.code == 404:
# This is an old identity server that does not yet support v2 lookups
logger.warning(
"Attempted v2 lookup on v1 identity server %s. Falling "
"back to v1",
id_server,
)
else:
logger.warning("Error when looking up hashing details: %s", e)
return None
return await self._lookup_3pid_v1(id_server, medium, address)
async def _lookup_3pid_v1(
self, id_server: str, medium: str, address: str
) -> Optional[str]:
"""Looks up a 3pid in the passed identity server using v1 lookup.
Args:
id_server: The server name (including port, if required)
of the identity server to use.
medium: The type of the third party identifier (e.g. "email").
address: The third party identifier (e.g. "<EMAIL>").
Returns:
the matrix ID of the 3pid, or None if it is not recognized.
"""
try:
data = await self.blacklisting_http_client.get_json(
"%s%s/_matrix/identity/api/v1/lookup" % (id_server_scheme, id_server),
{"medium": medium, "address": address},
)
if "mxid" in data:
# note: we used to verify the identity server's signature here, but no longer
# require or validate it. See the following for context:
# https://github.com/matrix-org/synapse/issues/5253#issuecomment-666246950
return data["mxid"]
except RequestTimedOutError:
raise SynapseError(500, "Timed out contacting identity server")
except OSError as e:
logger.warning("Error from v1 identity server lookup: %s" % (e,))
return None
async def _lookup_3pid_v2(
self, id_server: str, id_access_token: str, medium: str, address: str
) -> Optional[str]:
"""Looks up a 3pid in the passed identity server using v2 lookup.
Args:
id_server: The server name (including port, if required)
of the identity server to use.
id_access_token: The access token to authenticate to the identity server with
medium: The type of the third party identifier (e.g. "email").
address: The third party identifier (e.g. "<EMAIL>").
Returns:
the matrix ID of the 3pid, or None if it is not recognised.
"""
# Check what hashing details are supported by this identity server
try:
hash_details = await self.blacklisting_http_client.get_json(
"%s%s/_matrix/identity/v2/hash_details" % (id_server_scheme, id_server),
{"access_token": id_access_token},
)
except RequestTimedOutError:
raise SynapseError(500, "Timed out contacting identity server")
if not isinstance(hash_details, dict):
logger.warning(
"Got non-dict object when checking hash details of %s%s: %s",
id_server_scheme,
id_server,
hash_details,
)
raise SynapseError(
400,
"Non-dict object from %s%s during v2 hash_details request: %s"
% (id_server_scheme, id_server, hash_details),
)
# Extract information from hash_details
supported_lookup_algorithms = hash_details.get("algorithms")
lookup_pepper = hash_details.get("lookup_pepper")
if (
not supported_lookup_algorithms
or not isinstance(supported_lookup_algorithms, list)
or not lookup_pepper
or not isinstance(lookup_pepper, str)
):
raise SynapseError(
400,
"Invalid hash details received from identity server %s%s: %s"
% (id_server_scheme, id_server, hash_details),
)
# Check if any of the supported lookup algorithms are present
if LookupAlgorithm.SHA256 in supported_lookup_algorithms:
# Perform a hashed lookup
lookup_algorithm = LookupAlgorithm.SHA256
# Hash address, medium and the pepper with sha256
to_hash = "%s %s %s" % (address, medium, lookup_pepper)
lookup_value = sha256_and_url_safe_base64(to_hash)
elif LookupAlgorithm.NONE in supported_lookup_algorithms:
# Perform a non-hashed lookup
lookup_algorithm = LookupAlgorithm.NONE
# Combine together plaintext address and medium
lookup_value = "%s %s" % (address, medium)
else:
logger.warning(
"None of the provided lookup algorithms of %s are supported: %s",
id_server,
supported_lookup_algorithms,
)
raise SynapseError(
400,
"Provided identity server does not support any v2 lookup "
"algorithms that this homeserver supports.",
)
# Authenticate with identity server given the access token from the client
headers = {"Authorization": create_id_access_token_header(id_access_token)}
try:
lookup_results = await self.blacklisting_http_client.post_json_get_json(
"%s%s/_matrix/identity/v2/lookup" % (id_server_scheme, id_server),
{
"addresses": [lookup_value],
"algorithm": lookup_algorithm,
"pepper": lookup_pepper,
},
headers=headers,
)
except RequestTimedOutError:
raise SynapseError(500, "Timed out contacting identity server")
except Exception as e:
logger.warning("Error when performing a v2 3pid lookup: %s", e)
raise SynapseError(
500, "Unknown error occurred during identity server lookup"
)
# Check for a mapping from what we looked up to an MXID
if "mappings" not in lookup_results or not isinstance(
lookup_results["mappings"], dict
):
logger.warning("No results from 3pid lookup")
return None
# Return the MXID if it's available, or None otherwise
mxid = lookup_results["mappings"].get(lookup_value)
return mxid
async def ask_id_server_for_third_party_invite(
self,
requester: Requester,
id_server: str,
medium: str,
address: str,
room_id: str,
inviter_user_id: str,
room_alias: str,
room_avatar_url: str,
room_join_rules: str,
room_name: str,
room_type: Optional[str],
inviter_display_name: str,
inviter_avatar_url: str,
id_access_token: Optional[str] = None,
) -> Tuple[str, List[Dict[str, str]], Dict[str, str], str]:
"""
Asks an identity server for a third party invite.
Args:
requester
id_server: hostname + optional port for the identity server.
medium: The literal string "email".
address: The third party address being invited.
room_id: The ID of the room to which the user is invited.
inviter_user_id: The user ID of the inviter.
room_alias: An alias for the room, for cosmetic notifications.
room_avatar_url: The URL of the room's avatar, for cosmetic
notifications.
            room_join_rules: The join rules of the room (e.g. "public").
room_name: The m.room.name of the room.
room_type: The type of the room from its m.room.create event (e.g "m.space").
inviter_display_name: The current display name of the
inviter.
inviter_avatar_url: The URL of the inviter's avatar.
id_access_token (str|None): The access token to authenticate to the identity
server with
Returns:
A tuple containing:
token: The token which must be signed to prove authenticity.
public_keys ([{"public_key": str, "key_validity_url": str}]):
public_key is a base64-encoded ed25519 public key.
fallback_public_key: One element from public_keys.
display_name: A user-friendly name to represent the invited user.
"""
invite_config = {
"medium": medium,
"address": address,
"room_id": room_id,
"room_alias": room_alias,
"room_avatar_url": room_avatar_url,
"room_join_rules": room_join_rules,
"room_name": room_name,
"sender": inviter_user_id,
"sender_display_name": inviter_display_name,
"sender_avatar_url": inviter_avatar_url,
}
if room_type is not None:
invite_config["org.matrix.msc3288.room_type"] = room_type
# If a custom web client location is available, include it in the request.
if self._web_client_location:
invite_config["org.matrix.web_client_location"] = self._web_client_location
# Add the identity service access token to the JSON body and use the v2
# Identity Service endpoints if id_access_token is present
data = None
base_url = "%s%s/_matrix/identity" % (id_server_scheme, id_server)
if id_access_token:
key_validity_url = "%s%s/_matrix/identity/v2/pubkey/isvalid" % (
id_server_scheme,
id_server,
)
# Attempt a v2 lookup
url = base_url + "/v2/store-invite"
try:
data = await self.blacklisting_http_client.post_json_get_json(
url,
invite_config,
{"Authorization": create_id_access_token_header(id_access_token)},
)
except RequestTimedOutError:
raise SynapseError(500, "Timed out contacting identity server")
except HttpResponseException as e:
if e.code != 404:
logger.info("Failed to POST %s with JSON: %s", url, e)
raise e
if data is None:
key_validity_url = "%s%s/_matrix/identity/api/v1/pubkey/isvalid" % (
id_server_scheme,
id_server,
)
url = base_url + "/api/v1/store-invite"
try:
data = await self.blacklisting_http_client.post_json_get_json(
url, invite_config
)
except RequestTimedOutError:
raise SynapseError(500, "Timed out contacting identity server")
except HttpResponseException as e:
logger.warning(
"Error trying to call /store-invite on %s%s: %s",
id_server_scheme,
id_server,
e,
)
if data is None:
# Some identity servers may only support application/x-www-form-urlencoded
# types. This is especially true with old instances of Sydent, see
# https://github.com/matrix-org/sydent/pull/170
try:
data = await self.blacklisting_http_client.post_urlencoded_get_json(
url, invite_config
)
except HttpResponseException as e:
logger.warning(
"Error calling /store-invite on %s%s with fallback "
"encoding: %s",
id_server_scheme,
id_server,
e,
)
raise e
# TODO: Check for success
token = data["token"]
public_keys = data.get("public_keys", [])
if "public_key" in data:
fallback_public_key = {
"public_key": data["public_key"],
"key_validity_url": key_validity_url,
}
else:
fallback_public_key = public_keys[0]
if not public_keys:
public_keys.append(fallback_public_key)
display_name = data["display_name"]
return token, public_keys, fallback_public_key, display_name
def create_id_access_token_header(id_access_token: str) -> List[str]:
"""Create an Authorization header for passing to SimpleHttpClient as the header value
of an HTTP request.
Args:
id_access_token: An identity server access token.
Returns:
The ascii-encoded bearer token encased in a list.
"""
# Prefix with Bearer
bearer_token = "Bearer %s" % id_access_token
    # Encode to standard ascii purely as a validation step: this raises
    # UnicodeEncodeError for non-ascii tokens; the encoded result is discarded.
    bearer_token.encode("ascii")
# Return as a list as that's how SimpleHttpClient takes header values
return [bearer_token]
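# Illustrative usage sketch (the token value is hypothetical):
#   headers = {"Authorization": create_id_access_token_header("abc123")}
# yields ["Bearer abc123"], the list form SimpleHttpClient expects for a
# header's values.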
class LookupAlgorithm:
"""
Supported hashing algorithms when performing a 3PID lookup.
SHA256 - Hashing an (address, medium, pepper) combo with sha256, then url-safe base64
encoding
NONE - Not performing any hashing. Simply sending an (address, medium) combo in plaintext
"""
SHA256 = "sha256"
NONE = "none"
|
[
"synapse.util.hash.sha256_and_url_safe_base64",
"synapse.http.client.SimpleHttpClient",
"synapse.util.json_decoder.decode",
"synapse.util.stringutils.random_string",
"synapse.util.stringutils.assert_valid_client_secret",
"synapse.util.stringutils.valid_id_server_location",
"synapse.api.errors.SynapseError",
"logging.getLogger"
] |
[((1525, 1552), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1542, 1552), False, 'import logging\n'), ((1772, 1792), 'synapse.http.client.SimpleHttpClient', 'SimpleHttpClient', (['hs'], {}), '(hs)\n', (1788, 1792), False, 'from synapse.http.client import SimpleHttpClient\n'), ((1912, 1998), 'synapse.http.client.SimpleHttpClient', 'SimpleHttpClient', (['hs'], {'ip_blacklist': 'hs.config.server.federation_ip_range_blacklist'}), '(hs, ip_blacklist=hs.config.server.\n federation_ip_range_blacklist)\n', (1928, 1998), False, 'from synapse.http.client import SimpleHttpClient\n'), ((4575, 4616), 'synapse.util.stringutils.assert_valid_client_secret', 'assert_valid_client_secret', (['client_secret'], {}), '(client_secret)\n', (4601, 4616), False, 'from synapse.util.stringutils import assert_valid_client_secret, random_string, valid_id_server_location\n'), ((15620, 15637), 'synapse.util.stringutils.random_string', 'random_string', (['(32)'], {}), '(32)\n', (15633, 15637), False, 'from synapse.util.stringutils import assert_valid_client_secret, random_string, valid_id_server_location\n'), ((4450, 4541), 'synapse.api.errors.SynapseError', 'SynapseError', (['(400)', '"""Missing param client_secret in creds"""'], {'errcode': 'Codes.MISSING_PARAM'}), "(400, 'Missing param client_secret in creds', errcode=Codes.\n MISSING_PARAM)\n", (4462, 4541), False, 'from synapse.api.errors import CodeMessageException, Codes, HttpResponseException, SynapseError\n'), ((4701, 4789), 'synapse.api.errors.SynapseError', 'SynapseError', (['(400)', '"""Missing param session_id in creds"""'], {'errcode': 'Codes.MISSING_PARAM'}), "(400, 'Missing param session_id in creds', errcode=Codes.\n MISSING_PARAM)\n", (4713, 4789), False, 'from synapse.api.errors import CodeMessageException, Codes, HttpResponseException, SynapseError\n'), ((7250, 7285), 'synapse.util.stringutils.valid_id_server_location', 'valid_id_server_location', (['id_server'], {}), '(id_server)\n', (7274, 7285), False, 'from synapse.util.stringutils import assert_valid_client_secret, random_string, valid_id_server_location\n'), ((7305, 7408), 'synapse.api.errors.SynapseError', 'SynapseError', (['(400)', '"""id_server must be a valid hostname with optional port and path components"""'], {}), "(400,\n 'id_server must be a valid hostname with optional port and path components'\n )\n", (7317, 7408), False, 'from synapse.api.errors import CodeMessageException, Codes, HttpResponseException, SynapseError\n'), ((11209, 11244), 'synapse.util.stringutils.valid_id_server_location', 'valid_id_server_location', (['id_server'], {}), '(id_server)\n', (11233, 11244), False, 'from synapse.util.stringutils import assert_valid_client_secret, random_string, valid_id_server_location\n'), ((11264, 11367), 'synapse.api.errors.SynapseError', 'SynapseError', (['(400)', '"""id_server must be a valid hostname with optional port and path components"""'], {}), "(400,\n 'id_server must be a valid hostname with optional port and path components'\n )\n", (11276, 11367), False, 'from synapse.api.errors import CodeMessageException, Codes, HttpResponseException, SynapseError\n'), ((15176, 15193), 'synapse.util.stringutils.random_string', 'random_string', (['(16)'], {}), '(16)\n', (15189, 15193), False, 'from synapse.util.stringutils import assert_valid_client_secret, random_string, valid_id_server_location\n'), ((28820, 28957), 'synapse.api.errors.SynapseError', 'SynapseError', (['(400)', "('Non-dict object from %s%s during v2 hash_details request: %s' % (\n 
id_server_scheme, id_server, hash_details))"], {}), "(400, \n 'Non-dict object from %s%s during v2 hash_details request: %s' % (\n id_server_scheme, id_server, hash_details))\n", (28832, 28957), False, 'from synapse.api.errors import CodeMessageException, Codes, HttpResponseException, SynapseError\n'), ((29421, 29557), 'synapse.api.errors.SynapseError', 'SynapseError', (['(400)', "('Invalid hash details received from identity server %s%s: %s' % (\n id_server_scheme, id_server, hash_details))"], {}), "(400, \n 'Invalid hash details received from identity server %s%s: %s' % (\n id_server_scheme, id_server, hash_details))\n", (29433, 29557), False, 'from synapse.api.errors import CodeMessageException, Codes, HttpResponseException, SynapseError\n'), ((29998, 30033), 'synapse.util.hash.sha256_and_url_safe_base64', 'sha256_and_url_safe_base64', (['to_hash'], {}), '(to_hash)\n', (30024, 30033), False, 'from synapse.util.hash import sha256_and_url_safe_base64\n'), ((5106, 5163), 'synapse.api.errors.SynapseError', 'SynapseError', (['(500)', '"""Timed out contacting identity server"""'], {}), "(500, 'Timed out contacting identity server')\n", (5118, 5163), False, 'from synapse.api.errors import CodeMessageException, Codes, HttpResponseException, SynapseError\n'), ((8727, 8784), 'synapse.api.errors.SynapseError', 'SynapseError', (['(500)', '"""Timed out contacting identity server"""'], {}), "(500, 'Timed out contacting identity server')\n", (8739, 8784), False, 'from synapse.api.errors import CodeMessageException, Codes, HttpResponseException, SynapseError\n'), ((8846, 8872), 'synapse.util.json_decoder.decode', 'json_decoder.decode', (['e.msg'], {}), '(e.msg)\n', (8865, 8872), False, 'from synapse.util import json_decoder\n'), ((13005, 13062), 'synapse.api.errors.SynapseError', 'SynapseError', (['(500)', '"""Timed out contacting identity server"""'], {}), "(500, 'Timed out contacting identity server')\n", (13017, 13062), False, 'from synapse.api.errors import CodeMessageException, Codes, HttpResponseException, SynapseError\n'), ((16001, 16069), 'synapse.api.errors.SynapseError', 'SynapseError', (['(500)', '"""An error was encountered when sending the email"""'], {}), "(500, 'An error was encountered when sending the email')\n", (16013, 16069), False, 'from synapse.api.errors import CodeMessageException, Codes, HttpResponseException, SynapseError\n'), ((18341, 18398), 'synapse.api.errors.SynapseError', 'SynapseError', (['(500)', '"""Timed out contacting identity server"""'], {}), "(500, 'Timed out contacting identity server')\n", (18353, 18398), False, 'from synapse.api.errors import CodeMessageException, Codes, HttpResponseException, SynapseError\n'), ((20351, 20408), 'synapse.api.errors.SynapseError', 'SynapseError', (['(500)', '"""Timed out contacting identity server"""'], {}), "(500, 'Timed out contacting identity server')\n", (20363, 20408), False, 'from synapse.api.errors import CodeMessageException, Codes, HttpResponseException, SynapseError\n'), ((24104, 24161), 'synapse.api.errors.SynapseError', 'SynapseError', (['(500)', '"""Timed out contacting identity server"""'], {}), "(500, 'Timed out contacting identity server')\n", (24116, 24161), False, 'from synapse.api.errors import CodeMessageException, Codes, HttpResponseException, SynapseError\n'), ((24310, 24367), 'synapse.api.errors.SynapseError', 'SynapseError', (['(400)', '"""Error contacting the identity server"""'], {}), "(400, 'Error contacting the identity server')\n", (24322, 24367), False, 'from synapse.api.errors import 
CodeMessageException, Codes, HttpResponseException, SynapseError\n'), ((27257, 27314), 'synapse.api.errors.SynapseError', 'SynapseError', (['(500)', '"""Timed out contacting identity server"""'], {}), "(500, 'Timed out contacting identity server')\n", (27269, 27314), False, 'from synapse.api.errors import CodeMessageException, Codes, HttpResponseException, SynapseError\n'), ((28485, 28542), 'synapse.api.errors.SynapseError', 'SynapseError', (['(500)', '"""Timed out contacting identity server"""'], {}), "(500, 'Timed out contacting identity server')\n", (28497, 28542), False, 'from synapse.api.errors import CodeMessageException, Codes, HttpResponseException, SynapseError\n'), ((30540, 30667), 'synapse.api.errors.SynapseError', 'SynapseError', (['(400)', '"""Provided identity server does not support any v2 lookup algorithms that this homeserver supports."""'], {}), "(400,\n 'Provided identity server does not support any v2 lookup algorithms that this homeserver supports.'\n )\n", (30552, 30667), False, 'from synapse.api.errors import CodeMessageException, Codes, HttpResponseException, SynapseError\n'), ((31359, 31416), 'synapse.api.errors.SynapseError', 'SynapseError', (['(500)', '"""Timed out contacting identity server"""'], {}), "(500, 'Timed out contacting identity server')\n", (31371, 31416), False, 'from synapse.api.errors import CodeMessageException, Codes, HttpResponseException, SynapseError\n'), ((31542, 31615), 'synapse.api.errors.SynapseError', 'SynapseError', (['(500)', '"""Unknown error occurred during identity server lookup"""'], {}), "(500, 'Unknown error occurred during identity server lookup')\n", (31554, 31615), False, 'from synapse.api.errors import CodeMessageException, Codes, HttpResponseException, SynapseError\n'), ((12895, 12949), 'synapse.api.errors.SynapseError', 'SynapseError', (['(500)', '"""Failed to contact identity server"""'], {}), "(500, 'Failed to contact identity server')\n", (12907, 12949), False, 'from synapse.api.errors import CodeMessageException, Codes, HttpResponseException, SynapseError\n'), ((35708, 35765), 'synapse.api.errors.SynapseError', 'SynapseError', (['(500)', '"""Timed out contacting identity server"""'], {}), "(500, 'Timed out contacting identity server')\n", (35720, 35765), False, 'from synapse.api.errors import CodeMessageException, Codes, HttpResponseException, SynapseError\n'), ((36401, 36458), 'synapse.api.errors.SynapseError', 'SynapseError', (['(500)', '"""Timed out contacting identity server"""'], {}), "(500, 'Timed out contacting identity server')\n", (36413, 36458), False, 'from synapse.api.errors import CodeMessageException, Codes, HttpResponseException, SynapseError\n')]
|
# Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility for testing Kubeflow-based orchestrator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import shutil
import subprocess
import tarfile
import tempfile
import time
from typing import Any, Dict, List, Text
import absl
import tensorflow as tf
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import InfraValidator
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.components.base import executor_spec
from tfx.components.base.base_component import BaseComponent
from tfx.dsl.experimental import latest_artifacts_resolver
from tfx.orchestration import pipeline as tfx_pipeline
from tfx.orchestration import test_utils
from tfx.orchestration.kubeflow import kubeflow_dag_runner
from tfx.orchestration.kubeflow.proto import kubeflow_pb2
from tfx.proto import infra_validator_pb2
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types import channel_utils
from tfx.types import component_spec
from tfx.types import standard_artifacts
from tfx.types.standard_artifacts import Model
from tfx.utils import dsl_utils
# Custom component definitions for testing purpose.
class _HelloWorldSpec(component_spec.ComponentSpec):
INPUTS = {}
OUTPUTS = {
'greeting':
component_spec.ChannelParameter(type=standard_artifacts.String)
}
PARAMETERS = {
'word': component_spec.ExecutionParameter(type=str),
}
class _ByeWorldSpec(component_spec.ComponentSpec):
INPUTS = {
'hearing':
component_spec.ChannelParameter(type=standard_artifacts.String)
}
OUTPUTS = {}
PARAMETERS = {}
class HelloWorldComponent(BaseComponent):
"""Producer component."""
SPEC_CLASS = _HelloWorldSpec
EXECUTOR_SPEC = executor_spec.ExecutorContainerSpec(
# TODO(b/143965964): move the image to private repo if the test is flaky
# due to docker hub.
image='google/cloud-sdk:latest',
command=['sh', '-c'],
# TODO(b/147242148): Remove /value after decision is made regarding uri
# structure.
args=[
'echo "hello {{exec_properties.word}}" | gsutil cp - {{output_dict["greeting"][0].uri}}/value'
])
def __init__(self, word, greeting=None):
if not greeting:
artifact = standard_artifacts.String()
greeting = channel_utils.as_channel([artifact])
super(HelloWorldComponent,
self).__init__(_HelloWorldSpec(word=word, greeting=greeting))
class ByeWorldComponent(BaseComponent):
"""Consumer component."""
SPEC_CLASS = _ByeWorldSpec
EXECUTOR_SPEC = executor_spec.ExecutorContainerSpec(
image='bash:latest',
command=['echo'],
args=['received {{input_dict["hearing"][0].value}}'])
def __init__(self, hearing):
super(ByeWorldComponent, self).__init__(_ByeWorldSpec(hearing=hearing))
def create_primitive_type_components(
pipeline_name: Text) -> List[BaseComponent]:
"""Creates components for testing primitive type artifact passing.
Args:
pipeline_name: Name of this pipeline.
Returns:
A list of TFX custom container components.
"""
hello_world = HelloWorldComponent(word=pipeline_name)
bye_world = ByeWorldComponent(hearing=hello_world.outputs['greeting'])
return [hello_world, bye_world]
def create_e2e_components(
pipeline_root: Text,
csv_input_location: Text,
transform_module: Text,
trainer_module: Text,
) -> List[BaseComponent]:
"""Creates components for a simple Chicago Taxi TFX pipeline for testing.
Args:
pipeline_root: The root of the pipeline output.
csv_input_location: The location of the input data directory.
transform_module: The location of the transform module file.
trainer_module: The location of the trainer module file.
Returns:
A list of TFX components that constitutes an end-to-end test pipeline.
"""
examples = dsl_utils.csv_input(csv_input_location)
example_gen = CsvExampleGen(input=examples)
statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
schema_gen = SchemaGen(
statistics=statistics_gen.outputs['statistics'],
infer_feature_shape=False)
example_validator = ExampleValidator(
statistics=statistics_gen.outputs['statistics'],
schema=schema_gen.outputs['schema'])
transform = Transform(
examples=example_gen.outputs['examples'],
schema=schema_gen.outputs['schema'],
module_file=transform_module)
latest_model_resolver = ResolverNode(
instance_name='latest_model_resolver',
resolver_class=latest_artifacts_resolver.LatestArtifactsResolver,
latest_model=Channel(type=Model))
trainer = Trainer(
transformed_examples=transform.outputs['transformed_examples'],
schema=schema_gen.outputs['schema'],
base_model=latest_model_resolver.outputs['latest_model'],
transform_graph=transform.outputs['transform_graph'],
train_args=trainer_pb2.TrainArgs(num_steps=10),
eval_args=trainer_pb2.EvalArgs(num_steps=5),
module_file=trainer_module,
)
# Set the TFMA config for Model Evaluation and Validation.
eval_config = tfma.EvalConfig(
model_specs=[tfma.ModelSpec(signature_name='eval')],
metrics_specs=[
tfma.MetricsSpec(
metrics=[tfma.MetricConfig(class_name='ExampleCount')],
thresholds={
'accuracy':
tfma.MetricThreshold(
value_threshold=tfma.GenericValueThreshold(
lower_bound={'value': 0.5}),
change_threshold=tfma.GenericChangeThreshold(
direction=tfma.MetricDirection.HIGHER_IS_BETTER,
absolute={'value': -1e-10}))
})
],
slicing_specs=[
tfma.SlicingSpec(),
tfma.SlicingSpec(feature_keys=['trip_start_hour'])
])
evaluator = Evaluator(
examples=example_gen.outputs['examples'],
model=trainer.outputs['model'],
eval_config=eval_config)
infra_validator = InfraValidator(
model=trainer.outputs['model'],
examples=example_gen.outputs['examples'],
serving_spec=infra_validator_pb2.ServingSpec(
tensorflow_serving=infra_validator_pb2.TensorFlowServing(
tags=['latest']),
kubernetes=infra_validator_pb2.KubernetesConfig()),
request_spec=infra_validator_pb2.RequestSpec(
tensorflow_serving=infra_validator_pb2.TensorFlowServingRequestSpec())
)
pusher = Pusher(
model=trainer.outputs['model'],
model_blessing=evaluator.outputs['blessing'],
push_destination=pusher_pb2.PushDestination(
filesystem=pusher_pb2.PushDestination.Filesystem(
base_directory=os.path.join(pipeline_root, 'model_serving'))))
return [
example_gen,
statistics_gen,
schema_gen,
example_validator,
transform,
latest_model_resolver,
trainer,
evaluator,
infra_validator,
pusher,
]
class BaseKubeflowTest(tf.test.TestCase):
"""Base class that defines testing harness for pipeline on KubeflowRunner."""
_POLLING_INTERVAL_IN_SECONDS = 10
# The following environment variables need to be set prior to calling the test
# in this file. All variables are required and do not have a default.
# The base container image name to use when building the image used in tests.
_BASE_CONTAINER_IMAGE = os.environ['KFP_E2E_BASE_CONTAINER_IMAGE']
# The src path to use to build docker image
_REPO_BASE = os.environ['KFP_E2E_SRC']
# The project id to use to run tests.
_GCP_PROJECT_ID = os.environ['KFP_E2E_GCP_PROJECT_ID']
# The GCP region in which the end-to-end test is run.
_GCP_REGION = os.environ['KFP_E2E_GCP_REGION']
# The GCP bucket to use to write output artifacts.
_BUCKET_NAME = os.environ['KFP_E2E_BUCKET_NAME']
# The location of test data. The input files are copied to a test-local
  # location for each invocation, and cleaned up at the end of the test.
_TEST_DATA_ROOT = os.environ['KFP_E2E_TEST_DATA_ROOT']
# The location of test user module
# It is retrieved from inside the container subject to testing.
_MODULE_ROOT = '/tfx-src/tfx/components/testdata/module_file'
_CONTAINER_IMAGE = '{}:{}'.format(_BASE_CONTAINER_IMAGE,
test_utils.random_id())
@classmethod
def setUpClass(cls):
super(BaseKubeflowTest, cls).setUpClass()
# Create a container image for use by test pipelines.
test_utils.build_and_push_docker_image(cls._CONTAINER_IMAGE, cls._REPO_BASE)
@classmethod
def tearDownClass(cls):
super(BaseKubeflowTest, cls).tearDownClass()
# Delete container image used in tests.
absl.logging.info('Deleting image %s', cls._CONTAINER_IMAGE)
subprocess.run(
['gcloud', 'container', 'images', 'delete', cls._CONTAINER_IMAGE],
check=True)
@classmethod
def _get_mysql_pod_name(cls):
"""Returns MySQL pod name in the cluster."""
pod_name = subprocess.check_output([
'kubectl',
'-n',
'kubeflow',
'get',
'pods',
'-l',
'app=mysql',
'--no-headers',
'-o',
'custom-columns=:metadata.name',
]).decode('utf-8').strip('\n')
absl.logging.info('MySQL pod name is: {}'.format(pod_name))
return pod_name
@classmethod
def _get_mlmd_db_name(cls, pipeline_name: Text):
# MySQL DB names must not contain '-' while k8s names must not contain '_'.
# So we replace the dashes here for the DB name.
valid_mysql_name = pipeline_name.replace('-', '_')
# MySQL database name cannot exceed 64 characters.
return 'mlmd_{}'.format(valid_mysql_name[-59:])
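  # Illustrative example (hypothetical pipeline name): 'my-kubeflow-test' maps
  # to the MLMD database name 'mlmd_my_kubeflow_test'; only the last 59
  # characters of the underscored name are kept so the result stays within
  # MySQL's 64 character limit.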
def setUp(self):
super(BaseKubeflowTest, self).setUp()
self._old_cwd = os.getcwd()
self._test_dir = tempfile.mkdtemp()
os.chdir(self._test_dir)
self._test_output_dir = 'gs://{}/test_output'.format(self._BUCKET_NAME)
test_id = test_utils.random_id()
self._testdata_root = 'gs://{}/test_data/{}'.format(self._BUCKET_NAME,
test_id)
subprocess.run(
['gsutil', 'cp', '-r', self._TEST_DATA_ROOT, self._testdata_root],
check=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
self._data_root = os.path.join(self._testdata_root, 'external', 'csv')
self._transform_module = os.path.join(self._MODULE_ROOT,
'transform_module.py')
self._trainer_module = os.path.join(self._MODULE_ROOT, 'trainer_module.py')
self.addCleanup(self._delete_test_dir, test_id)
def tearDown(self):
super(BaseKubeflowTest, self).tearDown()
os.chdir(self._old_cwd)
shutil.rmtree(self._test_dir)
def _delete_test_dir(self, test_id: Text):
"""Deletes files for this test including the module file and data files.
Args:
test_id: Randomly generated id of the test.
"""
test_utils.delete_gcs_files(self._GCP_PROJECT_ID, self._BUCKET_NAME,
'test_data/{}'.format(test_id))
def _delete_workflow(self, workflow_name: Text):
"""Deletes the specified Argo workflow."""
absl.logging.info('Deleting workflow {}'.format(workflow_name))
subprocess.run(['argo', '--namespace', 'kubeflow', 'delete', workflow_name],
check=True)
def _run_workflow(self,
workflow_file: Text,
workflow_name: Text,
parameter: Dict[Text, Text] = None):
"""Runs the specified workflow with Argo.
Blocks until the workflow has run (successfully or not) to completion.
Args:
workflow_file: YAML file with Argo workflow spec for the pipeline.
workflow_name: Name to use for the workflow.
parameter: mapping from pipeline parameter name to its runtime value.
"""
# TODO(ajaygopinathan): Consider using KFP cli instead.
def _format_parameter(parameter: Dict[Text, Any]) -> List[Text]:
"""Format the pipeline parameter section of argo workflow."""
if parameter:
result = []
for k, v in parameter.items():
result.append('-p')
result.append('%s=%s' % (k, v))
return result
else:
return []
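    # Hedged example (values are illustrative, not from the original source):
    # a parameter dict such as {'num-steps': '10'} is rendered by the helper
    # above as ['-p', 'num-steps=10'] and appended to the `argo submit`
    # command assembled below.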
run_command = [
'argo',
'submit',
'--name',
workflow_name,
'--namespace',
'kubeflow',
'--serviceaccount',
'pipeline-runner',
workflow_file,
]
run_command += _format_parameter(parameter)
absl.logging.info('Launching workflow {} with parameter {}'.format(
workflow_name, _format_parameter(parameter)))
with test_utils.Timer('RunningPipelineToCompletion'):
subprocess.run(run_command, check=True)
# Wait in the loop while pipeline is running.
status = 'Running'
while status == 'Running':
time.sleep(self._POLLING_INTERVAL_IN_SECONDS)
status = self._get_argo_pipeline_status(workflow_name)
def _delete_pipeline_output(self, pipeline_name: Text):
"""Deletes output produced by the named pipeline.
Args:
pipeline_name: The name of the pipeline.
"""
test_utils.delete_gcs_files(self._GCP_PROJECT_ID, self._BUCKET_NAME,
'test_output/{}'.format(pipeline_name))
def _delete_pipeline_metadata(self, pipeline_name: Text):
"""Drops the database containing metadata produced by the pipeline.
Args:
pipeline_name: The name of the pipeline owning the database.
"""
pod_name = self._get_mysql_pod_name()
db_name = self._get_mlmd_db_name(pipeline_name)
command = [
'kubectl',
'-n',
'kubeflow',
'exec',
'-it',
pod_name,
'--',
'mysql',
'--user',
'root',
'--execute',
'drop database if exists {};'.format(db_name),
]
absl.logging.info('Dropping MLMD DB with name: {}'.format(db_name))
with test_utils.Timer('DeletingMLMDDatabase'):
subprocess.run(command, check=True)
def _pipeline_root(self, pipeline_name: Text):
return os.path.join(self._test_output_dir, pipeline_name)
def _create_pipeline(self, pipeline_name: Text,
components: List[BaseComponent]):
"""Creates a pipeline given name and list of components."""
return tfx_pipeline.Pipeline(
pipeline_name=pipeline_name,
pipeline_root=self._pipeline_root(pipeline_name),
components=components,
enable_cache=True,
)
def _create_dataflow_pipeline(self, pipeline_name: Text,
components: List[BaseComponent]):
"""Creates a pipeline with Beam DataflowRunner."""
pipeline = self._create_pipeline(pipeline_name, components)
pipeline.beam_pipeline_args = [
'--runner=DataflowRunner',
'--project=' + self._GCP_PROJECT_ID,
'--temp_location=' +
os.path.join(self._pipeline_root(pipeline_name), 'tmp'),
'--region=' + self._GCP_REGION,
]
return pipeline
def _get_kubeflow_metadata_config(
self) -> kubeflow_pb2.KubeflowMetadataConfig:
config = kubeflow_dag_runner.get_default_kubeflow_metadata_config()
return config
def _get_argo_pipeline_status(self, workflow_name: Text) -> Text:
"""Get Pipeline status.
Args:
workflow_name: The name of the workflow.
Returns:
Simple status string which is returned from `argo get` command.
"""
get_workflow_command = [
'argo', '--namespace', 'kubeflow', 'get', workflow_name
]
output = subprocess.check_output(get_workflow_command).decode('utf-8')
absl.logging.info('Argo output ----\n%s', output)
match = re.search(r'^Status:\s+(.+)$', output, flags=re.MULTILINE)
self.assertIsNotNone(match)
return match.group(1)
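    # Hedged example of the parsing above (the exact `argo get` output format
    # is assumed here, not verified): a line such as
    #   Status:              Succeeded
    # makes this method return 'Succeeded'.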
def _compile_and_run_pipeline(self,
pipeline: tfx_pipeline.Pipeline,
workflow_name: Text = None,
parameters: Dict[Text, Any] = None):
"""Compiles and runs a KFP pipeline.
Args:
pipeline: The logical pipeline to run.
workflow_name: The argo workflow name, default to pipeline name.
      parameters: Values of the pipeline's runtime parameters.
"""
pipeline_name = pipeline.pipeline_info.pipeline_name
config = kubeflow_dag_runner.KubeflowDagRunnerConfig(
kubeflow_metadata_config=self._get_kubeflow_metadata_config(),
tfx_image=self._CONTAINER_IMAGE)
kubeflow_dag_runner.KubeflowDagRunner(config=config).run(pipeline)
file_path = os.path.join(self._test_dir, '{}.tar.gz'.format(pipeline_name))
self.assertTrue(tf.io.gfile.exists(file_path))
tarfile.TarFile.open(file_path).extract('pipeline.yaml')
pipeline_file = os.path.join(self._test_dir, 'pipeline.yaml')
self.assertIsNotNone(pipeline_file)
workflow_name = workflow_name or pipeline_name
# Ensure cleanup regardless of whether pipeline succeeds or fails.
self.addCleanup(self._delete_workflow, workflow_name)
self.addCleanup(self._delete_pipeline_metadata, pipeline_name)
self.addCleanup(self._delete_pipeline_output, pipeline_name)
# Run the pipeline to completion.
self._run_workflow(pipeline_file, workflow_name, parameters)
# Obtain workflow logs.
get_logs_command = [
'argo', '--namespace', 'kubeflow', 'logs', '-w', workflow_name
]
logs_output = subprocess.check_output(get_logs_command).decode('utf-8')
# Check if pipeline completed successfully.
status = self._get_argo_pipeline_status(workflow_name)
self.assertEqual(
'Succeeded', status, 'Pipeline {} failed to complete successfully: {}'
'\nFailed workflow logs:\n{}'.format(pipeline_name, status,
logs_output))
|
[
"tensorflow_model_analysis.GenericChangeThreshold",
"absl.logging.info",
"tfx.proto.infra_validator_pb2.TensorFlowServingRequestSpec",
"tfx.components.Evaluator",
"tfx.components.StatisticsGen",
"tfx.types.standard_artifacts.String",
"shutil.rmtree",
"os.path.join",
"os.chdir",
"tfx.components.CsvExampleGen",
"tensorflow_model_analysis.ModelSpec",
"tfx.components.Transform",
"tensorflow_model_analysis.SlicingSpec",
"tfx.components.SchemaGen",
"tfx.types.component_spec.ChannelParameter",
"tfx.orchestration.test_utils.Timer",
"tfx.proto.trainer_pb2.TrainArgs",
"tempfile.mkdtemp",
"tfx.components.base.executor_spec.ExecutorContainerSpec",
"tfx.types.channel_utils.as_channel",
"tfx.types.Channel",
"tfx.types.component_spec.ExecutionParameter",
"re.search",
"tfx.proto.trainer_pb2.EvalArgs",
"tfx.orchestration.test_utils.random_id",
"subprocess.check_output",
"tensorflow_model_analysis.GenericValueThreshold",
"tfx.components.ExampleValidator",
"tensorflow_model_analysis.MetricConfig",
"tfx.orchestration.test_utils.build_and_push_docker_image",
"tfx.orchestration.kubeflow.kubeflow_dag_runner.get_default_kubeflow_metadata_config",
"time.sleep",
"tfx.proto.infra_validator_pb2.KubernetesConfig",
"tfx.orchestration.kubeflow.kubeflow_dag_runner.KubeflowDagRunner",
"tarfile.TarFile.open",
"tensorflow.io.gfile.exists",
"subprocess.run",
"tfx.proto.infra_validator_pb2.TensorFlowServing",
"os.getcwd",
"tfx.utils.dsl_utils.csv_input"
] |
[((2738, 2945), 'tfx.components.base.executor_spec.ExecutorContainerSpec', 'executor_spec.ExecutorContainerSpec', ([], {'image': '"""google/cloud-sdk:latest"""', 'command': "['sh', '-c']", 'args': '[\'echo "hello {{exec_properties.word}}" | gsutil cp - {{output_dict["greeting"][0].uri}}/value\'\n ]'}), '(image=\'google/cloud-sdk:latest\',\n command=[\'sh\', \'-c\'], args=[\n \'echo "hello {{exec_properties.word}}" | gsutil cp - {{output_dict["greeting"][0].uri}}/value\'\n ])\n', (2773, 2945), False, 'from tfx.components.base import executor_spec\n'), ((3557, 3689), 'tfx.components.base.executor_spec.ExecutorContainerSpec', 'executor_spec.ExecutorContainerSpec', ([], {'image': '"""bash:latest"""', 'command': "['echo']", 'args': '[\'received {{input_dict["hearing"][0].value}}\']'}), '(image=\'bash:latest\', command=[\'echo\'],\n args=[\'received {{input_dict["hearing"][0].value}}\'])\n', (3592, 3689), False, 'from tfx.components.base import executor_spec\n'), ((4850, 4889), 'tfx.utils.dsl_utils.csv_input', 'dsl_utils.csv_input', (['csv_input_location'], {}), '(csv_input_location)\n', (4869, 4889), False, 'from tfx.utils import dsl_utils\n'), ((4907, 4936), 'tfx.components.CsvExampleGen', 'CsvExampleGen', ([], {'input': 'examples'}), '(input=examples)\n', (4920, 4936), False, 'from tfx.components import CsvExampleGen\n'), ((4956, 5011), 'tfx.components.StatisticsGen', 'StatisticsGen', ([], {'examples': "example_gen.outputs['examples']"}), "(examples=example_gen.outputs['examples'])\n", (4969, 5011), False, 'from tfx.components import StatisticsGen\n'), ((5027, 5116), 'tfx.components.SchemaGen', 'SchemaGen', ([], {'statistics': "statistics_gen.outputs['statistics']", 'infer_feature_shape': '(False)'}), "(statistics=statistics_gen.outputs['statistics'],\n infer_feature_shape=False)\n", (5036, 5116), False, 'from tfx.components import SchemaGen\n'), ((5148, 5255), 'tfx.components.ExampleValidator', 'ExampleValidator', ([], {'statistics': "statistics_gen.outputs['statistics']", 'schema': "schema_gen.outputs['schema']"}), "(statistics=statistics_gen.outputs['statistics'], schema=\n schema_gen.outputs['schema'])\n", (5164, 5255), False, 'from tfx.components import ExampleValidator\n'), ((5278, 5401), 'tfx.components.Transform', 'Transform', ([], {'examples': "example_gen.outputs['examples']", 'schema': "schema_gen.outputs['schema']", 'module_file': 'transform_module'}), "(examples=example_gen.outputs['examples'], schema=schema_gen.\n outputs['schema'], module_file=transform_module)\n", (5287, 5401), False, 'from tfx.components import Transform\n'), ((6889, 7002), 'tfx.components.Evaluator', 'Evaluator', ([], {'examples': "example_gen.outputs['examples']", 'model': "trainer.outputs['model']", 'eval_config': 'eval_config'}), "(examples=example_gen.outputs['examples'], model=trainer.outputs[\n 'model'], eval_config=eval_config)\n", (6898, 7002), False, 'from tfx.components import Evaluator\n'), ((2274, 2337), 'tfx.types.component_spec.ChannelParameter', 'component_spec.ChannelParameter', ([], {'type': 'standard_artifacts.String'}), '(type=standard_artifacts.String)\n', (2305, 2337), False, 'from tfx.types import component_spec\n'), ((2373, 2416), 'tfx.types.component_spec.ExecutionParameter', 'component_spec.ExecutionParameter', ([], {'type': 'str'}), '(type=str)\n', (2406, 2416), False, 'from tfx.types import component_spec\n'), ((2515, 2578), 'tfx.types.component_spec.ChannelParameter', 'component_spec.ChannelParameter', ([], {'type': 'standard_artifacts.String'}), 
'(type=standard_artifacts.String)\n', (2546, 2578), False, 'from tfx.types import component_spec\n'), ((9331, 9353), 'tfx.orchestration.test_utils.random_id', 'test_utils.random_id', ([], {}), '()\n', (9351, 9353), False, 'from tfx.orchestration import test_utils\n'), ((9503, 9579), 'tfx.orchestration.test_utils.build_and_push_docker_image', 'test_utils.build_and_push_docker_image', (['cls._CONTAINER_IMAGE', 'cls._REPO_BASE'], {}), '(cls._CONTAINER_IMAGE, cls._REPO_BASE)\n', (9541, 9579), False, 'from tfx.orchestration import test_utils\n'), ((9720, 9780), 'absl.logging.info', 'absl.logging.info', (['"""Deleting image %s"""', 'cls._CONTAINER_IMAGE'], {}), "('Deleting image %s', cls._CONTAINER_IMAGE)\n", (9737, 9780), False, 'import absl\n'), ((9785, 9883), 'subprocess.run', 'subprocess.run', (["['gcloud', 'container', 'images', 'delete', cls._CONTAINER_IMAGE]"], {'check': '(True)'}), "(['gcloud', 'container', 'images', 'delete', cls.\n _CONTAINER_IMAGE], check=True)\n", (9799, 9883), False, 'import subprocess\n'), ((10795, 10806), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (10804, 10806), False, 'import os\n'), ((10828, 10846), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (10844, 10846), False, 'import tempfile\n'), ((10851, 10875), 'os.chdir', 'os.chdir', (['self._test_dir'], {}), '(self._test_dir)\n', (10859, 10875), False, 'import os\n'), ((10968, 10990), 'tfx.orchestration.test_utils.random_id', 'test_utils.random_id', ([], {}), '()\n', (10988, 10990), False, 'from tfx.orchestration import test_utils\n'), ((11136, 11293), 'subprocess.run', 'subprocess.run', (["['gsutil', 'cp', '-r', self._TEST_DATA_ROOT, self._testdata_root]"], {'check': '(True)', 'stdout': 'subprocess.DEVNULL', 'stderr': 'subprocess.DEVNULL'}), "(['gsutil', 'cp', '-r', self._TEST_DATA_ROOT, self.\n _testdata_root], check=True, stdout=subprocess.DEVNULL, stderr=\n subprocess.DEVNULL)\n", (11150, 11293), False, 'import subprocess\n'), ((11346, 11398), 'os.path.join', 'os.path.join', (['self._testdata_root', '"""external"""', '"""csv"""'], {}), "(self._testdata_root, 'external', 'csv')\n", (11358, 11398), False, 'import os\n'), ((11428, 11482), 'os.path.join', 'os.path.join', (['self._MODULE_ROOT', '"""transform_module.py"""'], {}), "(self._MODULE_ROOT, 'transform_module.py')\n", (11440, 11482), False, 'import os\n'), ((11552, 11604), 'os.path.join', 'os.path.join', (['self._MODULE_ROOT', '"""trainer_module.py"""'], {}), "(self._MODULE_ROOT, 'trainer_module.py')\n", (11564, 11604), False, 'import os\n'), ((11730, 11753), 'os.chdir', 'os.chdir', (['self._old_cwd'], {}), '(self._old_cwd)\n', (11738, 11753), False, 'import os\n'), ((11758, 11787), 'shutil.rmtree', 'shutil.rmtree', (['self._test_dir'], {}), '(self._test_dir)\n', (11771, 11787), False, 'import shutil\n'), ((12288, 12380), 'subprocess.run', 'subprocess.run', (["['argo', '--namespace', 'kubeflow', 'delete', workflow_name]"], {'check': '(True)'}), "(['argo', '--namespace', 'kubeflow', 'delete', workflow_name],\n check=True)\n", (12302, 12380), False, 'import subprocess\n'), ((15162, 15212), 'os.path.join', 'os.path.join', (['self._test_output_dir', 'pipeline_name'], {}), '(self._test_output_dir, pipeline_name)\n', (15174, 15212), False, 'import os\n'), ((16202, 16260), 'tfx.orchestration.kubeflow.kubeflow_dag_runner.get_default_kubeflow_metadata_config', 'kubeflow_dag_runner.get_default_kubeflow_metadata_config', ([], {}), '()\n', (16258, 16260), False, 'from tfx.orchestration.kubeflow import kubeflow_dag_runner\n'), ((16704, 16756), 
'absl.logging.info', 'absl.logging.info', (['"""Argo output ----\n%s"""', 'output'], {}), '("""Argo output ----\n%s""", output)\n', (16721, 16756), False, 'import absl\n'), ((16766, 16824), 're.search', 're.search', (['"""^Status:\\\\s+(.+)$"""', 'output'], {'flags': 're.MULTILINE'}), "('^Status:\\\\s+(.+)$', output, flags=re.MULTILINE)\n", (16775, 16824), False, 'import re\n'), ((17865, 17910), 'os.path.join', 'os.path.join', (['self._test_dir', '"""pipeline.yaml"""'], {}), "(self._test_dir, 'pipeline.yaml')\n", (17877, 17910), False, 'import os\n'), ((3254, 3281), 'tfx.types.standard_artifacts.String', 'standard_artifacts.String', ([], {}), '()\n', (3279, 3281), False, 'from tfx.types import standard_artifacts\n'), ((3299, 3335), 'tfx.types.channel_utils.as_channel', 'channel_utils.as_channel', (['[artifact]'], {}), '([artifact])\n', (3323, 3335), False, 'from tfx.types import channel_utils\n'), ((5592, 5611), 'tfx.types.Channel', 'Channel', ([], {'type': 'Model'}), '(type=Model)\n', (5599, 5611), False, 'from tfx.types import Channel\n'), ((5888, 5923), 'tfx.proto.trainer_pb2.TrainArgs', 'trainer_pb2.TrainArgs', ([], {'num_steps': '(10)'}), '(num_steps=10)\n', (5909, 5923), False, 'from tfx.proto import trainer_pb2\n'), ((5941, 5974), 'tfx.proto.trainer_pb2.EvalArgs', 'trainer_pb2.EvalArgs', ([], {'num_steps': '(5)'}), '(num_steps=5)\n', (5961, 5974), False, 'from tfx.proto import trainer_pb2\n'), ((13710, 13757), 'tfx.orchestration.test_utils.Timer', 'test_utils.Timer', (['"""RunningPipelineToCompletion"""'], {}), "('RunningPipelineToCompletion')\n", (13726, 13757), False, 'from tfx.orchestration import test_utils\n'), ((13765, 13804), 'subprocess.run', 'subprocess.run', (['run_command'], {'check': '(True)'}), '(run_command, check=True)\n', (13779, 13804), False, 'import subprocess\n'), ((15017, 15057), 'tfx.orchestration.test_utils.Timer', 'test_utils.Timer', (['"""DeletingMLMDDatabase"""'], {}), "('DeletingMLMDDatabase')\n", (15033, 15057), False, 'from tfx.orchestration import test_utils\n'), ((15065, 15100), 'subprocess.run', 'subprocess.run', (['command'], {'check': '(True)'}), '(command, check=True)\n', (15079, 15100), False, 'import subprocess\n'), ((17753, 17782), 'tensorflow.io.gfile.exists', 'tf.io.gfile.exists', (['file_path'], {}), '(file_path)\n', (17771, 17782), True, 'import tensorflow as tf\n'), ((6127, 6164), 'tensorflow_model_analysis.ModelSpec', 'tfma.ModelSpec', ([], {'signature_name': '"""eval"""'}), "(signature_name='eval')\n", (6141, 6164), True, 'import tensorflow_model_analysis as tfma\n'), ((6785, 6803), 'tensorflow_model_analysis.SlicingSpec', 'tfma.SlicingSpec', ([], {}), '()\n', (6801, 6803), True, 'import tensorflow_model_analysis as tfma\n'), ((6815, 6865), 'tensorflow_model_analysis.SlicingSpec', 'tfma.SlicingSpec', ([], {'feature_keys': "['trip_start_hour']"}), "(feature_keys=['trip_start_hour'])\n", (6831, 6865), True, 'import tensorflow_model_analysis as tfma\n'), ((13923, 13968), 'time.sleep', 'time.sleep', (['self._POLLING_INTERVAL_IN_SECONDS'], {}), '(self._POLLING_INTERVAL_IN_SECONDS)\n', (13933, 13968), False, 'import time\n'), ((16638, 16683), 'subprocess.check_output', 'subprocess.check_output', (['get_workflow_command'], {}), '(get_workflow_command)\n', (16661, 16683), False, 'import subprocess\n'), ((17585, 17637), 'tfx.orchestration.kubeflow.kubeflow_dag_runner.KubeflowDagRunner', 'kubeflow_dag_runner.KubeflowDagRunner', ([], {'config': 'config'}), '(config=config)\n', (17622, 17637), False, 'from tfx.orchestration.kubeflow import 
kubeflow_dag_runner\n'), ((17788, 17819), 'tarfile.TarFile.open', 'tarfile.TarFile.open', (['file_path'], {}), '(file_path)\n', (17808, 17819), False, 'import tarfile\n'), ((18517, 18558), 'subprocess.check_output', 'subprocess.check_output', (['get_logs_command'], {}), '(get_logs_command)\n', (18540, 18558), False, 'import subprocess\n'), ((7221, 7275), 'tfx.proto.infra_validator_pb2.TensorFlowServing', 'infra_validator_pb2.TensorFlowServing', ([], {'tags': "['latest']"}), "(tags=['latest'])\n", (7258, 7275), False, 'from tfx.proto import infra_validator_pb2\n'), ((7313, 7351), 'tfx.proto.infra_validator_pb2.KubernetesConfig', 'infra_validator_pb2.KubernetesConfig', ([], {}), '()\n', (7349, 7351), False, 'from tfx.proto import infra_validator_pb2\n'), ((7435, 7485), 'tfx.proto.infra_validator_pb2.TensorFlowServingRequestSpec', 'infra_validator_pb2.TensorFlowServingRequestSpec', ([], {}), '()\n', (7483, 7485), False, 'from tfx.proto import infra_validator_pb2\n'), ((10008, 10155), 'subprocess.check_output', 'subprocess.check_output', (["['kubectl', '-n', 'kubeflow', 'get', 'pods', '-l', 'app=mysql',\n '--no-headers', '-o', 'custom-columns=:metadata.name']"], {}), "(['kubectl', '-n', 'kubeflow', 'get', 'pods', '-l',\n 'app=mysql', '--no-headers', '-o', 'custom-columns=:metadata.name'])\n", (10031, 10155), False, 'import subprocess\n'), ((6240, 6284), 'tensorflow_model_analysis.MetricConfig', 'tfma.MetricConfig', ([], {'class_name': '"""ExampleCount"""'}), "(class_name='ExampleCount')\n", (6257, 6284), True, 'import tensorflow_model_analysis as tfma\n'), ((7741, 7785), 'os.path.join', 'os.path.join', (['pipeline_root', '"""model_serving"""'], {}), "(pipeline_root, 'model_serving')\n", (7753, 7785), False, 'import os\n'), ((6430, 6484), 'tensorflow_model_analysis.GenericValueThreshold', 'tfma.GenericValueThreshold', ([], {'lower_bound': "{'value': 0.5}"}), "(lower_bound={'value': 0.5})\n", (6456, 6484), True, 'import tensorflow_model_analysis as tfma\n'), ((6560, 6668), 'tensorflow_model_analysis.GenericChangeThreshold', 'tfma.GenericChangeThreshold', ([], {'direction': 'tfma.MetricDirection.HIGHER_IS_BETTER', 'absolute': "{'value': -1e-10}"}), "(direction=tfma.MetricDirection.HIGHER_IS_BETTER,\n absolute={'value': -1e-10})\n", (6587, 6668), True, 'import tensorflow_model_analysis as tfma\n')]
|
# Generated by Django 2.0.3 on 2018-07-04 03:45
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]
operations = [
migrations.CreateModel(
name="News",
fields=[
("timestamp", models.DateTimeField(auto_now_add=True)),
(
"uuid_id",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("content", models.TextField(max_length=280)),
(
"reply",
models.BooleanField(default=False, verbose_name="Is a reply?"),
),
(
"liked",
models.ManyToManyField(
blank=True,
related_name="liked_news",
to=settings.AUTH_USER_MODEL,
),
),
(
"parent",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="thread",
to="news.News",
),
),
(
"user",
models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="publisher",
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"verbose_name": "News",
"verbose_name_plural": "News",
"ordering": ("-timestamp",),
},
)
]
|
[
"django.db.models.TextField",
"django.db.migrations.swappable_dependency",
"django.db.models.UUIDField",
"django.db.models.ManyToManyField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.models.DateTimeField"
] |
[((249, 306), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (280, 306), False, 'from django.db import migrations, models\n'), ((436, 475), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (456, 475), False, 'from django.db import migrations, models\n'), ((547, 638), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'default': 'uuid.uuid4', 'editable': '(False)', 'primary_key': '(True)', 'serialize': '(False)'}), '(default=uuid.uuid4, editable=False, primary_key=True,\n serialize=False)\n', (563, 638), False, 'from django.db import migrations, models\n'), ((802, 834), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(280)'}), '(max_length=280)\n', (818, 834), False, 'from django.db import migrations, models\n'), ((904, 966), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'verbose_name': '"""Is a reply?"""'}), "(default=False, verbose_name='Is a reply?')\n", (923, 966), False, 'from django.db import migrations, models\n'), ((1054, 1149), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'related_name': '"""liked_news"""', 'to': 'settings.AUTH_USER_MODEL'}), "(blank=True, related_name='liked_news', to=settings.\n AUTH_USER_MODEL)\n", (1076, 1149), False, 'from django.db import migrations, models\n'), ((1328, 1457), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""thread"""', 'to': '"""news.News"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, related_name='thread', to='news.News')\n", (1345, 1457), False, 'from django.db import migrations, models\n'), ((1682, 1814), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""publisher"""', 'to': 'settings.AUTH_USER_MODEL'}), "(null=True, on_delete=django.db.models.deletion.CASCADE,\n related_name='publisher', to=settings.AUTH_USER_MODEL)\n", (1699, 1814), False, 'from django.db import migrations, models\n')]
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
import json
import random
import os
import string
import numpy as np
from parlai.core.agents import create_agent
from parlai.core.message import Message
from parlai.core.worlds import DialogPartnerWorld, validate
from parlai.tasks.self_chat.worlds import SelfChatWorld as SelfChatBaseWorld
from parlai.utils.misc import warn_once
from .agents import _path, load_from_path
class InteractiveWorld(DialogPartnerWorld):
"""
Interactive world for Airdialogue.
  Used for models trained on the `airdialogue` task. The human participant
  first picks an intent sampled from the airdialogue dev data and then chats
  with the model. Unless the task is the customer side, the model's predicted
  action is scored against the dataset's expected action at the end of the
  chat.
"""
@staticmethod
def add_cmdline_args(argparser):
group = argparser.add_argument_group('Air Interactive World Args')
group.add_argument(
'--intent-choice',
type=int,
default=3,
help='number of intent choices in dialogue (default: 3)')
def __init__(self, opt, agents, shared=None):
super().__init__(opt, agents, shared)
print('[ loading airdialogue.. ]')
self.opt = opt
self.cnt = 0
self.human_agent = self.agents[0]
self.model_agent = self.agents[1]
defaultagent = 'agent'
defaultsize = -1
task = opt.get('task', f'airdialogue:{defaultagent}:{defaultsize}')
self.agenttype = task.split(':')[1] if len(
task.split(':')) > 1 else defaultagent
self.datasize = int(
task.split(':')[2]) if len(task.split(':')) > 2 else defaultsize
if shared is not None:
self.messages = shared['messages']
self.actions = shared['actions']
self.expected_actions = shared['expected_actions']
self.num_ex = shared['num_ex']
self.intents = shared['intents']
self.intent_objs = shared['intent_objs']
self.kbs = shared['kbs']
else:
self.messages = []
self.actions = []
self.expected_actions = []
self.intents = []
self.intent_objs = []
self.kbs = []
self.num_ex = 0
jsons_path = _path(opt)
self._setup_data(jsons_path)
self.num_intent_choice = opt.get('intent_choice', 3)
def _setup_data(self, jsons_path):
data_path = os.path.join(jsons_path, 'dev_data.json')
kb_path = os.path.join(jsons_path, 'dev_kb.json')
size = self.datasize
load_from_path(
self,
data_path,
kb_path,
size,
load_intent=True,
load_kb=True,
load_expected_action=True)
def _get_new_intent(self):
random.seed()
intent_ids = random.sample(
range(len(self.intents)), self.num_intent_choice - 1)
intents = [self.intents[i] for i in intent_ids]
intents.append('[OTHER INTENT]')
letters = list(string.ascii_uppercase)[:self.num_intent_choice]
intent_list = {x: y for x, y in zip(letters, intents)}
intent_text = '\n'.join(
['{}: {}'.format(k, v) for k, v in intent_list.items()])
intent_id_list = {x: y for x, y in zip(letters[:-1], intent_ids)}
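    # For illustration (hypothetical values, since the real intents are loaded
    # from the airdialogue dev set): with the default of three choices the
    # prompt shown to the human looks like
    #   A: <intent sampled from data>
    #   B: <intent sampled from data>
    #   C: [OTHER INTENT]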
done = False
while not done:
self.human_agent.observe({
'text':
'Your role is {}\nPlease choose one of the following intents by typing '
'A, B, C, ..., etc. : \n\n{}\n'.format(self.agenttype,
intent_text)
})
intent_act = self.human_agent.act()
choice = intent_act['text'][0].upper()
if choice in intent_list:
if intent_list[choice] == '[OTHER INTENT]':
intent_ids = random.sample(
range(len(self.intents)), self.num_intent_choice - 1)
intents = [self.intents[i] for i in intent_ids]
intents.append('[OTHER INTENT]')
letters = list(string.ascii_uppercase)[:self.num_intent_choice]
intent_list = {x: y for x, y in zip(letters, intents)}
intent_text = '\n'.join(
['{}: {}'.format(k, v) for k, v in intent_list.items()])
intent_id_list = {x: y for x, y in zip(letters[:-1], intent_ids)}
else:
done = True
else:
self.human_agent.observe(
{'text': 'Invalid response, please try again.'})
self.human_agent.observe(
{'text': f'[Your chosen intent is: {intent_list[choice]}]'})
chosen_id = intent_id_list[choice]
expected_action = self.expected_actions[chosen_id]
self.human_agent.observe(
{'text': f'[expected action is: {expected_action}]'})
for flight in expected_action['flight']:
expected_flight = flight - 1000
# import ipdb; ipdb.set_trace()
expected_flight = self.kbs[chosen_id]['kb'][expected_flight]
self.human_agent.observe(
{'text': f'[expected flight is: {expected_flight}]'})
if len(expected_action['flight']) == 0:
self.human_agent.observe({'text': f'[expected flight is: None]'})
reservation = self.kbs[chosen_id]['reservation']
self.human_agent.observe(
{'text': f'[reservation flight is: {reservation}]'})
return chosen_id
def _add_context(self, action):
entrys = self.messages[self.context_id][0].split('\n')
entrys[-1] = action['text']
if self.agenttype == 'agent':
action['tickets'] = entrys[:-2]
action['reservation'] = entrys[-2]
      # The following fields are not actually used in eval; they are only
      # needed to compute the loss and should be removed in the future.
action['action_name'] = self.actions[self.context_id]['name']
action['action_flight'] = self.actions[self.context_id]['flight']
action['action_status'] = self.actions[self.context_id]['status']
action['action_intent'] = self.actions[self.context_id]['intent']
elif self.agenttype == 'customer':
action['intent'] = entrys[0]
assert len(entrys) == 2
action['return_encoder_state'] = True
return action
def reset(self):
super().reset()
self.cnt = 0
self.context_id = None
self.model_agent.reset()
self.human_agent.reset()
self.acts = [None, None]
def get_air_score(self):
score_obj = self.model_agent.get_air_score(
self.acts[1]['encoder_states'], self.expected_actions[self.context_id],
self.kbs[self.context_id])
score_text = '\n'.join([f" - {k}: {v}" for k, v in score_obj.items()])
for flight in score_obj['flight']:
chosen_flight = self.kbs[self.context_id]['kb'][flight - 1000]
score_text += f'\nChosen Flight: {chosen_flight}'
self.human_agent.observe({
'id': 'Final Agent Prediction',
'text': '\n' + score_text
})
return score_obj
def parley(self):
"""
Loop between model and human.
"""
if self.cnt == 0:
self.context_id = self._get_new_intent()
self.acts = [None, None]
self.human_first = random.choice([0, 1])
# possibly get human act first
if self.cnt == 0 and not self.human_first:
self.acts[0] = Message({'text': '__SILENCE__', 'episode_done': False})
else:
try:
self.acts[0] = self.human_agent.act()
except StopIteration:
if self.agenttype != 'customer':
self.get_air_score()
print('[ CHAT DONE ]')
print('\n[ Preparing new chat... ]\n')
self.reset()
return
act = deepcopy(self.acts[0])
# add context to the model observation
act = self._add_context(act)
# model observes context and human (apprentice) act
self.model_agent.observe(validate(act))
# model agent act
self.acts[1] = self.model_agent.act()
# human (apprentice) agent observes model act
# remove encoder_states to prevent output
act = deepcopy(self.acts[1])
if 'encoder_states' in act:
del act['encoder_states']
self.human_agent.observe(validate(act))
self.update_counters()
self.cnt += 1
if self.episode_done():
print('[ CHAT DONE ]')
print('\n[ Preparing new chat... ]\n')
self.cnt = 0
self.model_agent.reset()
class InteractiveCustomerWorld(InteractiveWorld):
pass
class InteractiveAgentWorld(InteractiveWorld):
pass
class SelfChatBothWorld(InteractiveWorld):
def __init__(self, opt, agents, shared=None):
super().__init__(opt, agents, shared)
assert self.agenttype == 'both', 'agenttype must be both for selfplay'
if opt['model_file'].split(':')[0] == 'human':
print('[Human Evaluation]')
self.human_eval = True
else:
self.human_eval = False
self.customer_agent = self.agents[0]
self.agent_agent = self.agents[1]
self.max_turn_cnt = self.opt.get('selfchat_max_turns', 10)
self.episode_cnt = 0
self.agent_encoder_states = None
self.score = None
self.gather_rewards = {
'reward': [],
'flight_score': [],
'name_score': [],
'status_score': [],
}
self.start_cid = self.opt.get('start_cid', 0)
@staticmethod
def add_cmdline_args(argparser):
group = argparser.add_argument_group('Air SelfChat World Args')
group.add_argument(
'--start-cid', type=int, default=0, help='offset of contextid')
def display(self):
s = super().display()
if self.cnt == 0:
s += '\n==============================\n'
return s
def _add_context(self, action, agenttype):
entrys = self.messages[self.context_id][0].split('\n')
entrys[-1] = action['text']
if agenttype == 'agent':
action['tickets'] = entrys[:-3]
action['reservation'] = entrys[-3]
      # The following fields are not actually used in eval; they are only
      # needed to compute the loss and should be removed in the future.
action['action_name'] = self.actions[self.context_id]['name']
action['action_flight'] = self.actions[self.context_id]['flight']
action['action_status'] = self.actions[self.context_id]['status']
action['action_intent'] = self.actions[self.context_id]['intent']
elif agenttype == 'customer':
action['intent'] = entrys[-2]
return action
def episode_done(self):
# add a heuristic for episode_done
# this one will break the current parlai selfplay script
if self.acts[0] is not None and self.acts[1] is not None:
if 'thank you' in self.acts[0]['text'].lower(
) and 'thank you' in self.acts[1]['text'].lower():
return True
if 'have a nice day' in self.acts[0]['text'].lower(
) or 'have a nice day' in self.acts[1]['text'].lower():
return True
if 'thank you' in self.acts[0]['text'].lower(
) and 'welcome' in self.acts[1]['text'].lower():
return True
if 'welcome' in self.acts[0]['text'].lower(
) and 'thank you' in self.acts[1]['text'].lower():
return True
if self.human_done:
return True
return self.cnt >= self.max_turn_cnt
def get_air_score(self):
score_obj = self.model_agent.get_air_score(
self.agent_encoder_states, self.expected_actions[self.context_id],
self.kbs[self.context_id])
score_text = '\n'.join([f" - {k}: {v}" for k, v in score_obj.items()])
for flight in score_obj['flight']:
chosen_flight = self.kbs[self.context_id]['kb'][flight - 1000]
score_text += f'\nChosen Flight: {chosen_flight}'
return score_obj, score_text
def write(self, logger, reports, outdir):
os.makedirs(outdir, exist_ok=True)
outfile = os.path.join(outdir, 'log.jsonl')
conversations = logger.get_logs()
    # don't really know how it works
# hack to remove empty logs
conversations = [i for i in conversations if len(i) > 0]
def format_conv(conv):
new_conv = []
for i in conv:
new_conv.append({'speaker': 'customer', 'text': i[0]['text']})
new_conv.append({'speaker': 'agent', 'text': i[1]['text']})
return new_conv
if len(conversations) != len(reports):
print('WARNING! length difference')
import ipdb
ipdb.set_trace()
with open(outfile, 'w') as fout:
#import ipdb; ipdb.set_trace()
for conv, re in zip(conversations, reports):
r = {}
r['conversation'] = format_conv(conv)
r['report'] = re
context_id = re['id']
r['expected_action'] = self.expected_actions[context_id]
r['intent'] = self.intent_objs[context_id]
r['kb'] = self.kbs[context_id]
fout.write(json.dumps(r) + '\n')
def report(self):
for k, v in self.gather_rewards.items():
v.append(self.score[k])
v = np.array(v).mean()
print(f"Gather {k} : {v}")
r = deepcopy(self.score)
r['id'] = self.context_id
return r
def reset(self):
#self.reset()
self.customer_agent.reset()
self.agent_agent.reset()
self.episode_cnt += 1
self.cnt = 0
self.acts = [None, None]
def customer_obs(self, act):
_act = act
self.predefine_acts = []
if self.human_eval:
_act = {}
_act['text'] = act['text']
_act['id'] = act['id']
if self.cnt == 0:
_act['intent'] = act['intent']
# define some template reponses to ease human eval
intent = self.intent_objs[self.context_id]
if self.cnt == 0:
print(intent)
if intent['goal'] == 'book':
self.predefine_acts.append('Hi, I want to book a ticket.')
else:
self.predefine_acts.append(
f"Hi, I want to {intent['goal']} a reservation.")
else:
self.predefine_acts.append(f"My name is {intent['name']}")
if intent['goal'] in ['book', 'change']:
self.predefine_acts.append(
f"My origin is {intent['departure_airport']} and destination is {intent['return_airport']}."
)
# Add dates
MONTH_DICT = {
'Jan': '01',
'Feb': '02',
'Mar': '03',
'Apr': '04',
'May': '05',
'Jun': '06',
'Jul': '07',
'Aug': '08',
'Sep': '09',
'Oct': '10',
'Nov': '11',
'Dec': '12'
}
m1 = MONTH_DICT[intent['departure_month'][:3]]
m2 = MONTH_DICT[intent['return_month'][:3]]
d1 = m1 + '/' + intent['departure_day']
if 'departure_time' in intent and intent['departure_time'] != 'None':
d1 += ' ' + intent['departure_time']
d2 = m2 + '/' + intent['return_day']
if 'return_time' in intent and intent['return_time'] != 'None':
d2 += ' ' + intent['return_time']
self.predefine_acts.append(f"Start on {d1} and return on {d2}.")
# Add specification
spec = ''
if 'max_connections' in intent:
spec += f"The connection limit is {intent['max_connections']} . "
if 'max_price' in intent:
spec += f"The price limit is {intent['max_price']} . "
pref = []
if 'class' in intent and intent['class'] != 'None':
pref.append(f"{intent['class']} class")
if 'airline' in intent:
pref.append(f"{intent['airline']} airline")
if len(pref) == 1:
spec += f"And I prefer {pref[0]} ."
          elif len(pref) == 2:
spec += f"And I prefer {pref[0]} and {pref[1]} ."
self.predefine_acts.append(spec)
self.predefine_acts.extend(
['Yes.', 'Ok.', 'Thank you.', "That's fine, thank you."])
if 'sorry' in _act['text'] or 'no reservation' in _act['text']:
# say that's fine
self.predefine_acts = [self.predefine_acts[-1]
] + self.predefine_acts[:-1]
      elif 'airport' in _act['text'] or 'source' in _act['text'] or 'destination' in _act['text'] \
or 'details' in _act['text'] or 'codes' in _act['text']:
# say airport
self.predefine_acts = [
self.predefine_acts[1]
] + self.predefine_acts[0:1] + self.predefine_acts[2:]
elif 'dates' in _act['text']:
# say dates
self.predefine_acts = [
self.predefine_acts[2]
] + self.predefine_acts[0:2] + self.predefine_acts[3:]
elif 'proceed for booking' in _act['text'] or 'shall' in _act['text'] or 'are you ok with' in _act['text'] \
or 'would you like' in _act['text'] or 'can i' in _act['text']:
# say yes
self.predefine_acts = [
self.predefine_acts[-4]
] + self.predefine_acts[:-4] + self.predefine_acts[-3:]
elif 'wait' in _act['text']:
# say ok
self.predefine_acts = [
self.predefine_acts[-3]
] + self.predefine_acts[:-3] + self.predefine_acts[-2:]
elif 'booked' in _act['text'] or 'has been' in _act['text'] or \
'is done' in _act['text'] or 'is confirmed' in _act['text']:
# say thank you
self.predefine_acts = [
self.predefine_acts[-2]
] + self.predefine_acts[:-2] + self.predefine_acts[-1:]
try:
if self.customer_agent.ref_data is not None:
ref_text = self.customer_agent.ref_data[self.context_id][self.cnt * 2
+ 2]['text']
self.predefine_acts = [ref_text] + self.predefine_acts
except:
pass
for i, t in enumerate(self.predefine_acts):
_act[f"Act -{i}"] = t
self.customer_agent.observe(validate(_act))
def customer_act(self):
if not self.human_eval or len(self.predefine_acts) == 0:
return self.customer_agent.act()
else:
act = self.customer_agent.act()
text = act['text']
if len(text) == 2 and text[0] == '-' and text[1:].isdigit():
text = text[1:]
if int(text) < len(self.predefine_acts):
act.force_set('text', self.predefine_acts[int(text)])
act.force_set('id', 'customer')
print(act['text'])
if 'thank you' in act['text'].lower():
self.human_done = True
return act
def parley(self):
"""
    One self-chat turn between the customer side and the agent side; the
    customer may be played by a human when human_eval is enabled.
"""
self.human_done = False
if self.cnt == 0:
self.context_id = self.episode_cnt + self.start_cid
self.acts = [None, None]
self.agent_first = False
# possibly get customer act first
if self.cnt == 0 and not self.agent_first:
self.acts[0] = Message({
'id': 'customer',
'text': '__SILENCE__',
'episode_done': False
})
else:
if self.cnt == 0:
preact = Message({'text': '__SILENCE__', 'episode_done': False})
preact = self._add_context(preact, 'customer')
self.customer_obs(preact)
act = self.customer_act()
self.acts[0] = act
# add context to the model observation
act = deepcopy(self.acts[0])
act = self._add_context(act, 'agent')
act['return_encoder_state'] = True
    # agent model observes the context and the customer's act
self.agent_agent.observe(validate(act))
# agent agent act
act = self.agent_agent.act()
self.agent_encoder_states = act.pop('encoder_states')
self.acts[1] = act
# customer agent observes model act
act = deepcopy(self.acts[1])
act = self._add_context(act, 'customer')
self.customer_obs(act)
self.update_counters()
self.cnt += 1
if self.episode_done():
score_obj, score_text = self.get_air_score()
self.score = score_obj
#print(score_text)
return True
return False
|
[
"copy.deepcopy",
"os.makedirs",
"ipdb.set_trace",
"random.choice",
"json.dumps",
"parlai.core.worlds.validate",
"random.seed",
"numpy.array",
"parlai.core.message.Message",
"os.path.join"
] |
[((2913, 2954), 'os.path.join', 'os.path.join', (['jsons_path', '"""dev_data.json"""'], {}), "(jsons_path, 'dev_data.json')\n", (2925, 2954), False, 'import os\n'), ((2969, 3008), 'os.path.join', 'os.path.join', (['jsons_path', '"""dev_kb.json"""'], {}), "(jsons_path, 'dev_kb.json')\n", (2981, 3008), False, 'import os\n'), ((3235, 3248), 'random.seed', 'random.seed', ([], {}), '()\n', (3246, 3248), False, 'import random\n'), ((7951, 7973), 'copy.deepcopy', 'deepcopy', (['self.acts[0]'], {}), '(self.acts[0])\n', (7959, 7973), False, 'from copy import deepcopy\n'), ((8324, 8346), 'copy.deepcopy', 'deepcopy', (['self.acts[1]'], {}), '(self.acts[1])\n', (8332, 8346), False, 'from copy import deepcopy\n'), ((11936, 11970), 'os.makedirs', 'os.makedirs', (['outdir'], {'exist_ok': '(True)'}), '(outdir, exist_ok=True)\n', (11947, 11970), False, 'import os\n'), ((11985, 12018), 'os.path.join', 'os.path.join', (['outdir', '"""log.jsonl"""'], {}), "(outdir, 'log.jsonl')\n", (11997, 12018), False, 'import os\n'), ((13141, 13161), 'copy.deepcopy', 'deepcopy', (['self.score'], {}), '(self.score)\n', (13149, 13161), False, 'from copy import deepcopy\n'), ((19402, 19424), 'copy.deepcopy', 'deepcopy', (['self.acts[0]'], {}), '(self.acts[0])\n', (19410, 19424), False, 'from copy import deepcopy\n'), ((19795, 19817), 'copy.deepcopy', 'deepcopy', (['self.acts[1]'], {}), '(self.acts[1])\n', (19803, 19817), False, 'from copy import deepcopy\n'), ((7477, 7498), 'random.choice', 'random.choice', (['[0, 1]'], {}), '([0, 1])\n', (7490, 7498), False, 'import random\n'), ((7603, 7658), 'parlai.core.message.Message', 'Message', (["{'text': '__SILENCE__', 'episode_done': False}"], {}), "({'text': '__SILENCE__', 'episode_done': False})\n", (7610, 7658), False, 'from parlai.core.message import Message\n'), ((8137, 8150), 'parlai.core.worlds.validate', 'validate', (['act'], {}), '(act)\n', (8145, 8150), False, 'from parlai.core.worlds import DialogPartnerWorld, validate\n'), ((8440, 8453), 'parlai.core.worlds.validate', 'validate', (['act'], {}), '(act)\n', (8448, 8453), False, 'from parlai.core.worlds import DialogPartnerWorld, validate\n'), ((12521, 12537), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (12535, 12537), False, 'import ipdb\n'), ((18043, 18057), 'parlai.core.worlds.validate', 'validate', (['_act'], {}), '(_act)\n', (18051, 18057), False, 'from parlai.core.worlds import DialogPartnerWorld, validate\n'), ((18983, 19056), 'parlai.core.message.Message', 'Message', (["{'id': 'customer', 'text': '__SILENCE__', 'episode_done': False}"], {}), "({'id': 'customer', 'text': '__SILENCE__', 'episode_done': False})\n", (18990, 19056), False, 'from parlai.core.message import Message\n'), ((19592, 19605), 'parlai.core.worlds.validate', 'validate', (['act'], {}), '(act)\n', (19600, 19605), False, 'from parlai.core.worlds import DialogPartnerWorld, validate\n'), ((19146, 19201), 'parlai.core.message.Message', 'Message', (["{'text': '__SILENCE__', 'episode_done': False}"], {}), "({'text': '__SILENCE__', 'episode_done': False})\n", (19153, 19201), False, 'from parlai.core.message import Message\n'), ((13081, 13092), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (13089, 13092), True, 'import numpy as np\n'), ((12953, 12966), 'json.dumps', 'json.dumps', (['r'], {}), '(r)\n', (12963, 12966), False, 'import json\n')]
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
CLI for running a Private Lift study
Usage:
pc-cli create_instance <instance_id> --config=<config_file> --role=<pl_role> --game_type=<game_type> --input_path=<input_path> --output_dir=<output_dir> --num_pid_containers=<num_pid_containers> --num_mpc_containers=<num_mpc_containers> [--attribution_rule=<attribution_rule> --aggregation_type=<aggregation_type> --concurrency=<concurrency> --num_files_per_mpc_container=<num_files_per_mpc_container> --padding_size=<padding_size> --k_anonymity_threshold=<k_anonymity_threshold> --hmac_key=<base64_key> --stage_flow=<stage_flow>] [options]
pc-cli validate <instance_id> --config=<config_file> --expected_result_path=<expected_result_path> [--aggregated_result_path=<aggregated_result_path>] [options]
pc-cli run_next <instance_id> --config=<config_file> [--server_ips=<server_ips>] [options]
pc-cli run_stage <instance_id> --stage=<stage> --config=<config_file> [--server_ips=<server_ips> --dry_run] [options]
pc-cli get_instance <instance_id> --config=<config_file> [options]
pc-cli get_server_ips <instance_id> --config=<config_file> [options]
pc-cli get_pid <instance_id> --config=<config_file> [options]
pc-cli get_mpc <instance_id> --config=<config_file> [options]
pc-cli run_instance <instance_id> --config=<config_file> --input_path=<input_path> --num_shards=<num_shards> [--tries_per_stage=<tries_per_stage> --dry_run] [options]
pc-cli run_instances <instance_ids> --config=<config_file> --input_paths=<input_paths> --num_shards_list=<num_shards_list> [--tries_per_stage=<tries_per_stage> --dry_run] [options]
pc-cli run_study <study_id> --config=<config_file> --objective_ids=<objective_ids> --input_paths=<input_paths> [--tries_per_stage=<tries_per_stage> --dry_run] [options]
pc-cli cancel_current_stage <instance_id> --config=<config_file> [options]
pc-cli print_instance <instance_id> --config=<config_file> [options]
pc-cli print_log_urls <instance_id> --config=<config_file> [options]
pc-cli get_attribution_dataset_info --dataset_id=<dataset_id> --config=<config_file> [options]
    pc-cli run_attribution --config=<config_file> --dataset_id=<dataset_id> --input_path=<input_path> --start_date=<start_date> --end_date=<end_date> --attribution_rule=<attribution_rule> --result_type=<result_type> --aggregation_type=<aggregation_type> --concurrency=<concurrency> --num_files_per_mpc_container=<num_files_per_mpc_container> --k_anonymity_threshold=<k_anonymity_threshold> [options]
Options:
-h --help Show this help
--log_path=<path> Override the default path where logs are saved
--verbose Set logging level to DEBUG
"""
import logging
import os
from pathlib import Path, PurePath
from typing import List, Optional
import schema
from docopt import docopt
from fbpcs.pl_coordinator.pl_instance_runner import run_instance, run_instances
from fbpcs.pl_coordinator.pl_study_runner import run_study
from fbpcs.private_computation.entity.private_computation_instance import (
AggregationType,
AttributionRule,
PrivateComputationRole,
PrivateComputationGameType,
)
from fbpcs.private_computation.pc_attribution_runner import (
get_attribution_dataset_info,
run_attribution,
)
from fbpcs.private_computation.stage_flows.private_computation_base_stage_flow import (
PrivateComputationBaseStageFlow,
)
from fbpcs.private_computation.stage_flows.private_computation_decoupled_stage_flow import (
PrivateComputationDecoupledStageFlow,
)
from fbpcs.private_computation.stage_flows.private_computation_stage_flow import (
PrivateComputationStageFlow,
)
from fbpcs.private_computation_cli.private_computation_service_wrapper import (
cancel_current_stage,
create_instance,
get_instance,
get_mpc,
get_pid,
get_server_ips,
print_instance,
print_log_urls,
run_next,
run_stage,
validate,
)
from fbpcs.utils.config_yaml.config_yaml_dict import ConfigYamlDict
def main(argv: Optional[List[str]] = None) -> None:
s = schema.Schema(
{
"create_instance": bool,
"validate": bool,
"run_next": bool,
"run_stage": bool,
"get_instance": bool,
"get_server_ips": bool,
"get_pid": bool,
"get_mpc": bool,
"run_instance": bool,
"run_instances": bool,
"run_study": bool,
"run_attribution": bool,
"cancel_current_stage": bool,
"print_instance": bool,
"print_log_urls": bool,
"get_attribution_dataset_info": bool,
"<instance_id>": schema.Or(None, str),
"<instance_ids>": schema.Or(None, schema.Use(lambda arg: arg.split(","))),
"<study_id>": schema.Or(None, str),
"--config": schema.And(schema.Use(PurePath), os.path.exists),
"--role": schema.Or(
None,
schema.And(
schema.Use(str.upper),
lambda s: s in ("PUBLISHER", "PARTNER"),
schema.Use(PrivateComputationRole),
),
),
"--game_type": schema.Or(
None,
schema.And(
schema.Use(str.upper),
lambda s: s in ("LIFT", "ATTRIBUTION"),
schema.Use(PrivateComputationGameType),
),
),
"--objective_ids": schema.Or(None, schema.Use(lambda arg: arg.split(","))),
"--dataset_id": schema.Or(None, str),
"--input_path": schema.Or(None, str),
"--input_paths": schema.Or(None, schema.Use(lambda arg: arg.split(","))),
"--output_dir": schema.Or(None, str),
"--aggregated_result_path": schema.Or(None, str),
"--expected_result_path": schema.Or(None, str),
"--num_pid_containers": schema.Or(None, schema.Use(int)),
"--num_mpc_containers": schema.Or(None, schema.Use(int)),
"--aggregation_type": schema.Or(None, schema.Use(AggregationType)),
"--attribution_rule": schema.Or(None, schema.Use(AttributionRule)),
"--result_type": schema.Or(None, str),
"--start_date": schema.Or(None, str),
"--end_date": schema.Or(None, str),
"--num_files_per_mpc_container": schema.Or(None, schema.Use(int)),
"--num_shards": schema.Or(None, schema.Use(int)),
"--num_shards_list": schema.Or(
None, schema.Use(lambda arg: arg.split(","))
),
"--server_ips": schema.Or(None, schema.Use(lambda arg: arg.split(","))),
"--concurrency": schema.Or(None, schema.Use(int)),
"--padding_size": schema.Or(None, schema.Use(int)),
"--k_anonymity_threshold": schema.Or(None, schema.Use(int)),
"--hmac_key": schema.Or(None, str),
"--tries_per_stage": schema.Or(None, schema.Use(int)),
"--dry_run": bool,
"--log_path": schema.Or(None, schema.Use(Path)),
"--stage_flow": schema.Or(
None,
schema.Use(
lambda arg: PrivateComputationBaseStageFlow.cls_name_to_cls(arg)
),
),
"--stage": schema.Or(None, str),
"--verbose": bool,
"--help": bool,
}
)
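    # Hedged illustration (hypothetical CLI input): the schema.Use coercions
    # above turn docopt's raw strings into typed values, e.g. a raw
    # "--num_shards" of "4" becomes the int 4, and "--config" of "config.yml"
    # becomes a PurePath that must also exist on disk.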
arguments = s.validate(docopt(__doc__, argv))
config = ConfigYamlDict.from_file(arguments["--config"])
log_path = arguments["--log_path"]
log_level = logging.DEBUG if arguments["--verbose"] else logging.INFO
instance_id = arguments["<instance_id>"]
logging.basicConfig(filename=log_path, level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(log_level)
if arguments["create_instance"]:
logger.info(f"Create instance: {instance_id}")
create_instance(
config=config,
instance_id=instance_id,
role=arguments["--role"],
game_type=arguments["--game_type"],
logger=logger,
input_path=arguments["--input_path"],
output_dir=arguments["--output_dir"],
num_pid_containers=arguments["--num_pid_containers"],
num_mpc_containers=arguments["--num_mpc_containers"],
attribution_rule=arguments["--attribution_rule"],
aggregation_type=arguments["--aggregation_type"],
concurrency=arguments["--concurrency"],
num_files_per_mpc_container=arguments["--num_files_per_mpc_container"],
hmac_key=arguments["--hmac_key"],
padding_size=arguments["--padding_size"],
k_anonymity_threshold=arguments["--k_anonymity_threshold"],
stage_flow_cls=arguments["--stage_flow"],
)
elif arguments["run_next"]:
logger.info(f"run_next instance: {instance_id}")
run_next(
config=config,
instance_id=instance_id,
logger=logger,
server_ips=arguments["--server_ips"],
)
elif arguments["run_stage"]:
stage_name = arguments["--stage"]
logger.info(f"run_stage: {instance_id=}, {stage_name=}")
instance = get_instance(config, instance_id, logger)
stage = instance.stage_flow.get_stage_from_str(stage_name)
run_stage(
config=config,
instance_id=instance_id,
stage=stage,
logger=logger,
server_ips=arguments["--server_ips"],
dry_run=arguments["--dry_run"],
)
elif arguments["get_instance"]:
logger.info(f"Get instance: {instance_id}")
instance = get_instance(config, instance_id, logger)
logger.info(instance)
elif arguments["get_server_ips"]:
get_server_ips(config, instance_id, logger)
elif arguments["get_pid"]:
logger.info(f"Get PID instance: {instance_id}")
get_pid(config, instance_id, logger)
elif arguments["get_mpc"]:
logger.info(f"Get MPC instance: {instance_id}")
get_mpc(config, instance_id, logger)
elif arguments["validate"]:
logger.info(f"Validate instance: {instance_id}")
validate(
config=config,
instance_id=instance_id,
aggregated_result_path=arguments["--aggregated_result_path"],
expected_result_path=arguments["--expected_result_path"],
logger=logger,
)
elif arguments["run_instance"]:
stage_flow = PrivateComputationStageFlow
logger.info(f"Running instance: {instance_id}")
run_instance(
config=config,
instance_id=instance_id,
input_path=arguments["--input_path"],
game_type=arguments["--game_type"],
num_mpc_containers=arguments["--num_shards"],
num_pid_containers=arguments["--num_shards"],
stage_flow=stage_flow,
logger=logger,
num_tries=arguments["--tries_per_stage"],
dry_run=arguments["--dry_run"],
)
elif arguments["run_instances"]:
stage_flow = PrivateComputationStageFlow
run_instances(
config=config,
instance_ids=arguments["<instance_ids>"],
input_paths=arguments["--input_paths"],
num_shards_list=arguments["--num_shards_list"],
stage_flow=stage_flow,
logger=logger,
num_tries=arguments["--tries_per_stage"],
dry_run=arguments["--dry_run"],
)
elif arguments["run_study"]:
stage_flow = PrivateComputationStageFlow
run_study(
config=config,
study_id=arguments["<study_id>"],
objective_ids=arguments["--objective_ids"],
input_paths=arguments["--input_paths"],
logger=logger,
stage_flow=stage_flow,
num_tries=arguments["--tries_per_stage"],
dry_run=arguments["--dry_run"],
)
elif arguments["run_attribution"]:
stage_flow = PrivateComputationDecoupledStageFlow
run_attribution(
config=config,
dataset_id=arguments["--dataset_id"],
input_path=arguments["--input_path"],
start_date=arguments["--start_date"],
end_date=arguments["--end_date"],
attribution_rule=arguments["--attribution_rule"],
aggregation_type=arguments["--aggregation_type"],
concurrency=arguments["--concurrency"],
num_files_per_mpc_container=arguments["--num_files_per_mpc_container"],
k_anonymity_threshold=arguments["--k_anonymity_threshold"],
result_type=arguments["--result_type"],
logger=logger,
stage_flow=stage_flow,
num_tries=2,
)
elif arguments["cancel_current_stage"]:
logger.info(f"Canceling the current running stage of instance: {instance_id}")
cancel_current_stage(
config=config,
instance_id=instance_id,
logger=logger,
)
elif arguments["print_instance"]:
print_instance(
config=config,
instance_id=instance_id,
logger=logger,
)
elif arguments["print_log_urls"]:
print_log_urls(
config=config,
instance_id=instance_id,
logger=logger,
)
elif arguments["get_attribution_dataset_info"]:
print(
get_attribution_dataset_info(
config=config, dataset_id=arguments["--dataset_id"], logger=logger
)
)
if __name__ == "__main__":
main()
|
[
"schema.Or",
"fbpcs.private_computation_cli.private_computation_service_wrapper.run_stage",
"fbpcs.private_computation.pc_attribution_runner.get_attribution_dataset_info",
"docopt.docopt",
"fbpcs.pl_coordinator.pl_study_runner.run_study",
"fbpcs.private_computation_cli.private_computation_service_wrapper.run_next",
"fbpcs.private_computation_cli.private_computation_service_wrapper.create_instance",
"fbpcs.utils.config_yaml.config_yaml_dict.ConfigYamlDict.from_file",
"fbpcs.private_computation_cli.private_computation_service_wrapper.get_server_ips",
"fbpcs.private_computation_cli.private_computation_service_wrapper.print_log_urls",
"schema.Use",
"fbpcs.private_computation_cli.private_computation_service_wrapper.print_instance",
"fbpcs.private_computation_cli.private_computation_service_wrapper.validate",
"fbpcs.private_computation_cli.private_computation_service_wrapper.cancel_current_stage",
"fbpcs.private_computation_cli.private_computation_service_wrapper.get_mpc",
"fbpcs.private_computation.pc_attribution_runner.run_attribution",
"fbpcs.pl_coordinator.pl_instance_runner.run_instance",
"fbpcs.pl_coordinator.pl_instance_runner.run_instances",
"logging.basicConfig",
"fbpcs.private_computation.stage_flows.private_computation_base_stage_flow.PrivateComputationBaseStageFlow.cls_name_to_cls",
"fbpcs.private_computation_cli.private_computation_service_wrapper.get_instance",
"fbpcs.private_computation_cli.private_computation_service_wrapper.get_pid",
"logging.getLogger"
] |
[((7706, 7753), 'fbpcs.utils.config_yaml.config_yaml_dict.ConfigYamlDict.from_file', 'ConfigYamlDict.from_file', (["arguments['--config']"], {}), "(arguments['--config'])\n", (7730, 7753), False, 'from fbpcs.utils.config_yaml.config_yaml_dict import ConfigYamlDict\n'), ((7918, 7976), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 'log_path', 'level': 'logging.INFO'}), '(filename=log_path, level=logging.INFO)\n', (7937, 7976), False, 'import logging\n'), ((7990, 8017), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (8007, 8017), False, 'import logging\n'), ((7670, 7691), 'docopt.docopt', 'docopt', (['__doc__', 'argv'], {}), '(__doc__, argv)\n', (7676, 7691), False, 'from docopt import docopt\n'), ((8151, 8903), 'fbpcs.private_computation_cli.private_computation_service_wrapper.create_instance', 'create_instance', ([], {'config': 'config', 'instance_id': 'instance_id', 'role': "arguments['--role']", 'game_type': "arguments['--game_type']", 'logger': 'logger', 'input_path': "arguments['--input_path']", 'output_dir': "arguments['--output_dir']", 'num_pid_containers': "arguments['--num_pid_containers']", 'num_mpc_containers': "arguments['--num_mpc_containers']", 'attribution_rule': "arguments['--attribution_rule']", 'aggregation_type': "arguments['--aggregation_type']", 'concurrency': "arguments['--concurrency']", 'num_files_per_mpc_container': "arguments['--num_files_per_mpc_container']", 'hmac_key': "arguments['--hmac_key']", 'padding_size': "arguments['--padding_size']", 'k_anonymity_threshold': "arguments['--k_anonymity_threshold']", 'stage_flow_cls': "arguments['--stage_flow']"}), "(config=config, instance_id=instance_id, role=arguments[\n '--role'], game_type=arguments['--game_type'], logger=logger,\n input_path=arguments['--input_path'], output_dir=arguments[\n '--output_dir'], num_pid_containers=arguments['--num_pid_containers'],\n num_mpc_containers=arguments['--num_mpc_containers'], attribution_rule=\n arguments['--attribution_rule'], aggregation_type=arguments[\n '--aggregation_type'], concurrency=arguments['--concurrency'],\n num_files_per_mpc_container=arguments['--num_files_per_mpc_container'],\n hmac_key=arguments['--hmac_key'], padding_size=arguments[\n '--padding_size'], k_anonymity_threshold=arguments[\n '--k_anonymity_threshold'], stage_flow_cls=arguments['--stage_flow'])\n", (8166, 8903), False, 'from fbpcs.private_computation_cli.private_computation_service_wrapper import cancel_current_stage, create_instance, get_instance, get_mpc, get_pid, get_server_ips, print_instance, print_log_urls, run_next, run_stage, validate\n'), ((4873, 4893), 'schema.Or', 'schema.Or', (['None', 'str'], {}), '(None, str)\n', (4882, 4893), False, 'import schema\n'), ((5008, 5028), 'schema.Or', 'schema.Or', (['None', 'str'], {}), '(None, str)\n', (5017, 5028), False, 'import schema\n'), ((5782, 5802), 'schema.Or', 'schema.Or', (['None', 'str'], {}), '(None, str)\n', (5791, 5802), False, 'import schema\n'), ((5832, 5852), 'schema.Or', 'schema.Or', (['None', 'str'], {}), '(None, str)\n', (5841, 5852), False, 'import schema\n'), ((5968, 5988), 'schema.Or', 'schema.Or', (['None', 'str'], {}), '(None, str)\n', (5977, 5988), False, 'import schema\n'), ((6030, 6050), 'schema.Or', 'schema.Or', (['None', 'str'], {}), '(None, str)\n', (6039, 6050), False, 'import schema\n'), ((6090, 6110), 'schema.Or', 'schema.Or', (['None', 'str'], {}), '(None, str)\n', (6099, 6110), False, 'import schema\n'), ((6441, 6461), 'schema.Or', 'schema.Or', (['None', 'str'], {}), '(None, str)\n', (6450, 6461), False, 'import schema\n'), ((6491, 6511), 'schema.Or', 'schema.Or', (['None', 'str'], {}), '(None, str)\n', (6500, 6511), False, 'import schema\n'), ((6539, 6559), 'schema.Or', 'schema.Or', (['None', 'str'], {}), '(None, str)\n', (6548, 6559), False, 'import schema\n'), ((7133, 7153), 'schema.Or', 'schema.Or', (['None', 'str'], {}), '(None, str)\n', (7142, 7153), False, 'import schema\n'), ((7545, 7565), 'schema.Or', 'schema.Or', (['None', 'str'], {}), '(None, str)\n', (7554, 7565), False, 'import schema\n'), ((9170, 9276), 'fbpcs.private_computation_cli.private_computation_service_wrapper.run_next', 'run_next', ([], {'config': 'config', 'instance_id': 'instance_id', 'logger': 'logger', 'server_ips': "arguments['--server_ips']"}), "(config=config, instance_id=instance_id, logger=logger, server_ips=\n arguments['--server_ips'])\n", (9178, 9276), False, 'from fbpcs.private_computation_cli.private_computation_service_wrapper import cancel_current_stage, create_instance, get_instance, get_mpc, get_pid, get_server_ips, print_instance, print_log_urls, run_next, run_stage, validate\n'), ((5065, 5085), 'schema.Use', 'schema.Use', (['PurePath'], {}), '(PurePath)\n', (5075, 5085), False, 'import schema\n'), ((6164, 6179), 'schema.Use', 'schema.Use', (['int'], {}), '(int)\n', (6174, 6179), False, 'import schema\n'), ((6234, 6249), 'schema.Use', 'schema.Use', (['int'], {}), '(int)\n', (6244, 6249), False, 'import schema\n'), ((6302, 6329), 'schema.Use', 'schema.Use', (['AggregationType'], {}), '(AggregationType)\n', (6312, 6329), False, 'import schema\n'), ((6382, 6409), 'schema.Use', 'schema.Use', (['AttributionRule'], {}), '(AttributionRule)\n', (6392, 6409), False, 'import schema\n'), ((6622, 6637), 'schema.Use', 'schema.Use', (['int'], {}), '(int)\n', (6632, 6637), False, 'import schema\n'), ((6684, 6699), 'schema.Use', 'schema.Use', (['int'], {}), '(int)\n', (6694, 6699), False, 'import schema\n'), ((6952, 6967), 'schema.Use', 'schema.Use', (['int'], {}), '(int)\n', (6962, 6967), False, 'import schema\n'), ((7016, 7031), 'schema.Use', 'schema.Use', (['int'], {}), '(int)\n', (7026, 7031), False, 'import schema\n'), ((7089, 7104), 'schema.Use', 'schema.Use', (['int'], {}), '(int)\n', (7099, 7104), False, 'import schema\n'), ((7204, 7219), 'schema.Use', 'schema.Use', (['int'], {}), '(int)\n', (7214, 7219), False, 'import schema\n'), ((7295, 7311), 'schema.Use', 'schema.Use', (['Path'], {}), '(Path)\n', (7305, 7311), False, 'import schema\n'), ((9490, 9531), 'fbpcs.private_computation_cli.private_computation_service_wrapper.get_instance', 'get_instance', (['config', 'instance_id', 'logger'], {}), '(config, instance_id, logger)\n', (9502, 9531), False, 'from fbpcs.private_computation_cli.private_computation_service_wrapper import cancel_current_stage, create_instance, get_instance, get_mpc, get_pid, get_server_ips, print_instance, print_log_urls, run_next, run_stage, validate\n'), ((9607, 9764), 'fbpcs.private_computation_cli.private_computation_service_wrapper.run_stage', 'run_stage', ([], {'config': 'config', 'instance_id': 'instance_id', 'stage': 'stage', 'logger': 'logger', 'server_ips': "arguments['--server_ips']", 'dry_run': "arguments['--dry_run']"}), "(config=config, instance_id=instance_id, stage=stage, logger=\n logger, server_ips=arguments['--server_ips'], dry_run=arguments[\n '--dry_run'])\n", (9616, 9764), False, 'from fbpcs.private_computation_cli.private_computation_service_wrapper import cancel_current_stage, create_instance, get_instance, get_mpc, get_pid, get_server_ips, print_instance, print_log_urls, run_next, run_stage, validate\n'), ((5207, 5228), 'schema.Use', 'schema.Use', (['str.upper'], {}), '(str.upper)\n', (5217, 5228), False, 'import schema\n'), ((5311, 5345), 'schema.Use', 'schema.Use', (['PrivateComputationRole'], {}), '(PrivateComputationRole)\n', (5321, 5345), False, 'import schema\n'), ((5489, 5510), 'schema.Use', 'schema.Use', (['str.upper'], {}), '(str.upper)\n', (5499, 5510), False, 'import schema\n'), ((5592, 5630), 'schema.Use', 'schema.Use', (['PrivateComputationGameType'], {}), '(PrivateComputationGameType)\n', (5602, 5630), False, 'import schema\n'), ((9945, 9986), 'fbpcs.private_computation_cli.private_computation_service_wrapper.get_instance', 'get_instance', (['config', 'instance_id', 'logger'], {}), '(config, instance_id, logger)\n', (9957, 9986), False, 'from fbpcs.private_computation_cli.private_computation_service_wrapper import cancel_current_stage, create_instance, get_instance, get_mpc, get_pid, get_server_ips, print_instance, print_log_urls, run_next, run_stage, validate\n'), ((7435, 7487), 'fbpcs.private_computation.stage_flows.private_computation_base_stage_flow.PrivateComputationBaseStageFlow.cls_name_to_cls', 'PrivateComputationBaseStageFlow.cls_name_to_cls', (['arg'], {}), '(arg)\n', (7482, 7487), False, 'from fbpcs.private_computation.stage_flows.private_computation_base_stage_flow import PrivateComputationBaseStageFlow\n'), ((10063, 10106), 'fbpcs.private_computation_cli.private_computation_service_wrapper.get_server_ips', 'get_server_ips', (['config', 'instance_id', 'logger'], {}), '(config, instance_id, logger)\n', (10077, 10106), False, 'from fbpcs.private_computation_cli.private_computation_service_wrapper import cancel_current_stage, create_instance, get_instance, get_mpc, get_pid, get_server_ips, print_instance, print_log_urls, run_next, run_stage, validate\n'), ((10202, 10238), 'fbpcs.private_computation_cli.private_computation_service_wrapper.get_pid', 'get_pid', (['config', 'instance_id', 'logger'], {}), '(config, instance_id, logger)\n', (10209, 10238), False, 'from fbpcs.private_computation_cli.private_computation_service_wrapper import cancel_current_stage, create_instance, get_instance, get_mpc, get_pid, get_server_ips, print_instance, print_log_urls, run_next, run_stage, validate\n'), ((10334, 10370), 'fbpcs.private_computation_cli.private_computation_service_wrapper.get_mpc', 'get_mpc', (['config', 'instance_id', 'logger'], {}), '(config, instance_id, logger)\n', (10341, 10370), False, 'from fbpcs.private_computation_cli.private_computation_service_wrapper import cancel_current_stage, create_instance, get_instance, get_mpc, get_pid, get_server_ips, print_instance, print_log_urls, run_next, run_stage, validate\n'), ((10468, 10661), 'fbpcs.private_computation_cli.private_computation_service_wrapper.validate', 'validate', ([], {'config': 'config', 'instance_id': 'instance_id', 'aggregated_result_path': "arguments['--aggregated_result_path']", 'expected_result_path': "arguments['--expected_result_path']", 'logger': 'logger'}), "(config=config, instance_id=instance_id, aggregated_result_path=\n arguments['--aggregated_result_path'], expected_result_path=arguments[\n '--expected_result_path'], logger=logger)\n", (10476, 10661), False, 'from fbpcs.private_computation_cli.private_computation_service_wrapper import cancel_current_stage, create_instance, get_instance, get_mpc, get_pid, get_server_ips, print_instance, print_log_urls, run_next, run_stage, validate\n'), ((10872, 11222), 'fbpcs.pl_coordinator.pl_instance_runner.run_instance', 'run_instance', ([], {'config': 'config', 'instance_id': 'instance_id', 'input_path': "arguments['--input_path']", 'game_type': "arguments['--game_type']", 'num_mpc_containers': "arguments['--num_shards']", 'num_pid_containers': "arguments['--num_shards']", 'stage_flow': 'stage_flow', 'logger': 'logger', 'num_tries': "arguments['--tries_per_stage']", 'dry_run': "arguments['--dry_run']"}), "(config=config, instance_id=instance_id, input_path=arguments[\n '--input_path'], game_type=arguments['--game_type'], num_mpc_containers\n =arguments['--num_shards'], num_pid_containers=arguments['--num_shards'\n ], stage_flow=stage_flow, logger=logger, num_tries=arguments[\n '--tries_per_stage'], dry_run=arguments['--dry_run'])\n", (10884, 11222), False, 'from fbpcs.pl_coordinator.pl_instance_runner import run_instance, run_instances\n'), ((11428, 11712), 'fbpcs.pl_coordinator.pl_instance_runner.run_instances', 'run_instances', ([], {'config': 'config', 'instance_ids': "arguments['<instance_ids>']", 'input_paths': "arguments['--input_paths']", 'num_shards_list': "arguments['--num_shards_list']", 'stage_flow': 'stage_flow', 'logger': 'logger', 'num_tries': "arguments['--tries_per_stage']", 'dry_run': "arguments['--dry_run']"}), "(config=config, instance_ids=arguments['<instance_ids>'],\n input_paths=arguments['--input_paths'], num_shards_list=arguments[\n '--num_shards_list'], stage_flow=stage_flow, logger=logger, num_tries=\n arguments['--tries_per_stage'], dry_run=arguments['--dry_run'])\n", (11441, 11712), False, 'from fbpcs.pl_coordinator.pl_instance_runner import run_instance, run_instances\n'), ((11896, 12164), 'fbpcs.pl_coordinator.pl_study_runner.run_study', 'run_study', ([], {'config': 'config', 'study_id': "arguments['<study_id>']", 'objective_ids': "arguments['--objective_ids']", 'input_paths': "arguments['--input_paths']", 'logger': 'logger', 'stage_flow': 'stage_flow', 'num_tries': "arguments['--tries_per_stage']", 'dry_run': "arguments['--dry_run']"}), "(config=config, study_id=arguments['<study_id>'], objective_ids=\n arguments['--objective_ids'], input_paths=arguments['--input_paths'],\n logger=logger, stage_flow=stage_flow, num_tries=arguments[\n '--tries_per_stage'], dry_run=arguments['--dry_run'])\n", (11905, 12164), False, 'from fbpcs.pl_coordinator.pl_study_runner import run_study\n'), ((12363, 12940), 'fbpcs.private_computation.pc_attribution_runner.run_attribution', 'run_attribution', ([], {'config': 'config', 'dataset_id': "arguments['--dataset_id']", 'input_path': "arguments['--input_path']", 'start_date': "arguments['--start_date']", 'end_date': "arguments['--end_date']", 'attribution_rule': "arguments['--attribution_rule']", 'aggregation_type': "arguments['--aggregation_type']", 'concurrency': "arguments['--concurrency']", 'num_files_per_mpc_container': "arguments['--num_files_per_mpc_container']", 'k_anonymity_threshold': "arguments['--k_anonymity_threshold']", 'result_type': "arguments['--result_type']", 'logger': 'logger', 'stage_flow': 'stage_flow', 'num_tries': '(2)'}), "(config=config, dataset_id=arguments['--dataset_id'],\n input_path=arguments['--input_path'], start_date=arguments[\n '--start_date'], end_date=arguments['--end_date'], attribution_rule=\n arguments['--attribution_rule'], aggregation_type=arguments[\n '--aggregation_type'], concurrency=arguments['--concurrency'],\n num_files_per_mpc_container=arguments['--num_files_per_mpc_container'],\n k_anonymity_threshold=arguments['--k_anonymity_threshold'], result_type\n =arguments['--result_type'], logger=logger, stage_flow=stage_flow,\n num_tries=2)\n", (12378, 12940), False, 'from fbpcs.private_computation.pc_attribution_runner import get_attribution_dataset_info, run_attribution\n'), ((13224, 13299), 'fbpcs.private_computation_cli.private_computation_service_wrapper.cancel_current_stage', 'cancel_current_stage', ([], {'config': 'config', 'instance_id': 'instance_id', 'logger': 'logger'}), '(config=config, instance_id=instance_id, logger=logger)\n', (13244, 13299), False, 'from fbpcs.private_computation_cli.private_computation_service_wrapper import cancel_current_stage, create_instance, get_instance, get_mpc, get_pid, get_server_ips, print_instance, print_log_urls, run_next, run_stage, validate\n'), ((13393, 13462), 'fbpcs.private_computation_cli.private_computation_service_wrapper.print_instance', 'print_instance', ([], {'config': 'config', 'instance_id': 'instance_id', 'logger': 'logger'}), '(config=config, instance_id=instance_id, logger=logger)\n', (13407, 13462), False, 'from fbpcs.private_computation_cli.private_computation_service_wrapper import cancel_current_stage, create_instance, get_instance, get_mpc, get_pid, get_server_ips, print_instance, print_log_urls, run_next, run_stage, validate\n'), ((13556, 13625), 'fbpcs.private_computation_cli.private_computation_service_wrapper.print_log_urls', 'print_log_urls', ([], {'config': 'config', 'instance_id': 'instance_id', 'logger': 'logger'}), '(config=config, instance_id=instance_id, logger=logger)\n', (13570, 13625), False, 'from fbpcs.private_computation_cli.private_computation_service_wrapper import cancel_current_stage, create_instance, get_instance, get_mpc, get_pid, get_server_ips, print_instance, print_log_urls, run_next, run_stage, validate\n'), ((13752, 13853), 'fbpcs.private_computation.pc_attribution_runner.get_attribution_dataset_info', 'get_attribution_dataset_info', ([], {'config': 'config', 'dataset_id': "arguments['--dataset_id']", 'logger': 'logger'}), "(config=config, dataset_id=arguments[\n '--dataset_id'], logger=logger)\n", (13780, 13853), False, 'from fbpcs.private_computation.pc_attribution_runner import get_attribution_dataset_info, run_attribution\n')]
|
#!/usr/bin/python3
from models import storage
from models.base_model import BaseModel
from models.employee import Employee
all_objs = storage.all()
print("-- Reloaded objects --")
for obj_id in all_objs.keys():
obj = all_objs[obj_id]
print(obj)
print("-- Create a new Employee --")
my_user = Employee()
my_user.first_name = "Betty"
my_user.last_name = "Holberton"
my_user.email = "<EMAIL>"
my_user.save()
print(my_user)
print("-- Create a new User 2 --")
my_user2 = Employee()
my_user2.first_name = "John"
my_user2.email = "<EMAIL>"
my_user2.save()
print(my_user2)
|
[
"models.storage.all",
"models.employee.Employee"
] |
[((135, 148), 'models.storage.all', 'storage.all', ([], {}), '()\n', (146, 148), False, 'from models import storage\n'), ((302, 312), 'models.employee.Employee', 'Employee', ([], {}), '()\n', (310, 312), False, 'from models.employee import Employee\n'), ((477, 487), 'models.employee.Employee', 'Employee', ([], {}), '()\n', (485, 487), False, 'from models.employee import Employee\n')]
|
"""Utilities for setting up a Python logger."""
import logging
def get_logger() -> logging.Logger:
""" Set up a logger to print out log messages in the following format:
    {TIME} {LOGGING LEVEL} {FILENAME GENERATING THE LOG MESSAGE} line {LINE NUMBER} {PROCESS ID} {LOG MESSAGE}
"""
logger_name = "main-logger"
logger = logging.getLogger(logger_name)
logger.setLevel(logging.INFO)
if not logger.handlers:
handler = logging.StreamHandler()
fmt = "[%(asctime)s %(levelname)s %(filename)s line %(lineno)d %(process)d] %(message)s"
handler.setFormatter(logging.Formatter(fmt))
logger.addHandler(handler)
return logger
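# Minimal usage sketch (illustrative addition, not part of the original
# module): exercises get_logger() when the file is run directly.
if __name__ == "__main__":
    logger = get_logger()
    logger.info("logger configured")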
|
[
"logging.Formatter",
"logging.StreamHandler",
"logging.getLogger"
] |
[((328, 358), 'logging.getLogger', 'logging.getLogger', (['logger_name'], {}), '(logger_name)\n', (345, 358), False, 'import logging\n'), ((439, 462), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (460, 462), False, 'import logging\n'), ((589, 611), 'logging.Formatter', 'logging.Formatter', (['fmt'], {}), '(fmt)\n', (606, 611), False, 'import logging\n')]
|
"""
This resource provides information about the data holdings of the
server. This information is used by ArcGIS for Desktop and other
clients to validate data paths referenced by GIS services.
You can register new data items with the server by using the
Register Data Item operation. Use the Find Data Items operation to
search through the hierarchy of data items.
The Compute Ref Count operation counts and lists all references to a
specific data item. This operation helps you determine if a
particular data item can be safely deleted or refreshed.
"""
from __future__ import absolute_import
import os
import re
import json
from .._common import BaseServer
from .._common.util import contextmanager, _tempinput
########################################################################
class DataStoreManager(BaseServer):
"""
This resource provides information about the data holdings of the
server, as well as the ability to manage (add new items, update primary
data store, remove a data store item, etc) the data store. Data items
are used by ArcGIS for Desktop and other clients to validate data paths
referenced by GIS services.
.. note::
A relational data store type represents a database platform that has been
registered for use on a portal's hosting server by the ArcGIS Server
administrator. Each relational data store type describes the
properties ArcGIS Server requires in order to connect to an instance of
a database for a particular platform. At least one registered
relational data store type is required before client applications such
as Insights for ArcGIS can create Relational Database Connection portal
items.
================== ====================================================================
**Argument** **Description**
------------------ --------------------------------------------------------------------
url Required string. The URL to the Data Store URL.
------------------ --------------------------------------------------------------------
gis Optional string. The GIS, Server, or ServicesDirectory object.
================== ====================================================================
"""
_con = None
_json_dict = None
_url = None
_json = None
_gis = None
_datastores = None
#----------------------------------------------------------------------
def __init__(self,
url,
gis=None,
**kwargs):
"""Constructor
Inputs:
url - admin url
gis - gis object
           initialize - optional, initializes the components in the class
"""
connection = kwargs.pop('connection', None)
        initialize = kwargs.pop('initialize', False)
super(DataStoreManager, self).__init__(
gis=gis,
url=url)
if hasattr(gis, '_con'):
self._con = gis._con
elif hasattr(gis, 'post'):
self._con = gis
if connection:
self._con = connection
self._url = url
if initialize:
self._init()
#----------------------------------------------------------------------
def __str__(self):
return '<%s for %s>' % (type(self).__name__, self._url)
#----------------------------------------------------------------------
def __repr__(self):
return '<%s for %s>' % (type(self).__name__, self._url)
#----------------------------------------------------------------------
def list(self):
"""Retrieves a list of datastore objects.
:return:
The list of datastore items.
"""
self._datastores = None
if self._datastores is None:
self._datastores = []
for item in self.data_items['rootItems']:
for path in self.search(parent_path=item)['items']:
self._datastores.append(Datastore(datastore=self,
path=path['path'],
datadict=None))
return self._datastores
#----------------------------------------------------------------------
@property
def config(self):
"""
Gets the data store configuration properties. These properties
affect the behavior of the data holdings of the server. For
example, the blockDataCopy property - when this property is false,
or not set at all, copying data to the site when publishing services
from a client application is allowed. This is the default behavior.
When this property is true, the client application is not allowed to
copy data to the site when publishing. Rather, the publisher is
required to register data items through which the service being
        published can reference data. Values: true | false
        """
        # TODO: add a link to, or a list of, the possible data store configuration properties.
params = {
"f" : "json"
}
url = self._url + "/config"
return self._con.get(path=url, params=params)
#----------------------------------------------------------------------
@config.setter
def config(self, config):
"""
This operation allows you to update the data store configuration
You can use this to allow or block the automatic copying of data
to the server at publish time
================== ====================================================================
**Argument** **Description**
------------------ --------------------------------------------------------------------
config Required string. A JSON string containing the data store configuration.
================== ====================================================================
:return:
JSON dictionary of the set configuration properties.
"""
if config is None:
config = {}
params = {
"f" : "json",
"datastoreConfig" : config
}
url = self._url + "/config/update"
return self._con.post(path=url, postdata=params)
#----------------------------------------------------------------------
    def federate_data_item(self, path):
"""
This operation can be used to create a data store item in the portal
for a data store that has been registered with one of the portal's
federated ArcGIS Server sites.
Once the operation is complete, a data store item is created in the
portal with which the ArcGIS Server site is federated. If the data
store is registered with only this federated server, no further
steps are required. However, if the data store is registered with
multiple federated servers and you want any of those servers to
access the data store item, you need to bind the data store item
with an additional federated server or servers using the
`PortalDataStore.register` method.
================== ====================================================================
**Argument** **Description**
------------------ --------------------------------------------------------------------
path Required string. The item path for the database, cloud, or file
share data store for which you want to create a data store item.
================== ====================================================================
:return:
Boolean
"""
if path[0] != "/":
path = "/%s" % path
params = {"f" : "json",
"itemPath" : path}
        url = "%s/federateDataItem" % self._url
res = self._con.post(url, params)
if 'success' in res:
return res['success']
return res
#----------------------------------------------------------------------
def get(self, path):
"""
Retrieves the data item object at the given path.
================== ====================================================================
**Argument** **Description**
------------------ --------------------------------------------------------------------
path Required string. The path to the data item.
================== ====================================================================
:return:
The data item object, None if not found.
"""
if path[0] != "/":
path = "/%s" % path
params = {"f" : "json"}
urlpath = self._url + "/items" + path
datadict = self._con.post(urlpath, params)
if 'status' not in datadict:
return Datastore(self, "/items" + path, datadict)
else:
return None
#----------------------------------------------------------------------
def add_folder(self,
name,
server_path,
client_path=None):
"""
Registers a folder with the data store.
================== ====================================================================
**Argument** **Description**
------------------ --------------------------------------------------------------------
name Required string. The unique fileshare name on the server.
------------------ --------------------------------------------------------------------
server_path Required string. The path to the folder from the server (and client, if shared path).
------------------ --------------------------------------------------------------------
client_path Optional string. If folder is replicated, the path to the folder from
the client; if folder is shared, don't set this parameter.
================== ====================================================================
:return:
The data item if successfully registered, None otherwise.
"""
conn_type = "shared"
if client_path is not None:
conn_type = "replicated"
item = {
"type" : "folder",
"path" : "/fileShares/" + name,
"info" : {
"path" : server_path,
"dataStoreConnectionType" : conn_type
}
}
if client_path is not None:
item['clientPath'] = client_path
res = self._register_data_item(item=item)
if res['status'] == 'success' or res['status'] == 'exists':
return Datastore(self, "/fileShares/" + name)
else:
return None
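    # Illustrative call (sketch only; the share name and UNC path below are
    # placeholder assumptions, not values from this module):
    #   folder = datastore_manager.add_folder(
    #       name="gis_data", server_path=r"\\fileserver\gis_data")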
#----------------------------------------------------------------------
def add(self,
name,
item):
"""
Registers a new data item with the data store.
================== ====================================================================
**Argument** **Description**
------------------ --------------------------------------------------------------------
name Required string. The name of the new data item.
------------------ --------------------------------------------------------------------
item Required string. The dictionary representing the data item.
See http://resources.arcgis.com/en/help/arcgis-rest-api/index.html#//02r3000001s9000000
================== ====================================================================
:return:
The data item if registered successfully, None otherwise.
"""
res = self._register_data_item(item=item)
if res['status'] == 'success' or res['status'] == 'exists':
return Datastore(self, item['path'])
else:
#print(str(res))
return None
#----------------------------------------------------------------------
def add_bigdata(self,
name,
server_path=None,
connection_type="fileShare"):
"""
Registers a bigdata fileshare with the data store.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
name Required string. The unique bigdata fileshare name on the server.
--------------- --------------------------------------------------------------------
server_path Optional string. The path to the folder from the server.
--------------- --------------------------------------------------------------------
connection_type Optional string. Allows for the setting of the types of big data store.
The value 'fileShare' is used for local big data stores, and for
cloud stores, the connection_type should be 'dataStore'. The value
'fileShare' is the default value.
=============== ====================================================================
:return:
The big data fileshare if registered successfully, None otherwise.
"""
output = None
pattern = r'\\\\[a-zA-Z]+'
if re.match(pattern, server_path) is not None: # starts with double backslash, double the backslashes
server_path = server_path.replace('\\', '\\\\')
path_str = '{"path":"' + server_path + '"}'
item = {
"path": "/bigDataFileShares/" + name,
"type": "bigDataFileShare",
"info": {
"connectionString": path_str,
"connectionType": connection_type
}
}
res = self._register_data_item(item=item)
if res['status'] == 'success' or res['status'] == 'exists':
output = Datastore(self, "/bigDataFileShares/" + name)
return output
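    # Illustrative call (sketch only; the share name and path below are
    # placeholder assumptions):
    #   bds = datastore_manager.add_bigdata(
    #       name="hurricanes", server_path=r"\\fileserver\bigdata")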
#----------------------------------------------------------------------
def generate_connection_string(self, sde):
"""
Converts an SDE connection file to a string with encrypted password.
=============== ====================================================
**Parameters** **Description**
--------------- ----------------------------------------------------
sde required string. Path to SDE connection file.
=============== ====================================================
returns: string on success, None on failure
**Usage**:
        >>> con = dm.generate_connection_string(r"c:\myfolder\postgres_db.sde")
>>> print(con)
'ENCRYPTED_PASSWORD=************************;SERVER=localhost;
INSTANCE=sde:postgresql:localhost,5432;DBCLIENT=postgresql;
DB_CONNECTION_PROPERTIES=localhost,5432;DATABASE=esri_spatial;
USER=sde;VERSION=sde.DEFAULT;AUTHENTICATION_MODE=DBMS'
"""
if str(sde).lower().endswith('.sde'):
from arcgis.gis.server._common import ServerConnection
from arcgis.gis.server.catalog import ServicesDirectory
from arcgis.gis.server import Uploads
up = Uploads(url=self._con.baseurl.replace("rest/services", "admin/uploads"),
gis=self._con)
if self._con.portal_connection:
d = ServicesDirectory(url=self._con.baseurl,
portal_connection=self._con.portal_connection)
elif isinstance(self._con, ServerConnection):
d = ServicesDirectory(url=self._con.baseurl)
d._con = self._con
try:
service = d.get("PublishingTools", 'System')
except:
service = d.get("PublishingToolsEx", 'System')
upload_res = up.upload(path=sde)
if upload_res[0] == True:
res = service.get_database_connection_string(
in_conndatatype="UPLOADED_CONNECTION_FILE_ID",
in_inputdata=upload_res[1]['item']['itemID'])
up.delete(item_id=upload_res[1]['item']['itemID'])
return res
return
#----------------------------------------------------------------------
def add_cloudstore(self, name, conn_str, object_store,
provider, managed=False, folder=None):
"""
Cloud Store data item represents a connection to a Amazon or Microsoft Azure store.
Connection information for the data store item is stored within conn_str as a
stringified JSON. ArcGIS Server encrypts connection string for storage. Connection
strings that are encrypted will include a {crypt} prefix. You can get a data store
item with decrypted connection string by passing a decrypt=true parameter in the request
for a data store item. Data store with decrypted connection string will be returned only for
requests made with https. The examples below show data stores with decrypted conn_str.
A valid object_store (S3 bucket or Azure Blob store) is required. Folders within an object
store are optional.
=============== ====================================================================
**Argument** **Description**
--------------- --------------------------------------------------------------------
name Required string. The name of the cloud store.
--------------- --------------------------------------------------------------------
conn_str Required string. The connection information for the cloud storage
product.
--------------- --------------------------------------------------------------------
        object_store    Required string. This is the Amazon bucket path or Azure path.
--------------- --------------------------------------------------------------------
provider Required string. Values must be amazon or azure.
--------------- --------------------------------------------------------------------
managed Optional boolean. When the data store is server only, the database
is entirely managed and owned by the server and cannot be accessed
by the publisher directly. When this option is chosen, the
managed property should be set to true. Otherwise it is false.
--------------- --------------------------------------------------------------------
folder Optional string. For some Azure cloud stores, an optional folder
can be specified.
=============== ====================================================================
:return: DataStore
"""
item = {
"path": "/cloudStores/%s" % name,
"type": "cloudStore",
"provider": provider,
"info": {
"isManaged": managed,
"connectionString": conn_str,
"objectStore": object_store
}
}
if folder is not None:
item['info']['folder'] = folder
res = self._register_data_item(item=item)
if res['status'] == 'success' or res['status'] == 'exists':
return Datastore(self, "/cloudStores/" + name)
else:
return None
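    # Illustrative call (sketch only; every value below, including the JSON
    # shape of conn_str, is a placeholder assumption):
    #   cloud = datastore_manager.add_cloudstore(
    #       name="s3_store",
    #       conn_str='{"accessKeyId": "...", "secretAccessKey": "...", "region": "us-east-1"}',
    #       object_store="my-bucket",
    #       provider="amazon")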
#----------------------------------------------------------------------
def add_database(self,
name,
conn_str,
client_conn_str=None,
conn_type="shared"):
"""
Registers a database with the data store.
================== ====================================================================
**Argument** **Description**
------------------ --------------------------------------------------------------------
name Required string. The unique database name on the server.
------------------ --------------------------------------------------------------------
conn_str Required string. The path to the folder from the server (and client,
if shared or serverOnly database)
------------------ --------------------------------------------------------------------
client_conn_str Optional string. The connection string for client to connect to replicated enterprise database>
------------------ --------------------------------------------------------------------
conn_type Optional string. The connection type. Default value is shared,
other choices are replicated or serverOnly
================== ====================================================================
:return:
The data item if successfully registered, None otherwise.
"""
item = {
"type" : "egdb",
"path" : "/enterpriseDatabases/" + name,
"info" : {
"connectionString" : conn_str,
"dataStoreConnectionType" : conn_type
}
}
if client_conn_str is not None:
item['info']['clientConnectionString'] = client_conn_str
is_managed = False
if conn_type == "serverOnly":
is_managed = True
item['info']['isManaged'] = is_managed
res = self._register_data_item(item=item)
if res['status'] == 'success' or res['status'] == 'exists':
return Datastore(self, "/enterpriseDatabases/" + name)
else:
return None
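    # Illustrative call (sketch only; the connection string below is a
    # placeholder assumption in the style returned by
    # generate_connection_string):
    #   egdb = datastore_manager.add_database(
    #       name="production_gdb",
    #       conn_str="SERVER=dbserver;DATABASE=gis;USER=sde;ENCRYPTED_PASSWORD=...")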
#----------------------------------------------------------------------
def get_total_refcount(self, path):
"""
The total number of references to a given data item
that exists on the server. You can use this operation to
determine if a data resource can be safely deleted, or taken
down for maintenance.
================== ====================================================================
**Argument** **Description**
------------------ --------------------------------------------------------------------
path Required string. The complete hierarchical path to the item.
================== ====================================================================
:return:
A JSON dictionary containing a number representing the total count.
"""
url = self._url + "/computeTotalRefCount"
params = {
"f" : "json",
"path" : path
}
return self._con.post(path=url,
postdata=params)
#----------------------------------------------------------------------
def make_datastore_machine_primary(self,
item_name,
machine_name):
"""
Promotes a standby machine to the primary Data Store machine. The
existing primary machine is downgraded to a standby machine.
================== ====================================================================
**Argument** **Description**
------------------ --------------------------------------------------------------------
item_name Required string. The primary machine item name in the data store.
------------------ --------------------------------------------------------------------
machine_name Required string. The machine name of the machine to promote to primary.
================== ====================================================================
:return:
A boolean indicating success (True) or failure (False).
"""
url = self._url + "/items/enterpriseDatabases" + \
"/{datastoreitem}/machines/{machine_name}/makePrimary".format(
datastoreitem=item_name,
machine_name=machine_name)
params = {"f" : "json"}
return self._con.post(path=url,
postdata=params)
#----------------------------------------------------------------------
def get_relational_datastore_type(self, type_id):
"""
This resource lists the properties of a registered relational data
store of the given type. The properties returned are those that client
applications must provide when creating a Relational Database
Connection portal item.
================== ====================================================================
**Argument** **Description**
------------------ --------------------------------------------------------------------
type_id Required string. The datastore type ID of interest.
See http://resources.arcgis.com/en/help/arcgis-rest-api/index.html#/Relational_Data_Store_Types/02r300000303000000/
================== ====================================================================
:return:
            A JSON string listing the properties.
        """
        # TODO: document the possible datastore type IDs (e.g. esri.teradata, esri.sqlserver, esri.hana).
params = {"f" : "json"}
url = self._url + "/relationalDatastoreTypes/{i}".format(
i=type_id)
return self._con.get(path=url,
params=params)
#----------------------------------------------------------------------
@property
def relational_datastore_types(self):
"""
Gets a list of the relational data store types that have been
registered with the server. Each registered relational data store
type has both an id and a name property, as well as an array of
userDefinedProperties, which indicates the properties client
applications must provide when creating a Relational Database
Connection portal item. Only administrators can register and
unregister a relational data store type. The following database
platforms are supported: SAP HANA, Microsoft SQL Server and
Teradata.
"""
params = {"f" : "json"}
url = self._url + "/relationalDatastoreTypes"
return self._con.get(path=url,
params=params)
#----------------------------------------------------------------------
def search(self,
parent_path=None,
ancestor_path=None,
types=None,
id=None):
"""
Use this operation to search through the various data items that are registered in the server's data store.
================== ====================================================================
**Argument** **Description**
------------------ --------------------------------------------------------------------
parent_path Optional string. The path of the parent under which to find items.
------------------ --------------------------------------------------------------------
ancestor_path Optional string. The path of the ancestor under which to find items.
------------------ --------------------------------------------------------------------
types Optional string. A filter for the type of the items (for example, fgdb or folder or egdb).
------------------ --------------------------------------------------------------------
id Optional string. A filter to search by the ID of the item.
================== ====================================================================
:return:
            A JSON list of the items found matching the search criteria.
        """
        # TODO: document the list of possible item types.
params = {
"f" : "json",
}
if parent_path is not None:
params['parentPath'] = parent_path
if ancestor_path is not None:
params['ancestorPath'] = ancestor_path
if types is not None:
params['types'] = types
if id is not None:
params['id'] = id
url = self._url + "/findItems"
return self._con.post(path=url,
postdata=params)
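    # Illustrative call (sketch only; the filter values below are placeholder
    # assumptions):
    #   shares = datastore_manager.search(parent_path="/fileShares", types="folder")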
#----------------------------------------------------------------------
def _register_data_item(self, item):
"""
Registers a new data item with the server's data store.
================== ====================================================================
**Argument** **Description**
------------------ --------------------------------------------------------------------
item Required string. The JSON representing the data item.
See http://resources.arcgis.com/en/help/arcgis-rest-api/index.html#//02r3000001s9000000
================== ====================================================================
:return:
A response
"""
params = {
"item" : item,
"f" : "json"
}
url = self._url + "/registerItem"
return self._con.post(path=url,
postdata=params)
#----------------------------------------------------------------------
@property
def data_items(self):
"""
Gets the list of data items that are the root of all other data items in the data store.
"""
url = self._url + "/items"
params = {
"f" : "json"
}
return self._con.get(path=url,
params=params)
#----------------------------------------------------------------------
def validate(self):
"""
Validates all the items in the data store.
:return:
True if all items are valid.
"""
params = {
"f" : "json"
}
url = self._url + "/validateAllDataItems"
return self._con.post(path=url, postdata=params)
#----------------------------------------------------------------------
def make_primary(self, datastore_name, machine_name):
"""
Promotes a standby machine to the primary Data Store machine. The
existing primary machine is downgraded to a standby machine.
================== ====================================================================
**Argument** **Description**
------------------ --------------------------------------------------------------------
datastore_name Required string. The primary machine name in the data store.
------------------ --------------------------------------------------------------------
machine_name Required string. The machine name of the machine to promote to primary.
================== ====================================================================
:return:
A boolean indicating success (True) or failure (False).
"""
url = self._url + "/items/enterpriseDatabases/%s/machines/%s/makePrimary" % (datastore_name, machine_name)
params = {
"f" : "json"
}
return self._con.post(path=url, postdata=params)
#----------------------------------------------------------------------
def remove_datastore_machine(self, item_name, machine_name):
"""
Removes a standby machine from the Data Store. This operation is
not supported on the primary Data Store machine.
================== ====================================================================
**Argument** **Description**
------------------ --------------------------------------------------------------------
item_name Required string. The standby machine item name in the data store.
------------------ --------------------------------------------------------------------
machine_name Required string. The machine name of the machine to remove.
================== ====================================================================
:return:
A boolean indicating success (True) or failure (False).
"""
url = self._url + "/items/enterpriseDatabases/%s/machines/%s/remove" % (item_name, machine_name)
params = {
"f" : "json"
}
return self._con.post(path=url, postdata=params)
#----------------------------------------------------------------------
def start(self, item_name, machine_name):
"""
Starts the database instance running on the Data Store machine.
================== ====================================================================
**Argument** **Description**
------------------ --------------------------------------------------------------------
item_name Required string. The database item name in the data store to start.
------------------ --------------------------------------------------------------------
machine_name Required string. The machine name of the machine with the database
instance to start.
================== ====================================================================
:return:
A boolean indicating success (True) or failure (False).
"""
url = self._url + "/items/enterpriseDatabases/%s/machines/%s/start" % (item_name, machine_name)
params = {
"f": "json"
}
return self._con.post(path=url, postdata=params)
#----------------------------------------------------------------------
def stop(self, item_name, machine_name):
"""
Stop the database instance running on the Data Store machine.
================== ====================================================================
**Argument** **Description**
------------------ --------------------------------------------------------------------
item_name Required string. The database item name in the data store to stop.
------------------ --------------------------------------------------------------------
machine_name Required string. The machine name of the machine with the database
instance to stop.
================== ====================================================================
:return:
A boolean indicating success (True) or failure (False).
"""
url = self._url + "/items/enterpriseDatabases/%s/machines/%s/stop" % (item_name,
machine_name)
params = {
"f": "json"
}
return self._con.post(path=url, postdata=params)
#----------------------------------------------------------------------
def _unregister_data_item(self, path):
"""
Unregisters a data item that has been previously registered with
the server's data store.
================== ====================================================================
**Argument** **Description**
------------------ --------------------------------------------------------------------
path Required string. The path to the share folder.
================== ====================================================================
:return:
            The JSON response of the unregister operation.
        .. code-block:: python
            EXAMPLE:
            path = r"/fileShares/folder_share"
            print(data._unregister_data_item(path))
"""
url = self._url + "/unregisterItem"
params = {
"f" : "json",
"itempath" : path
}
return self._con.post(path=url, postdata=params)
#----------------------------------------------------------------------
def validate_egdb(self, data_store_name, name):
"""
Checks the status of the given ArcGIS Data Store and provides a health check response.
================== ====================================================================
**Argument** **Description**
------------------ --------------------------------------------------------------------
data_store_name Required string. The item name of the data store.
------------------ --------------------------------------------------------------------
name Required string. The machine name of where the data store is.
================== ====================================================================
:return:
A JSON response containing general status information and an overall health report.
"""
url = self._url + "/items/enterpriseDatabases/%s/machines/%s/validate" % (data_store_name,
name)
params = {
"f" : "json"
}
return self._con.post(path=url, postdata=params)
###########################################################################
class Datastore(BaseServer):
"""
Represents a single Datastore in the Data Store Manager.
"""
_path = None
_datastore = None
_json_dict = None
_json = None
_con = None
_url = None
def __init__(self, datastore, path, datadict=None, **kwargs):
self._path = path
super(Datastore, self).__init__(datastore=datastore,
path=path,
url=datastore._url + "%s" % path,
connection=datastore._con,
initialize=True,
datadict=datadict)
path = "/items%s" % path
if datastore:
self._con = datastore._con
self._datastore = datastore
self._url = "%s%s" % (datastore._url, path)
self._init()
#----------------------------------------------------------------------
def __str__(self):
state = [" %s=%r" % (attribute, value) for (attribute, value) in self._json_dict.items()]
return '\n'.join(state)
#----------------------------------------------------------------------
def __repr__(self):
return '<%s title:"%s" type:"%s">' % (type(self).__name__, self._url, self.type)
#----------------------------------------------------------------------
@property
def manifest(self):
"""
Gets the manifest resource for a big data file share.
"""
data_item_manifest_url = self._url + "/manifest"
if data_item_manifest_url.find('/bigDataFileShares') != -1:
params = {
'f': 'json',
}
res = self._con.post(data_item_manifest_url,
params,
verify_cert=False)
else:
res = {}
return res
#----------------------------------------------------------------------
@manifest.setter
def manifest(self, value):
"""
Sets the manifest resource for a big data file share.
"""
manifest_upload_url = self._url + '/manifest/update'
if manifest_upload_url.find('/bigDataFileShares') != -1:
with _tempinput(json.dumps(value)) as tempfilename:
# Build the files list (tuples)
files = []
files.append(('manifest', tempfilename, os.path.basename(tempfilename)))
postdata = {
'f' : 'pjson'
}
            resp = self._con.post(manifest_upload_url, postdata, files, verify_cert=False)
if resp['status'] == 'success':
return True
else:
return False
else:
return None
#---------------------------------------------------------------------
@property
def hints(self):
"""
Gets the hints resource for a big data file share. Hints
are advanced parameters to control the generation of a manifest.
"""
params = {
'download' : True,
'read' : True
}
url = self._url + "/hints"
return self._con.get(path=url,
params=params)
#---------------------------------------------------------------------
@hints.setter
def hints(self,
hints):
"""
Sets the hints resource for a big data file share. Hints
are advanced parameters to control the generation of a manifest.
Upload a hints file for a big data file share item. This will
replace the existing hints file. To apply the control parameters in
the hints file and regenerate the manifest, use the editDataItem to
edit the big data file share (using the same data store item as
input) which will regenerate the manifest. When a manifest is
regenerated, it will be updated only for datasets that have hints
and for new datasets that are added to the existing big data file
share location.
        ================== ====================================================================
        **Argument**       **Description**
        ------------------ --------------------------------------------------------------------
        hints              Required string. The hints file to be uploaded.
        ================== ====================================================================
"""
params = {
"f" : "json"
}
files = {"hints" : hints}
url = self._url + "/hints/update"
if url.find('/bigDataFileShares') == -1:
return None
return self._con.post(path=url,
files=files,
postdata=params)
#----------------------------------------------------------------------
@property
def ref_count(self):
"""
The total number of references to this data item that exist on the
server. You can use this property to determine if this data item
can be safely deleted or taken down for maintenance.
:return:
            A number indicating the number of references to this data item.
"""
return self.totalRefCount
#----------------------------------------------------------------------
def delete(self):
"""
Unregisters this data item from the data store.
:return:
A boolean indicating success (True) or failure (False).
"""
        params = {
            "f" : "json",
"itempath" : self.path,
"force": True
}
path = self._datastore._url + "/unregisterItem"
resp = self._con.post(path, params, verify_cert=False)
if resp:
return resp.get('success')
else:
return False
#----------------------------------------------------------------------
def update(self, item):
"""
Edits this data item to update its connection information.
================== ====================================================================
**Argument** **Description**
------------------ --------------------------------------------------------------------
item Required string. The dict representation of the updated item.
================== ====================================================================
:return:
True if the data item was successfully updated, False if the update failed.
"""
        params = {
            "f" : "json",
"item" : item
}
path = self._datastore._url + "/items" + self.path + "/edit"
resp = self._con.post(path, params, verify_cert=False)
        if resp['status'] == 'success':
self.regenerate()
return True
else:
return False
#----------------------------------------------------------------------
def validate(self):
"""
Validates that this data item's path (for file shares) or connection string (for databases)
is accessible to every server node in the site. This is necessary for the data item to be
registered and used successfully with the server's data store.
:return:
True if the data item was successfully validated.
"""
params = {
"f": "json",
"item": self._json_dict
}
path = self._datastore._url + "/validateDataItem"
if 'provider' in params['item'] and \
params['item']['provider'] == 'ArcGIS Data Store':
path = self._url + "/machines/" + params['item']['info']['machines'][0]['name'] + "/validate"
res = self._con.post(path, {"f": "json"}, verify_cert=False)
else:
res = self._con.post(path, params, verify_cert=False)
return res['status'] == 'success'
#----------------------------------------------------------------------
def regenerate(self):
"""
This regenerates the manifest for a big data file share. You can
regenerate a manifest if you have added new data or if you have
uploaded a hints file using the edit resource.
:returns: Boolean. True = Success, False = Failure
"""
url = self._datastore._url + "/regenerate"
params = {'f' : 'json'}
res = self._con.post(url, params)
if 'success' in res:
return res['success']
return res
#----------------------------------------------------------------------
@property
def datasets(self):
"""
Gets the datasets in the data store (currently implemented for big data file shares).
"""
data_item_manifest_url = self._url + "/manifest"
params = {
'f': 'json'
}
try:
res = self._con.post(data_item_manifest_url, params, verify_cert=False)
return res['datasets']
except:
return None
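###########################################################################
if __name__ == "__main__":
    # Minimal end-to-end sketch (illustrative only): the Server import path,
    # URL and credentials below are placeholder assumptions, not values
    # shipped with this module.
    from arcgis.gis.server import Server

    admin_url = "https://myserver:6443/arcgis/admin"  # placeholder
    server = Server(url=admin_url, username="admin", password="<PASSWORD>")
    dm = DataStoreManager(url=admin_url + "/data", gis=server)
    for item in dm.list():  # enumerate every registered data item
        print(item.path, item.type)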
|
[
"arcgis.gis.server.catalog.ServicesDirectory",
"re.match",
"json.dumps",
"os.path.basename"
] |
[((13906, 13936), 're.match', 're.match', (['pattern', 'server_path'], {}), '(pattern, server_path)\n', (13914, 13936), False, 'import re\n'), ((16060, 16152), 'arcgis.gis.server.catalog.ServicesDirectory', 'ServicesDirectory', ([], {'url': 'self._con.baseurl', 'portal_connection': 'self._con.portal_connection'}), '(url=self._con.baseurl, portal_connection=self._con.\n portal_connection)\n', (16077, 16152), False, 'from arcgis.gis.server.catalog import ServicesDirectory\n'), ((16264, 16304), 'arcgis.gis.server.catalog.ServicesDirectory', 'ServicesDirectory', ([], {'url': 'self._con.baseurl'}), '(url=self._con.baseurl)\n', (16281, 16304), False, 'from arcgis.gis.server.catalog import ServicesDirectory\n'), ((40974, 40991), 'json.dumps', 'json.dumps', (['value'], {}), '(value)\n', (40984, 40991), False, 'import json\n'), ((41141, 41171), 'os.path.basename', 'os.path.basename', (['tempfilename'], {}), '(tempfilename)\n', (41157, 41171), False, 'import os\n')]
|
from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminTest(TestCase):
    """tests that users are listed and editable in the admin user pages"""
def setUp(self):
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
email='<EMAIL>',
password='<PASSWORD>'
)
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email='<EMAIL>',
password='<PASSWORD>',
name='this user is for testing'
)
    def test_user_listed(self):
        """test that users are listed on the users admin page"""
url = reverse('admin:core_user_changelist')
res = self.client.get(url)
self.assertContains(res, self.user.email)
self.assertContains(res, self.user.name)
def test_user_change_page(self):
"""test that the user edit page works"""
url = reverse('admin:core_user_change', args=[self.user.id])
# /admin/core/user/1
res = self.client.get(url)
        self.assertEqual(res.status_code, 200)
    def test_create_user_page(self):
        """test that the create user page renders"""
url = reverse('admin:core_user_add')
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
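    def test_user_changelist_requires_login(self):
        """illustrative extra check beyond the original suite: anonymous
        clients should be redirected away from the admin changelist"""
        self.client.logout()
        url = reverse('admin:core_user_changelist')
        res = self.client.get(url)
        self.assertEqual(res.status_code, 302)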
|
[
"django.urls.reverse",
"django.contrib.auth.get_user_model",
"django.test.Client"
] |
[((250, 258), 'django.test.Client', 'Client', ([], {}), '()\n', (256, 258), False, 'from django.test import TestCase, Client\n'), ((737, 774), 'django.urls.reverse', 'reverse', (['"""admin:core_user_changelist"""'], {}), "('admin:core_user_changelist')\n", (744, 774), False, 'from django.urls import reverse\n'), ((1011, 1065), 'django.urls.reverse', 'reverse', (['"""admin:core_user_change"""'], {'args': '[self.user.id]'}), "('admin:core_user_change', args=[self.user.id])\n", (1018, 1065), False, 'from django.urls import reverse\n'), ((1276, 1306), 'django.urls.reverse', 'reverse', (['"""admin:core_user_add"""'], {}), "('admin:core_user_add')\n", (1283, 1306), False, 'from django.urls import reverse\n'), ((285, 301), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (299, 301), False, 'from django.contrib.auth import get_user_model\n'), ((470, 486), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (484, 486), False, 'from django.contrib.auth import get_user_model\n')]
|
import os
import os.path
import pytest
import keepachangelog
@pytest.fixture
def changelog(tmpdir):
changelog_file_path = os.path.join(tmpdir, "CHANGELOG.md")
with open(changelog_file_path, "wt") as file:
file.write("This is the changelog content.\n")
file.write("This is the second line.\n")
return changelog_file_path
def test_changelog_without_versions(changelog):
assert keepachangelog.to_dict(changelog) == {}
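def test_changelog_with_one_version(tmpdir):
    # Illustrative extra test beyond the original file: asserts only that a
    # "## [x.y.z]" heading surfaces as a top-level key of the parsed dict.
    changelog_file_path = os.path.join(tmpdir, "CHANGELOG.md")
    with open(changelog_file_path, "wt") as file:
        file.write("## [1.0.0] - 2020-01-01\n")
        file.write("### Added\n")
        file.write("- Initial release.\n")
    assert "1.0.0" in keepachangelog.to_dict(changelog_file_path)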
|
[
"os.path.join",
"keepachangelog.to_dict"
] |
[((130, 166), 'os.path.join', 'os.path.join', (['tmpdir', '"""CHANGELOG.md"""'], {}), "(tmpdir, 'CHANGELOG.md')\n", (142, 166), False, 'import os\n'), ((413, 446), 'keepachangelog.to_dict', 'keepachangelog.to_dict', (['changelog'], {}), '(changelog)\n', (435, 446), False, 'import keepachangelog\n')]
|
import pandas as pd
import great_expectations.expectations.metrics
from great_expectations.core import IDDict
from great_expectations.core.batch import Batch, BatchRequest, PartitionRequest
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.core.expectation_validation_result import (
ExpectationValidationResult,
)
from great_expectations.exceptions.metric_exceptions import MetricProviderError
from great_expectations.execution_engine import PandasExecutionEngine
from great_expectations.expectations.core import ExpectColumnMaxToBeBetween
from great_expectations.expectations.core.expect_column_value_z_scores_to_be_less_than import (
ExpectColumnValueZScoresToBeLessThan,
)
from great_expectations.expectations.registry import get_expectation_impl
from great_expectations.validator.validation_graph import (
MetricConfiguration,
MetricEdge,
ValidationGraph,
)
from great_expectations.validator.validator import Validator
def test_parse_validation_graph():
df = pd.DataFrame({"a": [1, 5, 22, 3, 5, 10], "b": [1, 2, 3, 4, 5, 6]})
expectationConfiguration = ExpectationConfiguration(
expectation_type="expect_column_value_z_scores_to_be_less_than",
kwargs={
"column": "a",
"mostly": 0.9,
"threshold": 4,
"double_sided": True,
},
)
expectation = ExpectColumnValueZScoresToBeLessThan(expectationConfiguration)
batch = Batch(data=df)
graph = ValidationGraph()
engine = PandasExecutionEngine()
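    # Build the metric dependency graph for every metric the expectation requires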
for configuration in [expectationConfiguration]:
expectation_impl = get_expectation_impl(
"expect_column_value_z_scores_to_be_less_than"
)
validation_dependencies = expectation_impl(
configuration
).get_validation_dependencies(configuration, engine)
for metric_configuration in validation_dependencies["metrics"].values():
Validator(execution_engine=engine).build_metric_dependency_graph(
graph, metric_configuration, configuration, execution_engine=engine
)
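    # Partition the graph into metrics ready to compute now and metrics still awaiting dependencies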
ready_metrics, needed_metrics = Validator(engine)._parse_validation_graph(
validation_graph=graph, metrics=dict()
)
assert len(ready_metrics) == 4 and len(needed_metrics) == 5
# Should still pass even when given incorrect MetricProvider arguments
def test_parse_validation_graph_with_bad_metrics_args():
df = pd.DataFrame({"a": [1, 5, 22, 3, 5, 10], "b": [1, 2, 3, 4, 5, 6]})
expectationConfiguration = ExpectationConfiguration(
expectation_type="expect_column_value_z_scores_to_be_less_than",
kwargs={
"column": "a",
"mostly": 0.9,
"threshold": 4,
"double_sided": True,
},
)
graph = ValidationGraph()
engine = PandasExecutionEngine()
validator = Validator(execution_engine=engine)
for configuration in [expectationConfiguration]:
expectation_impl = get_expectation_impl(
"expect_column_value_z_scores_to_be_less_than"
)
validation_dependencies = expectation_impl(
configuration
).get_validation_dependencies(
configuration,
execution_engine=engine,
)
for metric_configuration in validation_dependencies["metrics"].values():
validator.build_metric_dependency_graph(
graph, metric_configuration, configuration, execution_engine=engine
)
ready_metrics, needed_metrics = validator._parse_validation_graph(
validation_graph=graph, metrics=("nonexistent", "NONE")
)
assert len(ready_metrics) == 4 and len(needed_metrics) == 5
def test_populate_dependencies():
df = pd.DataFrame({"a": [1, 5, 22, 3, 5, 10], "b": [1, 2, 3, 4, 5, 6]})
expectationConfiguration = ExpectationConfiguration(
expectation_type="expect_column_value_z_scores_to_be_less_than",
kwargs={
"column": "a",
"mostly": 0.9,
"threshold": 4,
"double_sided": True,
},
)
expectation = ExpectColumnValueZScoresToBeLessThan(expectationConfiguration)
batch = Batch(data=df)
graph = ValidationGraph()
engine = PandasExecutionEngine()
for configuration in [expectationConfiguration]:
expectation_impl = get_expectation_impl(
"expect_column_value_z_scores_to_be_less_than"
)
validation_dependencies = expectation_impl(
configuration
).get_validation_dependencies(
configuration,
engine,
)
for metric_configuration in validation_dependencies["metrics"].values():
Validator(execution_engine=engine).build_metric_dependency_graph(
graph, metric_configuration, configuration, execution_engine=engine
)
assert len(graph.edges) == 10
def test_populate_dependencies_with_incorrect_metric_name():
df = pd.DataFrame({"a": [1, 5, 22, 3, 5, 10], "b": [1, 2, 3, 4, 5, 6]})
expectationConfiguration = ExpectationConfiguration(
expectation_type="expect_column_value_z_scores_to_be_less_than",
kwargs={
"column": "a",
"mostly": 0.9,
"threshold": 4,
"double_sided": True,
},
)
expectation = ExpectColumnValueZScoresToBeLessThan(expectationConfiguration)
batch = Batch(data=df)
graph = ValidationGraph()
engine = PandasExecutionEngine()
for configuration in [expectationConfiguration]:
expectation_impl = get_expectation_impl(
"expect_column_value_z_scores_to_be_less_than"
)
validation_dependencies = expectation_impl(
configuration
).get_validation_dependencies(
configuration,
engine,
)
try:
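            # Requesting a metric that is not registered should raise MetricProviderError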
Validator(execution_engine=engine).build_metric_dependency_graph(
graph,
MetricConfiguration("column_values.not_a_metric", IDDict()),
configuration,
execution_engine=engine,
)
except MetricProviderError as e:
graph = e
assert isinstance(graph, MetricProviderError)
def test_graph_validate(basic_datasource):
df = pd.DataFrame({"a": [1, 5, 22, 3, 5, 10], "b": [1, 2, 3, 4, 5, None]})
expectationConfiguration = ExpectationConfiguration(
expectation_type="expect_column_value_z_scores_to_be_less_than",
kwargs={
"column": "b",
"mostly": 0.9,
"threshold": 4,
"double_sided": True,
},
)
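    # Build a Batch from the in-memory DataFrame via the runtime data connector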
batch = basic_datasource.get_single_batch_from_batch_request(
BatchRequest(
**{
"datasource_name": "my_datasource",
"data_connector_name": "test_runtime_data_connector",
"batch_data": df,
"partition_request": PartitionRequest(
**{
"partition_identifiers": {
"pipeline_stage_name": 0,
"airflow_run_id": 0,
"custom_key_0": 0,
}
}
),
}
)
)
result = Validator(
execution_engine=PandasExecutionEngine(), batches=[batch]
).graph_validate(configurations=[expectationConfiguration])
assert result == [
ExpectationValidationResult(
success=True,
expectation_config=None,
meta={},
result={
"element_count": 6,
"unexpected_count": 0,
"unexpected_percent": 0.0,
"partial_unexpected_list": [],
"missing_count": 1,
"missing_percent": 16.666666666666664,
"unexpected_percent_nonmissing": 0.0,
},
exception_info=None,
)
]
# This might indicate that configurations should be validated more strictly before validation is run
def test_graph_validate_with_bad_config(basic_datasource):
df = pd.DataFrame({"a": [1, 5, 22, 3, 5, 10], "b": [1, 2, 3, 4, 5, None]})
expectationConfiguration = ExpectationConfiguration(
expectation_type="expect_column_max_to_be_between",
kwargs={"column": "not_in_table", "min_value": 1, "max_value": 29},
)
expectation = ExpectColumnMaxToBeBetween(expectationConfiguration)
batch = basic_datasource.get_single_batch_from_batch_request(
BatchRequest(
**{
"datasource_name": "my_datasource",
"data_connector_name": "test_runtime_data_connector",
"batch_data": df,
"partition_request": PartitionRequest(
**{
"partition_identifiers": {
"pipeline_stage_name": 0,
"airflow_run_id": 0,
"custom_key_0": 0,
}
}
),
}
)
)
try:
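        # Validating a column that does not exist in the table should raise a KeyError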
result = Validator(
execution_engine=PandasExecutionEngine(), batches=[batch]
).graph_validate(configurations=[expectationConfiguration])
except KeyError as e:
result = e
assert isinstance(result, KeyError)
# Tests that runtime configuration actually works during graph validation
def test_graph_validate_with_runtime_config(basic_datasource):
df = pd.DataFrame(
{"a": [1, 5, 22, 3, 5, 10, 2, 3], "b": [97, 332, 3, 4, 5, 6, 7, None]}
)
expectationConfiguration = ExpectationConfiguration(
expectation_type="expect_column_value_z_scores_to_be_less_than",
kwargs={"column": "b", "mostly": 1, "threshold": 2, "double_sided": True},
)
expectation = ExpectColumnValueZScoresToBeLessThan(expectationConfiguration)
batch = basic_datasource.get_single_batch_from_batch_request(
BatchRequest(
**{
"datasource_name": "my_datasource",
"data_connector_name": "test_runtime_data_connector",
"batch_data": df,
"partition_request": PartitionRequest(
**{
"partition_identifiers": {
"pipeline_stage_name": 0,
"airflow_run_id": 0,
"custom_key_0": 0,
}
}
),
}
)
)
try:
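        # Pass result_format COMPLETE as runtime configuration so the full unexpected list is returned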
result = Validator(
execution_engine=PandasExecutionEngine(), batches=(batch,)
).graph_validate(
configurations=[expectationConfiguration],
runtime_configuration={"result_format": "COMPLETE"},
)
except AssertionError as e:
result = e
assert result == [
ExpectationValidationResult(
success=False,
meta={},
result={
"element_count": 8,
"unexpected_count": 1,
"unexpected_percent": 12.5,
"partial_unexpected_list": [332.0],
"missing_count": 1,
"missing_percent": 12.5,
"unexpected_percent_nonmissing": 14.285714285714285,
"partial_unexpected_index_list": None,
"partial_unexpected_counts": [{"value": 332.0, "count": 1}],
"unexpected_list": [332.0],
"unexpected_index_list": None,
},
expectation_config=None,
exception_info=None,
)
]
def test_validator_default_expectation_args__pandas(basic_datasource):
df = pd.DataFrame({"a": [1, 5, 22, 3, 5, 10], "b": [1, 2, 3, 4, 5, None]})
expectationConfiguration = ExpectationConfiguration(
expectation_type="expect_column_value_z_scores_to_be_less_than",
kwargs={
"column": "b",
"mostly": 0.9,
"threshold": 4,
"double_sided": True,
},
)
batch = basic_datasource.get_single_batch_from_batch_request(
BatchRequest(
**{
"datasource_name": "my_datasource",
"data_connector_name": "test_runtime_data_connector",
"batch_data": df,
"partition_request": PartitionRequest(
**{
"partition_identifiers": {
"pipeline_stage_name": 0,
"airflow_run_id": 0,
"custom_key_0": 0,
}
}
),
}
)
)
my_validator = Validator(execution_engine=PandasExecutionEngine(), batches=[batch])
print(my_validator.get_default_expectation_arguments())
def test_validator_default_expectation_args__sql(
data_context_with_sql_datasource_for_testing_get_batch,
):
context = data_context_with_sql_datasource_for_testing_get_batch
my_validator = context.get_validator(
datasource_name="my_sqlite_db",
data_connector_name="daily",
data_asset_name="table_partitioned_by_date_column__A",
partition_identifiers={"date": "2020-01-15"},
create_expectation_suite_with_name="test_suite",
)
print(my_validator.get_default_expectation_arguments())
|
[
"pandas.DataFrame",
"great_expectations.execution_engine.PandasExecutionEngine",
"great_expectations.core.expectation_validation_result.ExpectationValidationResult",
"great_expectations.core.batch.Batch",
"great_expectations.expectations.registry.get_expectation_impl",
"great_expectations.core.batch.PartitionRequest",
"great_expectations.expectations.core.ExpectColumnMaxToBeBetween",
"great_expectations.validator.validation_graph.ValidationGraph",
"great_expectations.core.IDDict",
"great_expectations.expectations.core.expect_column_value_z_scores_to_be_less_than.ExpectColumnValueZScoresToBeLessThan",
"great_expectations.validator.validator.Validator",
"great_expectations.core.expectation_configuration.ExpectationConfiguration"
] |
[((1052, 1118), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [1, 5, 22, 3, 5, 10], 'b': [1, 2, 3, 4, 5, 6]}"], {}), "({'a': [1, 5, 22, 3, 5, 10], 'b': [1, 2, 3, 4, 5, 6]})\n", (1064, 1118), True, 'import pandas as pd\n'), ((1150, 1325), 'great_expectations.core.expectation_configuration.ExpectationConfiguration', 'ExpectationConfiguration', ([], {'expectation_type': '"""expect_column_value_z_scores_to_be_less_than"""', 'kwargs': "{'column': 'a', 'mostly': 0.9, 'threshold': 4, 'double_sided': True}"}), "(expectation_type=\n 'expect_column_value_z_scores_to_be_less_than', kwargs={'column': 'a',\n 'mostly': 0.9, 'threshold': 4, 'double_sided': True})\n", (1174, 1325), False, 'from great_expectations.core.expectation_configuration import ExpectationConfiguration\n'), ((1417, 1479), 'great_expectations.expectations.core.expect_column_value_z_scores_to_be_less_than.ExpectColumnValueZScoresToBeLessThan', 'ExpectColumnValueZScoresToBeLessThan', (['expectationConfiguration'], {}), '(expectationConfiguration)\n', (1453, 1479), False, 'from great_expectations.expectations.core.expect_column_value_z_scores_to_be_less_than import ExpectColumnValueZScoresToBeLessThan\n'), ((1492, 1506), 'great_expectations.core.batch.Batch', 'Batch', ([], {'data': 'df'}), '(data=df)\n', (1497, 1506), False, 'from great_expectations.core.batch import Batch, BatchRequest, PartitionRequest\n'), ((1519, 1536), 'great_expectations.validator.validation_graph.ValidationGraph', 'ValidationGraph', ([], {}), '()\n', (1534, 1536), False, 'from great_expectations.validator.validation_graph import MetricConfiguration, MetricEdge, ValidationGraph\n'), ((1550, 1573), 'great_expectations.execution_engine.PandasExecutionEngine', 'PandasExecutionEngine', ([], {}), '()\n', (1571, 1573), False, 'from great_expectations.execution_engine import PandasExecutionEngine\n'), ((2477, 2543), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [1, 5, 22, 3, 5, 10], 'b': [1, 2, 3, 4, 5, 6]}"], {}), "({'a': [1, 5, 22, 3, 5, 10], 'b': [1, 2, 3, 4, 5, 6]})\n", (2489, 2543), True, 'import pandas as pd\n'), ((2575, 2750), 'great_expectations.core.expectation_configuration.ExpectationConfiguration', 'ExpectationConfiguration', ([], {'expectation_type': '"""expect_column_value_z_scores_to_be_less_than"""', 'kwargs': "{'column': 'a', 'mostly': 0.9, 'threshold': 4, 'double_sided': True}"}), "(expectation_type=\n 'expect_column_value_z_scores_to_be_less_than', kwargs={'column': 'a',\n 'mostly': 0.9, 'threshold': 4, 'double_sided': True})\n", (2599, 2750), False, 'from great_expectations.core.expectation_configuration import ExpectationConfiguration\n'), ((2836, 2853), 'great_expectations.validator.validation_graph.ValidationGraph', 'ValidationGraph', ([], {}), '()\n', (2851, 2853), False, 'from great_expectations.validator.validation_graph import MetricConfiguration, MetricEdge, ValidationGraph\n'), ((2867, 2890), 'great_expectations.execution_engine.PandasExecutionEngine', 'PandasExecutionEngine', ([], {}), '()\n', (2888, 2890), False, 'from great_expectations.execution_engine import PandasExecutionEngine\n'), ((2907, 2941), 'great_expectations.validator.validator.Validator', 'Validator', ([], {'execution_engine': 'engine'}), '(execution_engine=engine)\n', (2916, 2941), False, 'from great_expectations.validator.validator import Validator\n'), ((3787, 3853), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [1, 5, 22, 3, 5, 10], 'b': [1, 2, 3, 4, 5, 6]}"], {}), "({'a': [1, 5, 22, 3, 5, 10], 'b': [1, 2, 3, 4, 5, 6]})\n", (3799, 3853), True, 'import pandas as pd\n'), 
((3885, 4060), 'great_expectations.core.expectation_configuration.ExpectationConfiguration', 'ExpectationConfiguration', ([], {'expectation_type': '"""expect_column_value_z_scores_to_be_less_than"""', 'kwargs': "{'column': 'a', 'mostly': 0.9, 'threshold': 4, 'double_sided': True}"}), "(expectation_type=\n 'expect_column_value_z_scores_to_be_less_than', kwargs={'column': 'a',\n 'mostly': 0.9, 'threshold': 4, 'double_sided': True})\n", (3909, 4060), False, 'from great_expectations.core.expectation_configuration import ExpectationConfiguration\n'), ((4152, 4214), 'great_expectations.expectations.core.expect_column_value_z_scores_to_be_less_than.ExpectColumnValueZScoresToBeLessThan', 'ExpectColumnValueZScoresToBeLessThan', (['expectationConfiguration'], {}), '(expectationConfiguration)\n', (4188, 4214), False, 'from great_expectations.expectations.core.expect_column_value_z_scores_to_be_less_than import ExpectColumnValueZScoresToBeLessThan\n'), ((4227, 4241), 'great_expectations.core.batch.Batch', 'Batch', ([], {'data': 'df'}), '(data=df)\n', (4232, 4241), False, 'from great_expectations.core.batch import Batch, BatchRequest, PartitionRequest\n'), ((4254, 4271), 'great_expectations.validator.validation_graph.ValidationGraph', 'ValidationGraph', ([], {}), '()\n', (4269, 4271), False, 'from great_expectations.validator.validation_graph import MetricConfiguration, MetricEdge, ValidationGraph\n'), ((4285, 4308), 'great_expectations.execution_engine.PandasExecutionEngine', 'PandasExecutionEngine', ([], {}), '()\n', (4306, 4308), False, 'from great_expectations.execution_engine import PandasExecutionEngine\n'), ((5018, 5084), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [1, 5, 22, 3, 5, 10], 'b': [1, 2, 3, 4, 5, 6]}"], {}), "({'a': [1, 5, 22, 3, 5, 10], 'b': [1, 2, 3, 4, 5, 6]})\n", (5030, 5084), True, 'import pandas as pd\n'), ((5116, 5291), 'great_expectations.core.expectation_configuration.ExpectationConfiguration', 'ExpectationConfiguration', ([], {'expectation_type': '"""expect_column_value_z_scores_to_be_less_than"""', 'kwargs': "{'column': 'a', 'mostly': 0.9, 'threshold': 4, 'double_sided': True}"}), "(expectation_type=\n 'expect_column_value_z_scores_to_be_less_than', kwargs={'column': 'a',\n 'mostly': 0.9, 'threshold': 4, 'double_sided': True})\n", (5140, 5291), False, 'from great_expectations.core.expectation_configuration import ExpectationConfiguration\n'), ((5383, 5445), 'great_expectations.expectations.core.expect_column_value_z_scores_to_be_less_than.ExpectColumnValueZScoresToBeLessThan', 'ExpectColumnValueZScoresToBeLessThan', (['expectationConfiguration'], {}), '(expectationConfiguration)\n', (5419, 5445), False, 'from great_expectations.expectations.core.expect_column_value_z_scores_to_be_less_than import ExpectColumnValueZScoresToBeLessThan\n'), ((5458, 5472), 'great_expectations.core.batch.Batch', 'Batch', ([], {'data': 'df'}), '(data=df)\n', (5463, 5472), False, 'from great_expectations.core.batch import Batch, BatchRequest, PartitionRequest\n'), ((5485, 5502), 'great_expectations.validator.validation_graph.ValidationGraph', 'ValidationGraph', ([], {}), '()\n', (5500, 5502), False, 'from great_expectations.validator.validation_graph import MetricConfiguration, MetricEdge, ValidationGraph\n'), ((5516, 5539), 'great_expectations.execution_engine.PandasExecutionEngine', 'PandasExecutionEngine', ([], {}), '()\n', (5537, 5539), False, 'from great_expectations.execution_engine import PandasExecutionEngine\n'), ((6331, 6400), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [1, 5, 22, 3, 
5, 10], 'b': [1, 2, 3, 4, 5, None]}"], {}), "({'a': [1, 5, 22, 3, 5, 10], 'b': [1, 2, 3, 4, 5, None]})\n", (6343, 6400), True, 'import pandas as pd\n'), ((6432, 6607), 'great_expectations.core.expectation_configuration.ExpectationConfiguration', 'ExpectationConfiguration', ([], {'expectation_type': '"""expect_column_value_z_scores_to_be_less_than"""', 'kwargs': "{'column': 'b', 'mostly': 0.9, 'threshold': 4, 'double_sided': True}"}), "(expectation_type=\n 'expect_column_value_z_scores_to_be_less_than', kwargs={'column': 'b',\n 'mostly': 0.9, 'threshold': 4, 'double_sided': True})\n", (6456, 6607), False, 'from great_expectations.core.expectation_configuration import ExpectationConfiguration\n'), ((8196, 8265), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [1, 5, 22, 3, 5, 10], 'b': [1, 2, 3, 4, 5, None]}"], {}), "({'a': [1, 5, 22, 3, 5, 10], 'b': [1, 2, 3, 4, 5, None]})\n", (8208, 8265), True, 'import pandas as pd\n'), ((8297, 8445), 'great_expectations.core.expectation_configuration.ExpectationConfiguration', 'ExpectationConfiguration', ([], {'expectation_type': '"""expect_column_max_to_be_between"""', 'kwargs': "{'column': 'not_in_table', 'min_value': 1, 'max_value': 29}"}), "(expectation_type='expect_column_max_to_be_between',\n kwargs={'column': 'not_in_table', 'min_value': 1, 'max_value': 29})\n", (8321, 8445), False, 'from great_expectations.core.expectation_configuration import ExpectationConfiguration\n'), ((8483, 8535), 'great_expectations.expectations.core.ExpectColumnMaxToBeBetween', 'ExpectColumnMaxToBeBetween', (['expectationConfiguration'], {}), '(expectationConfiguration)\n', (8509, 8535), False, 'from great_expectations.expectations.core import ExpectColumnMaxToBeBetween\n'), ((9583, 9671), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [1, 5, 22, 3, 5, 10, 2, 3], 'b': [97, 332, 3, 4, 5, 6, 7, None]}"], {}), "({'a': [1, 5, 22, 3, 5, 10, 2, 3], 'b': [97, 332, 3, 4, 5, 6, 7,\n None]})\n", (9595, 9671), True, 'import pandas as pd\n'), ((9713, 9886), 'great_expectations.core.expectation_configuration.ExpectationConfiguration', 'ExpectationConfiguration', ([], {'expectation_type': '"""expect_column_value_z_scores_to_be_less_than"""', 'kwargs': "{'column': 'b', 'mostly': 1, 'threshold': 2, 'double_sided': True}"}), "(expectation_type=\n 'expect_column_value_z_scores_to_be_less_than', kwargs={'column': 'b',\n 'mostly': 1, 'threshold': 2, 'double_sided': True})\n", (9737, 9886), False, 'from great_expectations.core.expectation_configuration import ExpectationConfiguration\n'), ((9919, 9981), 'great_expectations.expectations.core.expect_column_value_z_scores_to_be_less_than.ExpectColumnValueZScoresToBeLessThan', 'ExpectColumnValueZScoresToBeLessThan', (['expectationConfiguration'], {}), '(expectationConfiguration)\n', (9955, 9981), False, 'from great_expectations.expectations.core.expect_column_value_z_scores_to_be_less_than import ExpectColumnValueZScoresToBeLessThan\n'), ((11788, 11857), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': [1, 5, 22, 3, 5, 10], 'b': [1, 2, 3, 4, 5, None]}"], {}), "({'a': [1, 5, 22, 3, 5, 10], 'b': [1, 2, 3, 4, 5, None]})\n", (11800, 11857), True, 'import pandas as pd\n'), ((11889, 12064), 'great_expectations.core.expectation_configuration.ExpectationConfiguration', 'ExpectationConfiguration', ([], {'expectation_type': '"""expect_column_value_z_scores_to_be_less_than"""', 'kwargs': "{'column': 'b', 'mostly': 0.9, 'threshold': 4, 'double_sided': True}"}), "(expectation_type=\n 'expect_column_value_z_scores_to_be_less_than', kwargs={'column': 'b',\n 'mostly': 
0.9, 'threshold': 4, 'double_sided': True})\n", (11913, 12064), False, 'from great_expectations.core.expectation_configuration import ExpectationConfiguration\n'), ((1654, 1722), 'great_expectations.expectations.registry.get_expectation_impl', 'get_expectation_impl', (['"""expect_column_value_z_scores_to_be_less_than"""'], {}), "('expect_column_value_z_scores_to_be_less_than')\n", (1674, 1722), False, 'from great_expectations.expectations.registry import get_expectation_impl\n'), ((3022, 3090), 'great_expectations.expectations.registry.get_expectation_impl', 'get_expectation_impl', (['"""expect_column_value_z_scores_to_be_less_than"""'], {}), "('expect_column_value_z_scores_to_be_less_than')\n", (3042, 3090), False, 'from great_expectations.expectations.registry import get_expectation_impl\n'), ((4389, 4457), 'great_expectations.expectations.registry.get_expectation_impl', 'get_expectation_impl', (['"""expect_column_value_z_scores_to_be_less_than"""'], {}), "('expect_column_value_z_scores_to_be_less_than')\n", (4409, 4457), False, 'from great_expectations.expectations.registry import get_expectation_impl\n'), ((5620, 5688), 'great_expectations.expectations.registry.get_expectation_impl', 'get_expectation_impl', (['"""expect_column_value_z_scores_to_be_less_than"""'], {}), "('expect_column_value_z_scores_to_be_less_than')\n", (5640, 5688), False, 'from great_expectations.expectations.registry import get_expectation_impl\n'), ((2178, 2195), 'great_expectations.validator.validator.Validator', 'Validator', (['engine'], {}), '(engine)\n', (2187, 2195), False, 'from great_expectations.validator.validator import Validator\n'), ((7505, 7824), 'great_expectations.core.expectation_validation_result.ExpectationValidationResult', 'ExpectationValidationResult', ([], {'success': '(True)', 'expectation_config': 'None', 'meta': '{}', 'result': "{'element_count': 6, 'unexpected_count': 0, 'unexpected_percent': 0.0,\n 'partial_unexpected_list': [], 'missing_count': 1, 'missing_percent': \n 16.666666666666664, 'unexpected_percent_nonmissing': 0.0}", 'exception_info': 'None'}), "(success=True, expectation_config=None, meta={},\n result={'element_count': 6, 'unexpected_count': 0, 'unexpected_percent':\n 0.0, 'partial_unexpected_list': [], 'missing_count': 1,\n 'missing_percent': 16.666666666666664, 'unexpected_percent_nonmissing':\n 0.0}, exception_info=None)\n", (7532, 7824), False, 'from great_expectations.core.expectation_validation_result import ExpectationValidationResult\n'), ((10967, 11466), 'great_expectations.core.expectation_validation_result.ExpectationValidationResult', 'ExpectationValidationResult', ([], {'success': '(False)', 'meta': '{}', 'result': "{'element_count': 8, 'unexpected_count': 1, 'unexpected_percent': 12.5,\n 'partial_unexpected_list': [332.0], 'missing_count': 1,\n 'missing_percent': 12.5, 'unexpected_percent_nonmissing': \n 14.285714285714285, 'partial_unexpected_index_list': None,\n 'partial_unexpected_counts': [{'value': 332.0, 'count': 1}],\n 'unexpected_list': [332.0], 'unexpected_index_list': None}", 'expectation_config': 'None', 'exception_info': 'None'}), "(success=False, meta={}, result={'element_count':\n 8, 'unexpected_count': 1, 'unexpected_percent': 12.5,\n 'partial_unexpected_list': [332.0], 'missing_count': 1,\n 'missing_percent': 12.5, 'unexpected_percent_nonmissing': \n 14.285714285714285, 'partial_unexpected_index_list': None,\n 'partial_unexpected_counts': [{'value': 332.0, 'count': 1}],\n 'unexpected_list': [332.0], 'unexpected_index_list': None},\n 
expectation_config=None, exception_info=None)\n", (10994, 11466), False, 'from great_expectations.core.expectation_validation_result import ExpectationValidationResult\n'), ((12823, 12846), 'great_expectations.execution_engine.PandasExecutionEngine', 'PandasExecutionEngine', ([], {}), '()\n', (12844, 12846), False, 'from great_expectations.execution_engine import PandasExecutionEngine\n'), ((1978, 2012), 'great_expectations.validator.validator.Validator', 'Validator', ([], {'execution_engine': 'engine'}), '(execution_engine=engine)\n', (1987, 2012), False, 'from great_expectations.validator.validator import Validator\n'), ((4748, 4782), 'great_expectations.validator.validator.Validator', 'Validator', ([], {'execution_engine': 'engine'}), '(execution_engine=engine)\n', (4757, 4782), False, 'from great_expectations.validator.validator import Validator\n'), ((5911, 5945), 'great_expectations.validator.validator.Validator', 'Validator', ([], {'execution_engine': 'engine'}), '(execution_engine=engine)\n', (5920, 5945), False, 'from great_expectations.validator.validator import Validator\n'), ((6066, 6074), 'great_expectations.core.IDDict', 'IDDict', ([], {}), '()\n', (6072, 6074), False, 'from great_expectations.core import IDDict\n'), ((6979, 7096), 'great_expectations.core.batch.PartitionRequest', 'PartitionRequest', ([], {}), "(**{'partition_identifiers': {'pipeline_stage_name': 0,\n 'airflow_run_id': 0, 'custom_key_0': 0}})\n", (6995, 7096), False, 'from great_expectations.core.batch import Batch, BatchRequest, PartitionRequest\n'), ((7369, 7392), 'great_expectations.execution_engine.PandasExecutionEngine', 'PandasExecutionEngine', ([], {}), '()\n', (7390, 7392), False, 'from great_expectations.execution_engine import PandasExecutionEngine\n'), ((8834, 8951), 'great_expectations.core.batch.PartitionRequest', 'PartitionRequest', ([], {}), "(**{'partition_identifiers': {'pipeline_stage_name': 0,\n 'airflow_run_id': 0, 'custom_key_0': 0}})\n", (8850, 8951), False, 'from great_expectations.core.batch import Batch, BatchRequest, PartitionRequest\n'), ((10280, 10397), 'great_expectations.core.batch.PartitionRequest', 'PartitionRequest', ([], {}), "(**{'partition_identifiers': {'pipeline_stage_name': 0,\n 'airflow_run_id': 0, 'custom_key_0': 0}})\n", (10296, 10397), False, 'from great_expectations.core.batch import Batch, BatchRequest, PartitionRequest\n'), ((12436, 12553), 'great_expectations.core.batch.PartitionRequest', 'PartitionRequest', ([], {}), "(**{'partition_identifiers': {'pipeline_stage_name': 0,\n 'airflow_run_id': 0, 'custom_key_0': 0}})\n", (12452, 12553), False, 'from great_expectations.core.batch import Batch, BatchRequest, PartitionRequest\n'), ((9241, 9264), 'great_expectations.execution_engine.PandasExecutionEngine', 'PandasExecutionEngine', ([], {}), '()\n', (9262, 9264), False, 'from great_expectations.execution_engine import PandasExecutionEngine\n'), ((10687, 10710), 'great_expectations.execution_engine.PandasExecutionEngine', 'PandasExecutionEngine', ([], {}), '()\n', (10708, 10710), False, 'from great_expectations.execution_engine import PandasExecutionEngine\n')]
|