code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
|---|---|---|
from collections.abc import MutableMapping as DictMixin
import types
import threading
import base64
import pickle
import hmac
import hashlib
import email.utils
import time
from . import errors
def parse_date(ims):
    """ Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
    try:
        fields = email.utils.parsedate_tz(ims)
        # fields[:8] is (Y, M, D, h, m, s, weekday, yearday); append dst=0.
        utc_offset = fields[9] or 0
        local_epoch = time.mktime(fields[:8] + (0,))
        # Shift from local time to UTC and remove the header's own offset.
        return local_epoch - utc_offset - time.timezone
    except (TypeError, ValueError, IndexError, OverflowError):
        # Unparseable or out-of-range timestamp.
        return None
# ---------------- [ Some helpers for string/byte handling ] ---------
def tob(s, enc='utf8'):
    """Return *s* as bytes, encoding text-like objects with *enc*."""
    if hasattr(s, 'encode'):
        return s.encode(enc)
    # Already byte-like (bytes, bytearray, memoryview, ...).
    return bytes(s)
def touni(s, enc='utf8', err='strict'):
    """Return *s* as str, decoding byte-like objects with *enc*/*err*."""
    if hasattr(s, 'decode'):  # bytes / bytearray
        return s.decode(enc, err)
    # Anything else is stringified.
    return str(s)
# ------------------[ cookie ] -------------------
def cookie_encode(data, key):
    ''' Encode and sign a pickle-able object. Return a (byte) string '''
    def _as_bytes(value, enc='utf8'):
        # Inlined byte coercion (mirrors the module-level tob()).
        encode = getattr(value, 'encode', None)
        return encode(enc) if encode else bytes(value)
    # Payload: base64 of the highest-protocol pickle.
    payload = base64.b64encode(pickle.dumps(data, -1))
    # Signature: base64 of HMAC-MD5 over the payload, keyed with *key*.
    digest = hmac.new(_as_bytes(key), payload, digestmod=hashlib.md5).digest()
    return _as_bytes('!') + base64.b64encode(digest) + _as_bytes('?') + payload
def cookie_decode(data, key):
    ''' Verify and decode an encoded string. Return an object or None.

    Expects the ``!<b64 signature>?<b64 pickle>`` layout produced by
    cookie_encode; the signature is an HMAC-MD5 of the payload under *key*.
    '''
    # Coerce text input to bytes; byte-like input passes through unchanged.
    if isinstance(data, str):
        data = data.encode('utf8')
    if isinstance(key, str):
        key = key.encode('utf8')
    # Quick structural check: does this even look like an encoded cookie?
    if data.startswith(b'!') and b'?' in data:
        sig, msg = data.split(b'?', 1)
        expected = base64.b64encode(hmac.new(key, msg, digestmod=hashlib.md5).digest())
        # hmac.compare_digest is a genuine constant-time comparison; the
        # previous hand-rolled _lscmp leaked length information because
        # zip() truncates at the shorter operand.
        if hmac.compare_digest(sig[1:], expected):
            # pickle.loads on cookie data is only acceptable because the
            # HMAC check above rejects payloads not signed with our key.
            return pickle.loads(base64.b64decode(msg))
    return None
# ------------------[ html escape `&<>'"`] -------------------
def html_escape(string):
    ''' Escape HTML special characters ``&<>`` and quotes ``'"``. '''
    # The original replace() calls had their entity strings mangled
    # (each character was "replaced" by itself); restore real entities.
    # '&' must be escaped first so later entities are not double-escaped.
    return (string.replace('&', '&amp;')
                  .replace('<', '&lt;')
                  .replace('>', '&gt;')
                  .replace('"', '&quot;')
                  .replace("'", '&#039;'))
# ------------------[ thread safe props] -------------------
def ts_props(*props, store_name=None):
    """Class decorator exposing each name in *props* as a property backed by
    a ``threading.local`` store attached to the instance as *store_name*.

    NOTE(review): *store_name* defaults to None, and ``getattr(self, None)``
    raises TypeError -- callers apparently must always pass store_name;
    confirm.
    """
    def wrapper(cls):
        local_store = None
        cls_init = cls.__init__
        def init_wrapper(self, *a, **kw):
            # Reuse an existing store on the instance or attach a fresh one.
            nonlocal local_store
            local_store = getattr(self, store_name, None)
            if local_store is None:
                local_store = threading.local()
                setattr(self, store_name, local_store)
            # Initialise every managed property slot to None for this thread.
            [setattr(local_store, k, None) for k in props]
            cls_init(self, *a, **kw)
        def make_prop(k):
            # NOTE(review): fget/fset/fdel close over the decorator-level
            # `local_store`, which is rebound on every __init__ -- with more
            # than one instance of the decorated class, all properties would
            # read/write the most recently constructed instance's store.
            # Confirm single-instance usage (or look the store up via `s`).
            def fget(s):
                return getattr(local_store, k)
            def fset(s, v):
                return setattr(local_store, k, v)
            def fdel(s):
                return delattr(local_store, k)
            doc = 'Local property: %s' % k
            return property(fget, fset, fdel, doc)
        cls.__init__ = init_wrapper
        # Install the generated properties on the class.
        [setattr(cls, p, make_prop(p)) for p in props]
        return cls
    return wrapper
# ------------------[ exposes prop callable attrubutes at instance level] -------------------
def proxy(prop, attrs, cls = None):
def injector(cls):
for attr in attrs:
setattr(
cls, attr,
(lambda _attr = attr: lambda s, *a, **kw: getattr(getattr(s, prop), _attr)(*a, **kw))()
)
return cls
return injector if not cls else injector(cls)
# ------------------[ helper classes] -------------------
class cached_property(object):
    ''' A property that is only computed once per instance and then replaces
        itself with an ordinary attribute. Deleting the attribute resets the
        property. '''

    def __init__(self, func):
        # Preserve the wrapped function's docstring on the descriptor.
        self.__doc__ = getattr(func, '__doc__')
        self.func = func

    def __get__(self, obj, cls):
        if obj is None:
            # Class-level access returns the descriptor itself.
            return self
        try:
            value = self.func(obj)
        except AttributeError as err:
            # Re-raise as a distinct type: a plain AttributeError escaping a
            # descriptor __get__ could be masked by getattr/hasattr callers.
            # `from err` preserves the original traceback for debugging
            # (the original raise dropped the causal chain).
            raise errors.PropertyGetterError(
                'AttributeError in cached_property getter of '
                f'`{self.func.__name__}`: {str(err)}'
            ) from err
        # Cache: the instance attribute now shadows this descriptor, so the
        # getter never runs again until the attribute is deleted.
        setattr(obj, self.func.__name__, value)
        return value
class NameSpace(types.SimpleNamespace):
    ''' fast Name-Space-Dict

    Attribute access (``nsd.some``) is fast; item access (``nsd['some']``)
    is 20...30% slower. Mapping helpers delegate to ``__dict__``.
    '''
    # Item syntax is just attribute syntax under another name.
    __getitem__ = types.SimpleNamespace.__getattribute__
    __setitem__ = types.SimpleNamespace.__setattr__

    def __init__(self, **kw):
        super().__init__(**kw)

    def get(self, k, d=None):
        return self.__dict__.get(k, d)

    def keys(self):
        return self.__dict__.keys()

    def values(self):
        return self.__dict__.values()

    def items(self):
        return self.__dict__.items()

    def setdefault(self, k, d):
        return self.__dict__.setdefault(k, d)

    def update(self, d):
        return self.__dict__.update(d)
class _MetaSimpleConfig(type):
    """Metaclass for SimpleConfig subclasses: validates at class-creation
    time that a subclass only declares keys allowed by its keys holder."""
    def __init__(cls, name, bases, dct):
        # Collect the allowed key set from the bases (None until a keys
        # holder has been registered via SimpleConfig.keys_holder).
        keys = cls.__get_keys__(bases)
        if keys:
            for k in dct.keys():
                # Underscore-prefixed attributes are implementation details,
                # not configuration keys.
                if k.startswith('_'):
                    continue
                if k not in keys:
                    raise KeyError(f'Unexpected key: {k}')
    @staticmethod
    def __get_keys__(bases):
        """Return the unique ``__keys__`` set found on *bases*, or None.

        Raises TypeError if two bases carry differing key sets.
        """
        ret_keys = None
        for bcls in bases:
            keys = getattr(bcls, '__keys__', None)
            if not keys:
                continue
            if not ret_keys:
                ret_keys = keys
            elif keys != ret_keys:
                raise TypeError('Multiple keys holders detected')
        return ret_keys
class SimpleConfig(metaclass=_MetaSimpleConfig):
    """Declarative config base: class attributes are (key, default) pairs.

    Instantiating a subclass does NOT create a SimpleConfig instance --
    ``__new__`` acts as a factory and returns a populated NameSpace.
    """
    @classmethod
    def keys_holder(cls, holder_cls):
        """Register *holder_cls* as the single source of allowed keys.

        Must be called on a direct subclass of object (the config root);
        raises RuntimeError if a holder is already registered, KeyError if a
        declared key would shadow the SimpleConfig API itself.
        """
        assert cls.__base__ is object
        keys_holder_cls = getattr(holder_cls, '__keys_holder__', None)
        if keys_holder_cls:
            raise RuntimeError(f'Keys holder is already registered: {keys_holder_cls}')
        keys = set(holder_cls.keys())
        for k in keys:
            # Reject keys colliding with reserved attributes/methods.
            if hasattr(cls, k):
                raise KeyError(f'Bad key `{k}`, reserved keys/attrs are {cls.keys()}')
        holder_cls.__keys__ = keys
        holder_cls.__keys_holder__ = holder_cls
        return holder_cls
    def __new__(cls, src_config=None, **kw):
        # Factory: instantiation returns a NameSpace, never a SimpleConfig.
        return cls.get_from(src_config, **kw)
    @classmethod
    def keys(cls):
        # NOTE(review): returns a set copy once __keys__ is registered but a
        # one-shot generator otherwise -- callers iterating twice observe
        # different behaviour; confirm this is intentional.
        keys = getattr(cls, '__keys__', None)
        if keys:
            return keys.copy()
        return (k for k in cls.__dict__ if not k.startswith('__'))
    @classmethod
    def items(cls):
        """Yield (key, default_value) pairs for every declared key."""
        return ((k, getattr(cls, k)) for k in cls.keys())
    @classmethod
    def get_from(cls, src_config=None, **kw):
        """Build a NameSpace resolving each key as src_config > kw > default."""
        if src_config is None:
            src_config = {}
        return NameSpace(**{
            key: src_config.get(key, kw.get(key, default))
            for key, default in cls.items()
        })
    @classmethod
    def get(cls, k, default=None):
        """Return the declared default for *k*, or *default* if *k* unknown."""
        return getattr(cls, k) if k in cls.keys() else default
# ------------------[ headers parsing] -------------------
def _hval(value):
if not (value is None or isinstance(value, (str, int, float, bool))):
raise TypeError(f"Header value must be type of (str, int, float, bool, None), got: {type(value)}")
value = str(value)
if '\n' in value or '\r' in value or '\0' in value:
raise ValueError("Header value must not contain control characters: %r" % value)
return value
@proxy('dict', 'keys pop popitem values items get'.split())
class HeaderDict(DictMixin):
    # A MutableMapping whose backing dict lives in thread-local storage;
    # plain dict methods (keys/pop/popitem/values/items/get) are forwarded
    # to it by the @proxy decorator above.
    # NOTE(review): because `_ts` is a threading.local, `self._ts.dict` only
    # exists in the thread that ran __init__ (or assigned `dict`); other
    # threads hit AttributeError until they assign one -- confirm intended.
    __slots__ = ('_ts',)
    # Expose the thread-local backing mapping as a read/write attribute.
    dict = property(
        (lambda s: s._ts.dict),
        (lambda s, v: setattr(s._ts, 'dict', v)),
    )
    def __init__(self, *a, **kw):
        self._ts = threading.local()
        self._ts.dict = dict(*a, **kw)
    def __len__(self):
        return len(self._ts.dict)
    def __iter__(self):
        return self._ts.dict.__iter__()
    def __contains__(self, key):
        return key in self._ts.dict
    def __delitem__(self, key):
        del self._ts.dict[key]
    def __getitem__(self, key):
        return self._ts.dict[key]
    def __setitem__(self, key, value):
        # Scalar values are validated/stringified by _hval; list values
        # (multi-valued headers) are only ever produced via append().
        self._ts.dict[key] = _hval(value)
    def copy(self):
        # List values are sliced so the copy does not share them with the
        # original; scalars are immutable strings and can be shared.
        ret = self.__class__()
        ret.dict = {k: (v[:] if isinstance(v, list) else v) for k, v in self.items()}
        return ret
    def setdefault(self, key, value):
        return self._ts.dict.setdefault(key, _hval(value) if not isinstance(value, list) else value)
    def append(self, key, value):
        # Add a value under *key*, promoting an existing scalar to a list.
        d = self._ts.dict
        value = _hval(value)
        v = d.get(key)
        if v is None:
            d[key] = value
        elif isinstance(v, list):
            v.append(value)
        else:
            d[key] = [v, value]
    def clear(self, *names):
        # clear() empties everything; clear('A', 'B') removes only those keys.
        if names:
            for n in names:
                if n in self:
                    del self[n]
        else:
            self._ts.dict.clear()
    def update(self, d):
        # NOTE(review): bypasses _hval validation, unlike __setitem__ --
        # confirm callers only pass pre-validated mappings.
        self._ts.dict.update(d)
    def __repr__(self):
        return f'<{self.__class__.__name__}: {self.dict}>'
class HeaderProperty:
__slots__ = ('name', 'default', 'reader', 'writer', '__doc__')
def __init__(self, name, reader=None, writer=None, default=''):
self.name, self.default = name, default
self.reader, self.writer = reader, writer
self.__doc__ = 'Current value of the %r header.' % name.title()
def __get__(self, obj, cls):
if obj is None:
return self
value = obj.headers.get(self.name, self.default)
return self.reader(value) if self.reader else value
def __set__(self, obj, value):
obj.headers[self.name] = self.writer(value) if self.writer else value
def __delete__(self, obj):
del obj.headers[self.name]
class WSGIFileWrapper(object):
    """Iterate a file-like object in fixed-size chunks, mirroring the
    wrapped object's file API (fileno/close/read/... ) where available."""

    def __init__(self, fp, buffer_size=1024 * 64):
        self.fp = fp
        self.buffer_size = buffer_size
        # Copy over whichever standard file methods fp provides.
        for name in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'):
            method = getattr(fp, name, None)
            if method is not None:
                setattr(self, name, method)

    def __iter__(self):
        chunk_size = self.buffer_size
        read = self.read
        while True:
            chunk = read(chunk_size)
            if not chunk:
                break
            yield chunk
|
[
"time.mktime",
"threading.local",
"base64.b64decode",
"pickle.dumps"
] |
[((1041, 1063), 'pickle.dumps', 'pickle.dumps', (['data', '(-1)'], {}), '(data, -1)\n', (1053, 1063), False, 'import pickle\n'), ((7948, 7965), 'threading.local', 'threading.local', ([], {}), '()\n', (7963, 7965), False, 'import threading\n'), ((362, 388), 'time.mktime', 'time.mktime', (['(ts[:8] + (0,))'], {}), '(ts[:8] + (0,))\n', (373, 388), False, 'import time\n'), ((1947, 1968), 'base64.b64decode', 'base64.b64decode', (['msg'], {}), '(msg)\n', (1963, 1968), False, 'import base64\n'), ((2676, 2693), 'threading.local', 'threading.local', ([], {}), '()\n', (2691, 2693), False, 'import threading\n')]
|
#!/usr/bin/env python
# coding: utf-8
# Copyright 2021 MICAS, KU LEUVEN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------
# Author: <NAME>
# Function: Automatic Framework for MAC benchmarking
# Synthesize - Power Simulation - Power Extraction
# Extract area and power breakdown in breakdown dir
# -----------------------------------------------------
from imports import *
import config as CFG
# IMPORTANT NOTE:
# DVAFS_0 OR DVAFS = False -> FU Designs
# DVAFS_1 OR DVAFS = True  -> SWU Designs
DVAFS = False
# Create logger object for logging purposes
# NOTE(review): `logging`, `time` and `product` are presumably provided by
# `from imports import *` above -- confirm.
logger = logging.getLogger("auto_L4")
# start time of script execution (epoch seconds; used by main() for the
# final wall-clock report)
start_time = time.time()
# Clock periods to be synthesized in 'ns'
CLK_LIST = [1.00, 5.00]
# Precisions to be tested, encoded as 4-bit mode strings.
# Supported precisions are:
# 8x8 (0000)
# 8x4 (0010)
# 8x2 (0011)
# 4x4 (1010)
# 2x2 (1111)
if not DVAFS:
    # FU precision list
    PREC = ["0000", "0010", "0011", "1010", "1111"]
else:
    # SWU precision list
    PREC = ["0000", "1010", "1111"]
# Cartesian product precision x clock period: one power simulation per pair.
PREC_LIST = list(product(PREC, CLK_LIST))
def main():
    """Run the full MAC benchmarking flow: synthesis for every clock period,
    power simulation for every (precision, clock) pair, then extraction of
    the power/area breakdowns."""
    logger.info("Starting Script!")
    # Populate temporary directory at CFG.TMP_DIR
    CFG.populate_tmp_dir(CLK_LIST)
    # Create MultiProcessing pool with 4 threads for synthesis
    # Each MP thread spawns an 8-thread process (controlled by syn_L4_mac)
    pool = mp.Pool(4)
    # Synthesize designs in CFG.DESIGN_NAMES list
    pool.starmap(CFG.synthesis, product(CLK_LIST, CFG.DESIGN_NAMES))
    logger.info(f"Synthesized all designs! Starting power simulations")
    pool.close()
    pool.join()
    # Create new Multi-Processing pool with 24 threads for power simulations
    pool = mp.Pool(24)
    pool.starmap(CFG.power_simulation, product(PREC_LIST, CFG.DESIGN_NAMES))
    logger.info("Finished Power Simulations!")
    ############ Power and Area Breakdown ############
    pool.starmap(CFG.generate_breakdown_df, product(CLK_LIST, [PREC], [DVAFS]))
    pool.close()
    pool.join()
    # Remove the temporary working directory.
    CFG.cleanup(CFG.TMP_DIR)
    # NOTE(review): `log_file` is not defined in this module; presumably it
    # comes from `from imports import *` -- confirm, otherwise this line
    # raises NameError at the end of a successful run.
    logger.info(f"Log messages saved to ./{log_file}")
    # Report total wall-clock duration as H:MM:SS via timedelta.
    end_time = round(time.time() - start_time)
    end_time = timedelta(seconds=end_time)
    logger.info(f"THE SCRIPT TOOK ({end_time}) TO FINISH")
def _emergency_cleanup():
    """Best-effort teardown shared by both exception handlers below: kill
    any running EDA tool processes, delete the TMP directory and exit.
    (The original duplicated this block verbatim in each handler.)"""
    try:
        os.system("killall genus")
        os.system("killall vsim")
        os.system("killall sed")
        CFG.cleanup(CFG.TMP_DIR)
        sys.exit(0)
    except Exception as E:
        logger.warning(f"Couldn't exit normally - faced Exception: {E}")
        # Hard exit: skips atexit handlers, but guarantees termination.
        os._exit(0)
# To handle exceptions in a clean way
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Handle KeyboardInterrupt
        # Kill all running processes and delete TMP directory
        logger.warning("Interrupted - Cleaning up and exiting")
        _emergency_cleanup()
    except OSError as OSE:
        logger.warning(f"OSError: Exception: {OSE}")
        _emergency_cleanup()
|
[
"config.populate_tmp_dir",
"config.cleanup"
] |
[((1819, 1849), 'config.populate_tmp_dir', 'CFG.populate_tmp_dir', (['CLK_LIST'], {}), '(CLK_LIST)\n', (1839, 1849), True, 'import config as CFG\n'), ((2638, 2662), 'config.cleanup', 'CFG.cleanup', (['CFG.TMP_DIR'], {}), '(CFG.TMP_DIR)\n', (2649, 2662), True, 'import config as CFG\n'), ((3289, 3313), 'config.cleanup', 'CFG.cleanup', (['CFG.TMP_DIR'], {}), '(CFG.TMP_DIR)\n', (3300, 3313), True, 'import config as CFG\n'), ((3689, 3713), 'config.cleanup', 'CFG.cleanup', (['CFG.TMP_DIR'], {}), '(CFG.TMP_DIR)\n', (3700, 3713), True, 'import config as CFG\n')]
|
#!/usr/bin/env python
#https://docs.opencv.org/3.3.1/d7/d8b/tutorial_py_lucas_kanade.html
import numpy as np
import cv2
import sys
feature_params = dict( maxCorners = 100,   # params for ShiTomasi corner detection
                        qualityLevel = 0.2, # previously tried: 0.3
                        minDistance = 7,    # previously tried: 12
                        blockSize = 7)      # previously tried: 12
lk_params = dict( winSize  = (50, 50),      # Parameters for Lucas-Kanade optical flow;
                                            # previously tried winSize (200,200), (15,15)
                  maxLevel = 2,
                  criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 100, 0.03))
color = np.random.randint(0,255,(100,3)) # Create some random colors (one BGR triple per tracked point)
#color = (0, 0, 255)
#color_of = (0, 255, 0)
pause = False
frameNumber = 0
i = 0
# Spinner glyphs for the console progress indicator.
progressArray = ['-', '\\', '|', '/' ]
# Per-frame feature counts, appended in the main loop.
structures = []
corners = np.ndarray([])
#######################################################################################################
# Alternative input videos (kept for convenience):
#capFileName = '../obtaningData/basin_DNS.avi'
#apFileName = '../obtaningData/simpson_1972_small.mpg'
#capFileName = '../obtaningData/simpson_1972_fast.mpg'
#capFileName = '../obtaningData/Simpson/frontSimpson.mpg'
capFileName = '../obtaningData/Neufeld/neufeld.mpg'
#capFileName = '../obtaningData/lockExchangeSimpson_filipi_Re2445/test.mp4'
#capFileName = '../frenteSimpson.mpg'
#capFileName = '../obtaningData/Mariana/mariana.mp4'
#######################################################################################################
cap = cv2.VideoCapture(capFileName)
# Take first frame and find corners in it
# NOTE(review): the range(0, 2) loop plus the extra read below consumes three
# frames in total -- presumably to skip initial frames; confirm.
for i in range(0, 2):
    ret, old_frame = cap.read()
ret, old_frame = cap.read()
# All-white canvas (inverted zeros) that accumulates the track lines.
mask = np.zeros_like(old_frame)
mask = (255-mask)
frame = old_frame
cv2.imshow('frame', frame)
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)
# Main loop: ESC (27) quits; SPACE (32) advances and processes one frame.
while(1):
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
    if k == 32:
        # (frameNumber)
        frameNumber = frameNumber + 1
        # uncomment here to re-detect the corners
        #old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
        #p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)
        old_gray_test = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
        p3 = cv2.goodFeaturesToTrack(old_gray_test, mask = None, **feature_params)
        frame_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
        # calculate optical flow
        p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
        # Select good points (st == 1 marks successfully tracked features)
        good_new = p1[st==1]
        good_old = p0[st==1]
        corner_new = p3.reshape(-1,2) # these freshly detected corners come in a different count, so
                                      # the conditional index st cannot be used; the reshape turns the
                                      # result into one array of (x, y) rows instead of nested
                                      # single-point arrays
        print(corner_new.size)
        # draw the tracks
        # NOTE(review): `k` (the key code read above) is reused as the
        # enumerate index here and below, and later written to the progress
        # line and `structures` -- confirm those uses expect the index.
        for k,(corner) in enumerate(corner_new):
            e,f = corner.ravel()
            frame2 = cv2.circle(old_frame,(e,f),5,color[k].tolist(),-1)
        cv2.imshow('frame2', frame2)
        for k,(new,old) in enumerate(zip(good_new,good_old)):
            a,b = new.ravel()
            c,d = old.ravel()
            # Track line on the accumulating mask, dot on the frame.
            mask = cv2.line(mask, (a,b),(c,d), color[k].tolist(), 2)
            frame = cv2.circle(old_frame,(a,b),5,color[k].tolist(),-1)
        # Combine the white mask with the frame so tracks stay visible.
        mask = np.bitwise_and(mask, frame)
        cv2.imshow('mask', mask)
        cv2.imshow('frame', frame)
        # Now update the previous frame and previous points
        old_gray = frame_gray.copy()
        p0 = good_new.reshape(-1,1,2)
        # Advance the console spinner and rewrite the progress line in place.
        i = ( i + 1 ) % 4
        sys.stdout.write('\rprocessing frames...[{0}] - {1} {2} '.format(frameNumber, k, progressArray[i]))
        sys.stdout.flush()
        ret, old_frame = cap.read()
        # Index of the last drawn feature (i.e. feature count - 1).
        structures.append(k)
        if old_frame is None:
            # End of the video stream.
            break
cv2.destroyAllWindows()
cap.release()
|
[
"numpy.zeros_like",
"cv2.cvtColor",
"cv2.destroyAllWindows",
"cv2.waitKey",
"cv2.VideoCapture",
"numpy.random.randint",
"cv2.goodFeaturesToTrack",
"numpy.bitwise_and",
"sys.stdout.flush",
"cv2.calcOpticalFlowPyrLK",
"cv2.imshow",
"numpy.ndarray"
] |
[((611, 646), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)', '(100, 3)'], {}), '(0, 255, (100, 3))\n', (628, 646), True, 'import numpy as np\n'), ((822, 836), 'numpy.ndarray', 'np.ndarray', (['[]'], {}), '([])\n', (832, 836), True, 'import numpy as np\n'), ((1490, 1519), 'cv2.VideoCapture', 'cv2.VideoCapture', (['capFileName'], {}), '(capFileName)\n', (1506, 1519), False, 'import cv2\n'), ((1653, 1677), 'numpy.zeros_like', 'np.zeros_like', (['old_frame'], {}), '(old_frame)\n', (1666, 1677), True, 'import numpy as np\n'), ((1715, 1741), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (1725, 1741), False, 'import cv2\n'), ((1754, 1797), 'cv2.cvtColor', 'cv2.cvtColor', (['old_frame', 'cv2.COLOR_BGR2GRAY'], {}), '(old_frame, cv2.COLOR_BGR2GRAY)\n', (1766, 1797), False, 'import cv2\n'), ((1804, 1866), 'cv2.goodFeaturesToTrack', 'cv2.goodFeaturesToTrack', (['old_gray'], {'mask': 'None'}), '(old_gray, mask=None, **feature_params)\n', (1827, 1866), False, 'import cv2\n'), ((5094, 5117), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5115, 5117), False, 'import cv2\n'), ((1903, 1918), 'cv2.waitKey', 'cv2.waitKey', (['(30)'], {}), '(30)\n', (1914, 1918), False, 'import cv2\n'), ((2300, 2343), 'cv2.cvtColor', 'cv2.cvtColor', (['old_frame', 'cv2.COLOR_BGR2GRAY'], {}), '(old_frame, cv2.COLOR_BGR2GRAY)\n', (2312, 2343), False, 'import cv2\n'), ((2358, 2425), 'cv2.goodFeaturesToTrack', 'cv2.goodFeaturesToTrack', (['old_gray_test'], {'mask': 'None'}), '(old_gray_test, mask=None, **feature_params)\n', (2381, 2425), False, 'import cv2\n'), ((2482, 2525), 'cv2.cvtColor', 'cv2.cvtColor', (['old_frame', 'cv2.COLOR_BGR2GRAY'], {}), '(old_frame, cv2.COLOR_BGR2GRAY)\n', (2494, 2525), False, 'import cv2\n'), ((2581, 2650), 'cv2.calcOpticalFlowPyrLK', 'cv2.calcOpticalFlowPyrLK', (['old_gray', 'frame_gray', 'p0', 'None'], {}), '(old_gray, frame_gray, p0, None, **lk_params)\n', (2605, 2650), False, 'import cv2\n'), 
((3626, 3654), 'cv2.imshow', 'cv2.imshow', (['"""frame2"""', 'frame2'], {}), "('frame2', frame2)\n", (3636, 3654), False, 'import cv2\n'), ((4349, 4376), 'numpy.bitwise_and', 'np.bitwise_and', (['mask', 'frame'], {}), '(mask, frame)\n', (4363, 4376), True, 'import numpy as np\n'), ((4400, 4424), 'cv2.imshow', 'cv2.imshow', (['"""mask"""', 'mask'], {}), "('mask', mask)\n", (4410, 4424), False, 'import cv2\n'), ((4437, 4463), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (4447, 4463), False, 'import cv2\n'), ((4875, 4893), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4891, 4893), False, 'import sys\n')]
|
#!/usr/bin/python
# This script predicts body part in test dataset
import os
# Select the first physical GPU before TensorFlow is imported.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import numpy as np
import tensorflow
from tensorflow import keras
from keras import optimizers
from keras.models import load_model
from keras.preprocessing import image
import csv
import re
# Results CSV, opened in append mode so repeated runs accumulate rows.
csvFile = open('delta.csv', 'a', newline="")
csvWriter = csv.writer(csvFile)
# Loading and Compiling Model
MODEL = load_model('inception_v3_0.9635416865348816.h5')
MODEL.compile(optimizer=optimizers.RMSprop(lr=2e-5),
              loss='categorical_crossentropy',
              metrics=['acc'])
# Path of image you want to predict
# Readable names for the model's 12 output classes.
# Hoisted out of the loop: the mapping is constant per run.
CATEGORIES = {0: 'anal', 1: 'arms', 2: 'armsAndHands',
              3: 'face', 4: 'feet', 5: 'genitalsFemale',
              6: 'genitalsMale', 7: 'hands', 8: 'head',
              9: 'legs', 10: 'legsAndfeet', 11: 'torso'}
# Predict the body part for every image in the test folder and log the
# prediction, its confidence and the confidence margin (delta) to CSV.
for imageFile in os.listdir('./tests/images/'):
    # Ground-truth class is encoded in the file name, e.g. "face-12.jpg".
    # Raw string fixes the invalid `\d` escape in the original pattern.
    realClass = re.sub(r"([a-zA-Z]+)\-(\d+).jpg", r"\1", imageFile)
    # Convert the image to the (1, 299, 299, 3) array Inception-v3 expects.
    IMG = image.load_img('./tests/images/'+imageFile, target_size=(299, 299))
    X = image.img_to_array(IMG)
    X = np.expand_dims(X, axis=0)
    IMAGES = np.vstack([X])
    # The actual prediction
    CLASSES = MODEL.predict(IMAGES, batch_size=10)
    # Hoisted: argmax was recomputed five times per image in the original.
    predicted = CLASSES.argmax()
    maxV = CLASSES[0][predicted]
    # delta: gap between the top prediction and the mean of the rest.
    newClassesWithoutMax = np.delete(CLASSES[0], predicted)
    delta = maxV - newClassesWithoutMax.mean()
    print('Predicted Classes for Images: {}'.format(CATEGORIES[predicted]))
    print("max prediction is", maxV)
    print("delta is", delta)
    csvWriter.writerow([imageFile, realClass, CATEGORIES[predicted], maxV, delta])
|
[
"keras.models.load_model",
"csv.writer",
"numpy.expand_dims",
"keras.preprocessing.image.img_to_array",
"keras.preprocessing.image.load_img",
"keras.optimizers.RMSprop",
"re.sub",
"os.listdir",
"numpy.vstack"
] |
[((420, 439), 'csv.writer', 'csv.writer', (['csvFile'], {}), '(csvFile)\n', (430, 439), False, 'import csv\n'), ((480, 528), 'keras.models.load_model', 'load_model', (['"""inception_v3_0.9635416865348816.h5"""'], {}), "('inception_v3_0.9635416865348816.h5')\n", (490, 528), False, 'from keras.models import load_model\n'), ((714, 743), 'os.listdir', 'os.listdir', (['"""./tests/images/"""'], {}), "('./tests/images/')\n", (724, 743), False, 'import os\n'), ((787, 839), 're.sub', 're.sub', (['"""([a-zA-Z]+)\\\\-(\\\\d+).jpg"""', '"""\\\\1"""', 'imageFile'], {}), "('([a-zA-Z]+)\\\\-(\\\\d+).jpg', '\\\\1', imageFile)\n", (793, 839), False, 'import re\n'), ((898, 967), 'keras.preprocessing.image.load_img', 'image.load_img', (["('./tests/images/' + imageFile)"], {'target_size': '(299, 299)'}), "('./tests/images/' + imageFile, target_size=(299, 299))\n", (912, 967), False, 'from keras.preprocessing import image\n'), ((974, 997), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['IMG'], {}), '(IMG)\n', (992, 997), False, 'from keras.preprocessing import image\n'), ((1006, 1031), 'numpy.expand_dims', 'np.expand_dims', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (1020, 1031), True, 'import numpy as np\n'), ((1045, 1059), 'numpy.vstack', 'np.vstack', (['[X]'], {}), '([X])\n', (1054, 1059), True, 'import numpy as np\n'), ((553, 581), 'keras.optimizers.RMSprop', 'optimizers.RMSprop', ([], {'lr': '(2e-05)'}), '(lr=2e-05)\n', (571, 581), False, 'from keras import optimizers\n')]
|
import pandas as pd
# Survey data: each row is "kerb, crush1, crush2, ..." padded with NaNs.
data = pd.read_csv("anonymized_crushes.csv")
# indegree: how many respondents list this kerb as a crush
kerb_in = {}
# outdegree: how many distinct crushes this kerb listed
kerb_out = {}
for row in range(0, data.shape[0]):
    # filter out all the NaNs
    person_info = list(filter(lambda x: not (pd.isna(x)), data.iloc[row]))
    kerb = person_info[0]
    crushes = list(person_info[1:])
    # remove duplicates
    crushes = list(set(crushes))
    # make sure this entry exists in both dictionaries
    if kerb not in kerb_in:
        kerb_in[kerb] = 0
    if kerb not in kerb_out:
        kerb_out[kerb] = 0
    # make sure all crushes exist in the indegree
    for crush in crushes:
        if crush not in kerb_in:
            kerb_in[crush] = 0
    # indegree
    for crush in crushes:
        kerb_in[crush] = kerb_in[crush] + 1
    # outdegree
    outdegree = len(crushes)
    kerb_out[kerb] = outdegree
# Emit one row per kerb that filled out the survey.
# NOTE(review): kerbs that only appear as crushes exist in kerb_in but not
# in kerb_out, so they are excluded from the output -- confirm intended.
output = []
for kerb in kerb_out:
    indegree = kerb_in[kerb]
    outdegree = kerb_out[kerb]
    output.append([kerb, indegree, outdegree])
csv_out = pd.DataFrame(output, columns=["kerb", "indegree", "outdegree"])
csv_out.to_csv("InOutDegrees.csv", index=False)
|
[
"pandas.read_csv",
"pandas.isna",
"pandas.DataFrame"
] |
[((28, 65), 'pandas.read_csv', 'pd.read_csv', (['"""anonymized_crushes.csv"""'], {}), "('anonymized_crushes.csv')\n", (39, 65), True, 'import pandas as pd\n'), ((1001, 1064), 'pandas.DataFrame', 'pd.DataFrame', (['output'], {'columns': "['kerb', 'indegree', 'outdegree']"}), "(output, columns=['kerb', 'indegree', 'outdegree'])\n", (1013, 1064), True, 'import pandas as pd\n'), ((229, 239), 'pandas.isna', 'pd.isna', (['x'], {}), '(x)\n', (236, 239), True, 'import pandas as pd\n')]
|
# import the necessary packages
from src.KeyClipWriter import KeyClipWriter
from imutils.video import VideoStream
import argparse
import datetime
import imutils
import time
import cv2
import os
class VideoFeedClipper(object):
    """Buffers frames from a camera/video feed and, on demand, writes the
    buffered key clip (video + per-frame timestamp CSV) to disk via
    KeyClipWriter."""
    def __init__(self, buffer=100, timeout=1.0, usePiCamera=False, width=320, output_folder=None, fps=20.0):
        # store the maximum buffer size of frames to be kept
        # in memory along with the sleep timeout during threading
        self.bufSize = buffer
        self.timeout = timeout
        self.usePiCamera = usePiCamera
        self.width = width
        # Filesystem-safe ISO-like timestamp format used in output names.
        self.timestamp_format = '%Y-%m-%dT%H-%M-%S'
        if output_folder is None: raise Exception("output_folder cannot be `None`.")
        self.output_folder = output_folder
        # TODO: should we create output folder beforehand?
        self.fps=fps
        self.running = False
        # initialize the video stream and allow the camera sensor to
        # warmup
        print("[INFO] warming up camera...")
        # initialize videostream
        self.vs = VideoStream(usePiCamera=self.usePiCamera).start()
        time.sleep(2.0)
        # initialize key clip writer
        self.kcw = KeyClipWriter(bufSize=self.bufSize)
    # NOTE(review): class attribute, apparently a leftover -- it is never
    # read or updated anywhere in this class; confirm before removing.
    consecFrames = 0
    def run(self, showframe):
        """Grab one frame, timestamp it, resize it, push it into the clip
        buffer and optionally display it."""
        # TODO: implement while loop with fps control here instead of on the outside?
        # get the original frame
        self.frame = self.vs.read()
        # get the timestamp, keep the datetime format here
        self.timestamp = datetime.datetime.now()
        # reshape it to desired resolution
        # this fixes the issue of nasty resolution when not using PiCamera
        self.frame = imutils.resize(self.frame, width = self.width)
        # update the key frame clip buffer
        self.update_kwc()
        if showframe:
            # making a copy to show a red rectangle without affecting the original
            # comes with a bit of a performance hit.
            # But the major hit in performance is showing the video itself
            # to_show = self.frame.copy()
            #if self.kcw.recording:
            #    to_show = cv2.rectangle(self.frame.copy(), (5, 5), (25, 25), (0, 0, 255), -1)
            cv2.imshow("frame", self.frame)#to_show)
            cv2.waitKey(1)
    def update_kwc(self):
        """Append the current (frame, timestamp) pair to the clip buffer."""
        # put frames to queue as tupple
        self.kcw.update((self.frame, self.timestamp))
    def trigger_recording(self):
        """Start writing the buffered clip to output_folder as an AVI plus a
        CSV of per-frame timestamps, both named after the current time."""
        # Start recording
        # we first update the queues so when we start we can get the resolution from
        # self.frames[0].shape
        # This can be changed by passing resolution as a number
        init_timestamp = datetime.datetime.now().strftime(self.timestamp_format)
        # TODO: extension hardcoded here.
        video_filename = f"{init_timestamp}_video_clip.avi"
        timestamp_filename = f"{init_timestamp}_timestamp.csv"
        video_path = os.path.join(self.output_folder, video_filename)
        timestamp_path = os.path.join(self.output_folder, timestamp_filename)
        # TODO: codec hardcoded here
        self.kcw.start(
            timestampPath = timestamp_path,
            videoPath = video_path,
            fourcc=cv2.VideoWriter_fourcc(*'XVID'),
            fps=self.fps)
    def finish_kwc(self):
        """Flush and close the key clip writer."""
        # kill the kwc object nicely
        self.kcw.finish()
    def stop(self):
        """Stop any active recording, close windows and release the stream."""
        if self.kcw.recording:
            self.finish_kwc()
        # finish video and kill videostream
        cv2.destroyAllWindows()
        self.vs.stop()
|
[
"imutils.video.VideoStream",
"os.path.join",
"cv2.VideoWriter_fourcc",
"cv2.waitKey",
"cv2.imshow",
"src.KeyClipWriter.KeyClipWriter",
"time.sleep",
"imutils.resize",
"cv2.destroyAllWindows",
"datetime.datetime.now"
] |
[((1050, 1065), 'time.sleep', 'time.sleep', (['(2.0)'], {}), '(2.0)\n', (1060, 1065), False, 'import time\n'), ((1110, 1145), 'src.KeyClipWriter.KeyClipWriter', 'KeyClipWriter', ([], {'bufSize': 'self.bufSize'}), '(bufSize=self.bufSize)\n', (1123, 1145), False, 'from src.KeyClipWriter import KeyClipWriter\n'), ((1406, 1429), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1427, 1429), False, 'import datetime\n'), ((1551, 1595), 'imutils.resize', 'imutils.resize', (['self.frame'], {'width': 'self.width'}), '(self.frame, width=self.width)\n', (1565, 1595), False, 'import imutils\n'), ((2617, 2665), 'os.path.join', 'os.path.join', (['self.output_folder', 'video_filename'], {}), '(self.output_folder, video_filename)\n', (2629, 2665), False, 'import os\n'), ((2685, 2737), 'os.path.join', 'os.path.join', (['self.output_folder', 'timestamp_filename'], {}), '(self.output_folder, timestamp_filename)\n', (2697, 2737), False, 'import os\n'), ((3091, 3114), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3112, 3114), False, 'import cv2\n'), ((2002, 2033), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'self.frame'], {}), "('frame', self.frame)\n", (2012, 2033), False, 'import cv2\n'), ((2046, 2060), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2057, 2060), False, 'import cv2\n'), ((998, 1039), 'imutils.video.VideoStream', 'VideoStream', ([], {'usePiCamera': 'self.usePiCamera'}), '(usePiCamera=self.usePiCamera)\n', (1009, 1039), False, 'from imutils.video import VideoStream\n'), ((2399, 2422), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2420, 2422), False, 'import datetime\n'), ((2861, 2892), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (2883, 2892), False, 'import cv2\n')]
|
import torch
import torchvision.models as models
import torch.autograd.profiler as profiler
# Workload under measurement: a (randomly initialized) ResNet-18 and a
# batch of 5 random RGB 224x224 images.
model = models.resnet18()
inputs = torch.randn(5, 3, 224, 224)
# Profile CPU time, recording input shapes so results can be grouped by shape.
with profiler.profile(record_shapes=True) as prof:
    with profiler.record_function("model_inference"):
        model(inputs)
print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=10))
print(prof.key_averages(group_by_input_shape=True).table(sort_by="cpu_time_total", row_limit=10))
# Second pass: profile memory allocations as well.
with profiler.profile(profile_memory=True, record_shapes=True) as prof:
    model(inputs)
print(prof.key_averages().table(sort_by="self_cpu_memory_usage", row_limit=10))
print(prof.key_averages().table(sort_by="cpu_memory_usage", row_limit=10))
# Third pass: export a Chrome-trace timeline (viewable at chrome://tracing).
with profiler.profile() as prof:
    with profiler.record_function("model_inference"):
        model(inputs)
prof.export_chrome_trace("trace.json")
|
[
"torchvision.models.resnet18",
"torch.autograd.profiler.record_function",
"torch.randn",
"torch.autograd.profiler.profile"
] |
[((105, 122), 'torchvision.models.resnet18', 'models.resnet18', ([], {}), '()\n', (120, 122), True, 'import torchvision.models as models\n'), ((133, 160), 'torch.randn', 'torch.randn', (['(5)', '(3)', '(224)', '(224)'], {}), '(5, 3, 224, 224)\n', (144, 160), False, 'import torch\n'), ((169, 205), 'torch.autograd.profiler.profile', 'profiler.profile', ([], {'record_shapes': '(True)'}), '(record_shapes=True)\n', (185, 205), True, 'import torch.autograd.profiler as profiler\n'), ((480, 537), 'torch.autograd.profiler.profile', 'profiler.profile', ([], {'profile_memory': '(True)', 'record_shapes': '(True)'}), '(profile_memory=True, record_shapes=True)\n', (496, 537), True, 'import torch.autograd.profiler as profiler\n'), ((735, 753), 'torch.autograd.profiler.profile', 'profiler.profile', ([], {}), '()\n', (751, 753), True, 'import torch.autograd.profiler as profiler\n'), ((225, 268), 'torch.autograd.profiler.record_function', 'profiler.record_function', (['"""model_inference"""'], {}), "('model_inference')\n", (249, 268), True, 'import torch.autograd.profiler as profiler\n'), ((773, 816), 'torch.autograd.profiler.record_function', 'profiler.record_function', (['"""model_inference"""'], {}), "('model_inference')\n", (797, 816), True, 'import torch.autograd.profiler as profiler\n')]
|
import random
from rule_constants import trial_info
OP_TEXTS = {"AT": "Attacke", "PA": "Parade", "FK": "Schuss", "AW": "Ausweichen"}
def W6():
return random.randint(1, 6)
def W20():
return random.randint(1, 20)
class TP:
def __init__(self, weapon, effects):
self.dice_rolls = []
self.dice, self.bonus = weapon[1]["TP"]
for _ in range(self.dice):
self.dice_rolls.append(W6())
self.result = sum(self.dice_rolls) + self.bonus + sum(b for (_, b) in effects.values())
self.effects = effects
def __repr__(self):
dice_str = f" ({self.dice}W6+{self.bonus})" if not self.effects else ""
res = f"\n\nMögliche TP: {self.result}{dice_str}"
if self.effects:
res += f"\n{self.dice}W6+{self.bonus}: {sum(self.dice_rolls) + self.bonus}"
for effect, (_, value) in self.effects.items():
res += f"\n{effect}: {value}"
return res + "\n"
class ResultMeta(type):
def __repr__(cls):
return cls.title
class Result(metaclass=ResultMeta):
def __init__(self, character, trial, critical, dice_rolls, bonus_or_malus,
quality=None):
self.character = character
self.trial = trial
self.critical = (critical >= 2)
self.terrible = (critical == 3)
self.dice_rolls = dice_rolls
self.quality = quality
self.bonus_or_malus = bonus_or_malus
_, self.kind, self.modifier = trial_info(trial)
def __repr__(self):
if not self.terrible:
res = (f"{self.character.name}s {self.kind} {self.trial} ist ein "
f"{'kritischer ' if self.critical else ''}{self.title}\n\n")
else:
res = (f"{self.character.name}s {self.kind} {self.trial} endet in einem"
f"schrecklichen Missgeschick!\n\n")
return res
def dice_str(self):
res = (4 * " ").join(f"{pr}: {roll} / {getattr(self.character, pr)}"
for (pr, roll) in self.dice_rolls) + "\n"
fw, modifier_value = self.character.FW(self.trial)
if fw:
res += f"\nBonus aus FW: {fw}"
if modifier_value:
res += f"\nMalus aus {self.modifier}: {-modifier_value}"
if self.bonus_or_malus:
res += f"\nBonus/Malus: {self.bonus_or_malus}"
for impairment, impact in self.character.impairments.items():
if impact:
res += f"\n{impairment}: {-impact}"
return res
class Success(Result):
title = "Erfolg!"
def __repr__(self):
return super().__repr__() + f"Qualitätsstufe {self.quality}\n\n" + self.dice_str()
class Failure(Result):
title = "Fehlschlag!"
def __repr__(self):
return super().__repr__() + self.dice_str()
class AttackResult(metaclass=ResultMeta):
def __init__(self, character, weapon, operation, critical, dice_rolls,
effects, possible_tp):
self.character = character
self.weapon = weapon
self.operation = operation
self.critical = critical
self.dice_rolls = dice_rolls
self.effects = effects
self.kind = OP_TEXTS[operation]
self.possible_tp = possible_tp
def dice_str(self):
res = f"\n\n{self.operation}: {self.dice_rolls[0]} / {self.weapon[1][self.operation]}"
if len(self.dice_rolls) > 1:
res += f" (Bestätigungswurf: {self.dice_rolls[1]})"
return res + "\n"
def __repr__(self):
wpn = f" ({self.weapon[0]})" if self.weapon[0] else ""
res = (f"{self.character.name}s {self.kind}{wpn} ist ein "
f"{'kritischer ' if self.critical else ''}{self.title}")
res += self.dice_str()
for effect, impact in self.effects.items():
if impact[0]:
res += f"\n{effect}: {impact[0]}"
for impairment, impact in self.character.impairments.items():
if impact:
res += f"\n{impairment}: {-impact}"
if self.possible_tp:
res += str(self.possible_tp)
return res
class AttackSuccess(AttackResult):
title = "Erfolg!"
class AttackFailure(AttackResult):
title = "Fehlschlag!"
|
[
"rule_constants.trial_info",
"random.randint"
] |
[((156, 176), 'random.randint', 'random.randint', (['(1)', '(6)'], {}), '(1, 6)\n', (170, 176), False, 'import random\n'), ((200, 221), 'random.randint', 'random.randint', (['(1)', '(20)'], {}), '(1, 20)\n', (214, 221), False, 'import random\n'), ((1470, 1487), 'rule_constants.trial_info', 'trial_info', (['trial'], {}), '(trial)\n', (1480, 1487), False, 'from rule_constants import trial_info\n')]
|
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.contrib import admin
from django.contrib.auth.models import Group
from django.contrib.admin.models import LogEntry
from django.contrib.admin.sites import AdminSite
from django.views.decorators.cache import never_cache
from django.template.response import TemplateResponse
from django.db import connection
from django.conf import settings
from Poem.poem.admin_interface.grmetrics import GroupOfMetricsAdmin
from Poem.poem.admin_interface.grprobes import GroupOfProbesAdmin
from Poem.poem.admin_interface.grprofiles import GroupOfProfilesAdmin
from Poem.poem.admin_interface.graggregations import GroupOfAggregationsAdmin
from Poem.poem.admin_interface.siteaggregations import *
from Poem.poem.admin_interface.sitemetrics import *
from Poem.poem.admin_interface.siteprobes import *
from Poem.poem.admin_interface.siteprofile import *
from Poem.poem.admin_interface.userprofile import *
from Poem.poem.admin_interface.siteactions import *
from Poem.poem.admin_interface.siteservices import *
from Poem.poem.models import GroupOfMetrics, GroupOfProfiles, GroupOfAggregations, GroupOfProfiles
from Poem.poem.models import MetricInstance, Metric, Probe, Profile, UserProfile, VO, ServiceFlavour, CustUser, Service, Aggregation
from Poem.api.admin import MyAPIKeyAdmin
from rest_framework_api_key.models import APIKey
import re
from configparser import ConfigParser
def tenant_samlloginstring():
tenant = connection.tenant.name
config = ConfigParser()
config.read(settings.CONFIG_FILE)
return config.get('GENERAL_' + tenant.upper(), 'samlloginstring')
class PublicViews(object):
def load_settings(self):
self.public_models = (Probe, Metric, Profile, Service)
self._map = dict()
_ = [self._map.update({x.__name__.lower(): x}) for x in self.public_models]
# (probe|metric|profile) regexp
self._regex = '(' + '|'.join([s.__name__.lower() \
for s in self.public_models]) + ')'
def get_public_urls(self):
"""
/poem/public_probe, /poem/public_profile and /poem/public_metric urls
map directly to view methods that have no permission checks
"""
from django.urls import path, re_path
public_urls = list()
public_urls.append(re_path('^poem/public_(?P<model>%s)/$' %
self._regex, self.public_views))
public_urls.append(re_path('^poem/public_(?P<model>%s)/(?P<object_id>[0-9]+)/change/'
% self._regex, self.public_views))
public_urls.append(re_path('^poem/public_(?P<model>%s)/(?P<object_id>[0-9]+)/history/(?P<rev_id>[0-9]+)/'
% self._regex, self.public_views))
return public_urls
def public_views(self, request, **kwargs):
objid = kwargs.get('object_id', None)
revid = kwargs.get('rev_id', None)
model = self._map[kwargs['model']]
context = dict(self.each_context(request))
if objid and not revid:
return self._registry[model].change_view(request, objid, extra_context=context)
elif objid and revid:
return self._registry[model].revision_view(request, objid, revid, extra_context=context)
else:
return self._registry[model].changelist_view(request, extra_context=context)
def login(self, request, extra_context):
"""
If coming from /public_<public_models> urls and want to visit Profile,
Probe and Metric change_view or changelist_view, then proceed without
authentication.
Also, allow visiting individual Probe revision if coming from
/public_metric changelist_view
"""
prev = request.META.get('HTTP_REFERER', None)
if prev:
context = dict(self.each_context(request))
next_url = request.GET.get('next')
rn = re.search('poem/(?P<model>%s)/' % self._regex, next_url)
# metric changelist_view -> probe revision_view
r = re.search('public_metric/$', prev)
rp = re.search('probe/([0-9]+)/history/([0-9]+)', next_url)
if r and rp:
revid = rp.group(2)
objid = rp.group(1)
url = reverse('admin:poem_probe_revision', args=(objid, revid,))
url = url.replace('probe', 'public_probe')
return HttpResponseRedirect(url)
r = re.search('public_(\w+)/', prev)
if r:
objid = re.search('([0-9]+)/change/', next_url)
if objid:
# changelist_view -> change_view
objid = objid.group(1)
url = reverse('admin:poem_%s_change' % rn.group('model'), args=(objid,))
url = url.replace(rn.group('model') + '/',
'public_%s/' % rn.group('model'))
return HttpResponseRedirect(url)
else:
# changelist_view -> changelist_view
url = reverse('admin:poem_%s_changelist' % rn.group('model'))
url = url.replace(rn.group('model') + '/',
'public_%s/' % rn.group('model'))
return HttpResponseRedirect(url)
# change_view -> changelist_view
r = re.search('public_(\w+)/([0-9]+)/change/$', prev)
if r:
url = reverse('admin:poem_%s_changelist' % rn.group('model'))
url = url.replace(rn.group('model') + '/',
'public_%s/' % rn.group('model'))
return HttpResponseRedirect(url)
return super().login(request, extra_context)
class MyAdminSite(PublicViews, AdminSite):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
super().load_settings()
@never_cache
def index(self, request, extra_context=None):
if request.user.is_authenticated:
if request.user.is_superuser:
return HttpResponseRedirect(request.path + 'poem')
else:
return HttpResponseRedirect(request.path + 'poem/profile')
@never_cache
def login(self, request, extra_context=None):
"""
Extend login view with SAML login string and call PublicViews.login()
"""
extra_context = extra_context if extra_context else dict()
extra_context.update(samlloginstring=tenant_samlloginstring())
return super().login(request, extra_context)
def app_index(self, request, app_label, extra_context=None):
if request.user.is_authenticated:
if request.user.is_superuser:
poem_app_name, apikey_app = 'poem', 'rest_framework_api_key'
if request.path.endswith('admin/%s/' % apikey_app):
return HttpResponseRedirect('/%s/admin/%s/' % (poem_app_name, poem_app_name))
if request.path.endswith('admin/admin/'):
return HttpResponseRedirect('/%s/admin/%s/' % (poem_app_name, poem_app_name))
# Reorganize administration page by grouping type of data that
# want to be administered:
# Poem = Metrics, Probes, Profiles
# Authnz = GroupOfAggregations, GroupOfMetrics, GroupOfProbes, GroupOfProfiles, Users
# API Permissions = API keys
app_list = self.get_app_list(request)
authnz = dict(
name='Authentication and Authorization',
app_label='authnz',
app_url='/poem/admin/poem',
has_module_perms=True,
models=list()
)
extract = set(['GroupOfAggregations', 'GroupOfProbes', 'GroupOfMetrics',
'GroupOfProfiles', 'CustUser'])
for a in app_list:
if a['app_label'] == poem_app_name:
for m in a['models']:
if m['object_name'] in extract:
authnz['models'].append(m)
a['models'] = list(filter(lambda x: x['object_name']
not in extract, a['models']))
if a['app_label'] == 'admin':
a['name'] = 'Logs'
app_list.append(authnz)
order = [poem_app_name, 'admin', 'authnz', apikey_app]
app_list = sorted(app_list, key=lambda a: order.index(a['app_label']))
extra_context = dict(
self.each_context(request),
app_list=app_list,
)
extra_context.update(extra_context or {})
request.current_app = self.name
return super().app_index(request, app_label, extra_context)
else:
return HttpResponseRedirect(request.path + 'profile')
def get_urls(self):
"""
Add public url mappings to views so that we can bypass permission
checks implied in admin_view() decorator that decorates change_view and
changelist_view methods.
"""
return super().get_urls() + super().get_public_urls()
@never_cache
def logout(self, request, extra_context=None):
super().logout(request, extra_context=extra_context)
return HttpResponseRedirect(reverse('admin:index'))
myadmin = MyAdminSite()
myadmin.register(Profile, ProfileAdmin)
myadmin.register(Probe, ProbeAdmin)
myadmin.register(Metric, MetricAdmin)
myadmin.register(GroupOfProfiles, GroupOfProfilesAdmin)
myadmin.register(GroupOfMetrics, GroupOfMetricsAdmin)
myadmin.register(GroupOfProbes, GroupOfProbesAdmin)
myadmin.register(GroupOfAggregations, GroupOfAggregationsAdmin)
myadmin.register(CustUser, UserProfileAdmin)
myadmin.register(APIKey, MyAPIKeyAdmin)
myadmin.register(LogEntry, LogEntryAdmin)
myadmin.register(Service, ServiceAdmin)
myadmin.register(Aggregation, AggregationAdmin)
|
[
"django.urls.re_path",
"django.urls.reverse",
"django.http.HttpResponseRedirect",
"configparser.ConfigParser",
"re.search"
] |
[((1533, 1547), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (1545, 1547), False, 'from configparser import ConfigParser\n'), ((2367, 2439), 'django.urls.re_path', 're_path', (["('^poem/public_(?P<model>%s)/$' % self._regex)", 'self.public_views'], {}), "('^poem/public_(?P<model>%s)/$' % self._regex, self.public_views)\n", (2374, 2439), False, 'from django.urls import path, re_path\n'), ((2503, 2608), 'django.urls.re_path', 're_path', (["('^poem/public_(?P<model>%s)/(?P<object_id>[0-9]+)/change/' % self._regex)", 'self.public_views'], {}), "('^poem/public_(?P<model>%s)/(?P<object_id>[0-9]+)/change/' % self.\n _regex, self.public_views)\n", (2510, 2608), False, 'from django.urls import path, re_path\n'), ((2667, 2797), 'django.urls.re_path', 're_path', (["('^poem/public_(?P<model>%s)/(?P<object_id>[0-9]+)/history/(?P<rev_id>[0-9]+)/'\n % self._regex)", 'self.public_views'], {}), "(\n '^poem/public_(?P<model>%s)/(?P<object_id>[0-9]+)/history/(?P<rev_id>[0-9]+)/'\n % self._regex, self.public_views)\n", (2674, 2797), False, 'from django.urls import path, re_path\n'), ((3992, 4048), 're.search', 're.search', (["('poem/(?P<model>%s)/' % self._regex)", 'next_url'], {}), "('poem/(?P<model>%s)/' % self._regex, next_url)\n", (4001, 4048), False, 'import re\n'), ((4126, 4160), 're.search', 're.search', (['"""public_metric/$"""', 'prev'], {}), "('public_metric/$', prev)\n", (4135, 4160), False, 'import re\n'), ((4178, 4232), 're.search', 're.search', (['"""probe/([0-9]+)/history/([0-9]+)"""', 'next_url'], {}), "('probe/([0-9]+)/history/([0-9]+)', next_url)\n", (4187, 4232), False, 'import re\n'), ((4537, 4570), 're.search', 're.search', (['"""public_(\\\\w+)/"""', 'prev'], {}), "('public_(\\\\w+)/', prev)\n", (4546, 4570), False, 'import re\n'), ((5468, 5518), 're.search', 're.search', (['"""public_(\\\\w+)/([0-9]+)/change/$"""', 'prev'], {}), "('public_(\\\\w+)/([0-9]+)/change/$', prev)\n", (5477, 5518), False, 'import re\n'), ((9625, 9647), 
'django.urls.reverse', 'reverse', (['"""admin:index"""'], {}), "('admin:index')\n", (9632, 9647), False, 'from django.urls import reverse\n'), ((4352, 4409), 'django.urls.reverse', 'reverse', (['"""admin:poem_probe_revision"""'], {'args': '(objid, revid)'}), "('admin:poem_probe_revision', args=(objid, revid))\n", (4359, 4409), False, 'from django.urls import reverse\n'), ((4494, 4519), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['url'], {}), '(url)\n', (4514, 4519), False, 'from django.http import HttpResponseRedirect\n'), ((4612, 4651), 're.search', 're.search', (['"""([0-9]+)/change/"""', 'next_url'], {}), "('([0-9]+)/change/', next_url)\n", (4621, 4651), False, 'import re\n'), ((5765, 5790), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['url'], {}), '(url)\n', (5785, 5790), False, 'from django.http import HttpResponseRedirect\n'), ((6181, 6224), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (["(request.path + 'poem')"], {}), "(request.path + 'poem')\n", (6201, 6224), False, 'from django.http import HttpResponseRedirect\n'), ((6266, 6317), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (["(request.path + 'poem/profile')"], {}), "(request.path + 'poem/profile')\n", (6286, 6317), False, 'from django.http import HttpResponseRedirect\n'), ((9114, 9160), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (["(request.path + 'profile')"], {}), "(request.path + 'profile')\n", (9134, 9160), False, 'from django.http import HttpResponseRedirect\n'), ((5030, 5055), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['url'], {}), '(url)\n', (5050, 5055), False, 'from django.http import HttpResponseRedirect\n'), ((5380, 5405), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (['url'], {}), '(url)\n', (5400, 5405), False, 'from django.http import HttpResponseRedirect\n'), ((7003, 7073), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (["('/%s/admin/%s/' % (poem_app_name, 
poem_app_name))"], {}), "('/%s/admin/%s/' % (poem_app_name, poem_app_name))\n", (7023, 7073), False, 'from django.http import HttpResponseRedirect\n'), ((7160, 7230), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', (["('/%s/admin/%s/' % (poem_app_name, poem_app_name))"], {}), "('/%s/admin/%s/' % (poem_app_name, poem_app_name))\n", (7180, 7230), False, 'from django.http import HttpResponseRedirect\n')]
|
import sys
import os
import json
from flask import Flask
from flask_mail import Mail
from flask_bcrypt import Bcrypt
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
from flask_debugtoolbar import DebugToolbarExtension
from app.config import TestingConfig, DevelopmentConfig, ProductionConfig
app = Flask(__name__)
app.config.from_object(DevelopmentConfig)
db = SQLAlchemy()
bcrypt = Bcrypt()
toolbar = DebugToolbarExtension()
login_manager = LoginManager()
login_manager.login_view = 'users.login'
login_manager.login_message_category = 'info'
mail = Mail()
from app.users.routes import users
from app.posts.routes import posts
from app.main.routes import main
from app.errors.handlers import errors
app.register_blueprint(users)
app.register_blueprint(posts)
app.register_blueprint(main)
app.register_blueprint(errors)
def create_app():
app = Flask(__name__)
app.config.from_object(ProductionConfig if os.environ['TIWWTER_PROD'] == 'True' else DevelopmentConfig)
db.init_app(app)
bcrypt.init_app(app)
login_manager.init_app(app)
mail.init_app(app)
toolbar.init_app(app)
from app.users.routes import users
from app.posts.routes import posts
from app.main.routes import main
from app.errors.handlers import errors
app.register_blueprint(users)
app.register_blueprint(posts)
app.register_blueprint(main)
app.register_blueprint(errors)
return app
|
[
"flask.Flask",
"flask_mail.Mail",
"flask_sqlalchemy.SQLAlchemy",
"flask_bcrypt.Bcrypt",
"flask_login.LoginManager",
"flask_debugtoolbar.DebugToolbarExtension"
] |
[((328, 343), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (333, 343), False, 'from flask import Flask\n'), ((392, 404), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', ([], {}), '()\n', (402, 404), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((415, 423), 'flask_bcrypt.Bcrypt', 'Bcrypt', ([], {}), '()\n', (421, 423), False, 'from flask_bcrypt import Bcrypt\n'), ((435, 458), 'flask_debugtoolbar.DebugToolbarExtension', 'DebugToolbarExtension', ([], {}), '()\n', (456, 458), False, 'from flask_debugtoolbar import DebugToolbarExtension\n'), ((476, 490), 'flask_login.LoginManager', 'LoginManager', ([], {}), '()\n', (488, 490), False, 'from flask_login import LoginManager\n'), ((586, 592), 'flask_mail.Mail', 'Mail', ([], {}), '()\n', (590, 592), False, 'from flask_mail import Mail\n'), ((883, 898), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (888, 898), False, 'from flask import Flask\n')]
|
# Generated by Django 3.2.12 on 2022-04-06 16:49
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Ingredients',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ingredients_name', models.CharField(blank=True, max_length=100)),
('price', models.DecimalField(decimal_places=2, max_digits=10)),
('storage_count', models.PositiveSmallIntegerField()),
],
),
migrations.CreateModel(
name='Order_products',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('id_order', models.IntegerField()),
('id_product', models.IntegerField()),
],
),
migrations.CreateModel(
name='Orders',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField(default=django.utils.timezone.now)),
('adress', models.CharField(blank=True, max_length=100)),
('total_price', models.DecimalField(decimal_places=2, max_digits=10)),
],
),
migrations.CreateModel(
name='Products',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('product_name', models.CharField(max_length=150)),
('price', models.DecimalField(decimal_places=2, max_digits=10)),
('storage_count', models.PositiveSmallIntegerField()),
('category', models.CharField(max_length=50)),
('product_img', models.URLField()),
('description', models.TextField()),
],
),
]
|
[
"django.db.models.URLField",
"django.db.models.TextField",
"django.db.models.BigAutoField",
"django.db.models.CharField",
"django.db.models.PositiveSmallIntegerField",
"django.db.models.DecimalField",
"django.db.models.IntegerField",
"django.db.models.DateTimeField"
] |
[((337, 433), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (356, 433), False, 'from django.db import migrations, models\n'), ((469, 513), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(100)'}), '(blank=True, max_length=100)\n', (485, 513), False, 'from django.db import migrations, models\n'), ((542, 594), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(2)', 'max_digits': '(10)'}), '(decimal_places=2, max_digits=10)\n', (561, 594), False, 'from django.db import migrations, models\n'), ((631, 665), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {}), '()\n', (663, 665), False, 'from django.db import migrations, models\n'), ((805, 901), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (824, 901), False, 'from django.db import migrations, models\n'), ((929, 950), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (948, 950), False, 'from django.db import migrations, models\n'), ((984, 1005), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1003, 1005), False, 'from django.db import migrations, models\n'), ((1137, 1233), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1156, 1233), False, 'from django.db import migrations, models\n'), ((1257, 1312), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], 
{'default': 'django.utils.timezone.now'}), '(default=django.utils.timezone.now)\n', (1277, 1312), False, 'from django.db import migrations, models\n'), ((1342, 1386), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(100)'}), '(blank=True, max_length=100)\n', (1358, 1386), False, 'from django.db import migrations, models\n'), ((1421, 1473), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(2)', 'max_digits': '(10)'}), '(decimal_places=2, max_digits=10)\n', (1440, 1473), False, 'from django.db import migrations, models\n'), ((1607, 1703), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1626, 1703), False, 'from django.db import migrations, models\n'), ((1735, 1767), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)'}), '(max_length=150)\n', (1751, 1767), False, 'from django.db import migrations, models\n'), ((1796, 1848), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(2)', 'max_digits': '(10)'}), '(decimal_places=2, max_digits=10)\n', (1815, 1848), False, 'from django.db import migrations, models\n'), ((1885, 1919), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {}), '()\n', (1917, 1919), False, 'from django.db import migrations, models\n'), ((1951, 1982), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)'}), '(max_length=50)\n', (1967, 1982), False, 'from django.db import migrations, models\n'), ((2017, 2034), 'django.db.models.URLField', 'models.URLField', ([], {}), '()\n', (2032, 2034), False, 'from django.db import migrations, models\n'), ((2069, 2087), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (2085, 2087), False, 'from django.db import 
migrations, models\n')]
|
#! /usr/bin/env python
# setup.py
"""Setup and installer for PySci.
"""
from distutils.core import setup
setup(name='PySci',
version='0.1',
description='Pythonic interface to the QsciScintilla editor widget',
author='<NAME>',
author_email='<EMAIL>',
url='http://github.com/wapcaplet/pysci',
license='MIT License',
packages=['pysci'],
)
|
[
"distutils.core.setup"
] |
[((108, 356), 'distutils.core.setup', 'setup', ([], {'name': '"""PySci"""', 'version': '"""0.1"""', 'description': '"""Pythonic interface to the QsciScintilla editor widget"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""http://github.com/wapcaplet/pysci"""', 'license': '"""MIT License"""', 'packages': "['pysci']"}), "(name='PySci', version='0.1', description=\n 'Pythonic interface to the QsciScintilla editor widget', author=\n '<NAME>', author_email='<EMAIL>', url=\n 'http://github.com/wapcaplet/pysci', license='MIT License', packages=[\n 'pysci'])\n", (113, 356), False, 'from distutils.core import setup\n')]
|
from event_manager.event import Attribute, Event
class ActionExecutedEvent(Event):
attributes = (
Attribute('automatic'),
Attribute('user.id', is_required=False),
)
|
[
"event_manager.event.Attribute"
] |
[((112, 134), 'event_manager.event.Attribute', 'Attribute', (['"""automatic"""'], {}), "('automatic')\n", (121, 134), False, 'from event_manager.event import Attribute, Event\n'), ((144, 183), 'event_manager.event.Attribute', 'Attribute', (['"""user.id"""'], {'is_required': '(False)'}), "('user.id', is_required=False)\n", (153, 183), False, 'from event_manager.event import Attribute, Event\n')]
|
import typing
import random
from pathlib import Path
import logging
from time import strftime, gmtime
from datetime import datetime
import os
import argparse
import contextlib
from collections import defaultdict
import numpy as np
import torch
from torch.utils.data import Dataset
import torch.distributed as dist
logger = logging.getLogger(__name__)
FloatOrTensor = typing.Union[float, torch.Tensor]
def int_or_str(arg: str) -> typing.Union[int, str]:
try:
return int(arg)
except ValueError:
return arg
def check_is_file(file_path: str) -> str:
if file_path is None or os.path.isfile(file_path):
return file_path
else:
raise argparse.ArgumentTypeError(f"File path: {file_path} is not a valid file")
def check_is_dir(dir_path: str) -> str:
if dir_path is None or os.path.isdir(dir_path):
return dir_path
else:
raise argparse.ArgumentTypeError(f"Directory path: {dir_path} is not a valid directory")
def path_to_datetime(path: Path) -> datetime:
name = path.name
datetime_string = name.split('_')[0]
try:
year, month, day, hour, minute, second = datetime_string.split('-')
except ValueError:
try:
# Deprecated datetime strings
year, month, day, time_str = datetime_string.split('-')
hour, minute, second = time_str.split(':')
except ValueError:
return datetime(1, 1, 1)
pathdatetime = datetime(
int(year), int(month), int(day), int(hour), int(minute), int(second))
return pathdatetime
def get_expname(exp_name: typing.Optional[str],
task: typing.Optional[str] = None,
model_type: typing.Optional[str] = None,
save_name: typing.Optional[str] = 'time') -> str:
if exp_name is None:
# add time_or_name param, to specify ckpt folder's name manually, instead of timestamp+randint
# reason to do so: to make eval script easy, load from the specified folder
if save_name == 'time':
time_stamp = strftime("%y-%m-%d-%H-%M-%S", gmtime())
exp_name = f"{task}_{model_type}_{time_stamp}_{random.randint(0, int(1e6)):0>6d}"
else:
exp_name = f"{task}_{model_type}_{save_name}"
return exp_name
def set_random_seeds(seed: int, n_gpu: int) -> None:
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(seed) # type: ignore
def get_effective_num_gpus(local_rank: int, n_gpu: int) -> int:
if local_rank == -1:
num_gpus = n_gpu
else:
num_gpus = dist.get_world_size()
return num_gpus
def get_effective_batch_size(batch_size: int,
local_rank: int,
n_gpu: int,
gradient_accumulation_steps: int = 1) -> int:
eff_batch_size = float(batch_size)
eff_batch_size /= gradient_accumulation_steps
eff_batch_size /= get_effective_num_gpus(local_rank, n_gpu)
return int(eff_batch_size)
def get_num_train_optimization_steps(dataset: Dataset,
batch_size: int,
num_train_epochs: int) -> int:
return int(len(dataset) / batch_size * num_train_epochs)
class MetricsAccumulator:
def __init__(self, smoothing: float = 0.95):
self._loss_tmp = 0.
self._smoothloss: typing.Optional[float] = None
self._totalloss = 0.
self._metricstmp: typing.Dict[str, float] = defaultdict(lambda: 0.0)
self._smoothmetrics: typing.Dict[str, float] = {}
self._totalmetrics: typing.Dict[str, float] = defaultdict(lambda: 0.0)
self._nacc_steps = 0
self._nupdates = 0
self._smoothing = smoothing
def update(self,
loss: FloatOrTensor,
metrics: typing.Dict[str, FloatOrTensor],
step: bool = True) -> None:
if isinstance(loss, torch.Tensor):
loss = loss.item()
self._loss_tmp += loss
for name, value in metrics.items():
if isinstance(value, torch.Tensor):
value = value.item()
self._metricstmp[name] += value
self._nacc_steps += 1
if step:
self.step()
def step(self) -> typing.Dict[str, float]:
loss_tmp = self._loss_tmp / self._nacc_steps
metricstmp = {name: value / self._nacc_steps
for name, value in self._metricstmp.items()}
if self._smoothloss is None:
self._smoothloss = loss_tmp
else:
self._smoothloss *= self._smoothing
self._smoothloss += (1 - self._smoothing) * loss_tmp
self._totalloss += loss_tmp
for name, value in metricstmp.items():
if name in self._smoothmetrics:
currvalue = self._smoothmetrics[name]
newvalue = currvalue * self._smoothing + value * (1 - self._smoothing)
else:
newvalue = value
self._smoothmetrics[name] = newvalue
self._totalmetrics[name] += value
self._nupdates += 1
self._nacc_steps = 0
self._loss_tmp = 0
self._metricstmp = defaultdict(lambda: 0.0)
metricstmp['loss'] = loss_tmp
return metricstmp
def loss(self) -> float:
if self._smoothloss is None:
raise RuntimeError("Trying to get the loss without any updates")
return self._smoothloss
def metrics(self) -> typing.Dict[str, float]:
if self._nupdates == 0:
raise RuntimeError("Trying to get metrics without any updates")
return dict(self._smoothmetrics)
def final_loss(self) -> float:
return self._totalloss / self._nupdates
def final_metrics(self) -> typing.Dict[str, float]:
return {name: value / self._nupdates
for name, value in self._totalmetrics.items()}
class wrap_cuda_oom_error(contextlib.ContextDecorator):
    """A context manager that wraps the Cuda OOM message so that you get some more helpful
    context as to what you can/should change. Can also be used as a decorator.
    Examples:
        1) As a context manager:
            with wrap_cuda_oom_error(local_rank, batch_size, n_gpu, gradient_accumulation):
                loss = model.forward(batch)
                loss.backward()
                optimizer.step()
                optimizer.zero_grad
        2) As a decorator:
            @wrap_cuda_oom_error(local_rank, batch_size, n_gpu, gradient_accumulation)
            def run_train_epoch(args):
                ...
                <code to run training epoch>
                ...
    """
    def __init__(self,
                 local_rank: int,
                 batch_size: int,
                 n_gpu: int = 1,
                 gradient_accumulation_steps: typing.Optional[int] = None):
        # Stored only so __exit__ can render a helpful hyperparameter summary.
        self._local_rank = local_rank
        self._batch_size = batch_size
        self._n_gpu = n_gpu
        self._gradient_accumulation_steps = gradient_accumulation_steps
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        # Inspect the propagating exception (if any); we only rewrite the
        # message of CUDA OOM errors and let everything else pass through.
        exc_args = exc_value.args if exc_value is not None else None
        if exc_args and 'CUDA out of memory' in exc_args[0]:
            eff_ngpu = get_effective_num_gpus(self._local_rank, self._n_gpu)
            if self._gradient_accumulation_steps is not None:
                # With gradient accumulation we can suggest spreading the
                # batch over more forward passes.
                eff_batch_size = get_effective_batch_size(
                    self._batch_size, self._local_rank, self._n_gpu,
                    self._gradient_accumulation_steps)
                message = (f"CUDA out of memory. Reduce batch size or increase "
                           f"gradient_accumulation_steps to divide each batch over more "
                           f"forward passes.\n\n"
                           f"\tHyperparameters:\n"
                           f"\t\tbatch_size per backward-pass: {self._batch_size}\n"
                           f"\t\tgradient_accumulation_steps: "
                           f"{self._gradient_accumulation_steps}\n"
                           f"\t\tn_gpu: {eff_ngpu}\n"
                           f"\t\tbatch_size per (gpu * forward-pass): "
                           f"{eff_batch_size}")
            else:
                eff_batch_size = get_effective_batch_size(
                    self._batch_size, self._local_rank, self._n_gpu)
                message = (f"CUDA out of memory. Reduce batch size to fit each "
                           f"iteration in memory.\n\n"
                           f"\tHyperparameters:\n"
                           f"\t\tbatch_size per forward-pass: {self._batch_size}\n"
                           f"\t\tn_gpu: {eff_ngpu}\n"
                           f"\t\tbatch_size per (gpu * forward-pass): "
                           f"{eff_batch_size}")
            raise RuntimeError(message)
        # Returning False re-raises any non-OOM exception unchanged.
        return False
def write_lmdb(filename: str, iterable: typing.Iterable, map_size: int = 2 ** 20):
    """Utility for writing a dataset to an LMDB file.
    Args:
        filename (str): Output filename to write to
        iterable (Iterable): An iterable dataset to write to. Entries must be pickleable.
        map_size (int, optional): Maximum allowable size of database in bytes. Required by LMDB.
            You will likely have to increase this. Default: 1MB.
    """
    import lmdb
    import pickle as pkl
    env = lmdb.open(filename, map_size=map_size)
    try:
        with env.begin(write=True) as txn:
            num_examples = 0
            for i, entry in enumerate(iterable):
                txn.put(str(i).encode(), pkl.dumps(entry))
                num_examples = i + 1
            # Bug fix: the original wrote `pkl.dumps(i + 1)`, which raised
            # NameError when `iterable` was empty; an empty dataset now
            # records num_examples = 0.
            txn.put(b'num_examples', pkl.dumps(num_examples))
    finally:
        # Close the environment even if serialization of an entry fails.
        env.close()
class IncrementalNPZ(object):
    # Modified npz that allows incremental saving, from https://stackoverflow.com/questions/22712292/how-to-use-numpy-savez-in-a-loop-for-save-more-than-one-array # noqa: E501
    def __init__(self, file):
        """Open *file* (a path or file-like object) as an incremental .npz archive."""
        import tempfile
        import zipfile
        import os
        if isinstance(file, str):
            if not file.endswith('.npz'):
                file = file + '.npz'
        compression = zipfile.ZIP_STORED
        # Renamed from `zipfile` to avoid shadowing the module just imported.
        zipf = self.zipfile_factory(file, mode="w", compression=compression)
        # Stage arrays in a temporary file on disk, before writing to zip.
        fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
        os.close(fd)
        self.tmpfile = tmpfile
        self.zip = zipf
        self._i = 0  # counter used to auto-name positional arrays (arr_0, arr_1, ...)
    def zipfile_factory(self, *args, **kwargs):
        """Create the backing ZipFile, enabling Zip64 for archives > 2 GB."""
        import zipfile
        import sys
        if sys.version_info >= (2, 5):
            kwargs['allowZip64'] = True
        return zipfile.ZipFile(*args, **kwargs)
    def savez(self, *args, **kwds):
        """Append positional and keyword arrays to the archive (numpy.savez semantics)."""
        import os
        import numpy.lib.format as fmt
        namedict = kwds
        for val in args:
            key = 'arr_%d' % self._i
            if key in namedict.keys():
                raise ValueError("Cannot use un-named variables and keyword %s" % key)
            namedict[key] = val
            self._i += 1
        try:
            for key, val in namedict.items():
                fname = key + '.npy'
                # Bug fix: the original opened the temp file twice here
                # (`fid = open(...)` followed by `with open(...)`), leaking
                # one file handle per array; the `with` block alone suffices.
                with open(self.tmpfile, 'wb') as fid:
                    fmt.write_array(fid, np.asanyarray(val), allow_pickle=True)
                self.zip.write(self.tmpfile, arcname=fname)
        finally:
            os.remove(self.tmpfile)
    def close(self):
        """Finalize and close the underlying zip archive."""
        self.zip.close()
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
|
[
"os.remove",
"numpy.random.seed",
"collections.defaultdict",
"os.path.isfile",
"os.close",
"torch.distributed.get_world_size",
"argparse.ArgumentTypeError",
"random.seed",
"lmdb.open",
"pickle.dumps",
"torch.manual_seed",
"datetime.datetime",
"zipfile.ZipFile",
"tempfile.mkstemp",
"os.path.isdir",
"time.gmtime",
"numpy.asanyarray",
"torch.cuda.manual_seed_all",
"logging.getLogger"
] |
[((325, 352), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (342, 352), False, 'import logging\n'), ((2349, 2366), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (2360, 2366), False, 'import random\n'), ((2371, 2391), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2385, 2391), True, 'import numpy as np\n'), ((2396, 2419), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (2413, 2419), False, 'import torch\n'), ((9515, 9553), 'lmdb.open', 'lmdb.open', (['filename'], {'map_size': 'map_size'}), '(filename, map_size=map_size)\n', (9524, 9553), False, 'import lmdb\n'), ((604, 629), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (618, 629), False, 'import os\n'), ((680, 753), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['f"""File path: {file_path} is not a valid file"""'], {}), "(f'File path: {file_path} is not a valid file')\n", (706, 753), False, 'import argparse\n'), ((823, 846), 'os.path.isdir', 'os.path.isdir', (['dir_path'], {}), '(dir_path)\n', (836, 846), False, 'import os\n'), ((896, 983), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['f"""Directory path: {dir_path} is not a valid directory"""'], {}), "(\n f'Directory path: {dir_path} is not a valid directory')\n", (922, 983), False, 'import argparse\n'), ((2446, 2478), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (2472, 2478), False, 'import torch\n'), ((2640, 2661), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (2659, 2661), True, 'import torch.distributed as dist\n'), ((3559, 3584), 'collections.defaultdict', 'defaultdict', (['(lambda : 0.0)'], {}), '(lambda : 0.0)\n', (3570, 3584), False, 'from collections import defaultdict\n'), ((3696, 3721), 'collections.defaultdict', 'defaultdict', (['(lambda : 0.0)'], {}), '(lambda : 0.0)\n', (3707, 3721), False, 'from collections import defaultdict\n'), 
((5278, 5303), 'collections.defaultdict', 'defaultdict', (['(lambda : 0.0)'], {}), '(lambda : 0.0)\n', (5289, 5303), False, 'from collections import defaultdict\n'), ((10400, 10437), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'suffix': '"""-numpy.npy"""'}), "(suffix='-numpy.npy')\n", (10416, 10437), False, 'import tempfile\n'), ((10446, 10458), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (10454, 10458), False, 'import os\n'), ((10723, 10755), 'zipfile.ZipFile', 'zipfile.ZipFile', (['*args'], {}), '(*args, **kwargs)\n', (10738, 10755), False, 'import zipfile\n'), ((9727, 9743), 'pickle.dumps', 'pkl.dumps', (['(i + 1)'], {}), '(i + 1)\n', (9736, 9743), True, 'import pickle as pkl\n'), ((11487, 11510), 'os.remove', 'os.remove', (['self.tmpfile'], {}), '(self.tmpfile)\n', (11496, 11510), False, 'import os\n'), ((2094, 2102), 'time.gmtime', 'gmtime', ([], {}), '()\n', (2100, 2102), False, 'from time import strftime, gmtime\n'), ((9676, 9692), 'pickle.dumps', 'pkl.dumps', (['entry'], {}), '(entry)\n', (9685, 9692), True, 'import pickle as pkl\n'), ((1421, 1438), 'datetime.datetime', 'datetime', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (1429, 1438), False, 'from datetime import datetime\n'), ((11359, 11377), 'numpy.asanyarray', 'np.asanyarray', (['val'], {}), '(val)\n', (11372, 11377), True, 'import numpy as np\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import bottle
import common.auth as _auth
from models.badge import BadgeModel
from models.user import UserModel
@bottle.get("/badges/getasync")
@_auth.requires_auth
def get_async_badges(credentials):
    """Return the authenticated user's badge names, pipe-joined, as JSON."""
    user = UserModel().get(credentials["id"])
    badge_names = BadgeModel().handleHomePage(user)
    joined = "|".join(badge_names)
    return {"status": "success", "badges": joined}
|
[
"models.user.UserModel",
"bottle.get",
"models.badge.BadgeModel"
] |
[((292, 322), 'bottle.get', 'bottle.get', (['"""/badges/getasync"""'], {}), "('/badges/getasync')\n", (302, 322), False, 'import bottle\n'), ((388, 399), 'models.user.UserModel', 'UserModel', ([], {}), '()\n', (397, 399), False, 'from models.user import UserModel\n'), ((446, 458), 'models.badge.BadgeModel', 'BadgeModel', ([], {}), '()\n', (456, 458), False, 'from models.badge import BadgeModel\n')]
|
import sys
sys.path.append("./")  # make the project-local packages (draw, data, ...) importable
import matplotlib.pyplot as plt
import pandas as pd
from loguru import logger
from pathlib import Path
from tpd import recorder
from myterial import salmon, teal, indigo
import draw
from data.dbase.db_tables import Tracking, ValidatedSession
from fcutils.progress import track
# Output root for the generated figures; one PNG per session goes under
# <folder>/all_tracking/.
folder = Path(r"D:\Dropbox (UCL)\Rotation_vte\Locomotion\analysis")
recorder.start(base_folder=folder, folder_name="all_tracking", timestamp=False)
"""
Plot the tracking for each session to check that everything is ok
"""
sessions = ValidatedSession().fetch("name")
for session in track(sessions):
    # Skip sessions already rendered, and "open" (open-field) sessions.
    save_name = folder / "all_tracking" / (session + ".png")
    if save_name.exists() or "open" in session:
        continue
    tracking = Tracking.get_session_tracking(
        session, body_only=False, movement=False
    )
    if tracking.empty:
        logger.info(f'"{session}" - no tracking')
        continue
    # One row per tracked body part; each row carries x/y coordinate arrays.
    body = tracking.loc[tracking.bpname == "body"].iloc[0]
    snout = tracking.loc[tracking.bpname == "snout"].iloc[0]
    paw = tracking.loc[tracking.bpname == "right_fl"].iloc[0]
    tail = tracking.loc[tracking.bpname == "tail_base"].iloc[0]
    f, ax = plt.subplots(figsize=(9, 12))
    draw.Hairpin(ax=ax)  # arena outline
    draw.Tracking(body.x, body.y, ax=ax)  # full body trajectory as a line
    # Scatter every 30th frame of the other body parts to keep the plot legible.
    for bp, color in zip((snout, paw, tail), (teal, salmon, indigo)):
        draw.Tracking.scatter(
            bp.x[::30],
            bp.y[::30],
            color=color,
            alpha=0.5,
            zorder=100,
            ax=ax,
            label=bp.bpname,
        )
    ax.set(title=f"{session} - {len(tracking.x)} frames")
    ax.legend()
    recorder.add_figure(f, session, svg=False)
    plt.close(f)  # free the figure; this loop renders many sessions
|
[
"sys.path.append",
"draw.Tracking.scatter",
"fcutils.progress.track",
"matplotlib.pyplot.close",
"tpd.recorder.start",
"matplotlib.pyplot.subplots",
"draw.Tracking",
"tpd.recorder.add_figure",
"draw.Hairpin",
"pathlib.Path",
"loguru.logger.info",
"data.dbase.db_tables.ValidatedSession",
"data.dbase.db_tables.Tracking.get_session_tracking"
] |
[((12, 33), 'sys.path.append', 'sys.path.append', (['"""./"""'], {}), "('./')\n", (27, 33), False, 'import sys\n'), ((325, 386), 'pathlib.Path', 'Path', (['"""D:\\\\Dropbox (UCL)\\\\Rotation_vte\\\\Locomotion\\\\analysis"""'], {}), "('D:\\\\Dropbox (UCL)\\\\Rotation_vte\\\\Locomotion\\\\analysis')\n", (329, 386), False, 'from pathlib import Path\n'), ((384, 463), 'tpd.recorder.start', 'recorder.start', ([], {'base_folder': 'folder', 'folder_name': '"""all_tracking"""', 'timestamp': '(False)'}), "(base_folder=folder, folder_name='all_tracking', timestamp=False)\n", (398, 463), False, 'from tpd import recorder\n'), ((605, 620), 'fcutils.progress.track', 'track', (['sessions'], {}), '(sessions)\n', (610, 620), False, 'from fcutils.progress import track\n'), ((764, 835), 'data.dbase.db_tables.Tracking.get_session_tracking', 'Tracking.get_session_tracking', (['session'], {'body_only': '(False)', 'movement': '(False)'}), '(session, body_only=False, movement=False)\n', (793, 835), False, 'from data.dbase.db_tables import Tracking, ValidatedSession\n'), ((1201, 1230), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(9, 12)'}), '(figsize=(9, 12))\n', (1213, 1230), True, 'import matplotlib.pyplot as plt\n'), ((1236, 1255), 'draw.Hairpin', 'draw.Hairpin', ([], {'ax': 'ax'}), '(ax=ax)\n', (1248, 1255), False, 'import draw\n'), ((1260, 1296), 'draw.Tracking', 'draw.Tracking', (['body.x', 'body.y'], {'ax': 'ax'}), '(body.x, body.y, ax=ax)\n', (1273, 1296), False, 'import draw\n'), ((1657, 1699), 'tpd.recorder.add_figure', 'recorder.add_figure', (['f', 'session'], {'svg': '(False)'}), '(f, session, svg=False)\n', (1676, 1699), False, 'from tpd import recorder\n'), ((1705, 1717), 'matplotlib.pyplot.close', 'plt.close', (['f'], {}), '(f)\n', (1714, 1717), True, 'import matplotlib.pyplot as plt\n'), ((556, 574), 'data.dbase.db_tables.ValidatedSession', 'ValidatedSession', ([], {}), '()\n', (572, 574), False, 'from data.dbase.db_tables import Tracking, 
ValidatedSession\n'), ((882, 923), 'loguru.logger.info', 'logger.info', (['f""""{session}" - no tracking"""'], {}), '(f\'"{session}" - no tracking\')\n', (893, 923), False, 'from loguru import logger\n'), ((1376, 1485), 'draw.Tracking.scatter', 'draw.Tracking.scatter', (['bp.x[::30]', 'bp.y[::30]'], {'color': 'color', 'alpha': '(0.5)', 'zorder': '(100)', 'ax': 'ax', 'label': 'bp.bpname'}), '(bp.x[::30], bp.y[::30], color=color, alpha=0.5,\n zorder=100, ax=ax, label=bp.bpname)\n', (1397, 1485), False, 'import draw\n')]
|
import docutils.nodes
import re
import nbformat.v4
import os.path
import datetime
from .utils import LanguageTranslator, JupyterOutputCellGenerators, get_source_file_name
class JupyterCodeTranslator(docutils.nodes.GenericNodeVisitor):
    """Docutils node visitor that converts a parsed document into a Jupyter
    notebook, emitting code cells for literal blocks and markdown otherwise.
    """
    URI_SPACE_REPLACE_FROM = re.compile(r"\s")
    URI_SPACE_REPLACE_TO = "-"
    def __init__(self, builder, document):
        """Set up translation state from the Sphinx *builder* config and *document*."""
        docutils.nodes.NodeVisitor.__init__(self, document)
        self.lang = None
        self.nodelang = None
        self.visit_first_title = True
        self.langTranslator = LanguageTranslator(builder.config["templates_path"])
        # Reporter
        self.warn = self.document.reporter.warning
        self.error = self.document.reporter.error
        # Settings
        self.settings = document.settings
        self.builder = builder
        self.source_file_name = get_source_file_name(
            self.settings._source,
            self.settings.env.srcdir)
        self.default_lang = builder.config["jupyter_default_lang"]
        # Create output notebook
        self.output = nbformat.v4.new_notebook()
        # Variables defined in conf.py
        self.jupyter_static_file_path = builder.config["jupyter_static_file_path"]
        self.jupyter_kernels = builder.config["jupyter_kernels"]
        self.jupyter_write_metadata = builder.config["jupyter_write_metadata"]
        self.jupyter_drop_solutions = builder.config["jupyter_drop_solutions"]
        self.jupyter_drop_tests = builder.config["jupyter_drop_tests"]
        self.jupyter_ignore_no_execute = builder.config["jupyter_ignore_no_execute"]
        self.jupyter_ignore_skip_test = builder.config["jupyter_ignore_skip_test"]
        self.jupyter_lang_synonyms = builder.config["jupyter_lang_synonyms"]
        self.jupyter_target_html = builder.config["jupyter_target_html"]
        self.jupyter_target_html_urlpath = builder.config["jupyter_target_html_urlpath"]
        self.jupyter_images_urlpath = builder.config["jupyter_images_urlpath"]
        # set the value of the cell metadata["slideshow"] to slide as the default option
        self.slide = "slide"
        self.metadata_slide = False  #value by default for all the notebooks, we change it for those we want
        # Header Block: search the template paths for an optional notebook header.
        template_paths = builder.config["templates_path"]
        header_block_filename = builder.config["jupyter_header_block"]
        full_path_to_header_block = None
        for template_path in template_paths:
            if os.path.isfile(template_path + "/" + header_block_filename):
                full_path_to_header_block = os.path.normpath(
                    template_path + "/" + header_block_filename)
        if full_path_to_header_block:
            with open(full_path_to_header_block) as input_file:
                lines = input_file.readlines()
            line_text = "".join(lines)
            formatted_line_text = self.strip_blank_lines_in_end_of_block(
                line_text)
            nb_header_block = nbformat.v4.new_markdown_cell(
                formatted_line_text)
            # Add the header block to the output stream straight away
            self.output["cells"].append(nb_header_block)
        # Write metadata
        if self.jupyter_write_metadata:
            meta_text = \
                "Notebook created: {:%Y-%m-%d %H:%M:%S}  \n"\
                "Generated from: {}  "
            metadata = meta_text.format(
                datetime.datetime.now(),
                self.source_file_name)
            self.output["cells"].append(
                nbformat.v4.new_markdown_cell(metadata))
        # Variables used in visit/depart
        self.in_code_block = False  # if False, it means in markdown_cell
        self.output_cell_type = None
        self.code_lines = []
    # generic visit and depart methods
    # --------------------------------
    simple_nodes = (
        docutils.nodes.TextElement,
        docutils.nodes.image,
        docutils.nodes.colspec,
        docutils.nodes.transition)  # empty elements
    def default_visit(self, node):
        """No-op for node types without a dedicated visit method."""
        pass
    def default_departure(self, node):
        """No-op for node types without a dedicated depart method."""
        pass
    # specific visit and depart methods
    # ---------------------------------
    # =========
    # Sections
    # =========
    def visit_document(self, node):
        """at start
        """
        # we need to give the translator a default language!
        # the translator needs to know what language the document is written in
        # before depart_document is called.
        self.lang = self.default_lang
    def depart_document(self, node):
        """at end
        """
        if not self.lang:
            self.warn(
                "Highlighting language is not given in .rst file. "
                "Set kernel as default(python3)")
            self.lang = self.default_lang
        # metadata for slides, this activates the option where each cell can be a slide
        if self.metadata_slide:
            self.output.metadata.celltoolbar = "Slideshow"
        # Update metadata
        if self.jupyter_kernels is not None:
            try:
                self.output.metadata.kernelspec = \
                    self.jupyter_kernels[self.lang]["kernelspec"]
                self.output.metadata["filename"] = self.source_file_name.split("/")[-1]
                self.output.metadata["title"] = self.title
            # Bug fix: this was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt. Expected failures here are
            # KeyError (unknown lang) or AttributeError (no title seen).
            except Exception:
                self.warn(
                    "Invalid jupyter kernels. "
                    "jupyter_kernels: {}, lang: {}"
                    .format(self.jupyter_kernels, self.lang))
    def visit_highlightlang(self, node):
        """Record the document's highlight language, falling back to the default."""
        lang = node.attributes["lang"].strip()
        if lang in self.jupyter_kernels:
            self.lang = lang
        else:
            self.warn(
                "Highlighting language({}) is not defined "
                "in jupyter_kernels in conf.py. "
                "Set kernel as default({})"
                .format(lang, self.default_lang))
            self.lang = self.default_lang
    # =================
    # Inline elements
    # =================
    def visit_Text(self, node):
        """Collect text only while inside a literal (code) block."""
        text = node.astext()
        if self.in_code_block:
            self.code_lines.append(text)
    def depart_Text(self, node):
        pass
    def visit_title(self, node):
        """Remember the first title as the notebook title."""
        #TODO: add support for docutils .. title::
        if self.visit_first_title:
            self.title = node.astext()
            self.visit_first_title = False
    # ================
    # code blocks
    # ================
    def visit_literal_block(self, node):
        """Start collecting a code cell; resolve its language and cell type."""
        _parse_class = JupyterOutputCellGenerators.GetGeneratorFromClasses(self, node)
        self.output_cell_type = _parse_class["type"]
        self.solution = _parse_class["solution"]
        self.test = _parse_class["test"]
        try:
            self.nodelang = node.attributes["language"].strip()
        except KeyError:
            self.nodelang = self.lang
        if self.nodelang == 'default':
            self.nodelang = self.lang
        # Translate the language name across from the Sphinx to the Jupyter namespace
        self.nodelang = self.langTranslator.translate(self.nodelang)
        self.in_code_block = True
        self.code_lines = []
        # If the cell being processed contains code written in a language other than the one that
        # was specified as the default language, do not create a code block for it - turn it into
        # markup instead.
        if self.nodelang != self.langTranslator.translate(self.lang):
            if self.nodelang in self.jupyter_lang_synonyms:
                pass
            else:
                self.output_cell_type = JupyterOutputCellGenerators.MARKDOWN
    def depart_literal_block(self, node):
        """Finish the collected code cell and attach it to the notebook."""
        if self.solution and self.jupyter_drop_solutions:
            pass  # Skip solutions if we say to.
        elif self.test and self.jupyter_drop_tests:
            pass  # Skip tests if we say to.
        else:  # Don't skip otherwise.
            line_text = "".join(self.code_lines)
            formatted_line_text = self.strip_blank_lines_in_end_of_block(line_text)
            new_code_cell = self.output_cell_type.Generate(formatted_line_text, self)
            # add slide metadata on each cell, value by default: slide
            if self.metadata_slide:  #value by default for all the notebooks, we change it for those we want
                new_code_cell.metadata["slideshow"] = { 'slide_type': self.slide}
                self.slide = "slide"
            #Save Collapse Cell Option for HTML Parser
            if "collapse" in node["classes"]:
                new_code_cell["metadata"]["html-class"] = 'collapse'
            #Save hide-output cell option for HTML Parser
            if "hide-output" in node["classes"]:
                new_code_cell["metadata"]["hide-output"] = True
            else:
                new_code_cell["metadata"]["hide-output"] = False
            #Code Output
            if self.output_cell_type is JupyterOutputCellGenerators.CODE_OUTPUT:
                # Output blocks must be added to code cells to make any sense.
                # This script assumes that any output blocks will immediately follow a code
                # cell; a warning is raised if the cell immediately preceding this output
                # block is not a code cell.
                #
                # It is assumed that code cells may only have one output block - any more than
                # one will raise a warning and be ignored.
                mostRecentCell = self.output["cells"][-1]
                if mostRecentCell.cell_type != "code":
                    self.warn("Warning: Class: output block found after a " +
                              mostRecentCell.cell_type + " cell. Outputs may only come after code cells.")
                elif mostRecentCell.outputs:
                    self.warn(
                        "Warning: Multiple class: output blocks found after a code cell. Each code cell may only be followed by either zero or one output blocks.")
                else:
                    mostRecentCell.outputs.append(new_code_cell)
            else:
                self.output["cells"].append(new_code_cell)
        self.in_code_block = False
    # ===================
    # general methods
    # ===================
    @staticmethod
    def strip_blank_lines_in_end_of_block(line_text):
        """Drop trailing blank lines from *line_text*, preserving the rest."""
        lines = line_text.split("\n")
        for line in range(len(lines)):
            if len(lines[-1].strip()) == 0:
                lines = lines[:-1]
            else:
                break
        return "\n".join(lines)
|
[
"datetime.datetime.now",
"re.compile"
] |
[((266, 283), 're.compile', 're.compile', (['"""\\\\s"""'], {}), "('\\\\s')\n", (276, 283), False, 'import re\n'), ((3431, 3454), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3452, 3454), False, 'import datetime\n')]
|
import pandas as pd
import numpy as np
#####if 20-day volume avg >10000,can be traded in the next period
# Load per-contract daily prices/volumes and contract metadata.
future_price=pd.read_csv("../data_extraction/future_price.csv")
future_info=pd.read_csv("../data_extraction/future_info.csv")
# Attach each contract's underlying symbol to its daily volume rows.
combined=future_price.set_index(['order_book_id'])[['date','volume']].join(future_info.set_index('order_book_id')['underlying_symbol'])
combined=combined.reset_index()
# Aggregate volume across all contracts of the same underlying per day.
combined=combined.groupby(by=['date','underlying_symbol'])['volume'].sum()
combined=combined.reset_index()
# Pivot to a date x underlying matrix of daily total volumes.
combined=combined.set_index(['underlying_symbol','date'])
combined=combined.unstack(level=0)
combined.columns=combined.columns.droplevel(0)
#rolling 20d mean
check_vol=combined.rolling(20).mean()
#if 20d volume mean>10000, set value=1
# Binarize: 1 where tradeable (avg volume >= 10000), 0 elsewhere (incl. NaN).
check_vol[check_vol>=10000]=1
check_vol[check_vol!=1]=0
check_vol=check_vol.reset_index()
check_vol.to_csv("check_vol.csv",index=None)
|
[
"pandas.read_csv"
] |
[((120, 170), 'pandas.read_csv', 'pd.read_csv', (['"""../data_extraction/future_price.csv"""'], {}), "('../data_extraction/future_price.csv')\n", (131, 170), True, 'import pandas as pd\n'), ((183, 232), 'pandas.read_csv', 'pd.read_csv', (['"""../data_extraction/future_info.csv"""'], {}), "('../data_extraction/future_info.csv')\n", (194, 232), True, 'import pandas as pd\n')]
|
from smooth.components.external_component_h2_dispenser import H2Dispenser
from os import path
def test_init():
    """Construct an H2Dispenser from the test timeseries and check its derived state."""
    timeseries_dir = path.join(path.dirname(__file__), 'test_timeseries')
    dispenser = H2Dispenser({"csv_filename": "test_csv.csv", "path": timeseries_dir})
    # basic creation
    assert dispenser is not None
    assert dispenser.csv_filename is not None
    # read in data
    assert dispenser.data is not None
    # calculated params
    assert dispenser.max_hourly_h2_demand == 1
|
[
"os.path.dirname",
"smooth.components.external_component_h2_dispenser.H2Dispenser"
] |
[((213, 277), 'smooth.components.external_component_h2_dispenser.H2Dispenser', 'H2Dispenser', (["{'csv_filename': 'test_csv.csv', 'path': test_path}"], {}), "({'csv_filename': 'test_csv.csv', 'path': test_path})\n", (224, 277), False, 'from smooth.components.external_component_h2_dispenser import H2Dispenser\n'), ((161, 183), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (173, 183), False, 'from os import path\n')]
|
print ('[0/3] Importing libraries')
import struct
import wave

# Output format: 44.1 kHz, mono, 16-bit PCM ('h' = signed short samples).
frame_rate = 44100
channels = 1
sample_width = 2

# Read one integer sample per line. Using `with` closes the file even if a
# line fails to parse (the original leaked the handle in that case).
vals = []
with open('file.txt', 'r') as f:
    print ('[1/3] Append values from file')
    for line in f:
        vals.append(int(line))

print ('[2/3] Declare parameters of the wave')
with wave.open('waves/export.wav', 'wb') as wav:
    wav.setnchannels(channels)
    wav.setsampwidth(sample_width)
    wav.setframerate(frame_rate)
    wav.setnframes(len(vals))
    wav.setcomptype('NONE', 'not compressed')
    print ('[3/3] Write the wav file')
    # Pack all samples in a single struct call instead of one writeframes()
    # call per sample; identical bytes, far fewer Python-level calls.
    wav.writeframes(struct.pack('%dh' % len(vals), *vals))
|
[
"wave.open",
"struct.pack"
] |
[((294, 329), 'wave.open', 'wave.open', (['"""waves/export.wav"""', '"""wb"""'], {}), "('waves/export.wav', 'wb')\n", (303, 329), False, 'import wave\n'), ((553, 572), 'struct.pack', 'struct.pack', (['"""h"""', 'v'], {}), "('h', v)\n", (564, 572), False, 'import struct\n')]
|
'''
provide a simple python3 interface to the gsl_fft_real_transform function
'''
import sys
import itertools
from gsl_setup import *
def grouper(n, iterable, fillvalue=None):
    """Collect data into fixed-length chunks.

    grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx
    (see http://docs.python.org/dev/3.0/library/itertools.html#module-itertools)
    """
    # One shared iterator repeated n times makes zip_longest pull n items
    # per output tuple, padding the final tuple with fillvalue.
    shared_iter = iter(iterable)
    return itertools.zip_longest(*([shared_iter] * n), fillvalue=fillvalue)
# ctypes bindings to the GSL real-FFT C functions; `setup`, `gsl`, and the
# ctypes names come from `gsl_setup` (star-imported above) — presumably
# `setup` attaches argtypes/restype to each function. TODO confirm.
real_workspace_alloc = setup(
    gsl.gsl_fft_real_workspace_alloc,[c_ulong,],c_void_p)
real_wavetable_alloc = setup(
    gsl.gsl_fft_real_wavetable_alloc,[c_ulong,],c_void_p)
real_workspace_free =setup(gsl.gsl_fft_real_workspace_free ,[c_void_p,])
real_wavetable_free =setup(gsl.gsl_fft_real_wavetable_free ,[c_void_p,])
real_transform = setup(gsl.gsl_fft_real_transform,
        [c_void_p,c_ulong,c_ulong,c_void_p,c_void_p],)
class Real_FFT:
    '''
    returns the complex values of the real transform of the real data.
    return value[0] describes the offset,
        [1] is amplitude of term for wavelength = data length
            etceteras
        [-1] amp of wavelength = twice sample distance
    '''
    def __init__(self):
        # n == 0 doubles as "no GSL workspace/wavetable allocated yet".
        self.n = 0
    def __call__(self, data):
        """Transform *data* (a real sequence) and return a list of complex coefficients."""
        if len(data) < 2:
            if 1 == len(data):
                return data[:]
            return []
        # (Re)allocate the GSL workspace/wavetable only when the input length changes.
        if len(data) != self.n:
            self.__del__()  # free any previously allocated workspace first
            self.n = len(data)
            size = c_ulong(self.n)
            self.workspace = real_workspace_alloc(size)
            self.wavetable = real_wavetable_alloc(size)
        a = array('d',data)  # need a copy of the data; GSL transforms in place
        real_transform(ADDRESS(a),1,self.n,self.wavetable,self.workspace)
        # GSL returns the half-complex layout: a[0] is the DC term, then
        # interleaved (re, im) pairs; pair them up into Python complex numbers.
        rv = [complex(a[0]),]
        rv.extend(itertools.starmap(complex,grouper(2,a[1:],fillvalue=0)))
        return rv
    def __del__(self):
        # Free the C-side buffers; also called manually from __call__ on resize.
        if self.n:
            try:
                real_workspace_free(self.workspace)
                real_wavetable_free(self.wavetable)
            except AttributeError:
                print('Attribute error while freeing FFT auxiliary storage',
                      file=sys.stderr)
            except:
                print('error freeing FFT auxiliary storage',
                      file=sys.stderr)
    def produce_frequency(self,*,samples=None,sample_interval=None,sample_rate=None,total_length=None):
        '''
        return the frequency grid based on actual sizes (default sample_interval=1).
        '''
        n = samples or self.n
        if not n:
            return array('d')
        # At most one way of fixing the physical length may be specified.
        args_specified = 3 - ((not sample_interval)+(not sample_rate)+(not total_length))
        if 1 < args_specified:
            raise TypeError('specify at most one of [sample_rate, total_length, sample_interval]')
        if 0 == args_specified:
            L = n
        elif sample_interval:
            L = n*sample_interval
        elif sample_rate:
            L = n/sample_rate
        else:
            L = total_length
        return as_array(waves/L for waves in range(1+n//2))
    def produce_period(self,*args,**kwargs):
        '''
        return the period grid based on actual sizes.
        frequency of zero --> period 0. what else to do?
        '''
        f2T = self.produce_frequency(*args,**kwargs)
        for i in range(1,len(f2T)):
            f2T[i] = 1/f2T[i]
        return f2T
# Module-level singleton so callers can reuse one allocated workspace.
real_fft = Real_FFT()
def magnitude(a):
    """Return the magnitude (absolute value) of every element of *a* as a list."""
    return list(map(abs, a))
def phase(a):
    """Return the phase (argument, in radians) of every element of *a* as a list.

    Bug fix: the original body was ``[phase(b) for b in a]`` — it called
    itself on each element and recursed until RecursionError.  It now
    delegates to ``cmath.phase``, mirroring how ``magnitude`` uses ``abs``.
    """
    import cmath
    return [cmath.phase(b) for b in a]
|
[
"itertools.zip_longest"
] |
[((351, 400), 'itertools.zip_longest', 'itertools.zip_longest', (['*args'], {'fillvalue': 'fillvalue'}), '(*args, fillvalue=fillvalue)\n', (372, 400), False, 'import itertools\n')]
|
# -*- coding: utf-8 -*-
""" Tablib - TSV (Tab Separated Values) Support.
"""
from tablib.compat import unicode
from tablib.formats._csv import (
export_set as export_set_wrapper,
import_set as import_set_wrapper,
detect as detect_wrapper,
)
# Format registration metadata consumed by tablib's format registry.
title = 'tsv'
extensions = ('tsv',)
# TSV is just CSV with a tab delimiter; the functions below reuse the CSV
# implementation with this delimiter.
DELIMITER = unicode('\t')
def export_set(dataset):
    """Returns TSV representation of Dataset."""
    # Delegate to the CSV writer, overriding only the delimiter.
    tsv_text = export_set_wrapper(dataset, delimiter=DELIMITER)
    return tsv_text
def import_set(dset, in_stream, headers=True):
    """Returns dataset from TSV stream."""
    # Delegate to the CSV reader, overriding only the delimiter.
    populated = import_set_wrapper(dset, in_stream, headers=headers, delimiter=DELIMITER)
    return populated
def detect(stream):
    """Returns True if given stream is valid TSV."""
    # Delegate to the CSV detector, overriding only the delimiter.
    is_tsv = detect_wrapper(stream, delimiter=DELIMITER)
    return is_tsv
|
[
"tablib.formats._csv.import_set",
"tablib.compat.unicode",
"tablib.formats._csv.detect",
"tablib.formats._csv.export_set"
] |
[((305, 318), 'tablib.compat.unicode', 'unicode', (['"""\t"""'], {}), "('\\t')\n", (312, 318), False, 'from tablib.compat import unicode\n'), ((405, 453), 'tablib.formats._csv.export_set', 'export_set_wrapper', (['dataset'], {'delimiter': 'DELIMITER'}), '(dataset, delimiter=DELIMITER)\n', (423, 453), True, 'from tablib.formats._csv import export_set as export_set_wrapper, import_set as import_set_wrapper, detect as detect_wrapper\n'), ((557, 630), 'tablib.formats._csv.import_set', 'import_set_wrapper', (['dset', 'in_stream'], {'headers': 'headers', 'delimiter': 'DELIMITER'}), '(dset, in_stream, headers=headers, delimiter=DELIMITER)\n', (575, 630), True, 'from tablib.formats._csv import export_set as export_set_wrapper, import_set as import_set_wrapper, detect as detect_wrapper\n'), ((717, 760), 'tablib.formats._csv.detect', 'detect_wrapper', (['stream'], {'delimiter': 'DELIMITER'}), '(stream, delimiter=DELIMITER)\n', (731, 760), True, 'from tablib.formats._csv import export_set as export_set_wrapper, import_set as import_set_wrapper, detect as detect_wrapper\n')]
|
import wx
import HeeksCNC
from PopupMenu import PopupMenu
class CAMWindow(wx.ScrolledWindow):
    """Scrolled window that presents the CAM operations in a wx.TreeCtrl."""
    def __init__(self, parent):
        wx.ScrolledWindow.__init__(self, parent, name = 'CAM')
        self.image_list = wx.ImageList(16, 16)
        self.image_map = {}   # icon name -> index into self.image_list
        self.object_map = {}  # object.index -> wx tree item id
        self.tree = wx.TreeCtrl(self, style=wx.TR_HAS_BUTTONS + wx.TR_LINES_AT_ROOT + wx.TR_HIDE_ROOT)
        self.tree.Bind(wx.EVT_TREE_ITEM_RIGHT_CLICK, self.OnItemRightClick, self.tree)
        self.tree.Bind(wx.EVT_LEFT_DCLICK, self.OnTreeDoubleClick)
        self.tree.SetImageList(self.image_list)
        self.tree.SetSize(wx.Size(200, 300))
        self.root = self.tree.AddRoot("Root")
        # Use some sizers to see layout options
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.sizer.Add(self.tree, 1, wx.EXPAND)
        #Layout sizers
        self.SetSizer(self.sizer)
        self.SetAutoLayout(1)
        self.sizer.Fit(self)
        self.Show()
    def OnItemRightClick(self, evt):
        """Show the clicked object's context menu at the click position."""
        item = evt.GetItem()
        object = self.tree.GetPyData(item)
        menu = PopupMenu()
        object.AddToPopupMenu(menu)
        self.PopupMenu( menu.menu, evt.GetPoint() )
        menu.menu.Destroy()
    def OnTreeDoubleClick(self, evt):
        """Open the object's editor when its label or icon is double-clicked."""
        item, flags = self.tree.HitTest(evt.GetPosition())
        if item and flags & (wx.TREE_HITTEST_ONITEMLABEL + wx.TREE_HITTEST_ONITEMICON):
            object = self.tree.GetPyData(item)
            if object.CanEdit():
                object.Edit()
    def add(self, object):
        #add a tree object to the tree control
        icon_name = object.icon()
        if icon_name in self.image_map:
            icon = self.image_map[icon_name]
        else:
            icon = self.image_list.AddIcon(wx.Icon(HeeksCNC.heekscnc_path + "/icons/" + object.icon() + ".png", wx.BITMAP_TYPE_PNG))
            self.image_map[icon_name] = icon
        parent_tree_item = self.root
        if object.parent_index != None:
            parent_tree_item = self.object_map[object.parent_index]
        tree_item = self.tree.AppendItem(parent_tree_item, object.name(), icon)
        self.object_map[object.index] = tree_item
        self.tree.SetPyData(tree_item, object)
    def remove(self, object):
        #remove a tree object from the tree control
        self.tree.Delete(self.object_map[object.index])
        # Bug fix: dicts have no .remove() method — the original call
        # `self.object_map.remove(object.index)` raised AttributeError.
        del self.object_map[object.index]
|
[
"wx.BoxSizer",
"wx.ImageList",
"PopupMenu.PopupMenu",
"wx.ScrolledWindow.__init__",
"wx.TreeCtrl",
"wx.Size"
] |
[((135, 187), 'wx.ScrolledWindow.__init__', 'wx.ScrolledWindow.__init__', (['self', 'parent'], {'name': '"""CAM"""'}), "(self, parent, name='CAM')\n", (161, 187), False, 'import wx\n'), ((216, 236), 'wx.ImageList', 'wx.ImageList', (['(16)', '(16)'], {}), '(16, 16)\n', (228, 236), False, 'import wx\n'), ((314, 401), 'wx.TreeCtrl', 'wx.TreeCtrl', (['self'], {'style': '(wx.TR_HAS_BUTTONS + wx.TR_LINES_AT_ROOT + wx.TR_HIDE_ROOT)'}), '(self, style=wx.TR_HAS_BUTTONS + wx.TR_LINES_AT_ROOT + wx.\n TR_HIDE_ROOT)\n', (325, 401), False, 'import wx\n'), ((770, 794), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (781, 794), False, 'import wx\n'), ((1130, 1141), 'PopupMenu.PopupMenu', 'PopupMenu', ([], {}), '()\n', (1139, 1141), False, 'from PopupMenu import PopupMenu\n'), ((635, 652), 'wx.Size', 'wx.Size', (['(200)', '(300)'], {}), '(200, 300)\n', (642, 652), False, 'import wx\n')]
|
import os
import json
import shutil
import random
from termcolor import cprint
import colorama
from pyvoc.check_config import config_dir_path
from pyvoc import pyvoc
import textwrap
colorama.init()  # enable ANSI colour handling (needed by termcolor on Windows)
terminal_width = shutil.get_terminal_size().columns  # used below to wrap long definition text
def revise_vocab(group_number):
    """Interactively page through every word in a vocabulary group.

    Loads group<group_number>.json from the config directory, shuffles its
    words and prints each word with one of its definitions, waiting for
    <enter> between words (q<enter> aborts early).  Terminates the process
    when finished or when the group file does not exist.
    """
    print("")
    group_path = os.path.join(config_dir_path(), "group" + str(group_number) + ".json")
    try:
        # group_path is already a complete path; the original wrapped it in a
        # redundant single-argument os.path.join() call.
        with open(group_path, "r") as f:
            group = json.load(f)
    except FileNotFoundError:
        pyvoc.stop_loading_animation()
        cprint(
            "group number {} does not exists".format(group_number),
            color="red",
            attrs=["bold"],
        )
        exit()
    words = list(group)
    random.shuffle(words)
    pyvoc.stop_loading_animation()
    print("")
    cprint(" Press <enter> for next. q<enter> to exit ", "yellow", attrs=["bold"])
    print("")
    for i, word in enumerate(words, 1):
        cprint(
            "{}".format(word),
            color="green",
            attrs=["reverse", "bold"],
            end=" " * (15 - len(word)),
        )
        # Wrap the definition so continuation lines align under the first one
        # (the word column on the left is 15 chars wide plus decoration).
        width_left = terminal_width - 24
        sentences = textwrap.wrap(list(group[word].values())[0], width=width_left)
        s_count = 1
        for sentence in sentences:
            if s_count == 1:
                print(sentence)
            else:
                print(" " * 15 + sentence)
            s_count += 1
        print("{}. ".format(i), end="")
        prompt = input("> ")
        if prompt.lower() == "q":
            cprint("END\n", color="yellow", attrs=["bold", "reverse"])
            break
    print(" ")
    cprint(
        "Revised all the words in vocabulary group {}!\n".format(group_number),
        color="yellow",
        attrs=["bold", "reverse"],
    )
    exit()
def validate_group_number(group_number):
    """Validate a group number and return the path of its word-count index.

    Group numbers 101-103 denote custom groups: for those, the sentinel
    string "custom group" is returned instead of an index path (after
    verifying that group101.json exists).  Numbers outside 1-100 terminate
    the process with an error message.
    """
    if group_number in (101, 102, 103):
        custom_path = os.path.join(config_dir_path(), "group101.json")
        if not os.path.isfile(custom_path):
            pyvoc.stop_loading_animation()
            cprint("group{} does not exist".format(101), color="red", attrs=["bold"])
            exit()
        return "custom group"
    if not 1 <= group_number <= 100:
        pyvoc.stop_loading_animation()
        cprint("Invalid group number. choose from 1-100", color="red", attrs=["bold"])
        exit()
    # Groups 1-50 are user-defined; 51-100 ship with pyvoc.
    index_name = "usergroups.json" if group_number < 51 else "defaultgroups.json"
    return os.path.join(config_dir_path(), index_name)
def check_group_path(group_path, group_number):
    """Terminate the process with an error unless group_path is an existing file."""
    if os.path.isfile(group_path):
        return
    pyvoc.stop_loading_animation()
    cprint(
        "group number {} does not exist".format(group_number),
        color="red",
        attrs=["bold"],
    )
    exit()
def count_words_in_group(path, group_number, no_of_questions):
    """Exit with an error when the group holds fewer words than requested.

    `path` is the JSON index file mapping group numbers (as strings) to the
    number of words each group contains.
    """
    with open(path, "r") as index_file:
        word_count = json.load(index_file)[str(group_number)]
    if word_count >= no_of_questions:
        return
    pyvoc.stop_loading_animation()
    cprint(
        "group number {} does not have enough words".format(group_number),
        color="red",
        attrs=["bold"],
    )
    exit()
def count_words_in_custom_group(group_path, no_of_questions, group_number):
    """Exit with an error when a custom group has fewer words than requested.

    `group_path` points at the custom group's JSON file, whose top-level keys
    are the words themselves.
    """
    with open(group_path, "r") as f:
        content = json.load(f)
    # len() of a dict counts its keys directly; no need to materialize
    # list(content.keys()) as the original did.
    no_of_words_in_group = len(content)
    if no_of_questions > no_of_words_in_group:
        pyvoc.stop_loading_animation()
        cprint(
            "group number {} does not have enough words".format(group_number),
            color="red",
            attrs=["bold"],
        )
        exit()
def start_quiz(group_number, no_of_questions):
    """Run an interactive multiple-choice quiz over a vocabulary group.

    Validates the group, picks `no_of_questions` random words (default 5),
    shows four options per word and tallies the score from the per-word
    results recorded by prompt_input().  Terminates the process at the end.
    """
    print("")
    if no_of_questions is None:
        cprint(
            "Number of questions not specified. Assuming 5 questions.",
            color="yellow",
            attrs=["bold"],
        )
        no_of_questions = 5
    else:
        cprint(f"Initializing quiz with {no_of_questions} questions.")
    # validate_group_number returns the word-count index path, or the
    # sentinel "custom group" for group numbers 101-103.
    path = validate_group_number(group_number)
    group_path = os.path.join(config_dir_path(), "group" + str(group_number) + ".json")
    options_path = os.path.join(config_dir_path(), "options.json")
    check_group_path(group_path, group_number)
    if path == "custom group":
        count_words_in_custom_group(group_path, no_of_questions, group_number)
    else:
        count_words_in_group(path, group_number, no_of_questions)
    result = {}
    word_definition = {}
    with open(group_path, "r") as f:
        group_content = json.load(f)
    word_list = random.sample(list(group_content), no_of_questions)
    for word in word_list:
        # Each word maps to a dict of definitions; pick one at random.
        _ = group_content[word]
        refined_def = _[random.sample(list(_), 1)[0]]
        word_definition[word] = refined_def
    with open(options_path, "r") as f:
        options = json.load(f)
    pyvoc.stop_loading_animation()
    cprint(
        "1 point for every correct answer. q<enter> to exit",
        color="yellow",
        attrs=["bold"],
    )
    print("\n")
    score = 0
    for i in range(no_of_questions):
        cprint(word_list[i], color="white", attrs=["bold", "reverse"])
        correct_option_number = print_options(options, word_definition, word_list[i])
        prompt_input(correct_option_number, word_list[i], score, result, i + 1)
    # The score is computed from `result` here; prompt_input only records
    # True/False per word (its local `score` never reaches this scope).
    for word in result:
        if result[word] is True:
            score += 1
    print("")
    cprint("Score: {}/{}".format(score, no_of_questions), color="green", attrs=["bold"])
    if score == no_of_questions:
        cprint("Perfect Score ヽ(´▽`)/", color="yellow", attrs=["bold", "blink"])
    exit()
def print_options(options, correct_answer, word):
    """Print four answer options: three random decoys plus the correct one.

    Decoy definitions are drawn from the global `options` pool (keys "1"-"99"),
    the correct definition for `word` is mixed in, and the shuffled list is
    printed with text wrapped to the terminal width.

    Returns the 1-based position of the correct option.
    """
    options_list = []
    for decoy_index in random.sample(range(1, 100), 3):
        options_list.append(options[str(decoy_index)])
    options_list.append(correct_answer[word])
    random.shuffle(options_list)
    for i, option in enumerate(options_list, 1):
        if option == correct_answer[word]:
            # enumerate already numbers the options; the original kept a
            # separate `count` variable that was always equal to i.
            correct_option_number = i
        cprint("[{}]".format(i), color="cyan", end=" ")
        # Continuation lines are indented past the "[n] " prefix.
        width_left = terminal_width - (3 + len(str(i)))
        sentences = textwrap.wrap(option, width=width_left)
        for s_count, sentence in enumerate(sentences, 1):
            if s_count == 1:
                print(sentence)
            else:
                print(" " * (3 + len(str(i))) + sentence)
    return correct_option_number
def prompt_input(correct_option_number, word, score, result, question_number):
    """Prompt for an answer until a valid choice (1-4) or 'q' is entered.

    Records whether the answer was correct in result[word].  The `score`
    parameter is kept for interface compatibility only: integers are
    immutable, so the original's `score += 1` here never reached the caller,
    which recomputes the score from `result` instead.
    """
    while 1:
        prompt = input("{}.> ".format(question_number))
        if prompt.lower() == "q":
            exit()
        try:
            if not int(prompt) in [1, 2, 3, 4]:
                cprint(
                    "enter a valid integer[1, 2, 3, 4]. q<enter> to exit",
                    color="yellow",
                )
                continue
        except ValueError:
            cprint(
                "enter a valid integer[1, 2, 3, 4]. q<enter> to exit", color="yellow"
            )
            continue
        if int(prompt) == correct_option_number:
            cprint("correct answer", color="green", on_color="on_grey")
            print("")
            result[word] = True
            # The original also did `score += 1` here; it was a no-op outside
            # this function (local rebinding of an int) and has been removed.
            break
        else:
            cprint("wrong answer", color="red", on_color="on_grey")
            print("")
            result[word] = False
            break
|
[
"colorama.init",
"json.load",
"random.shuffle",
"textwrap.wrap",
"shutil.get_terminal_size",
"os.path.isfile",
"pyvoc.check_config.config_dir_path",
"os.path.join",
"termcolor.cprint",
"pyvoc.pyvoc.stop_loading_animation"
] |
[((188, 203), 'colorama.init', 'colorama.init', ([], {}), '()\n', (201, 203), False, 'import colorama\n'), ((222, 248), 'shutil.get_terminal_size', 'shutil.get_terminal_size', ([], {}), '()\n', (246, 248), False, 'import shutil\n'), ((749, 770), 'random.shuffle', 'random.shuffle', (['words'], {}), '(words)\n', (763, 770), False, 'import random\n'), ((775, 805), 'pyvoc.pyvoc.stop_loading_animation', 'pyvoc.stop_loading_animation', ([], {}), '()\n', (803, 805), False, 'from pyvoc import pyvoc\n'), ((824, 902), 'termcolor.cprint', 'cprint', (['""" Press <enter> for next. q<enter> to exit """', '"""yellow"""'], {'attrs': "['bold']"}), "(' Press <enter> for next. q<enter> to exit ', 'yellow', attrs=['bold'])\n", (830, 902), False, 'from termcolor import cprint\n'), ((4974, 5004), 'pyvoc.pyvoc.stop_loading_animation', 'pyvoc.stop_loading_animation', ([], {}), '()\n', (5002, 5004), False, 'from pyvoc import pyvoc\n'), ((5009, 5105), 'termcolor.cprint', 'cprint', (['"""1 point for every correct answer. q<enter> to exit"""'], {'color': '"""yellow"""', 'attrs': "['bold']"}), "('1 point for every correct answer. q<enter> to exit', color='yellow',\n attrs=['bold'])\n", (5015, 5105), False, 'from termcolor import cprint\n'), ((5993, 6021), 'random.shuffle', 'random.shuffle', (['options_list'], {}), '(options_list)\n', (6007, 6021), False, 'import random\n'), ((335, 352), 'pyvoc.check_config.config_dir_path', 'config_dir_path', ([], {}), '()\n', (350, 352), False, 'from pyvoc.check_config import config_dir_path\n'), ((2246, 2276), 'pyvoc.pyvoc.stop_loading_animation', 'pyvoc.stop_loading_animation', ([], {}), '()\n', (2274, 2276), False, 'from pyvoc import pyvoc\n'), ((2285, 2363), 'termcolor.cprint', 'cprint', (['"""Invalid group number. choose from 1-100"""'], {'color': '"""red"""', 'attrs': "['bold']"}), "('Invalid group number. 
choose from 1-100', color='red', attrs=['bold'])\n", (2291, 2363), False, 'from termcolor import cprint\n'), ((2627, 2653), 'os.path.isfile', 'os.path.isfile', (['group_path'], {}), '(group_path)\n', (2641, 2653), False, 'import os\n'), ((2663, 2693), 'pyvoc.pyvoc.stop_loading_animation', 'pyvoc.stop_loading_animation', ([], {}), '()\n', (2691, 2693), False, 'from pyvoc import pyvoc\n'), ((3440, 3452), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3449, 3452), False, 'import json\n'), ((3562, 3592), 'pyvoc.pyvoc.stop_loading_animation', 'pyvoc.stop_loading_animation', ([], {}), '()\n', (3590, 3592), False, 'from pyvoc import pyvoc\n'), ((3869, 3972), 'termcolor.cprint', 'cprint', (['"""Number of questions not specified. Assuming 5 questions."""'], {'color': '"""yellow"""', 'attrs': "['bold']"}), "('Number of questions not specified. Assuming 5 questions.', color=\n 'yellow', attrs=['bold'])\n", (3875, 3972), False, 'from termcolor import cprint\n'), ((4061, 4123), 'termcolor.cprint', 'cprint', (['f"""Initializing quiz with {no_of_questions} questions."""'], {}), "(f'Initializing quiz with {no_of_questions} questions.')\n", (4067, 4123), False, 'from termcolor import cprint\n'), ((4202, 4219), 'pyvoc.check_config.config_dir_path', 'config_dir_path', ([], {}), '()\n', (4217, 4219), False, 'from pyvoc.check_config import config_dir_path\n'), ((4292, 4309), 'pyvoc.check_config.config_dir_path', 'config_dir_path', ([], {}), '()\n', (4307, 4309), False, 'from pyvoc.check_config import config_dir_path\n'), ((4662, 4674), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4671, 4674), False, 'import json\n'), ((4957, 4969), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4966, 4969), False, 'import json\n'), ((5208, 5270), 'termcolor.cprint', 'cprint', (['word_list[i]'], {'color': '"""white"""', 'attrs': "['bold', 'reverse']"}), "(word_list[i], color='white', attrs=['bold', 'reverse'])\n", (5214, 5270), False, 'from termcolor import cprint\n'), ((5661, 5733), 
'termcolor.cprint', 'cprint', (['"""Perfect Score ヽ(´▽`)/"""'], {'color': '"""yellow"""', 'attrs': "['bold', 'blink']"}), "('Perfect Score ヽ(´▽`)/', color='yellow', attrs=['bold', 'blink'])\n", (5667, 5733), False, 'from termcolor import cprint\n'), ((6302, 6341), 'textwrap.wrap', 'textwrap.wrap', (['option'], {'width': 'width_left'}), '(option, width=width_left)\n', (6315, 6341), False, 'import textwrap\n'), ((477, 489), 'json.load', 'json.load', (['f'], {}), '(f)\n', (486, 489), False, 'import json\n'), ((528, 558), 'pyvoc.pyvoc.stop_loading_animation', 'pyvoc.stop_loading_animation', ([], {}), '()\n', (556, 558), False, 'from pyvoc import pyvoc\n'), ((1563, 1621), 'termcolor.cprint', 'cprint', (['"""END\n"""'], {'color': '"""yellow"""', 'attrs': "['bold', 'reverse']"}), "('END\\n', color='yellow', attrs=['bold', 'reverse'])\n", (1569, 1621), False, 'from termcolor import cprint\n'), ((1939, 1956), 'pyvoc.check_config.config_dir_path', 'config_dir_path', ([], {}), '()\n', (1954, 1956), False, 'from pyvoc.check_config import config_dir_path\n'), ((1990, 2010), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (2004, 2010), False, 'import os\n'), ((2024, 2054), 'pyvoc.pyvoc.stop_loading_animation', 'pyvoc.stop_loading_animation', ([], {}), '()\n', (2052, 2054), False, 'from pyvoc import pyvoc\n'), ((2433, 2450), 'pyvoc.check_config.config_dir_path', 'config_dir_path', ([], {}), '()\n', (2448, 2450), False, 'from pyvoc.check_config import config_dir_path\n'), ((2509, 2526), 'pyvoc.check_config.config_dir_path', 'config_dir_path', ([], {}), '()\n', (2524, 2526), False, 'from pyvoc.check_config import config_dir_path\n'), ((2983, 2995), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2992, 2995), False, 'import json\n'), ((3079, 3109), 'pyvoc.pyvoc.stop_loading_animation', 'pyvoc.stop_loading_animation', ([], {}), '()\n', (3107, 3109), False, 'from pyvoc import pyvoc\n'), ((7304, 7363), 'termcolor.cprint', 'cprint', (['"""correct answer"""'], {'color': 
'"""green"""', 'on_color': '"""on_grey"""'}), "('correct answer', color='green', on_color='on_grey')\n", (7310, 7363), False, 'from termcolor import cprint\n'), ((7485, 7540), 'termcolor.cprint', 'cprint', (['"""wrong answer"""'], {'color': '"""red"""', 'on_color': '"""on_grey"""'}), "('wrong answer', color='red', on_color='on_grey')\n", (7491, 7540), False, 'from termcolor import cprint\n'), ((420, 444), 'os.path.join', 'os.path.join', (['group_path'], {}), '(group_path)\n', (432, 444), False, 'import os\n'), ((6913, 6990), 'termcolor.cprint', 'cprint', (['"""enter a valid integer[1, 2, 3, 4]. q<enter> to exit"""'], {'color': '"""yellow"""'}), "('enter a valid integer[1, 2, 3, 4]. q<enter> to exit', color='yellow')\n", (6919, 6990), False, 'from termcolor import cprint\n'), ((7114, 7191), 'termcolor.cprint', 'cprint', (['"""enter a valid integer[1, 2, 3, 4]. q<enter> to exit"""'], {'color': '"""yellow"""'}), "('enter a valid integer[1, 2, 3, 4]. q<enter> to exit', color='yellow')\n", (7120, 7191), False, 'from termcolor import cprint\n')]
|
from WebScrapy import HomedySpider
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
import logging
from selenium.webdriver.remote.remote_connection import LOGGER
from urllib3.connectionpool import log
log.setLevel(logging.WARNING)  # quiet urllib3 connection-pool chatter
LOGGER.setLevel(logging.WARNING)  # quiet selenium's remote-connection logger

if __name__ == '__main__':
    # Load the Scrapy project settings once and reuse them; the original
    # called get_project_settings() a second time when building the process.
    setting = get_project_settings()
    process = CrawlerProcess(setting)
    process.crawl(HomedySpider)
    process.start()
|
[
"selenium.webdriver.remote.remote_connection.LOGGER.setLevel",
"urllib3.connectionpool.log.setLevel",
"scrapy.utils.project.get_project_settings"
] |
[((251, 280), 'urllib3.connectionpool.log.setLevel', 'log.setLevel', (['logging.WARNING'], {}), '(logging.WARNING)\n', (263, 280), False, 'from urllib3.connectionpool import log\n'), ((281, 313), 'selenium.webdriver.remote.remote_connection.LOGGER.setLevel', 'LOGGER.setLevel', (['logging.WARNING'], {}), '(logging.WARNING)\n', (296, 313), False, 'from selenium.webdriver.remote.remote_connection import LOGGER\n'), ((357, 379), 'scrapy.utils.project.get_project_settings', 'get_project_settings', ([], {}), '()\n', (377, 379), False, 'from scrapy.utils.project import get_project_settings\n'), ((409, 431), 'scrapy.utils.project.get_project_settings', 'get_project_settings', ([], {}), '()\n', (429, 431), False, 'from scrapy.utils.project import get_project_settings\n')]
|
import shelve
import os
database_filename = 'database'
dataKey = 'webpages_database'


def save(name, content):
    """Persist `content` under `name` in a shelve file inside contents/.

    Creates the contents/ directory on first use.  Does nothing when `name`
    is None or `content` is the empty string (nothing worth saving).
    """
    dir_name = 'contents/'
    if not os.path.isdir(dir_name):
        os.mkdir(dir_name)
    if name is None:
        return
    if content == "":
        return
    # Shelf objects are context managers, so the underlying file is closed
    # even if the assignment raises (the original leaked the handle then).
    with shelve.open(dir_name + name, flag='c') as db:
        db[dataKey] = content
|
[
"os.path.isdir",
"os.mkdir",
"shelve.open"
] |
[((651, 689), 'shelve.open', 'shelve.open', (['(dir_name + name)'], {'flag': '"""c"""'}), "(dir_name + name, flag='c')\n", (662, 689), False, 'import shelve\n'), ((515, 538), 'os.path.isdir', 'os.path.isdir', (['dir_name'], {}), '(dir_name)\n', (528, 538), False, 'import os\n'), ((548, 566), 'os.mkdir', 'os.mkdir', (['dir_name'], {}), '(dir_name)\n', (556, 566), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
# Copyright (C) 2011-2014 Mag. <NAME> All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. <EMAIL>
# #*** <License> ************************************************************#
# This modify is part of the package GTW.OMP.SRM.
#
# This modify is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# #*** </License> ***********************************************************#
#
#++
# Name
# GTW.OMP.SRM.Club
#
# Purpose
# Model a sailing club
#
# Revision Dates
# 23-Sep-2011 (CT) Creation
# 31-Jul-2012 (CT) Redefine `Club.name.cooked` to filter non-letters
# 13-Nov-2012 (CT) Fix typo in `name.cooked` (s/self/soc/)
# ««revision-date»»···
#--
from _GTW import GTW
from _MOM.import_MOM import *
import _GTW._OMP._SRM.Entity
from _TFL.I18N import _, _T, _Tn
from _TFL.pyk import pyk
from _TFL.Regexp import Re_Replacer, re
_Ancestor_Essence = GTW.OMP.SRM.Object
class Club (_Ancestor_Essence) :
    """A sailing club."""

    class _Attributes (_Ancestor_Essence._Attributes) :

        _Ancestor = _Ancestor_Essence._Attributes

        ### Primary attributes

        class name (A_String) :
            """Short name of the sailing club."""

            kind               = Attr.Primary
            example            = "RORC"
            ignore_case        = True
            max_length         = 8
            completer          = Attr.Completer_Spec  (1, Attr.Selector.primary)
            # Strips every non-word character (anything but letters, digits, _)
            # so names compare canonically.
            _clean             = Re_Replacer (r"\W+", "", re.UNICODE)

            @TFL.Meta.Class_and_Instance_Method
            def cooked (soc, value) :
                # Filter non-letters out of the raw value before storing it.
                if value is not None :
                    return soc._clean (str (value))
                return value
            # end def cooked

        # end class name

        ### Non-primary attributes

        class long_name (A_String) :
            """Long name of the sailing club."""

            kind               = Attr.Optional
            example            = "Royal Ocean Racing Club"
            max_length         = 64

        # end class long_name

    # end class _Attributes

# end class Club
if __name__ != "__main__" :
    # Export this module's public names into the GTW.OMP.SRM namespace
    # whenever it is imported (i.e. not run as a script).
    GTW.OMP.SRM._Export ("*")
### __END__ GTW.OMP.SRM.Club
|
[
"_TFL.Regexp.Re_Replacer",
"_GTW.GTW.OMP.SRM._Export"
] |
[((2287, 2311), '_GTW.GTW.OMP.SRM._Export', 'GTW.OMP.SRM._Export', (['"""*"""'], {}), "('*')\n", (2306, 2311), False, 'from _GTW import GTW\n'), ((1611, 1646), '_TFL.Regexp.Re_Replacer', 'Re_Replacer', (['"""\\\\W+"""', '""""""', 're.UNICODE'], {}), "('\\\\W+', '', re.UNICODE)\n", (1622, 1646), False, 'from _TFL.Regexp import Re_Replacer, re\n')]
|
#!/usr/bin/env python2
# Copyright (C) 2017 MongoDB Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3,
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Test cases for IDL Generator.
This file exists to verify code coverage for the generator.py file. To run code coverage, run in the
idl base directory:
$ coverage run run_tests.py && coverage html
"""
from __future__ import absolute_import, print_function, unicode_literals
import os
import unittest
# import package so that it works regardless of whether we run as a module or file
if __package__ is None:
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from context import idl
import testcase
else:
from .context import idl
from . import testcase
class TestGenerator(testcase.IDLTestcase):
    """Test the IDL Generator."""

    def test_compile(self):
        # type: () -> None
        """Exercise the code generator so code coverage can be measured."""
        base_dir = os.path.dirname(
            os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
        src_dir = os.path.join(
            base_dir,
            'src', )
        idl_dir = os.path.join(src_dir, 'mongo', 'idl')

        args = idl.compiler.CompilerArgs()
        args.output_suffix = "_codecoverage_gen"
        args.import_directories = [src_dir]

        unittest_idl_file = os.path.join(idl_dir, 'unittest.idl')
        if not os.path.exists(unittest_idl_file):
            # The original called unittest.skip(...) as a plain function,
            # which only *returns* a decorator and skips nothing; skipTest()
            # raises SkipTest so the runner actually records the skip.
            self.skipTest("Skipping IDL Generator testing since %s could not be found." %
                          (unittest_idl_file))

        args.input_file = os.path.join(idl_dir, 'unittest_import.idl')
        self.assertTrue(idl.compiler.compile_idl(args))

        args.input_file = unittest_idl_file
        self.assertTrue(idl.compiler.compile_idl(args))
if __name__ == '__main__':
    unittest.main()  # allow running this test module directly
|
[
"unittest.main",
"os.path.abspath",
"os.path.exists",
"context.idl.compiler.CompilerArgs",
"unittest.skip",
"context.idl.compiler.compile_idl",
"os.path.join"
] |
[((2400, 2415), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2413, 2415), False, 'import unittest\n'), ((1614, 1643), 'os.path.join', 'os.path.join', (['base_dir', '"""src"""'], {}), "(base_dir, 'src')\n", (1626, 1643), False, 'import os\n'), ((1689, 1726), 'os.path.join', 'os.path.join', (['src_dir', '"""mongo"""', '"""idl"""'], {}), "(src_dir, 'mongo', 'idl')\n", (1701, 1726), False, 'import os\n'), ((1743, 1770), 'context.idl.compiler.CompilerArgs', 'idl.compiler.CompilerArgs', ([], {}), '()\n', (1768, 1770), False, 'from context import idl\n'), ((1893, 1930), 'os.path.join', 'os.path.join', (['idl_dir', '"""unittest.idl"""'], {}), "(idl_dir, 'unittest.idl')\n", (1905, 1930), False, 'import os\n'), ((2164, 2208), 'os.path.join', 'os.path.join', (['idl_dir', '"""unittest_import.idl"""'], {}), "(idl_dir, 'unittest_import.idl')\n", (2176, 2208), False, 'import os\n'), ((1121, 1146), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1136, 1146), False, 'import os\n'), ((1946, 1979), 'os.path.exists', 'os.path.exists', (['unittest_idl_file'], {}), '(unittest_idl_file)\n', (1960, 1979), False, 'import os\n'), ((1993, 2093), 'unittest.skip', 'unittest.skip', (["('Skipping IDL Generator testing since %s could not be found.' %\n unittest_idl_file)"], {}), "('Skipping IDL Generator testing since %s could not be found.' %\n unittest_idl_file)\n", (2006, 2093), False, 'import unittest\n'), ((2233, 2263), 'context.idl.compiler.compile_idl', 'idl.compiler.compile_idl', (['args'], {}), '(args)\n', (2257, 2263), False, 'from context import idl\n'), ((2334, 2364), 'context.idl.compiler.compile_idl', 'idl.compiler.compile_idl', (['args'], {}), '(args)\n', (2358, 2364), False, 'from context import idl\n'), ((1566, 1591), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1581, 1591), False, 'import os\n')]
|
from bs4 import BeautifulSoup
import json
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from tech2grams import tech_2grams
from punctuation import remove_punctuation
# Read the saved job-listing page from disk.
with open("spacex-job-listing.html", "r") as file:
    content = file.read()
bs = BeautifulSoup(content, "lxml")
# The structured job data is embedded as JSON-LD inside a <script> tag.
script_tag = bs.find("script", {"type": "application/ld+json"})
job_listing_contents = json.loads(script_tag.contents[0])
print(job_listing_contents)
# The "description" field itself contains HTML; strip the markup,
# keeping only the text nodes.
desc_bs = BeautifulSoup(job_listing_contents["description"], "lxml")
print(desc_bs)
just_text = desc_bs.find_all(text=True)
print(just_text)
joined = ' '.join(just_text)
tokens = word_tokenize(joined)
# Drop common English stopwords (case-insensitive comparison).
stop_list = stopwords.words('english')
with_no_stops = [word for word in tokens if word.lower() not in stop_list]
# Merge known tech terms into 2-grams, then drop punctuation tokens.
two_grammed = tech_2grams(with_no_stops)
cleaned = remove_punctuation(two_grammed)
print(cleaned)
from wordcloud import WordCloud
from nltk.probability import FreqDist
# Build a word cloud weighted by token frequency.
freq_dist = FreqDist(cleaned)
wordcloud = WordCloud(width=1200, height=800).generate_from_frequencies(freq_dist)
# Display the generated image:
# the matplotlib way:
import matplotlib.pyplot as plt
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.show()
|
[
"nltk.probability.FreqDist",
"matplotlib.pyplot.show",
"json.loads",
"matplotlib.pyplot.imshow",
"wordcloud.WordCloud",
"punctuation.remove_punctuation",
"matplotlib.pyplot.axis",
"tech2grams.tech_2grams",
"nltk.corpus.stopwords.words",
"bs4.BeautifulSoup",
"nltk.tokenize.word_tokenize"
] |
[((278, 308), 'bs4.BeautifulSoup', 'BeautifulSoup', (['content', '"""lxml"""'], {}), "(content, 'lxml')\n", (291, 308), False, 'from bs4 import BeautifulSoup\n'), ((398, 432), 'json.loads', 'json.loads', (['script_tag.contents[0]'], {}), '(script_tag.contents[0])\n', (408, 432), False, 'import json\n'), ((472, 530), 'bs4.BeautifulSoup', 'BeautifulSoup', (["job_listing_contents['description']", '"""lxml"""'], {}), "(job_listing_contents['description'], 'lxml')\n", (485, 530), False, 'from bs4 import BeautifulSoup\n'), ((643, 664), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['joined'], {}), '(joined)\n', (656, 664), False, 'from nltk.tokenize import word_tokenize\n'), ((678, 704), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (693, 704), False, 'from nltk.corpus import stopwords\n'), ((794, 820), 'tech2grams.tech_2grams', 'tech_2grams', (['with_no_stops'], {}), '(with_no_stops)\n', (805, 820), False, 'from tech2grams import tech_2grams\n'), ((831, 862), 'punctuation.remove_punctuation', 'remove_punctuation', (['two_grammed'], {}), '(two_grammed)\n', (849, 862), False, 'from punctuation import remove_punctuation\n'), ((962, 979), 'nltk.probability.FreqDist', 'FreqDist', (['cleaned'], {}), '(cleaned)\n', (970, 979), False, 'from nltk.probability import FreqDist\n'), ((1149, 1196), 'matplotlib.pyplot.imshow', 'plt.imshow', (['wordcloud'], {'interpolation': '"""bilinear"""'}), "(wordcloud, interpolation='bilinear')\n", (1159, 1196), True, 'import matplotlib.pyplot as plt\n'), ((1197, 1212), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1205, 1212), True, 'import matplotlib.pyplot as plt\n'), ((1213, 1223), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1221, 1223), True, 'import matplotlib.pyplot as plt\n'), ((992, 1025), 'wordcloud.WordCloud', 'WordCloud', ([], {'width': '(1200)', 'height': '(800)'}), '(width=1200, height=800)\n', (1001, 1025), False, 'from wordcloud import 
WordCloud\n')]
|
import uuid
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.conf import settings
class User(AbstractUser):
    """Custom user model extending Django's ``AbstractUser``.

    Fields
    ------
    username : CharField
        Optional, unique handle of the user (nullable, defaults to None).
    full_name : CharField
        The user's full name (required).
    profile_picture : TextField
        Link to the user's profile picture; falls back to the project-wide
        default picture configured in ``settings.CONFIG_VARS``.
    uuid : UUIDField
        Non-editable unique identifier assigned at creation time.
    """

    username = models.CharField(
        max_length=63, blank=True, null=True, default=None, unique=True
    )
    full_name = models.CharField(max_length=255, blank=False, null=False)
    profile_picture = models.TextField(
        blank=False,
        null=False,
        default=settings.CONFIG_VARS['DEFAULT_PROFILE_PICTURE'],
    )
    uuid = models.UUIDField(default=uuid.uuid4, editable=False, unique=True)

    def __str__(self):
        # Human-readable representation: the user's full name.
        return "{}".format(self.full_name)

    def get_uuid(self):
        """Return the string form of this user's UUID."""
        return str(self.uuid)
|
[
"django.db.models.CharField",
"django.db.models.TextField",
"django.db.models.UUIDField"
] |
[((742, 828), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(63)', 'blank': '(True)', 'null': '(True)', 'default': 'None', 'unique': '(True)'}), '(max_length=63, blank=True, null=True, default=None, unique\n =True)\n', (758, 828), False, 'from django.db import models\n'), ((887, 944), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(False)', 'null': '(False)'}), '(max_length=255, blank=False, null=False)\n', (903, 944), False, 'from django.db import models\n'), ((998, 1101), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(False)', 'null': '(False)', 'default': "settings.CONFIG_VARS['DEFAULT_PROFILE_PICTURE']"}), "(blank=False, null=False, default=settings.CONFIG_VARS[\n 'DEFAULT_PROFILE_PICTURE'])\n", (1014, 1101), False, 'from django.db import models\n'), ((1139, 1204), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'default': 'uuid.uuid4', 'editable': '(False)', 'unique': '(True)'}), '(default=uuid.uuid4, editable=False, unique=True)\n', (1155, 1204), False, 'from django.db import models\n')]
|
from office365.runtime.queries.service_operation_query import ServiceOperationQuery
from office365.runtime.resource_path import ResourcePath
from office365.runtime.resource_path_service_operation import ResourcePathServiceOperation
from office365.sharepoint.base_entity_collection import BaseEntityCollection
from office365.sharepoint.permissions.role_assignment import RoleAssignment
class RoleAssignmentCollection(BaseEntityCollection):
    """A collection of RoleAssignment resources."""

    def __init__(self, context, resource_path=None):
        super(RoleAssignmentCollection, self).__init__(context, RoleAssignment, resource_path)

    def __getitem__(self, index_or_principal_id):
        """Address a RoleAssignment by position or by resource id.

        :param int or str index_or_principal_id: an integer is treated as a
            positional index into the collection; anything else is used to
            build a resource path addressing the assignment directly.
        """
        if type(index_or_principal_id) == int:
            return super(RoleAssignmentCollection, self).__getitem__(index_or_principal_id)
        item_path = ResourcePath(index_or_principal_id, self.resource_path)
        return self._item_type(self.context, item_path)

    def get_by_principal_id(self, principal_id):
        """Retrieve the role assignment for the specified user or group.

        :param int principal_id: Specifies the user or group of the role assignment.
        """
        operation_path = ResourcePathServiceOperation(
            "GetByPrincipalId", [principal_id], self.resource_path)
        assignment = RoleAssignment(self.context, operation_path)
        self.context.load(assignment)
        return assignment

    def add_role_assignment(self, principal_id, role_def_id):
        """Add a role assignment to the collection.

        :param int principal_id: Specifies the user or group of the role assignment.
        :param int role_def_id: Specifies the role definition of the role assignment.
        """
        qry = ServiceOperationQuery(
            self, "AddRoleAssignment",
            {"principalId": principal_id, "roleDefId": role_def_id},
            None, None, None)
        self.context.add_query(qry)

    def remove_role_assignment(self, principal_id, role_def_id):
        """Remove the assignment with the given principal and role definition.

        :param int principal_id: The ID of the user or group in the role assignment.
        :param int role_def_id: The ID of the role definition in the role assignment.
        """
        qry = ServiceOperationQuery(
            self, "RemoveRoleAssignment",
            {"principalId": principal_id, "roleDefId": role_def_id},
            None, None, None)
        self.context.add_query(qry)
|
[
"office365.runtime.resource_path.ResourcePath",
"office365.runtime.queries.service_operation_query.ServiceOperationQuery",
"office365.runtime.resource_path_service_operation.ResourcePathServiceOperation"
] |
[((2224, 2299), 'office365.runtime.queries.service_operation_query.ServiceOperationQuery', 'ServiceOperationQuery', (['self', '"""AddRoleAssignment"""', 'payload', 'None', 'None', 'None'], {}), "(self, 'AddRoleAssignment', payload, None, None, None)\n", (2245, 2299), False, 'from office365.runtime.queries.service_operation_query import ServiceOperationQuery\n'), ((2817, 2895), 'office365.runtime.queries.service_operation_query.ServiceOperationQuery', 'ServiceOperationQuery', (['self', '"""RemoveRoleAssignment"""', 'payload', 'None', 'None', 'None'], {}), "(self, 'RemoveRoleAssignment', payload, None, None, None)\n", (2838, 2895), False, 'from office365.runtime.queries.service_operation_query import ServiceOperationQuery\n'), ((1091, 1146), 'office365.runtime.resource_path.ResourcePath', 'ResourcePath', (['index_or_principal_id', 'self.resource_path'], {}), '(index_or_principal_id, self.resource_path)\n', (1103, 1146), False, 'from office365.runtime.resource_path import ResourcePath\n'), ((1482, 1571), 'office365.runtime.resource_path_service_operation.ResourcePathServiceOperation', 'ResourcePathServiceOperation', (['"""GetByPrincipalId"""', '[principal_id]', 'self.resource_path'], {}), "('GetByPrincipalId', [principal_id], self.\n resource_path)\n", (1510, 1571), False, 'from office365.runtime.resource_path_service_operation import ResourcePathServiceOperation\n')]
|
# -*- coding: utf-8 -*-
#
# <NAME>. aïvázis
# orthologue
# (c) 1998-2019 all rights reserved
#
# access the framework
import pyre
# my protocol
from .Functor import Functor
class Gaussian(pyre.component, family="gauss.functors.gaussian", implements=Functor):
    r"""
    The normal distribution with mean μ and variance σ^2:

        g(x; μ,σ) = \frac{1}{\sqrt{2π} σ} e^{-\frac{|x-μ|^2}{2σ^2}}

    The mean and spread are exposed as component properties so that Gaussian can
    satisfy the functor protocol; see gauss.interfaces.functor for more details.
    """

    # public state
    mean = pyre.properties.array(default=[0])
    mean.doc = "the mean of the gaussian distribution"
    mean.aliases.add("μ")

    spread = pyre.properties.float(default=1)
    spread.doc = "the variance of the gaussian distribution"
    spread.aliases.add("σ")

    # interface
    @pyre.export
    def eval(self, points):
        """
        Evaluate the gaussian at each entry of {points} and yield the values
        """
        # pull in the math we need
        from math import exp, sqrt, pi as π
        # grab the configured state once, outside the loop
        μ = self.μ
        σ = self.σ
        # the overall multiplicative factor
        prefactor = 1 / sqrt(2*π) / σ
        # the denominator of the exponent
        denominator = 2 * σ**2
        # walk through the evaluation points
        for point in points:
            # |point - μ|^2; assumes point and μ have matching lengths
            distance2 = sum((p_i - μ_i)**2 for p_i, μ_i in zip(point, μ))
            # hand back the value at this point
            yield prefactor * exp(- distance2 / denominator)
        # all done
        return

# end of file
|
[
"pyre.properties.float",
"pyre.properties.array",
"math.exp",
"math.sqrt"
] |
[((622, 656), 'pyre.properties.array', 'pyre.properties.array', ([], {'default': '[0]'}), '(default=[0])\n', (643, 656), False, 'import pyre\n'), ((752, 784), 'pyre.properties.float', 'pyre.properties.float', ([], {'default': '(1)'}), '(default=1)\n', (773, 784), False, 'import pyre\n'), ((1229, 1240), 'math.sqrt', 'sqrt', (['(2 * π)'], {}), '(2 * π)\n', (1233, 1240), False, 'from math import exp, sqrt, pi as π\n'), ((1638, 1656), 'math.exp', 'exp', (['(-r2 / scaling)'], {}), '(-r2 / scaling)\n', (1641, 1656), False, 'from math import exp, sqrt, pi as π\n')]
|
#!/usr/bin/env python
# Prefer setuptools when available; fall back to the stdlib installer.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Distribution metadata gathered in one mapping and splatted into setup().
_SETUP_KWARGS = {
    'name': 'ExasolDatabaseConnector',
    'version': '0.1.7',
    'license': 'MIT',
    'maintainer': '<NAME>',
    'maintainer_email': '<EMAIL>',
    'description': 'Exasol database connector class written in python',
    'long_description': 'Exasol database connector classes using ODBC or WebSockets',
    'url': 'https://github.com/florian-reck/ExaDatabase',
    'packages': [
        'ExasolDatabaseConnector',
        'ExasolDatabaseConnector.ExaDatabaseAbstract',
        'ExasolDatabaseConnector.ExaWebSockets',
        'ExasolDatabaseConnector.ExaOdbcDriver',
    ],
    'install_requires': [
        'websocket_client',
        'rsa',
        'EXASOL-DB-API',
        'pyodbc',
    ],
}

setup(**_SETUP_KWARGS)
|
[
"distutils.core.setup"
] |
[((118, 712), 'distutils.core.setup', 'setup', ([], {'name': '"""ExasolDatabaseConnector"""', 'version': '"""0.1.7"""', 'license': '"""MIT"""', 'maintainer': '"""<NAME>"""', 'maintainer_email': '"""<EMAIL>"""', 'description': '"""Exasol database connector class written in python"""', 'long_description': '"""Exasol database connector classes using ODBC or WebSockets"""', 'url': '"""https://github.com/florian-reck/ExaDatabase"""', 'packages': "['ExasolDatabaseConnector', 'ExasolDatabaseConnector.ExaDatabaseAbstract',\n 'ExasolDatabaseConnector.ExaWebSockets',\n 'ExasolDatabaseConnector.ExaOdbcDriver']", 'install_requires': "['websocket_client', 'rsa', 'EXASOL-DB-API', 'pyodbc']"}), "(name='ExasolDatabaseConnector', version='0.1.7', license='MIT',\n maintainer='<NAME>', maintainer_email='<EMAIL>', description=\n 'Exasol database connector class written in python', long_description=\n 'Exasol database connector classes using ODBC or WebSockets', url=\n 'https://github.com/florian-reck/ExaDatabase', packages=[\n 'ExasolDatabaseConnector',\n 'ExasolDatabaseConnector.ExaDatabaseAbstract',\n 'ExasolDatabaseConnector.ExaWebSockets',\n 'ExasolDatabaseConnector.ExaOdbcDriver'], install_requires=[\n 'websocket_client', 'rsa', 'EXASOL-DB-API', 'pyodbc'])\n", (123, 712), False, 'from distutils.core import setup\n')]
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for PoincareNormalize layer."""
import numpy as np
import tensorflow as tf
from tensorflow_addons.layers.poincare import PoincareNormalize
from tensorflow_addons.utils import test_utils
@test_utils.run_all_in_graph_and_eager_modes
class PoincareNormalizeTest(tf.test.TestCase):
    def _PoincareNormalize(self, x, dim, epsilon=1e-5):
        """NumPy reference for PoincareNormalize over `dim` (an int or a list of ints)."""
        if isinstance(dim, list):
            # reduce across all requested axes at once, then restore the
            # reduced dimensions so the norm broadcasts against x
            norm = np.linalg.norm(x, axis=tuple(dim))
            for axis in dim:
                norm = np.expand_dims(norm, axis)
        else:
            norm = np.expand_dims(
                np.apply_along_axis(np.linalg.norm, dim, x), dim)
        shrunk = ((1. - epsilon) * x) / norm
        # only vectors outside the (1 - epsilon) ball get rescaled
        return np.where(norm > 1.0 - epsilon, shrunk, x)

    def testPoincareNormalize(self):
        """Layer output matches the reference along each single axis."""
        x_shape = [20, 7, 3]
        epsilon = 1e-5
        tol = 1e-6
        np.random.seed(1)
        inputs = np.random.random_sample(x_shape).astype(np.float32)

        for axis in range(len(x_shape)):
            expected = self._PoincareNormalize(inputs, axis, epsilon)
            actual = test_utils.layer_test(
                PoincareNormalize,
                kwargs={
                    'axis': axis,
                    'epsilon': epsilon
                },
                input_data=inputs,
                expected_output=expected)
            # every normalized vector must lie within the unit ball (up to tol)
            for result in (expected, actual):
                norms = np.linalg.norm(result, axis=axis)
                self.assertLessEqual(norms.max(), 1. - epsilon + tol)

    def testPoincareNormalizeDimArray(self):
        """Layer output matches the reference when normalizing over two axes."""
        x_shape = [20, 7, 3]
        epsilon = 1e-5
        tol = 1e-6
        np.random.seed(1)
        inputs = np.random.random_sample(x_shape).astype(np.float32)
        axes = [1, 2]

        expected = self._PoincareNormalize(inputs, axes, epsilon)
        actual = test_utils.layer_test(
            PoincareNormalize,
            kwargs={
                'axis': axes,
                'epsilon': epsilon
            },
            input_data=inputs,
            expected_output=expected)
        for result in (expected, actual):
            norms = np.linalg.norm(result, axis=tuple(axes))
            self.assertLessEqual(norms.max(), 1. - epsilon + tol)
if __name__ == '__main__':
    # hand control to the TensorFlow test runner, which discovers the cases above
    tf.test.main()
|
[
"tensorflow.test.main",
"tensorflow_addons.utils.test_utils.layer_test",
"numpy.random.seed",
"numpy.random.random_sample",
"numpy.expand_dims",
"numpy.apply_along_axis",
"numpy.where",
"numpy.linalg.norm"
] |
[((2992, 3006), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (3004, 3006), True, 'import tensorflow as tf\n'), ((1425, 1466), 'numpy.where', 'np.where', (['(norm > 1.0 - epsilon)', 'norm_x', 'x'], {}), '(norm > 1.0 - epsilon, norm_x, x)\n', (1433, 1466), True, 'import numpy as np\n'), ((1584, 1601), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (1598, 1601), True, 'import numpy as np\n'), ((2364, 2381), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (2378, 2381), True, 'import numpy as np\n'), ((2565, 2704), 'tensorflow_addons.utils.test_utils.layer_test', 'test_utils.layer_test', (['PoincareNormalize'], {'kwargs': "{'axis': dim, 'epsilon': epsilon}", 'input_data': 'inputs', 'expected_output': 'outputs_expected'}), "(PoincareNormalize, kwargs={'axis': dim, 'epsilon':\n epsilon}, input_data=inputs, expected_output=outputs_expected)\n", (2586, 2704), False, 'from tensorflow_addons.utils import test_utils\n'), ((1812, 1951), 'tensorflow_addons.utils.test_utils.layer_test', 'test_utils.layer_test', (['PoincareNormalize'], {'kwargs': "{'axis': dim, 'epsilon': epsilon}", 'input_data': 'inputs', 'expected_output': 'outputs_expected'}), "(PoincareNormalize, kwargs={'axis': dim, 'epsilon':\n epsilon}, input_data=inputs, expected_output=outputs_expected)\n", (1833, 1951), False, 'from tensorflow_addons.utils import test_utils\n'), ((1173, 1196), 'numpy.expand_dims', 'np.expand_dims', (['norm', 'd'], {}), '(norm, d)\n', (1187, 1196), True, 'import numpy as np\n'), ((1311, 1354), 'numpy.apply_along_axis', 'np.apply_along_axis', (['np.linalg.norm', 'dim', 'x'], {}), '(np.linalg.norm, dim, x)\n', (1330, 1354), True, 'import numpy as np\n'), ((1619, 1651), 'numpy.random.random_sample', 'np.random.random_sample', (['x_shape'], {}), '(x_shape)\n', (1642, 1651), True, 'import numpy as np\n'), ((2142, 2169), 'numpy.linalg.norm', 'np.linalg.norm', (['y'], {'axis': 'dim'}), '(y, axis=dim)\n', (2156, 2169), True, 'import numpy as np\n'), 
((2399, 2431), 'numpy.random.random_sample', 'np.random.random_sample', (['x_shape'], {}), '(x_shape)\n', (2422, 2431), True, 'import numpy as np\n')]
|
from .transform import RandomErasing
from .collate_batch import train_collate_fn
from .collate_batch import val_collate_fn
from .triplet_sampler import RandomIdentitySampler
from .data import ImageDataset, init_dataset
import torchvision.transforms as T
from torch.utils.data.dataloader import DataLoader
def get_trm(cfg, is_train=True):
    """Build the torchvision transform pipeline described by cfg.

    Training adds augmentation (flip, pad+crop, random erasing); evaluation
    only resizes, tensorizes, and normalizes.
    """
    normalize = T.Normalize(
        mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD
    )
    if is_train:
        steps = [
            T.Resize(cfg.INPUT.SIZE_TRAIN),
            T.RandomHorizontalFlip(p=cfg.INPUT.PROB),
            T.Pad(cfg.INPUT.PADDING),
            T.RandomCrop(cfg.INPUT.SIZE_TRAIN),
            T.ToTensor(),
            normalize,
            RandomErasing(
                probability=cfg.INPUT.RE_PROB, mean=cfg.INPUT.PIXEL_MEAN
            ),
        ]
    else:
        steps = [
            T.Resize(cfg.INPUT.SIZE_TEST),
            T.ToTensor(),
            normalize,
        ]
    return T.Compose(steps)
def make_dataloader(cfg, num_gpus=1):
    """Create the train/val DataLoaders for the dataset named in cfg.

    Returns (train_loader, val_loader, number_of_query_images, num_classes).
    """
    train_trm = get_trm(cfg, is_train=True)
    val_trm = get_trm(cfg, is_train=False)
    # scale worker count and batch size with the number of GPUs
    workers = cfg.DATALOADER.NUM_WORKERS * num_gpus
    train_batch = cfg.SOLVER.IMS_PER_BATCH * num_gpus

    dataset = init_dataset(cfg)
    num_classes = dataset.num_train_pids
    train_set = ImageDataset(dataset.train, cfg, train_trm)

    if cfg.DATALOADER.SAMPLER == 'softmax':
        # plain shuffling for softmax (classification-style) training
        train_loader = DataLoader(
            train_set,
            batch_size=train_batch,
            shuffle=True,
            num_workers=workers,
            collate_fn=train_collate_fn
        )
    else:
        # identity-balanced sampling for metric-learning losses
        sampler = RandomIdentitySampler(
            dataset.train, train_batch,
            cfg.DATALOADER.NUM_INSTANCE * num_gpus
        )
        train_loader = DataLoader(
            train_set,
            batch_size=train_batch,
            sampler=sampler,
            num_workers=workers,
            collate_fn=train_collate_fn
        )

    # evaluation runs over query followed by gallery images
    val_set = ImageDataset(dataset.query + dataset.gallery, cfg, val_trm)
    val_loader = DataLoader(
        val_set,
        batch_size=cfg.TEST.IMS_PER_BATCH * num_gpus,
        shuffle=False,
        num_workers=workers,
        collate_fn=val_collate_fn
    )
    return train_loader, val_loader, len(dataset.query), num_classes
|
[
"torchvision.transforms.RandomHorizontalFlip",
"torchvision.transforms.ToTensor",
"torchvision.transforms.Pad",
"torch.utils.data.dataloader.DataLoader",
"torchvision.transforms.Normalize",
"torchvision.transforms.RandomCrop",
"torchvision.transforms.Resize"
] |
[((376, 439), 'torchvision.transforms.Normalize', 'T.Normalize', ([], {'mean': 'cfg.INPUT.PIXEL_MEAN', 'std': 'cfg.INPUT.PIXEL_STD'}), '(mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD)\n', (387, 439), True, 'import torchvision.transforms as T\n'), ((2216, 2353), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', (['val_set'], {'batch_size': '(cfg.TEST.IMS_PER_BATCH * num_gpus)', 'shuffle': '(False)', 'num_workers': 'num_workers', 'collate_fn': 'val_collate_fn'}), '(val_set, batch_size=cfg.TEST.IMS_PER_BATCH * num_gpus, shuffle=\n False, num_workers=num_workers, collate_fn=val_collate_fn)\n', (2226, 2353), False, 'from torch.utils.data.dataloader import DataLoader\n'), ((1500, 1641), 'torch.utils.data.dataloader.DataLoader', 'DataLoader', (['train_set'], {'batch_size': '(cfg.SOLVER.IMS_PER_BATCH * num_gpus)', 'shuffle': '(True)', 'num_workers': 'num_workers', 'collate_fn': 'train_collate_fn'}), '(train_set, batch_size=cfg.SOLVER.IMS_PER_BATCH * num_gpus,\n shuffle=True, num_workers=num_workers, collate_fn=train_collate_fn)\n', (1510, 1641), False, 'from torch.utils.data.dataloader import DataLoader\n'), ((538, 568), 'torchvision.transforms.Resize', 'T.Resize', (['cfg.INPUT.SIZE_TRAIN'], {}), '(cfg.INPUT.SIZE_TRAIN)\n', (546, 568), True, 'import torchvision.transforms as T\n'), ((587, 627), 'torchvision.transforms.RandomHorizontalFlip', 'T.RandomHorizontalFlip', ([], {'p': 'cfg.INPUT.PROB'}), '(p=cfg.INPUT.PROB)\n', (609, 627), True, 'import torchvision.transforms as T\n'), ((646, 670), 'torchvision.transforms.Pad', 'T.Pad', (['cfg.INPUT.PADDING'], {}), '(cfg.INPUT.PADDING)\n', (651, 670), True, 'import torchvision.transforms as T\n'), ((689, 723), 'torchvision.transforms.RandomCrop', 'T.RandomCrop', (['cfg.INPUT.SIZE_TRAIN'], {}), '(cfg.INPUT.SIZE_TRAIN)\n', (701, 723), True, 'import torchvision.transforms as T\n'), ((742, 754), 'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (752, 754), True, 'import torchvision.transforms as T\n'), 
((989, 1018), 'torchvision.transforms.Resize', 'T.Resize', (['cfg.INPUT.SIZE_TEST'], {}), '(cfg.INPUT.SIZE_TEST)\n', (997, 1018), True, 'import torchvision.transforms as T\n'), ((1034, 1046), 'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (1044, 1046), True, 'import torchvision.transforms as T\n')]
|
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
# Also available under a BSD-style license. See LICENSE.
import torch
import torchvision
import torch_mlir
# Load a pretrained ResNet-18 and put it into inference mode.
resnet18 = torchvision.models.resnet18(pretrained=True)
resnet18.eval()

def _compile_and_dump(label, output_type):
    """Compile resnet18 to the given torch-mlir output type and print its IR."""
    compiled = torch_mlir.compile(resnet18, torch.ones(1, 3, 224, 224), output_type=output_type)
    print(label, compiled.operation.get_asm(large_elements_limit=10))

_compile_and_dump("TORCH OutputType\n", torch_mlir.OutputType.TORCH)
_compile_and_dump("LINALG_ON_TENSORS OutputType\n", torch_mlir.OutputType.LINALG_ON_TENSORS)
# TODO: Debug why this is so slow.
_compile_and_dump("TOSA OutputType\n", torch_mlir.OutputType.TOSA)
|
[
"torchvision.models.resnet18",
"torch.ones"
] |
[((319, 363), 'torchvision.models.resnet18', 'torchvision.models.resnet18', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (346, 363), False, 'import torchvision\n'), ((419, 445), 'torch.ones', 'torch.ones', (['(1)', '(3)', '(224)', '(224)'], {}), '(1, 3, 224, 224)\n', (429, 445), False, 'import torch\n'), ((605, 631), 'torch.ones', 'torch.ones', (['(1)', '(3)', '(224)', '(224)'], {}), '(1, 3, 224, 224)\n', (615, 631), False, 'import torch\n'), ((850, 876), 'torch.ones', 'torch.ones', (['(1)', '(3)', '(224)', '(224)'], {}), '(1, 3, 224, 224)\n', (860, 876), False, 'import torch\n')]
|
# Copyright 2019 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
# See read the https://floris.readthedocs.io for documentation
import matplotlib.pyplot as plt
import floris.tools as wfct
import floris.tools.visualization as vis
import floris.tools.cut_plane as cp
from floris.utilities import Vec3
import numpy as np
from mpl_toolkits.axes_grid1 import make_axes_locatable, axes_size
# Initialize FLORIS model
# Load the FLORIS model from its JSON configuration file
fi = wfct.floris_utilities.FlorisInterface("example_input.json")
# set turbine locations to 4 turbines in a row - demonstrate how to change coordinates
D = fi.floris.farm.flow_field.turbine_map.turbines[0].rotor_diameter
# 2x2 grid: 7 diameters apart streamwise, 5 diameters apart spanwise
layout_x = [0, 7*D, 0, 7*D]
layout_y = [0, 0, 5*D, 5*D]
fi.reinitialize_flow_field(layout_array=(layout_x, layout_y))
# Calculate wake
fi.calculate_wake()
# ================================================================================
print('Plotting the FLORIS flowfield...')
# ================================================================================
# Initialize the horizontal cut at hub height
hor_plane = wfct.cut_plane.HorPlane(
    fi.get_flow_data(),
    fi.floris.farm.turbines[0].hub_height
)
# Plot and show
fig, ax = plt.subplots()
wfct.visualization.visualize_cut_plane(hor_plane, ax=ax)
# ================================================================================
print('Changing wind direction and wind speed...')
# ================================================================================
# sweep 3 wind speeds (6-8 m/s) against 3 wind directions; one subplot per combo
ws = np.linspace(6, 8, 3)
wd = [45.0, 170.0, 270.]
# Plot and show
fig, ax = plt.subplots(3, 3, figsize=(15, 15))
# power[i, j] holds total farm power at speed ws[i] and direction wd[j]
power = np.zeros((len(ws), len(wd)))
for i, speed in enumerate(ws):
    for j, wdir in enumerate(wd):
        print('Calculating wake: wind direction = ',
              wdir, 'and wind speed = ', speed)
        fi.reinitialize_flow_field(wind_speed=speed, wind_direction=wdir)
        # recalculate the wake
        fi.calculate_wake()
        # record powers (sum over all turbines)
        power[i, j] = np.sum(fi.get_turbine_power())
        # ============================================
        # not necessary if you only want the powers
        # ============================================
        # Visualize the changes
        # Initialize the horizontal cut
        hor_plane = wfct.cut_plane.HorPlane(
            fi.get_flow_data(),
            fi.floris.farm.turbines[0].hub_height
        )
        im = wfct.visualization.visualize_cut_plane(hor_plane, ax=ax[i, j])
        strTitle = 'Wind Dir = ' + \
            str(wdir) + 'deg' + ' Speed = ' + str(speed) + 'm/s'
        ax[i, j].set_title(strTitle)
        fig.colorbar(im, ax=ax[i, j], fraction=0.025, pad=0.04)
# ================================================================================
# print('Set yaw angles...')
# ================================================================================
# assign yaw angles to turbines and calculate wake at 270
# NOTE: fi keeps the last sweep state (ws=8, wd=270) from the loop above
# initial power output
fi.calculate_wake()
power_initial = np.sum(fi.get_turbine_power())
# Set the yaw angles (one entry per turbine, in layout order)
yaw_angles = [25.0, 0, 25.0, 0]
fi.calculate_wake(yaw_angles=yaw_angles)
# Check the new power
power_yaw = np.sum(fi.get_turbine_power())
print('Power aligned: %.1f' % power_initial)
print('Power yawed: %.1f' % power_yaw)
# ================================================================================
print('Plotting the FLORIS flowfield with yaw...')
# ================================================================================
# Initialize the horizontal cut with the yawed wakes
hor_plane = wfct.cut_plane.HorPlane(
    fi.get_flow_data(),
    fi.floris.farm.turbines[0].hub_height
)
# Plot and show
fig, ax = plt.subplots()
wfct.visualization.visualize_cut_plane(hor_plane, ax=ax)
ax.set_title('Flow with yawed front turbines')
plt.show()
|
[
"floris.tools.visualization.visualize_cut_plane",
"matplotlib.pyplot.show",
"numpy.linspace",
"floris.tools.floris_utilities.FlorisInterface",
"matplotlib.pyplot.subplots"
] |
[((910, 969), 'floris.tools.floris_utilities.FlorisInterface', 'wfct.floris_utilities.FlorisInterface', (['"""example_input.json"""'], {}), "('example_input.json')\n", (947, 969), True, 'import floris.tools as wfct\n'), ((1658, 1672), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1670, 1672), True, 'import matplotlib.pyplot as plt\n'), ((1673, 1729), 'floris.tools.visualization.visualize_cut_plane', 'wfct.visualization.visualize_cut_plane', (['hor_plane'], {'ax': 'ax'}), '(hor_plane, ax=ax)\n', (1711, 1729), True, 'import floris.tools as wfct\n'), ((1954, 1974), 'numpy.linspace', 'np.linspace', (['(6)', '(8)', '(3)'], {}), '(6, 8, 3)\n', (1965, 1974), True, 'import numpy as np\n'), ((2027, 2063), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(3)'], {'figsize': '(15, 15)'}), '(3, 3, figsize=(15, 15))\n', (2039, 2063), True, 'import matplotlib.pyplot as plt\n'), ((4104, 4118), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4116, 4118), True, 'import matplotlib.pyplot as plt\n'), ((4119, 4175), 'floris.tools.visualization.visualize_cut_plane', 'wfct.visualization.visualize_cut_plane', (['hor_plane'], {'ax': 'ax'}), '(hor_plane, ax=ax)\n', (4157, 4175), True, 'import floris.tools as wfct\n'), ((4223, 4233), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4231, 4233), True, 'import matplotlib.pyplot as plt\n'), ((2865, 2927), 'floris.tools.visualization.visualize_cut_plane', 'wfct.visualization.visualize_cut_plane', (['hor_plane'], {'ax': 'ax[i, j]'}), '(hor_plane, ax=ax[i, j])\n', (2903, 2927), True, 'import floris.tools as wfct\n')]
|
import glob
from os import truncate
import cv2 as cv
import re
import random
import argparse
from pandas.core import frame
# Command-line interface: locate one sequence of the dataset on disk.
parser = argparse.ArgumentParser()
parser.add_argument('--dir' , help='root directory path')  # fixed "directroy" typo
parser.add_argument('--type', help='data type (train < 450) , (test >= 450)')
parser.add_argument('--seq' , help='sequence number')
args = parser.parse_args()

# e.g. <dir>/<type>/sequence_00042
seq_path = args.dir + "/" + args.type + "/sequence_" + ("%05d" % int(args.seq))

# annotation text files and frame images found anywhere under the sequence dir
txt_files = glob.glob(seq_path + '/**/*.txt', recursive=True)
img_files = glob.glob(seq_path + '/**/*.png', recursive=True)
img_files = sorted(img_files)

# per-person-id drawing color cache, filled lazily by the display loop below
id_color_map = dict()
def get_pick_color(model):
    """Split a non-negative 32-bit id into its four big-endian bytes (r, g, b, a)."""
    red, remainder = divmod(model, 256**3)
    green, remainder = divmod(remainder, 256**2)
    blue, alpha = divmod(remainder, 256)
    return (red, green, blue, alpha)
cv.namedWindow("win")

# every frame of the sequence shares a single annotation file;
# read it once and close the handle (it was previously left open)
with open(txt_files[0], 'r') as f:
    lines = f.readlines()

# extracts the frame number from names such as "00042.png"
# (the '.' is now escaped; the old pattern r'\d+.png' matched any character,
# and the old compiled `regex` variable was never used at all)
frame_number_re = re.compile(r"(\d+)\.png")

i = 0
while True:
    mat = cv.imread(img_files[i])
    path_tokens = img_files[i].split(sep="/")
    sub_string = '/'.join(path_tokens[-5:-1])
    img_file_name = path_tokens[-1]
    frame_number = int(frame_number_re.search(img_file_name).group(1))
    for line in lines:
        line = line.strip()
        line_tokens = line.split(',')
        if frame_number == int(line_tokens[0]):
            # bounding-box geometry for this detection
            box_top_left_x = int(line_tokens[3])
            box_top_left_y = int(line_tokens[4])
            box_width = int(line_tokens[5])
            box_hegiht = int(line_tokens[6])
            # labels rendered in a column beside the box
            person_id = "person id: " + line_tokens[1]
            tracking_id = "tracking id: " + line_tokens[2]
            is_valid = "is_valid: " + line_tokens[7]
            pose_class = "pose_class: " + line_tokens[8]
            occlusion = "occlusion: " + line_tokens[9]
            truncated = "truncated: " + line_tokens[10]
            visibiltiy = "visibility: " + line_tokens[11]
            # one stable random color per person id
            try:
                color = id_color_map[line_tokens[1]]
            except KeyError:
                color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
                id_color_map[line_tokens[1]] = color
            cv.rectangle(mat, (box_top_left_x, box_top_left_y),
                         (box_top_left_x + box_width, box_top_left_y + box_hegiht), color, 3)
            cv.putText(mat, person_id, (box_top_left_x + 10 + box_width, box_top_left_y + 10), cv.FONT_HERSHEY_COMPLEX, 0.4, color, 1, cv.LINE_AA)
            cv.putText(mat, tracking_id, (box_top_left_x + 10 + box_width, box_top_left_y + 25), cv.FONT_HERSHEY_COMPLEX, 0.4, color, 1, cv.LINE_AA)
            cv.putText(mat, is_valid, (box_top_left_x + 10 + box_width, box_top_left_y + 40), cv.FONT_HERSHEY_COMPLEX, 0.4, color, 1, cv.LINE_AA)
            cv.putText(mat, pose_class, (box_top_left_x + 10 + box_width, box_top_left_y + 55), cv.FONT_HERSHEY_COMPLEX, 0.4, color, 1, cv.LINE_AA)
            cv.putText(mat, occlusion, (box_top_left_x + 10 + box_width, box_top_left_y + 70), cv.FONT_HERSHEY_COMPLEX, 0.4, color, 1, cv.LINE_AA)
            cv.putText(mat, truncated, (box_top_left_x + 10 + box_width, box_top_left_y + 85), cv.FONT_HERSHEY_COMPLEX, 0.4, color, 1, cv.LINE_AA)
            cv.putText(mat, visibiltiy, (box_top_left_x + 10 + box_width, box_top_left_y + 100), cv.FONT_HERSHEY_COMPLEX, 0.4, color, 1, cv.LINE_AA)
    # NOTE(review): draws the frame number taken from the LAST parsed annotation
    # line, exactly as the original did — verify that is intended
    cv.putText(mat, line_tokens[0], (30, 30), cv.FONT_HERSHEY_COMPLEX, 1.0, (255, 255, 255), 1, cv.LINE_AA)
    cv.imshow("win", mat)
    key = cv.waitKey(0)
    if key == 27 or key == 113:        # ESC or 'q': quit
        exit()
    elif key == 112 or key == 81:      # 'p' or left arrow: previous frame
        if i > 0:                      # was `i > 1`, which made frame 0 unreachable
            i = i - 1
    elif i < len(img_files) - 1:       # any other key: next frame, clamped in range
        i = i + 1                      # (the old code ran past the end and crashed)
|
[
"cv2.putText",
"argparse.ArgumentParser",
"random.randint",
"cv2.waitKey",
"cv2.rectangle",
"cv2.imread",
"glob.glob",
"cv2.imshow",
"re.search",
"cv2.namedWindow",
"re.compile"
] |
[((136, 161), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (159, 161), False, 'import argparse\n'), ((474, 523), 'glob.glob', 'glob.glob', (["(seq_path + '/**/*.txt')"], {'recursive': '(True)'}), "(seq_path + '/**/*.txt', recursive=True)\n", (483, 523), False, 'import glob\n'), ((536, 585), 'glob.glob', 'glob.glob', (["(seq_path + '/**/*.png')"], {'recursive': '(True)'}), "(seq_path + '/**/*.png', recursive=True)\n", (545, 585), False, 'import glob\n'), ((846, 867), 'cv2.namedWindow', 'cv.namedWindow', (['"""win"""'], {}), "('win')\n", (860, 867), True, 'import cv2 as cv\n'), ((929, 951), 're.compile', 're.compile', (['"""\\\\d+.png"""'], {}), "('\\\\d+.png')\n", (939, 951), False, 'import re\n'), ((987, 1010), 'cv2.imread', 'cv.imread', (['img_files[i]'], {}), '(img_files[i])\n', (996, 1010), True, 'import cv2 as cv\n'), ((3490, 3511), 'cv2.imshow', 'cv.imshow', (['"""win"""', 'mat'], {}), "('win', mat)\n", (3499, 3511), True, 'import cv2 as cv\n'), ((3524, 3537), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (3534, 3537), True, 'import cv2 as cv\n'), ((2226, 2350), 'cv2.rectangle', 'cv.rectangle', (['mat', '(box_top_left_x, box_top_left_y)', '(box_top_left_x + box_width, box_top_left_y + box_hegiht)', 'color', '(3)'], {}), '(mat, (box_top_left_x, box_top_left_y), (box_top_left_x +\n box_width, box_top_left_y + box_hegiht), color, 3)\n', (2238, 2350), True, 'import cv2 as cv\n'), ((2355, 2493), 'cv2.putText', 'cv.putText', (['mat', 'person_id', '(box_top_left_x + 10 + box_width, box_top_left_y + 10)', 'cv.FONT_HERSHEY_COMPLEX', '(0.4)', 'color', '(1)', 'cv.LINE_AA'], {}), '(mat, person_id, (box_top_left_x + 10 + box_width, box_top_left_y +\n 10), cv.FONT_HERSHEY_COMPLEX, 0.4, color, 1, cv.LINE_AA)\n', (2365, 2493), True, 'import cv2 as cv\n'), ((2500, 2641), 'cv2.putText', 'cv.putText', (['mat', 'tracking_id', '(box_top_left_x + 10 + box_width, box_top_left_y + 25)', 'cv.FONT_HERSHEY_COMPLEX', '(0.4)', 'color', '(1)', 
'cv.LINE_AA'], {}), '(mat, tracking_id, (box_top_left_x + 10 + box_width, \n box_top_left_y + 25), cv.FONT_HERSHEY_COMPLEX, 0.4, color, 1, cv.LINE_AA)\n', (2510, 2641), True, 'import cv2 as cv\n'), ((2647, 2784), 'cv2.putText', 'cv.putText', (['mat', 'is_valid', '(box_top_left_x + 10 + box_width, box_top_left_y + 40)', 'cv.FONT_HERSHEY_COMPLEX', '(0.4)', 'color', '(1)', 'cv.LINE_AA'], {}), '(mat, is_valid, (box_top_left_x + 10 + box_width, box_top_left_y +\n 40), cv.FONT_HERSHEY_COMPLEX, 0.4, color, 1, cv.LINE_AA)\n', (2657, 2784), True, 'import cv2 as cv\n'), ((2791, 2931), 'cv2.putText', 'cv.putText', (['mat', 'pose_class', '(box_top_left_x + 10 + box_width, box_top_left_y + 55)', 'cv.FONT_HERSHEY_COMPLEX', '(0.4)', 'color', '(1)', 'cv.LINE_AA'], {}), '(mat, pose_class, (box_top_left_x + 10 + box_width, \n box_top_left_y + 55), cv.FONT_HERSHEY_COMPLEX, 0.4, color, 1, cv.LINE_AA)\n', (2801, 2931), True, 'import cv2 as cv\n'), ((2937, 3075), 'cv2.putText', 'cv.putText', (['mat', 'occlusion', '(box_top_left_x + 10 + box_width, box_top_left_y + 70)', 'cv.FONT_HERSHEY_COMPLEX', '(0.4)', 'color', '(1)', 'cv.LINE_AA'], {}), '(mat, occlusion, (box_top_left_x + 10 + box_width, box_top_left_y +\n 70), cv.FONT_HERSHEY_COMPLEX, 0.4, color, 1, cv.LINE_AA)\n', (2947, 3075), True, 'import cv2 as cv\n'), ((3082, 3220), 'cv2.putText', 'cv.putText', (['mat', 'truncated', '(box_top_left_x + 10 + box_width, box_top_left_y + 85)', 'cv.FONT_HERSHEY_COMPLEX', '(0.4)', 'color', '(1)', 'cv.LINE_AA'], {}), '(mat, truncated, (box_top_left_x + 10 + box_width, box_top_left_y +\n 85), cv.FONT_HERSHEY_COMPLEX, 0.4, color, 1, cv.LINE_AA)\n', (3092, 3220), True, 'import cv2 as cv\n'), ((3227, 3368), 'cv2.putText', 'cv.putText', (['mat', 'visibiltiy', '(box_top_left_x + 10 + box_width, box_top_left_y + 100)', 'cv.FONT_HERSHEY_COMPLEX', '(0.4)', 'color', '(1)', 'cv.LINE_AA'], {}), '(mat, visibiltiy, (box_top_left_x + 10 + box_width, \n box_top_left_y + 100), cv.FONT_HERSHEY_COMPLEX, 0.4, color, 1, 
cv.LINE_AA)\n', (3237, 3368), True, 'import cv2 as cv\n'), ((3376, 3484), 'cv2.putText', 'cv.putText', (['mat', 'line_tokens[0]', '(30, 30)', 'cv.FONT_HERSHEY_COMPLEX', '(1.0)', '(255, 255, 255)', '(1)', 'cv.LINE_AA'], {}), '(mat, line_tokens[0], (30, 30), cv.FONT_HERSHEY_COMPLEX, 1.0, (\n 255, 255, 255), 1, cv.LINE_AA)\n', (3386, 3484), True, 'import cv2 as cv\n'), ((1169, 1207), 're.search', 're.search', (['"""(\\\\d+).png"""', 'img_file_name'], {}), "('(\\\\d+).png', img_file_name)\n", (1178, 1207), False, 'import re\n'), ((2091, 2113), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (2105, 2113), False, 'import random\n'), ((2114, 2136), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (2128, 2136), False, 'import random\n'), ((2137, 2159), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (2151, 2159), False, 'import random\n')]
|
import collections
from dataclasses import dataclass
from typing import List
def longest_substring_using_nested_for_loop(s: str) -> int:
    """
    Given a string s, find the length of the longest substring without repeating characters.
    https://leetcode.com/problems/longest-substring-without-repeating-characters/

    3946 ms	14.2 MB

    >>> longest_substring_using_nested_for_loop("abcabcbb")
    3
    >>> longest_substring_using_nested_for_loop("bbbbb")
    1
    >>> longest_substring_using_nested_for_loop("pwwkew")
    3
    """
    best = 0
    # grow a window from every possible start position
    for start, _ in enumerate(s):
        window: List[str] = []
        for ch in s[start:]:
            if ch in window:
                # repeat found: this window cannot grow further
                best = max(best, len(window))
                break
            window.append(ch)
            best = max(best, len(window))
    return best
def longest_substring_using_lists(s: str) -> int:
    """
    find the longest substring without repeating characters

    644 ms	14.3 MB

    >>> longest_substring_using_lists("")
    0
    >>> longest_substring_using_lists("abac")
    3
    >>> longest_substring_using_lists("abcabcbb")
    3
    >>> longest_substring_using_lists("bbbbb")
    1
    >>> longest_substring_using_lists("pwwkew")
    3
    """
    words = list()  # candidate substrings still being extended
    longest = 0
    for char in s:
        # for each character
        removals = []
        for word_idx in range(len(words)):
            # check all found words for the char
            word = words[word_idx]
            if char in word:
                # if it exists then set its length to longest if it is the longest
                longest = max(longest, len(word))
                removals.append(word)
            else:
                # else add char to word
                words[word_idx] += char
        for remove in removals:
            words.remove(remove)
        # add char into words
        words.append(char)
    # BUG FIX: for an empty input `words` is empty, and the old
    # `max(longest, *[...])` collapsed to `max(0)`, raising TypeError;
    # folding everything into one list handles the empty case correctly
    return max([longest, *(len(word) for word in words)])
def longest_substring_deque_rotations(s: str) -> int:
    """
    find the longest substring without repeating characters

    512 ms	14.5 MB

    >>> longest_substring_deque_rotations("abac")
    3
    >>> longest_substring_deque_rotations("abcabcbb")
    3
    >>> longest_substring_deque_rotations("bbbbb")
    1
    >>> longest_substring_deque_rotations("pwwkew")
    3
    """
    candidates = collections.deque()
    best = 0
    for char in s:
        # rotate through every live candidate exactly once
        for _ in range(len(candidates)):
            word = candidates.popleft()
            if char in word:
                # candidate ends at the repeat; remember its length
                best = max(best, len(word))
            else:
                candidates.append(word + char)
        # a fresh candidate starts at this character
        candidates.append(char)
    # candidates still alive at the end were never scored above
    for word in candidates:
        best = max(best, len(word))
    return best
if __name__ == "__main__":
    # run the doctest examples embedded in the functions above
    import doctest

    doctest.testmod()
|
[
"collections.deque",
"doctest.testmod"
] |
[((2328, 2347), 'collections.deque', 'collections.deque', ([], {}), '()\n', (2345, 2347), False, 'import collections\n'), ((2763, 2780), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (2778, 2780), False, 'import doctest\n')]
|
# -*- coding: utf-8 -*-
import os
from parallel_ape.submit import submit_scripts
from parallel_ape.PBS import submit_job
class ParallelJob(object):
    """A parallel-APE job: renders its PBS submit script and submits it."""

    def __init__(self, job_path, input_file, ncpus, protocol, imaginary_bonds=''):
        self.job_path = job_path
        self.input_file = input_file
        self.ncpus = ncpus
        self.protocol = protocol
        self.imaginary_bonds = imaginary_bonds  # Ex: '-i 3-12,25-18'

    def write_submit_file(self, submit_filename, sampling_mode):
        """
        Render the job's submit script and write it under job_path.
        """
        bonds_flag = '-i ' + self.imaginary_bonds
        script = submit_scripts['parallel_ape'].format(
            job_path=self.job_path,
            input_file=self.input_file,
            ncpus=self.ncpus,
            protocol=self.protocol,
            imaginary_bonds=bonds_flag,
            sampling_mode=sampling_mode,
        )
        # create the job directory on first use
        if not os.path.isdir(self.job_path):
            os.makedirs(self.job_path)
        with open(os.path.join(self.job_path, submit_filename), 'w') as out:
            out.write(script)

    def submit(self, submit_filename):
        """Submit the script to PBS; returns (job_status, job_id)."""
        job_status, job_id = submit_job(submit_filename, remote_path=self.job_path)
        return job_status, job_id
|
[
"os.path.isdir",
"parallel_ape.PBS.submit_job",
"os.path.join",
"os.makedirs"
] |
[((1114, 1168), 'parallel_ape.PBS.submit_job', 'submit_job', (['submit_filename'], {'remote_path': 'self.job_path'}), '(submit_filename, remote_path=self.job_path)\n', (1124, 1168), False, 'from parallel_ape.PBS import submit_job\n'), ((868, 896), 'os.path.isdir', 'os.path.isdir', (['self.job_path'], {}), '(self.job_path)\n', (881, 896), False, 'import os\n'), ((910, 936), 'os.makedirs', 'os.makedirs', (['self.job_path'], {}), '(self.job_path)\n', (921, 936), False, 'import os\n'), ((956, 1000), 'os.path.join', 'os.path.join', (['self.job_path', 'submit_filename'], {}), '(self.job_path, submit_filename)\n', (968, 1000), False, 'import os\n')]
|
import unittest
from project.rooms.room import Room
class TestRoom(unittest.TestCase):
    """Tests covering Room construction defaults and expense validation."""

    def setUp(self) -> None:
        # Fresh Room instance before every test case.
        self.test_room = Room('name', 100, 1)

    def test_init(self):
        room = self.test_room
        self.assertEqual(room.family_name, 'name')
        self.assertEqual(room.budget, 100)
        self.assertEqual(room.members_count, 1)
        self.assertListEqual(room.children, [])
        self.assertEqual(room.expenses, 0)

    def test_expenses_raises(self):
        # Assigning a negative value must raise ValueError from the setter.
        with self.assertRaises(ValueError):
            self.test_room.expenses = -1
|
[
"project.rooms.room.Room"
] |
[((139, 159), 'project.rooms.room.Room', 'Room', (['"""name"""', '(100)', '(1)'], {}), "('name', 100, 1)\n", (143, 159), False, 'from project.rooms.room import Room\n')]
|
# Copyright L.P.Klyne 2013
# Licenced under 3 clause BSD licence
# $Id: TestAll.py 2612 2008-08-11 20:08:49Z graham.klyne $
#
# Unit testing for WebBrick library functions (Functions.py)
# See http://pyunit.sourceforge.net/pyunit.html
#
import unittest, logging, sys
sys.path.append("../..")
from MiscLib import TestUtils
import TestWbConfigEdit
import TestParameterSet
import TestAllWebBrick
import TestTaskRunner
# Code to run unit tests from all library test modules
def getTestSuite(select="unit"):
    """Assemble the suites from every library test module into one suite."""
    modules = (TestWbConfigEdit, TestParameterSet, TestAllWebBrick, TestTaskRunner)
    suite = unittest.TestSuite()
    # Same order as before: config edit, parameter set, WebBrick, task runner.
    for module in modules:
        suite.addTest(module.getTestSuite(select=select))
    return suite
# Run unit tests directly from command line
if __name__ == "__main__":
TestUtils.runTests("TestAll.log", getTestSuite, sys.argv)
# End.
|
[
"sys.path.append",
"MiscLib.TestUtils.runTests",
"TestWbConfigEdit.getTestSuite",
"unittest.TestSuite",
"TestTaskRunner.getTestSuite",
"TestParameterSet.getTestSuite",
"TestAllWebBrick.getTestSuite"
] |
[((272, 296), 'sys.path.append', 'sys.path.append', (['"""../.."""'], {}), "('../..')\n", (287, 296), False, 'import unittest, logging, sys\n'), ((523, 543), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (541, 543), False, 'import unittest, logging, sys\n'), ((890, 947), 'MiscLib.TestUtils.runTests', 'TestUtils.runTests', (['"""TestAll.log"""', 'getTestSuite', 'sys.argv'], {}), "('TestAll.log', getTestSuite, sys.argv)\n", (908, 947), False, 'from MiscLib import TestUtils\n'), ((562, 606), 'TestWbConfigEdit.getTestSuite', 'TestWbConfigEdit.getTestSuite', ([], {'select': 'select'}), '(select=select)\n', (591, 606), False, 'import TestWbConfigEdit\n'), ((626, 670), 'TestParameterSet.getTestSuite', 'TestParameterSet.getTestSuite', ([], {'select': 'select'}), '(select=select)\n', (655, 670), False, 'import TestParameterSet\n'), ((690, 733), 'TestAllWebBrick.getTestSuite', 'TestAllWebBrick.getTestSuite', ([], {'select': 'select'}), '(select=select)\n', (718, 733), False, 'import TestAllWebBrick\n'), ((753, 795), 'TestTaskRunner.getTestSuite', 'TestTaskRunner.getTestSuite', ([], {'select': 'select'}), '(select=select)\n', (780, 795), False, 'import TestTaskRunner\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Mossum is a tool for summarizing results from Stanford's Moss. The tool
generates a graph for (multiple) results from Moss, which can help in
identifying groups of students that have shared solutions.
The tool can also generate a report, which shows which solutions are similar
between all pairs of students. When submitting multiple parts of an assignment
to Moss, this can help in identifying which students have multiple similar
solutions.
"""
import re
import sys
import os
import datetime
import pydot
import argparse
import csv
import requests as r
from bs4 import BeautifulSoup
from faker import Faker
from collections import defaultdict, Counter
from itertools import chain
parser = argparse.ArgumentParser(description=__doc__)
urls_group = parser.add_mutually_exclusive_group()
urls_group.add_argument('--urls', '-u', metavar='URL', nargs='*', default=[],
help='URLs to Moss result pages.')
urls_group.add_argument('--csv', '-c', metavar='CSV', default=None,
help='A CSV file from which to read URLs from')
parser.add_argument('--min-percent', '-p', dest='min_percent', metavar='P', type=int, default=90,
help='All matches where less than P%% of both files are matched are ignored. (Default: %(default)s)')
parser.add_argument('--min-lines', '-l', dest='min_lines', metavar='L', type=int, default=1,
help='All matches where fewer than L lines are matched are ignored. (Default: %(default)s)')
parser.add_argument('--format', '-f', default='png', help='Format of output files. See Graphviz documentation.')
parser.add_argument('--transformer', '-t', default='.*',
help='A regular expression that is used to transform the name of them matched files.')
parser.add_argument('--anonymize', '-a', default=False, action='store_true',
help='Substitute names of matched files for random names')
parser.add_argument('--merge', '-m', default=False, action='store_true',
help='Merge all reports into one image')
parser.add_argument('--report', '-r', default=False, action='store_true',
help='Generates a report showing how many submissions each pair has in common.')
parser.add_argument('--hide-labels', default=False, action='store_true',
help='Hide edge labels, which otherwise show the percentage and lines of code matches have in common')
parser.add_argument('--show-links', default=False, action='store_true',
help='DEPRECATED: Labels with links are shown by default, use --hide-labels to hide them')
parser.add_argument('--show-assignment-names', default=False, action='store_true',
help='Show assignment name on labels. Ignored if labels are hidden.')
parser.add_argument('--output', '-o', default=None,
help='Name of output file.')
parser.add_argument('--show-loops', default=False, action='store_true',
help='Include loops in the output graph')
parser.add_argument('--verbose', '-v', default=0, action='count',
help='Increase verbosity')
parser.add_argument('--filter', metavar='N', nargs='+', default=None,
help='Include only matches between these names.')
parser.add_argument('--filteri', metavar='N', nargs='+', default=None,
help='Include only matches involving these names.')
parser.add_argument('--filterx', metavar='N', nargs='+', default=None,
help='Exclude matches between these names.')
parser.add_argument('--filterxi', metavar='N', nargs='+', default=None,
help='Exclude matches involving any of these names.')
parser.add_argument('--min-matches', metavar='N', default=1, type=int,
help='Show only files with N or more matces between each other. This is only applicable to merged results. (Default: %(default)s).')
class Results:
    """A named Moss result set together with its filtered matches."""

    def __init__(self, name, matches):
        # Label for this result page and the Match objects parsed from it.
        self.name, self.matches = name, matches
class Match:
    """One Moss match between two files, with line count and report URL."""

    def __init__(self, name, first, second, lines, url):
        # Result-set name, the two matched File objects, matched line count,
        # and the link to the detailed Moss comparison page.
        self.name, self.url = name, url
        self.first, self.second = first, second
        self.lines = lines

    @property
    def percent(self):
        """The larger of the two files' matched percentages."""
        return max(self.first.percent, self.second.percent)
class File:
    """A single submission file as reported in a Moss table cell."""

    def __init__(self, name, percent):
        # Transformed file name and the percentage of it that matched.
        self.name, self.percent = name, percent
class Filter:
    """Match filter built from the global command-line ``args``.

    Combines the four name filters (--filter / --filteri / --filterx /
    --filterxi) with the --min-lines and --min-percent thresholds.
    """

    def __init__(self):
        # Each attribute is either None (filter unused) or a set of names.
        filters = ['filter', 'filteri', 'filterx', 'filterxi']
        for f in filters:
            setattr(self, f, None)
        for f in filters:
            if getattr(args, f) is not None:
                setattr(self, f, set(getattr(args, f)))

    def include(self, match):
        """Return True if ``match`` survives every name and threshold filter."""
        first = match.first.name
        second = match.second.name
        # --filter: keep only matches where BOTH names are listed.
        if self.filter is not None and (first not in self.filter or
                                        second not in self.filter):
            return False
        # --filteri: keep only matches where AT LEAST ONE name is listed.
        if self.filteri is not None and (first not in self.filteri and
                                         second not in self.filteri):
            return False
        # --filterx: drop matches where BOTH names are listed.
        if self.filterx is not None and (first in self.filterx and
                                         second in self.filterx):
            return False
        # --filterxi: drop matches involving ANY listed name.
        if self.filterxi is not None and (first in self.filterxi or
                                          second in self.filterxi):
            return False
        # The help text documents inclusive thresholds ("fewer than L" /
        # "less than P%% ... are ignored"), so use >= here; the original '>'
        # silently dropped matches exactly at the limits — e.g. 1-line
        # matches with the default --min-lines 1.
        return match.lines >= args.min_lines and (
            match.first.percent >= args.min_percent or
            match.second.percent >= args.min_percent)
def date_str():
    """Current local time formatted as 'DD-MM-YYYY_HHMMSS'."""
    now = datetime.datetime.today()
    return now.strftime('%d-%m-%Y_%H%M%S')
def parse_col(col):
    """Parse a Moss table cell like 'path/name (12%)' into a File.

    The file name is run through the --transformer regex; when the regex
    has capture groups they are joined with '_' to form the final name.
    """
    raw_name, raw_percent = col.split()
    name = raw_name
    matched = re.match(args.transformer, raw_name)
    if matched:
        name = '_'.join(matched.groups()) if matched.groups() else matched.group()
    percent = int(re.search(r'\d+', raw_percent).group())
    return File(name, percent)
def random_names(length):
    """Return a set of ``length`` distinct fake first names."""
    generator = Faker()
    picked = set()
    # first_name() can repeat, so keep drawing until enough uniques exist.
    while len(picked) < length:
        picked.add(generator.first_name())
    return picked
def link_color(ratio):
    """Map a match ratio onto a '#rrggbb' color between yellow and red."""
    high_rgb = 0xE9, 0x01, 0x01  # strongest matches
    low_rgb = 0xFF, 0xE3, 0x05   # weakest displayed matches
    if args.min_percent != 100:
        # Re-normalize so [min_percent/100, 1] spans the whole color scale.
        min_ratio = args.min_percent / 100
        ratio = (ratio - min_ratio) / (1 - min_ratio)
    channels = [hi * ratio + lo * (1 - ratio) for hi, lo in zip(high_rgb, low_rgb)]
    return '#' + ''.join(hex(int(channel))[2:].zfill(2) for channel in channels)
def anonymize(matches):
    """Replace every file name in ``matches`` with a random fake name, in place."""
    originals = {f.name for m in matches for f in (m.first, m.second)}
    replacement = dict(zip(originals, random_names(len(originals))))
    for m in matches:
        m.first.name = replacement[m.first.name]
        m.second.name = replacement[m.second.name]
def generate_report(results):
    """Write a text report listing, per pair of names, every matched assignment."""
    # Group matches by (first, second) name pair across all result sets.
    pairs = defaultdict(list)
    for res in results:
        for match in res.matches:
            pairs[(match.first.name, match.second.name)].append( (res.name, match) )
    # Output base name: --output if given, otherwise the joined result names.
    if args.output:
        base = args.output
    else:
        base = '+'.join(map(lambda x:x.name, results))
    filename = '%s.txt' % base
    with open(filename, 'w') as f:
        # Pairs sharing the most assignments come first; ties break on the
        # sorted assignment names.
        for pair, matches in sorted(pairs.items(),
                key=lambda x: (len(x[1]), sorted(map(lambda x: x[0], x[1]))), reverse=True):
            f.write('Pair: %s and %s\n' % pair)
            # NOTE(review): sorted() over (name, Match) tuples compares Match
            # objects when names tie, which would raise TypeError — confirm
            # assignment names are unique per pair.
            for name, match in sorted(matches):
                f.write('%s: %s\n' % (name, match.url))
            f.write('\n\n')
def merge_filter(matches):
    """Keep only matches between pairs seen at least args.min_matches times."""
    def pair_key(match):
        # Order-insensitive identity for a pair of file names.
        return tuple(sorted([match.first.name, match.second.name]))
    counts = Counter(pair_key(m) for m in matches)
    frequent = {pair for pair, n in counts.items() if n >= args.min_matches}
    return [m for m in matches if pair_key(m) in frequent]
def merge_results(results):
    """Merge several Results into one, joining names with '+' and pooling matches."""
    merged_name = '+'.join(r.name for r in results)
    pooled = list(chain(*(r.matches for r in results)))
    return Results(merged_name, merge_filter(pooled))
def get_results(moss_url, name=None):
    """Fetch one Moss result page and parse it into a filtered Results object.

    Parameters:
        moss_url: URL of the Moss report page to download.
        name: optional label for the result set; when None it is read from
            the page header, falling back to a timestamped 'moss_...' label.

    Returns:
        Results containing only the matches accepted by the global Filter.
    """
    if args.verbose >= 1:
        print(f"Getting {moss_url}")
    resp = r.get(moss_url)
    soup = BeautifulSoup(resp.content.decode('utf-8'), 'html5lib')
    if name is None:
        ps = soup('p')
        # Bug fix: the old guard was `len(ps) >= 2` while the code indexes
        # ps[2], raising IndexError on a page with exactly two <p> tags.
        if len(ps) > 2:
            name = ps[2].text.strip()
    if not name:
        name = 'moss_%s' % date_str()
    matches = []
    # Skip the header row; each remaining row is one match: two file cells,
    # a matched line count, and a link to the detailed comparison.
    for row in soup.table('tr')[1:]:
        first, second, lines = map(lambda x: x.text, row('td'))
        first = parse_col(first)
        second = parse_col(second)
        lines = int(lines)
        url = row.a['href']
        matches.append(Match(name, first, second, lines, url))
    fil = Filter()
    matches = list(filter(fil.include, matches))
    return Results(name, matches)
def image(results, index=None):
    """Render the match graph for one Results object to an image file.

    Edge color (and, unless hidden, the label) encodes the match strength;
    the output name comes from --output (suffixed with ``index`` when given)
    or from the results name.
    """
    graph = pydot.Dot(graph_type='graph')
    print('Generating image for %s' % results.name)
    for m in results.matches:
        # Scale the edge color by how strong the match is.
        ratio = m.percent / 100
        color = link_color(ratio)
        extra_opts = {
            'color': color,
            'penwidth': 3,
        }
        if not args.hide_labels:
            label_str = f"{m.percent}% ({m.lines})"
            if args.show_assignment_names:
                label_str = f"{m.name}\n{label_str}"
            # Label links back to the detailed Moss comparison page.
            extra_opts.update({
                'label': label_str,
                'labelURL': m.url,
                'URL': m.url,
                'fontcolor': color,
            })
        # Self-loops (same name on both sides) are hidden unless requested.
        if m.first.name != m.second.name or args.show_loops:
            graph.add_edge(pydot.Edge(m.first.name, m.second.name, **extra_opts))
    if args.output:
        name = args.output
        if index is not None:
            name = '%s-%d' % (name, index)
    else:
        name = results.name
    filename = '%s.%s' % (name, args.format)
    # Remove any stale output file before writing the new one.
    if os.path.exists(filename):
        os.remove(filename)
    graph.write(filename, format=args.format)
    if args.format == 'xlib':
        # NOTE(review): presumably xlib renders to a window, so the on-disk
        # artifact is deleted afterwards — confirm.
        os.remove(filename)
    print('DONE')
def main():
    """Entry point: collect URLs, fetch each Moss page, and emit images/reports."""
    global args
    args = parser.parse_args()
    urls = args.urls
    names = []
    if args.csv:
        # CSV rows are either 'url' or 'name,url'.
        # NOTE(review): mixing 1- and 2-column rows desynchronizes names from
        # urls when zipped below — confirm input files are uniform.
        with open(args.csv, 'r') as csv_f:
            rdr = csv.reader(csv_f)
            for row in rdr:
                if len(row) == 1:
                    urls.append(row[0])
                else:
                    names.append(row[0])
                    urls.append(row[1])
    if not urls:
        # Fall back to reading one URL per line from stdin.
        urls = sys.stdin.read().splitlines()
    if not names:
        names = [None] * len(urls)
    all_res = []
    for u, n in zip(urls, names):
        res = get_results(u, n)
        all_res.append(res)
    if args.merge:
        # One combined graph across all result sets.
        merged = merge_results(all_res)
        if args.anonymize:
            anonymize(merged.matches)
        image(merged)
    else:
        # One graph per result set, numbered from 1.
        for i, res in enumerate(all_res):
            if args.anonymize:
                anonymize(res.matches)
            image(res, i+1)
    if args.report:
        generate_report(all_res)
if __name__ == '__main__':
main()
|
[
"os.remove",
"csv.reader",
"argparse.ArgumentParser",
"faker.Faker",
"datetime.datetime.today",
"sys.stdin.read",
"os.path.exists",
"re.match",
"pydot.Dot",
"collections.defaultdict",
"pydot.Edge",
"requests.get",
"collections.Counter",
"re.search"
] |
[((746, 790), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (769, 790), False, 'import argparse\n'), ((5686, 5718), 're.match', 're.match', (['args.transformer', 'name'], {}), '(args.transformer, name)\n', (5694, 5718), False, 'import re\n'), ((5946, 5953), 'faker.Faker', 'Faker', ([], {}), '()\n', (5951, 5953), False, 'from faker import Faker\n'), ((6760, 6777), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (6771, 6777), False, 'from collections import defaultdict, Counter\n'), ((8058, 8073), 'requests.get', 'r.get', (['moss_url'], {}), '(moss_url)\n', (8063, 8073), True, 'import requests as r\n'), ((8767, 8796), 'pydot.Dot', 'pydot.Dot', ([], {'graph_type': '"""graph"""'}), "(graph_type='graph')\n", (8776, 8796), False, 'import pydot\n'), ((9754, 9778), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (9768, 9778), False, 'import os\n'), ((9788, 9807), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (9797, 9807), False, 'import os\n'), ((9893, 9912), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (9902, 9912), False, 'import os\n'), ((5574, 5599), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (5597, 5599), False, 'import datetime\n'), ((10107, 10124), 'csv.reader', 'csv.reader', (['csv_f'], {}), '(csv_f)\n', (10117, 10124), False, 'import csv\n'), ((5849, 5871), 're.search', 're.search', (['"""\\\\d+"""', 'per'], {}), "('\\\\d+', per)\n", (5858, 5871), False, 'import re\n'), ((9487, 9540), 'pydot.Edge', 'pydot.Edge', (['m.first.name', 'm.second.name'], {}), '(m.first.name, m.second.name, **extra_opts)\n', (9497, 9540), False, 'import pydot\n'), ((10363, 10379), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (10377, 10379), False, 'import sys\n'), ((7589, 7603), 'collections.Counter', 'Counter', (['pairs'], {}), '(pairs)\n', (7596, 7603), False, 'from collections import defaultdict, 
Counter\n')]
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
# Path to the local ChromeDriver binary used by Selenium.
chrome_driver_path = 'C:/Development/chromedriver.exe'
driver = webdriver.Chrome(chrome_driver_path)
driver.set_window_size(1440, 720)
# Open the sign-up demo page.
driver.get('http://secure-retreat-92358.herokuapp.com/')
# stats = driver.find_element_by_css_selector('#articlecount a')
# print(stats.text)
#
#
# search = driver.find_element_by_name('search')
# search.send_keys('python')
# search.send_keys(Keys.ENTER)
# Locate the three sign-up form fields and the submit button.
first_name = driver.find_element_by_name('fName')
last_name = driver.find_element_by_name('lName')
email = driver.find_element_by_name('email')
sign_up = driver.find_element_by_css_selector('.form-signin button')
# Fill the form with placeholder values and submit it.
first_name.send_keys('<NAME>')
last_name.send_keys('<NAME>')
email.send_keys('<EMAIL>')
sign_up.click()
# driver.close()
|
[
"selenium.webdriver.Chrome"
] |
[((146, 182), 'selenium.webdriver.Chrome', 'webdriver.Chrome', (['chrome_driver_path'], {}), '(chrome_driver_path)\n', (162, 182), False, 'from selenium import webdriver\n')]
|
#!/usr/bin/env python3.3
import os
from qdunittest.program import TestProgram
if __name__ == "__main__":
    # Run tests relative to this file's directory so discovery finds them.
    os.chdir(os.path.dirname(__file__))
    TestProgram(module=None)
|
[
"os.path.dirname",
"qdunittest.program.TestProgram"
] |
[((150, 174), 'qdunittest.program.TestProgram', 'TestProgram', ([], {'module': 'None'}), '(module=None)\n', (161, 174), False, 'from qdunittest.program import TestProgram\n'), ((119, 144), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (134, 144), False, 'import os\n')]
|
#!/usr/bin/env python
# encoding: utf-8
# <NAME>, 2013
"""
Writes the c and cpp compile commands into build/compile_commands.json
see http://clang.llvm.org/docs/JSONCompilationDatabase.html
Usage:
def configure(conf):
conf.load('compiler_cxx')
...
conf.load('clang_compilation_database')
"""
import sys, os, json, shlex, pipes
from waflib import Logs, TaskGen, Task
# Keep each task's final command line after it runs so the compilation
# database can record it.
Task.TaskBase.keep_last_cmd = True
# shlex.quote exists from Python 3.3; fall back to pipes.quote before that.
if sys.hexversion >= 0x3030000:
    quote = shlex.quote
else:
    quote = pipes.quote
@TaskGen.feature('c', 'cxx')
@TaskGen.after_method('process_use')
def collect_compilation_db_tasks(self):
    "Add a compilation database entry for compiled tasks"
    try:
        clang_db = self.bld.clang_compilation_database_tasks
    except AttributeError:
        # First task generator to run: create the shared list and register
        # the post-build hook that dumps it to JSON.
        clang_db = self.bld.clang_compilation_database_tasks = []
        self.bld.add_post_fun(write_compilation_database)
    # Only C and C++ compile tasks are recorded (classes may be absent).
    tup = tuple(y for y in [Task.classes.get(x) for x in ('c', 'cxx')] if y)
    for task in getattr(self, 'compiled_tasks', []):
        if isinstance(task, tup):
            clang_db.append(task)
def write_compilation_database(ctx):
    "Write the clang compilation database as JSON"
    database_file = ctx.bldnode.make_node('compile_commands.json')
    Logs.info('Build commands will be stored in %s', database_file.path_from(ctx.path))
    try:
        # Merge into any database left over from a previous build.
        root = json.load(database_file)
    except IOError:
        root = []
    # Index existing entries by file name so re-runs update in place.
    clang_db = dict((x['file'], x) for x in root)
    for task in getattr(ctx, 'clang_compilation_database_tasks', []):
        try:
            cmd = task.last_cmd
        except AttributeError:
            # Task did not actually run (e.g. up to date); nothing to record.
            continue
        directory = getattr(task, 'cwd', ctx.variant_dir)
        f_node = task.inputs[0]
        filename = os.path.relpath(f_node.abspath(), directory)
        cmd = " ".join(map(quote, cmd))
        entry = {
            "directory": directory,
            "command": cmd,
            "file": filename,
        }
        clang_db[filename] = entry
    root = list(clang_db.values())
    database_file.write(json.dumps(root, indent=2))
|
[
"waflib.TaskGen.feature",
"waflib.TaskGen.after_method",
"json.load",
"json.dumps",
"waflib.Task.classes.get"
] |
[((517, 544), 'waflib.TaskGen.feature', 'TaskGen.feature', (['"""c"""', '"""cxx"""'], {}), "('c', 'cxx')\n", (532, 544), False, 'from waflib import Logs, TaskGen, Task\n'), ((546, 581), 'waflib.TaskGen.after_method', 'TaskGen.after_method', (['"""process_use"""'], {}), "('process_use')\n", (566, 581), False, 'from waflib import Logs, TaskGen, Task\n'), ((1302, 1326), 'json.load', 'json.load', (['database_file'], {}), '(database_file)\n', (1311, 1326), False, 'import sys, os, json, shlex, pipes\n'), ((1872, 1898), 'json.dumps', 'json.dumps', (['root'], {'indent': '(2)'}), '(root, indent=2)\n', (1882, 1898), False, 'import sys, os, json, shlex, pipes\n'), ((900, 919), 'waflib.Task.classes.get', 'Task.classes.get', (['x'], {}), '(x)\n', (916, 919), False, 'from waflib import Logs, TaskGen, Task\n')]
|
import os
import numpy as np
import matplotlib.pyplot as plt
import yaml
from multiview_manipulation import plotting as plot_utils, utils as bc_viewag_plot_utils
# CONFIG
#-------------------------------------------------------------------------------
# plot options
wide_full_comp = True # wide is 2 rows by 5 cols, otherwise 5 by 2
font_size = 22
interval_percentile = 95
plot_width = 3.2
plot_height = 2.4
# data loading options
main_data_dir = '/home/trevor/data/paper-data/bc-view-agnostic/bc_results'
bc_folders_file = 'bc_result_folder_names.yaml'
full_comp_envs = ['LiftSim', 'StackSim', 'PickAndInsertSim', 'DoorSim', 'DoorReal']
conditions = ['mm', 'mf', 'fm', 'ff']
mult_only_envs = ['PickAndInsertReal', 'DrawerReal']
mult_only_envs_vertical = True
sim_bc_seeds = [1, 2, 3, 4, 5]
sim_num_dem = range(25, 201, 25)
real_bc_seeds = [1, 2, 3]
real_num_dem = range(50, 201, 50)
results_filename = 'all_results.npz'
main_exp_dir = '/media/trevor/Data/paper-data/bc-viewag'
exp_dir = main_exp_dir + '/figures/bc_results' # + '/' + datetime.now().strftime("%y-%m-%d_%H-%M-%S")
#--------------------------------------------------------------------------------
plot_utils.setup_pretty_plotting()
bc_folders = yaml.load(open(bc_folders_file, 'r'), yaml.Loader)
# Full comparison fig (4 conditions) -----------------------------------------------------------------
if wide_full_comp:
fig, axes = plt.subplots(nrows=2, ncols=len(full_comp_envs), # sharex=True, sharey=True,
figsize=[plot_width * len(full_comp_envs), plot_height * 2])
axes[0, 0].set_ylabel("Fixed-base Env", labelpad=20, fontsize=font_size - 6)
axes[1, 0].set_ylabel("Multiview Env", labelpad=20, fontsize=font_size - 6)
else:
fig, axes = plt.subplots(nrows=len(full_comp_envs), ncols=2, # sharex=True, sharey=True,
figsize=[plot_width * 2, plot_height * len(full_comp_envs)])
axes[0, 0].set_title("Fixed Env", labelpad=20, fontsize=font_size - 6)
axes[0, 1].set_title("Multiview Env", labelpad=20, fontsize=font_size - 6)
full_comp_data = dict()
for env_i, env in enumerate(full_comp_envs):
full_comp_data[env] = {c: 0 for c in conditions}
for cond_i, cond in enumerate(conditions):
data = np.load(main_data_dir + '/' + bc_folders[env][cond_i] + '/' + results_filename)['per_ep_group']
if 'Sim' in env:
seeds = sim_bc_seeds
num_dem = sim_num_dem
else:
seeds = real_bc_seeds
num_dem = real_num_dem
num_dem, means, uppers, lowers = bc_viewag_plot_utils.get_means_lowers_uppers(data, num_dem, seeds, interval_percentile)
full_comp_data[env][cond] = dict(means=means, lowers=lowers, uppers=uppers)
# plot now that all data collected
fcd = full_comp_data
f_line, m_line = bc_viewag_plot_utils.plot_four_conds(axes, env, env_i, wide_full_comp, font_size - 10, num_dem,
fcd[env]['mm']['means'], fcd[env]['mm']['lowers'], fcd[env]['mm']['uppers'],
fcd[env]['mf']['means'], fcd[env]['mf']['lowers'], fcd[env]['mf']['uppers'],
fcd[env]['fm']['means'], fcd[env]['fm']['lowers'], fcd[env]['fm']['uppers'],
fcd[env]['ff']['means'], fcd[env]['ff']['lowers'], fcd[env]['ff']['uppers'])
fig.legend([m_line, f_line],
labels=["Fixed-base Policy", "Multiview Policy"],
# labels=[r"$\pi_f$", r"$\pi_m$"],
ncol=2,
fancybox=True,
shadow=True,
fontsize=font_size - 6,
# loc="lower left", # on figure
# bbox_to_anchor=(0.1, 0.175), # on figure
loc="lower right", # bottom right -- this is the original one
# bbox_to_anchor=(0.05, 0.015), # bottom right
# loc="lower left", # bottom left
# bbox_to_anchor=(0.05, 0.015), # bottom left
# loc="lower center", # center under
# bbox_to_anchor=(0.535, -0.05) # center under
)
ax = fig.add_subplot(111, frameon=False)
plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
# plt.xlabel("Number of Training Demonstrations", fontsize=font_size-6)
plt.xlabel("Number of Training Demonstrations", fontsize=font_size-6)
# ax.xaxis.set_label_coords(0.6, -0.1)
plt.ylabel("Success Rate", labelpad=10, fontsize=font_size-6)
plt.tight_layout()
os.makedirs(exp_dir, exist_ok=True)
fig.savefig(exp_dir + '/full_comp_success.pdf', bbox_inches='tight')
# Multiview suc only fig -----------------------------------------------------------------
if mult_only_envs_vertical:
fig, axes = plt.subplots(nrows=len(mult_only_envs), ncols=1, # sharex=True, sharey=True,
figsize=[plot_width, (plot_height * len(mult_only_envs)) + .5])
else:
fig, axes = plt.subplots(nrows=1, ncols=len(mult_only_envs), # sharex=True, sharey=True,
figsize=[plot_width * len(mult_only_envs), plot_height + .5])
ax = fig.add_subplot(111, frameon=False)
plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
# plt.xlabel("Number of Training Demonstrations", fontsize=font_size-6)
plt.xlabel("Number of Training Demonstrations", fontsize=font_size-6)
# ax.xaxis.set_label_coords(0.6, -0.1)
plt.ylabel("Success Rate", labelpad=10, fontsize=font_size-6)
mult_only_data = dict()
cmap = plt.get_cmap("tab10")
for env_i, env in enumerate(mult_only_envs):
mult_only_data[env] = 0
data = np.load(main_data_dir + '/' + bc_folders[env][0] + '/' + results_filename)['per_ep_group']
if 'Sim' in env:
seeds = sim_bc_seeds
num_dem = sim_num_dem
else:
seeds = real_bc_seeds
num_dem = real_num_dem
num_dem, means, uppers, lowers = bc_viewag_plot_utils.get_means_lowers_uppers(data, num_dem, seeds, interval_percentile)
line = bc_viewag_plot_utils.plot_mean_and_std(axes[env_i], num_dem, means, lowers, uppers, cmap(1),
yticks=np.arange(0, 1.1, .25), xticks=np.arange(50, 210, 50),
ylim=[-.05, 1.05], labelsize=font_size-10, title=env)
# fig.legend([m_line, f_line],
# labels=["Multiview Policy"],
# ncol=1,
# fancybox=True,
# shadow=True,
# fontsize=font_size - 6,
# loc="right", # bottom right
# bbox_to_anchor=(0.96, 0.4), # bottom right
# # loc="lower left", # bottom left
# # bbox_to_anchor=(0.05, 0.015), # bottom left
# # loc="lower center", # center under
# # bbox_to_anchor=(0.535, -0.05) # center under
# )
plt.tight_layout()
fig.savefig(exp_dir + '/mult_only_envs.pdf', bbox_inches='tight')
# plt.show()
|
[
"numpy.load",
"os.makedirs",
"matplotlib.pyplot.get_cmap",
"multiview_manipulation.utils.get_means_lowers_uppers",
"multiview_manipulation.plotting.setup_pretty_plotting",
"multiview_manipulation.utils.plot_four_conds",
"numpy.arange",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tight_layout"
] |
[((1169, 1203), 'multiview_manipulation.plotting.setup_pretty_plotting', 'plot_utils.setup_pretty_plotting', ([], {}), '()\n', (1201, 1203), True, 'from multiview_manipulation import plotting as plot_utils, utils as bc_viewag_plot_utils\n'), ((4194, 4282), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelcolor': '"""none"""', 'top': '(False)', 'bottom': '(False)', 'left': '(False)', 'right': '(False)'}), "(labelcolor='none', top=False, bottom=False, left=False,\n right=False)\n", (4209, 4282), True, 'import matplotlib.pyplot as plt\n'), ((4351, 4422), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Training Demonstrations"""'], {'fontsize': '(font_size - 6)'}), "('Number of Training Demonstrations', fontsize=font_size - 6)\n", (4361, 4422), True, 'import matplotlib.pyplot as plt\n'), ((4460, 4523), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Success Rate"""'], {'labelpad': '(10)', 'fontsize': '(font_size - 6)'}), "('Success Rate', labelpad=10, fontsize=font_size - 6)\n", (4470, 4523), True, 'import matplotlib.pyplot as plt\n'), ((4523, 4541), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4539, 4541), True, 'import matplotlib.pyplot as plt\n'), ((4543, 4578), 'os.makedirs', 'os.makedirs', (['exp_dir'], {'exist_ok': '(True)'}), '(exp_dir, exist_ok=True)\n', (4554, 4578), False, 'import os\n'), ((5187, 5275), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelcolor': '"""none"""', 'top': '(False)', 'bottom': '(False)', 'left': '(False)', 'right': '(False)'}), "(labelcolor='none', top=False, bottom=False, left=False,\n right=False)\n", (5202, 5275), True, 'import matplotlib.pyplot as plt\n'), ((5344, 5415), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Training Demonstrations"""'], {'fontsize': '(font_size - 6)'}), "('Number of Training Demonstrations', fontsize=font_size - 6)\n", (5354, 5415), True, 'import matplotlib.pyplot as plt\n'), ((5453, 5516), 'matplotlib.pyplot.ylabel', 
'plt.ylabel', (['"""Success Rate"""'], {'labelpad': '(10)', 'fontsize': '(font_size - 6)'}), "('Success Rate', labelpad=10, fontsize=font_size - 6)\n", (5463, 5516), True, 'import matplotlib.pyplot as plt\n'), ((5547, 5568), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""tab10"""'], {}), "('tab10')\n", (5559, 5568), True, 'import matplotlib.pyplot as plt\n'), ((6897, 6915), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6913, 6915), True, 'import matplotlib.pyplot as plt\n'), ((2836, 3264), 'multiview_manipulation.utils.plot_four_conds', 'bc_viewag_plot_utils.plot_four_conds', (['axes', 'env', 'env_i', 'wide_full_comp', '(font_size - 10)', 'num_dem', "fcd[env]['mm']['means']", "fcd[env]['mm']['lowers']", "fcd[env]['mm']['uppers']", "fcd[env]['mf']['means']", "fcd[env]['mf']['lowers']", "fcd[env]['mf']['uppers']", "fcd[env]['fm']['means']", "fcd[env]['fm']['lowers']", "fcd[env]['fm']['uppers']", "fcd[env]['ff']['means']", "fcd[env]['ff']['lowers']", "fcd[env]['ff']['uppers']"], {}), "(axes, env, env_i, wide_full_comp, \n font_size - 10, num_dem, fcd[env]['mm']['means'], fcd[env]['mm'][\n 'lowers'], fcd[env]['mm']['uppers'], fcd[env]['mf']['means'], fcd[env][\n 'mf']['lowers'], fcd[env]['mf']['uppers'], fcd[env]['fm']['means'], fcd\n [env]['fm']['lowers'], fcd[env]['fm']['uppers'], fcd[env]['ff']['means'\n ], fcd[env]['ff']['lowers'], fcd[env]['ff']['uppers'])\n", (2872, 3264), True, 'from multiview_manipulation import plotting as plot_utils, utils as bc_viewag_plot_utils\n'), ((5933, 6024), 'multiview_manipulation.utils.get_means_lowers_uppers', 'bc_viewag_plot_utils.get_means_lowers_uppers', (['data', 'num_dem', 'seeds', 'interval_percentile'], {}), '(data, num_dem, seeds,\n interval_percentile)\n', (5977, 6024), True, 'from multiview_manipulation import plotting as plot_utils, utils as bc_viewag_plot_utils\n'), ((2578, 2669), 'multiview_manipulation.utils.get_means_lowers_uppers', 'bc_viewag_plot_utils.get_means_lowers_uppers', 
(['data', 'num_dem', 'seeds', 'interval_percentile'], {}), '(data, num_dem, seeds,\n interval_percentile)\n', (2622, 2669), True, 'from multiview_manipulation import plotting as plot_utils, utils as bc_viewag_plot_utils\n'), ((5653, 5727), 'numpy.load', 'np.load', (["(main_data_dir + '/' + bc_folders[env][0] + '/' + results_filename)"], {}), "(main_data_dir + '/' + bc_folders[env][0] + '/' + results_filename)\n", (5660, 5727), True, 'import numpy as np\n'), ((2264, 2343), 'numpy.load', 'np.load', (["(main_data_dir + '/' + bc_folders[env][cond_i] + '/' + results_filename)"], {}), "(main_data_dir + '/' + bc_folders[env][cond_i] + '/' + results_filename)\n", (2271, 2343), True, 'import numpy as np\n'), ((6182, 6205), 'numpy.arange', 'np.arange', (['(0)', '(1.1)', '(0.25)'], {}), '(0, 1.1, 0.25)\n', (6191, 6205), True, 'import numpy as np\n'), ((6213, 6235), 'numpy.arange', 'np.arange', (['(50)', '(210)', '(50)'], {}), '(50, 210, 50)\n', (6222, 6235), True, 'import numpy as np\n')]
|
from typer.testing import CliRunner
from ward import fixture
from cs_tools.thoughtspot import ThoughtSpot
from cs_tools.settings import TSConfig
from cs_tools.cli import _gather_tools, app as app_, tools_app, cfg_app, log_app
@fixture(scope='global')
def thoughtspot():
    """Ward fixture: a live ThoughtSpot session built from the cloud test config."""
    # cfg = TSConfig.from_toml('tests/_test_config_6-3-1.toml')
    cfg = TSConfig.from_toml('tests/_test_config_cloud.toml')

    # Yield inside the context manager so the session closes after the run.
    with ThoughtSpot(config=cfg) as ts:
        yield ts
@fixture(scope='global')
def app_runner():
    """Ward fixture: a Typer CliRunner for invoking the CLI in-process."""
    return CliRunner()
@fixture(scope='global')
def app():
    """Ward fixture: the assembled cs_tools Typer app with all sub-apps mounted."""
    _gather_tools(tools_app)
    app_.add_typer(tools_app)
    app_.add_typer(cfg_app)
    app_.add_typer(log_app)
    yield app_
|
[
"ward.fixture",
"cs_tools.cli.app.add_typer",
"cs_tools.settings.TSConfig.from_toml",
"cs_tools.cli._gather_tools",
"cs_tools.thoughtspot.ThoughtSpot",
"typer.testing.CliRunner"
] |
[((230, 253), 'ward.fixture', 'fixture', ([], {'scope': '"""global"""'}), "(scope='global')\n", (237, 253), False, 'from ward import fixture\n'), ((460, 483), 'ward.fixture', 'fixture', ([], {'scope': '"""global"""'}), "(scope='global')\n", (467, 483), False, 'from ward import fixture\n'), ((528, 551), 'ward.fixture', 'fixture', ([], {'scope': '"""global"""'}), "(scope='global')\n", (535, 551), False, 'from ward import fixture\n'), ((347, 398), 'cs_tools.settings.TSConfig.from_toml', 'TSConfig.from_toml', (['"""tests/_test_config_cloud.toml"""'], {}), "('tests/_test_config_cloud.toml')\n", (365, 398), False, 'from cs_tools.settings import TSConfig\n'), ((513, 524), 'typer.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (522, 524), False, 'from typer.testing import CliRunner\n'), ((567, 591), 'cs_tools.cli._gather_tools', '_gather_tools', (['tools_app'], {}), '(tools_app)\n', (580, 591), False, 'from cs_tools.cli import _gather_tools, app as app_, tools_app, cfg_app, log_app\n'), ((596, 621), 'cs_tools.cli.app.add_typer', 'app_.add_typer', (['tools_app'], {}), '(tools_app)\n', (610, 621), True, 'from cs_tools.cli import _gather_tools, app as app_, tools_app, cfg_app, log_app\n'), ((626, 649), 'cs_tools.cli.app.add_typer', 'app_.add_typer', (['cfg_app'], {}), '(cfg_app)\n', (640, 649), True, 'from cs_tools.cli import _gather_tools, app as app_, tools_app, cfg_app, log_app\n'), ((654, 677), 'cs_tools.cli.app.add_typer', 'app_.add_typer', (['log_app'], {}), '(log_app)\n', (668, 677), True, 'from cs_tools.cli import _gather_tools, app as app_, tools_app, cfg_app, log_app\n'), ((409, 432), 'cs_tools.thoughtspot.ThoughtSpot', 'ThoughtSpot', ([], {'config': 'cfg'}), '(config=cfg)\n', (420, 432), False, 'from cs_tools.thoughtspot import ThoughtSpot\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################################################
## Write a file, typical use
##############################################################################
import unittest
import os
import tempfile
import copy
from savReaderWriter import *
class test_SavWriter_typical_use(unittest.TestCase):
""" Write a file, typical use"""
def setUp(self):
self.savFileName = os.path.join(tempfile.gettempdir(), "test.sav")
varNames = ['var1', 'v2', 'v3', 'bdate']
varTypes = {'var1': 6, 'v2': 0, 'v3': 0, 'bdate': 10}
self.args = (self.savFileName, varNames, varTypes)
def test_SavWriter_typical(self):
records_in = [[b'Test1', 1, 1, b'2010-08-11'],
[b'Test2', 2, 1, b'1910-01-12']]
with SavWriter(*self.args) as writer:
for record in copy.deepcopy(records_in):
writer.writerow(record)
with SavReader(self.savFileName) as reader:
records_out = [line for line in reader]
self.assertEqual(records_in, records_out)
def tearDown(self):
os.remove(self.savFileName)
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"copy.deepcopy",
"os.remove",
"tempfile.gettempdir"
] |
[((1214, 1229), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1227, 1229), False, 'import unittest\n'), ((1154, 1181), 'os.remove', 'os.remove', (['self.savFileName'], {}), '(self.savFileName)\n', (1163, 1181), False, 'import os\n'), ((474, 495), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (493, 495), False, 'import tempfile\n'), ((900, 925), 'copy.deepcopy', 'copy.deepcopy', (['records_in'], {}), '(records_in)\n', (913, 925), False, 'import copy\n')]
|
import time
import service
from model.discord.message import Message
def main():
print("# Beginning show fetch and push")
shows = service.anime_list_fetch_service.fetch_latest_aired_shows()
non_reported_shows = service.anime_list_filter_service.get_non_reported_shows(shows)
if len(non_reported_shows) == 0:
print("* No shows to push")
return
for show in non_reported_shows:
embed = service.anime_list_embed_service.get_embed(show)
service.anime_list_service.set_reported_show(show)
service.anime_list_discord_push_service.send_message(Message(embeds=[embed]))
print(f"* Pushed show \"{show.title}\" via webhook")
time.sleep(0.5) # need to wait to prevent being rate limited
if __name__ == '__main__':
main()
|
[
"service.anime_list_filter_service.get_non_reported_shows",
"model.discord.message.Message",
"time.sleep",
"service.anime_list_fetch_service.fetch_latest_aired_shows",
"service.anime_list_embed_service.get_embed",
"service.anime_list_service.set_reported_show"
] |
[((141, 200), 'service.anime_list_fetch_service.fetch_latest_aired_shows', 'service.anime_list_fetch_service.fetch_latest_aired_shows', ([], {}), '()\n', (198, 200), False, 'import service\n'), ((226, 289), 'service.anime_list_filter_service.get_non_reported_shows', 'service.anime_list_filter_service.get_non_reported_shows', (['shows'], {}), '(shows)\n', (282, 289), False, 'import service\n'), ((431, 479), 'service.anime_list_embed_service.get_embed', 'service.anime_list_embed_service.get_embed', (['show'], {}), '(show)\n', (473, 479), False, 'import service\n'), ((488, 538), 'service.anime_list_service.set_reported_show', 'service.anime_list_service.set_reported_show', (['show'], {}), '(show)\n', (532, 538), False, 'import service\n'), ((694, 709), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (704, 709), False, 'import time\n'), ((600, 623), 'model.discord.message.Message', 'Message', ([], {'embeds': '[embed]'}), '(embeds=[embed])\n', (607, 623), False, 'from model.discord.message import Message\n')]
|
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import re
_MYPY = False
if _MYPY:
import typing # noqa: F401 # pylint: disable=import-error,unused-import,useless-suppression
# Hack to get around some of Python 2's standard library modules that
# accept ascii-encodable unicode literals in lieu of strs, but where
# actually passing such literals results in errors with mypy --py2. See
# <https://github.com/python/typeshed/issues/756> and
# <https://github.com/python/mypy/issues/2536>.
import importlib
argparse = importlib.import_module(str('argparse')) # type: typing.Any
from stone.backend import CodeBackend
from stone.backends.tsd_helpers import (
check_route_name_conflict,
fmt_error_type,
fmt_func,
fmt_tag,
fmt_type,
)
from stone.ir import Void
_cmdline_parser = argparse.ArgumentParser(prog='tsd-client-backend')
_cmdline_parser.add_argument(
'template',
help=('A template to use when generating the TypeScript definition file.')
)
_cmdline_parser.add_argument(
'filename',
help=('The name to give the single TypeScript definition file to contain '
'all of the emitted types.'),
)
_cmdline_parser.add_argument(
'-t',
'--template-string',
type=str,
default='ROUTES',
help=('The name of the template string to replace with route definitions. '
'Defaults to ROUTES, which replaces the string /*ROUTES*/ with route '
'definitions.')
)
_cmdline_parser.add_argument(
'-i',
'--indent-level',
type=int,
default=1,
help=('Indentation level to emit types at. Routes are automatically '
'indented one level further than this.')
)
_cmdline_parser.add_argument(
'-s',
'--spaces-per-indent',
type=int,
default=2,
help=('Number of spaces to use per indentation level.')
)
_header = """\
// Auto-generated by Stone, do not modify.
"""
class TSDClientBackend(CodeBackend):
"""Generates a TypeScript definition file with routes defined."""
cmdline_parser = _cmdline_parser
preserve_aliases = True
def generate(self, api):
spaces_per_indent = self.args.spaces_per_indent
indent_level = self.args.indent_level
template_path = os.path.join(self.target_folder_path, self.args.template)
template_string = self.args.template_string
with self.output_to_relative_path(self.args.filename):
if os.path.isfile(template_path):
with open(template_path, 'r') as template_file:
template = template_file.read()
else:
raise AssertionError('TypeScript template file does not exist.')
# /*ROUTES*/
r_match = re.search("/\\*%s\\*/" % (template_string), template)
if not r_match:
raise AssertionError(
'Missing /*%s*/ in TypeScript template file.' % template_string)
r_start = r_match.start()
r_end = r_match.end()
r_ends_with_newline = template[r_end - 1] == '\n'
t_end = len(template)
t_ends_with_newline = template[t_end - 1] == '\n'
self.emit_raw(template[0:r_start] + ('\n' if not r_ends_with_newline else ''))
self._generate_routes(api, spaces_per_indent, indent_level)
self.emit_raw(template[r_end + 1:t_end] + ('\n' if not t_ends_with_newline else ''))
def _generate_routes(self, api, spaces_per_indent, indent_level):
with self.indent(dent=spaces_per_indent * (indent_level + 1)):
for namespace in api.namespaces.values():
# first check for route name conflict
check_route_name_conflict(namespace)
for route in namespace.routes:
self._generate_route(
namespace, route)
def _generate_route(self, namespace, route):
function_name = fmt_func(namespace.name + '_' + route.name, route.version)
self.emit()
self.emit('/**')
if route.doc:
self.emit_wrapped_text(self.process_doc(route.doc, self._docf), prefix=' * ')
self.emit(' *')
self.emit_wrapped_text('When an error occurs, the route rejects the promise with type %s.'
% fmt_error_type(route.error_data_type), prefix=' * ')
if route.deprecated:
self.emit(' * @deprecated')
if route.arg_data_type.__class__ != Void:
self.emit(' * @param arg The request parameters.')
self.emit(' */')
if route.arg_data_type.__class__ != Void:
self.emit('public %s(arg: %s): Promise<%s>;' %
(function_name, fmt_type(route.arg_data_type),
fmt_type(route.result_data_type)))
else:
self.emit('public %s(): Promise<%s>;' %
(function_name, fmt_type(route.result_data_type)))
def _docf(self, tag, val):
"""
Callback to process documentation references.
"""
return fmt_tag(None, tag, val)
|
[
"stone.backends.tsd_helpers.fmt_func",
"stone.backends.tsd_helpers.fmt_tag",
"stone.backends.tsd_helpers.fmt_error_type",
"stone.backends.tsd_helpers.fmt_type",
"os.path.isfile",
"re.search",
"stone.backends.tsd_helpers.check_route_name_conflict",
"os.path.join"
] |
[((2254, 2311), 'os.path.join', 'os.path.join', (['self.target_folder_path', 'self.args.template'], {}), '(self.target_folder_path, self.args.template)\n', (2266, 2311), False, 'import os\n'), ((3942, 4000), 'stone.backends.tsd_helpers.fmt_func', 'fmt_func', (["(namespace.name + '_' + route.name)", 'route.version'], {}), "(namespace.name + '_' + route.name, route.version)\n", (3950, 4000), False, 'from stone.backends.tsd_helpers import check_route_name_conflict, fmt_error_type, fmt_func, fmt_tag, fmt_type\n'), ((5073, 5096), 'stone.backends.tsd_helpers.fmt_tag', 'fmt_tag', (['None', 'tag', 'val'], {}), '(None, tag, val)\n', (5080, 5096), False, 'from stone.backends.tsd_helpers import check_route_name_conflict, fmt_error_type, fmt_func, fmt_tag, fmt_type\n'), ((2443, 2472), 'os.path.isfile', 'os.path.isfile', (['template_path'], {}), '(template_path)\n', (2457, 2472), False, 'import os\n'), ((2737, 2788), 're.search', 're.search', (["('/\\\\*%s\\\\*/' % template_string)", 'template'], {}), "('/\\\\*%s\\\\*/' % template_string, template)\n", (2746, 2788), False, 'import re\n'), ((3700, 3736), 'stone.backends.tsd_helpers.check_route_name_conflict', 'check_route_name_conflict', (['namespace'], {}), '(namespace)\n', (3725, 3736), False, 'from stone.backends.tsd_helpers import check_route_name_conflict, fmt_error_type, fmt_func, fmt_tag, fmt_type\n'), ((4318, 4355), 'stone.backends.tsd_helpers.fmt_error_type', 'fmt_error_type', (['route.error_data_type'], {}), '(route.error_data_type)\n', (4332, 4355), False, 'from stone.backends.tsd_helpers import check_route_name_conflict, fmt_error_type, fmt_func, fmt_tag, fmt_type\n'), ((4725, 4754), 'stone.backends.tsd_helpers.fmt_type', 'fmt_type', (['route.arg_data_type'], {}), '(route.arg_data_type)\n', (4733, 4754), False, 'from stone.backends.tsd_helpers import check_route_name_conflict, fmt_error_type, fmt_func, fmt_tag, fmt_type\n'), ((4776, 4808), 'stone.backends.tsd_helpers.fmt_type', 'fmt_type', 
(['route.result_data_type'], {}), '(route.result_data_type)\n', (4784, 4808), False, 'from stone.backends.tsd_helpers import check_route_name_conflict, fmt_error_type, fmt_func, fmt_tag, fmt_type\n'), ((4913, 4945), 'stone.backends.tsd_helpers.fmt_type', 'fmt_type', (['route.result_data_type'], {}), '(route.result_data_type)\n', (4921, 4945), False, 'from stone.backends.tsd_helpers import check_route_name_conflict, fmt_error_type, fmt_func, fmt_tag, fmt_type\n')]
|
"""
Clenshaw-Curtis quadrature method is a good all-around quadrature method
comparable to Gaussian quadrature, but typically limited to finite intervals
without a specific weight function. In addition to be quite accurate, the
weights and abscissas can be calculated quite fast.
Another thing to note is that Clenshaw-Curtis, with an appropriate growth rule
is fully nested. This means, if one applies a method that combines different
order of quadrature rules, the number of evaluations can often be reduced as
the abscissas can be used across levels.
Example usage
-------------
The first few orders with linear growth rule::
>>> distribution = chaospy.Uniform(0, 1)
>>> for order in [0, 1, 2, 3]:
... X, W = chaospy.generate_quadrature(
... order, distribution, rule="clenshaw_curtis")
... print(order, numpy.around(X, 3), numpy.around(W, 3))
0 [[0.5]] [1.]
1 [[0. 1.]] [0.5 0.5]
2 [[0. 0.5 1. ]] [0.167 0.667 0.167]
3 [[0. 0.25 0.75 1. ]] [0.056 0.444 0.444 0.056]
The first few orders with exponential growth rule::
>>> for order in [0, 1, 2]:
... X, W = chaospy.generate_quadrature(
... order, distribution, rule="clenshaw_curtis", growth=True)
... print(order, numpy.around(X, 3), numpy.around(W, 3))
0 [[0.5]] [1.]
1 [[0. 0.5 1. ]] [0.167 0.667 0.167]
2 [[0. 0.146 0.5 0.854 1. ]] [0.033 0.267 0.4 0.267 0.033]
Applying the rule using Smolyak sparse grid::
>>> distribution = chaospy.Iid(chaospy.Uniform(0, 1), 2)
>>> X, W = chaospy.generate_quadrature(
... 2, distribution, rule="clenshaw_curtis",
... growth=True, sparse=True)
>>> print(numpy.around(X, 2))
[[0. 0. 0. 0.15 0.5 0.5 0.5 0.5 0.5 0.85 1. 1. 1. ]
[0. 0.5 1. 0.5 0. 0.15 0.5 0.85 1. 0.5 0. 0.5 1. ]]
>>> print(numpy.around(W, 3))
[ 0.028 -0.022 0.028 0.267 -0.022 0.267 -0.089 0.267 -0.022 0.267
0.028 -0.022 0.028]
"""
from __future__ import division, print_function
import numpy
from .combine import combine_quadrature
def quad_clenshaw_curtis(order, domain, growth=False):
"""
Generate the quadrature nodes and weights in Clenshaw-Curtis quadrature.
Args:
order (int, numpy.ndarray):
Quadrature order.
domain (chaospy.distributions.baseclass.Dist, numpy.ndarray):
Either distribution or bounding of interval to integrate over.
growth (bool):
If True sets the growth rule for the quadrature rule to only
include orders that enhances nested samples.
Returns:
(numpy.ndarray, numpy.ndarray):
abscissas:
The quadrature points for where to evaluate the model function
with ``abscissas.shape == (len(dist), N)`` where ``N`` is the
number of samples.
weights:
The quadrature weights with ``weights.shape == (N,)``.
Example:
>>> abscissas, weights = quad_clenshaw_curtis(3, (0, 1))
>>> print(numpy.around(abscissas, 4))
[[0. 0.25 0.75 1. ]]
>>> print(numpy.around(weights, 4))
[0.0556 0.4444 0.4444 0.0556]
"""
from ..distributions.baseclass import Dist
if isinstance(domain, Dist):
abscissas, weights = quad_clenshaw_curtis(
order, domain.range(), growth)
weights *= domain.pdf(abscissas).flatten()
weights /= numpy.sum(weights)
return abscissas, weights
order = numpy.asarray(order, dtype=int).flatten()
lower, upper = numpy.array(domain)
lower = numpy.asarray(lower).flatten()
upper = numpy.asarray(upper).flatten()
dim = max(lower.size, upper.size, order.size)
order = numpy.ones(dim, dtype=int)*order
lower = numpy.ones(dim)*lower
upper = numpy.ones(dim)*upper
if growth:
order = numpy.where(order > 0, 2**order, 0)
abscissas, weights = zip(*[_clenshaw_curtis(order_) for order_ in order])
return combine_quadrature(abscissas, weights, (lower, upper))
def _clenshaw_curtis(order):
r"""
Backend method.
Examples:
>>> abscissas, weights = _clenshaw_curtis(0)
>>> print(abscissas)
[0.5]
>>> print(weights)
[1.]
>>> abscissas, weights = _clenshaw_curtis(1)
>>> print(abscissas)
[0. 1.]
>>> print(weights)
[0.5 0.5]
>>> abscissas, weights = _clenshaw_curtis(2)
>>> print(abscissas)
[0. 0.5 1. ]
>>> print(weights)
[0.16666667 0.66666667 0.16666667]
>>> abscissas, weights = _clenshaw_curtis(3)
>>> print(abscissas)
[0. 0.25 0.75 1. ]
>>> print(weights)
[0.05555556 0.44444444 0.44444444 0.05555556]
>>> abscissas, weights = _clenshaw_curtis(4)
>>> print(abscissas)
[0. 0.14644661 0.5 0.85355339 1. ]
>>> print(weights)
[0.03333333 0.26666667 0.4 0.26666667 0.03333333]
>>> abscissas, weights = _clenshaw_curtis(5)
>>> print(abscissas)
[0. 0.0954915 0.3454915 0.6545085 0.9045085 1. ]
>>> print(weights)
[0.02 0.18037152 0.29962848 0.29962848 0.18037152 0.02 ]
"""
if order == 0:
return numpy.array([.5]), numpy.array([1.])
theta = (order-numpy.arange(order+1))*numpy.pi/order
abscisas = 0.5*numpy.cos(theta) + 0.5
idx, idy = numpy.mgrid[:order+1, :order//2]
weights = 2*numpy.cos(2*(idy+1)*theta[idx])/(4*idy*(idy+2)+3)
if order % 2 == 0:
weights[:, -1] *= 0.5
weights = (1-numpy.sum(weights, -1)) / order
weights[0] /= 2
weights[-1] /= 2
return abscisas, weights
|
[
"numpy.sum",
"numpy.asarray",
"numpy.ones",
"numpy.where",
"numpy.array",
"numpy.arange",
"numpy.cos"
] |
[((3598, 3617), 'numpy.array', 'numpy.array', (['domain'], {}), '(domain)\n', (3609, 3617), False, 'import numpy\n'), ((3471, 3489), 'numpy.sum', 'numpy.sum', (['weights'], {}), '(weights)\n', (3480, 3489), False, 'import numpy\n'), ((3768, 3794), 'numpy.ones', 'numpy.ones', (['dim'], {'dtype': 'int'}), '(dim, dtype=int)\n', (3778, 3794), False, 'import numpy\n'), ((3813, 3828), 'numpy.ones', 'numpy.ones', (['dim'], {}), '(dim)\n', (3823, 3828), False, 'import numpy\n'), ((3847, 3862), 'numpy.ones', 'numpy.ones', (['dim'], {}), '(dim)\n', (3857, 3862), False, 'import numpy\n'), ((3901, 3938), 'numpy.where', 'numpy.where', (['(order > 0)', '(2 ** order)', '(0)'], {}), '(order > 0, 2 ** order, 0)\n', (3912, 3938), False, 'import numpy\n'), ((3537, 3568), 'numpy.asarray', 'numpy.asarray', (['order'], {'dtype': 'int'}), '(order, dtype=int)\n', (3550, 3568), False, 'import numpy\n'), ((3630, 3650), 'numpy.asarray', 'numpy.asarray', (['lower'], {}), '(lower)\n', (3643, 3650), False, 'import numpy\n'), ((3673, 3693), 'numpy.asarray', 'numpy.asarray', (['upper'], {}), '(upper)\n', (3686, 3693), False, 'import numpy\n'), ((5340, 5358), 'numpy.array', 'numpy.array', (['[0.5]'], {}), '([0.5])\n', (5351, 5358), False, 'import numpy\n'), ((5359, 5377), 'numpy.array', 'numpy.array', (['[1.0]'], {}), '([1.0])\n', (5370, 5377), False, 'import numpy\n'), ((5454, 5470), 'numpy.cos', 'numpy.cos', (['theta'], {}), '(theta)\n', (5463, 5470), False, 'import numpy\n'), ((5542, 5579), 'numpy.cos', 'numpy.cos', (['(2 * (idy + 1) * theta[idx])'], {}), '(2 * (idy + 1) * theta[idx])\n', (5551, 5579), False, 'import numpy\n'), ((5662, 5684), 'numpy.sum', 'numpy.sum', (['weights', '(-1)'], {}), '(weights, -1)\n', (5671, 5684), False, 'import numpy\n'), ((5397, 5420), 'numpy.arange', 'numpy.arange', (['(order + 1)'], {}), '(order + 1)\n', (5409, 5420), False, 'import numpy\n')]
|
#data preparation utils
import numpy as np
import tensorflow as tf
def partitionByClass(X,y_true):
maxc = np.max(y_true+1)
ids = [[] for i in range(maxc)]
for i in range(np.shape(y_true)[0]):
ids[y_true[i]].append(i)
return ids
def prepareBatch(X,y_true,ids_by_class_train,N_classes = 10, N_support = 10, N_query = 5, permute = True):
maxc = np.max(y_true) #max class number
classes = np.random.choice(range(maxc+1), size = (N_classes), replace = False) #choose subset of N_classes classes
ids_batch = np.array(
[np.random.choice(ids_by_class_train[c],size = (N_support + N_query), replace = False) for c in classes]
)
ids_batch_support = np.ndarray.flatten(ids_batch[:,:N_support])
ids_batch_query = np.ndarray.flatten(ids_batch[:,N_support:])
if permute:
ids_batch_support = np.random.permutation(ids_batch_support)
ids_batch_query = np.random.permutation(ids_batch_query)
return X[ids_batch_support,:,:], y_true[ids_batch_support], X[ids_batch_query,:,:], y_true[ids_batch_query], classes
#preprocessing images (loaded background 1.0, character 0.0)
def invert_img(x):
_,H,W = np.shape(x)
return -2.0 * np.reshape(x,[-1,H,W]) + 1.0
def deinvert_img(x):
_,H,W = np.shape(x)
return 1.0 - 0.5 * x
def resize_img(x,Hold,Wold,Hnew,Wnew):
q = tf.Session().run(tf.image.resize_images(tf.reshape(x,[-1,Hold,Wold,1]),[Hnew,Wnew]))
return np.reshape(q,[-1,Hnew,Wnew])
def subtract_mean(X):
N,H,W = np.shape(X)
Xf = np.reshape(X,[N,H*W])
means = np.mean(Xf, axis = 1, keepdims = True)
Xf = Xf - np.mean(Xf, axis = 1, keepdims = True)
return np.reshape(Xf,np.shape(X)), means
def augment_by_rotations(X,y,ks = [0,1,2,3]):
Xs,ys = [],[]
class_step = np.max(y)+1
for i,k in enumerate(ks):
Xs.append(np.rot90(X, k = k, axes = (1,2)))
ys.append(np.array(y) + (i)*class_step)
Xa = np.concatenate(Xs,axis = 0)
ya = np.concatenate(ys,axis = 0)
return Xa,ya
|
[
"tensorflow.reshape",
"tensorflow.Session",
"numpy.shape",
"numpy.max",
"numpy.mean",
"numpy.rot90",
"numpy.reshape",
"numpy.array",
"numpy.random.choice",
"numpy.random.permutation",
"numpy.concatenate",
"numpy.ndarray.flatten"
] |
[((112, 130), 'numpy.max', 'np.max', (['(y_true + 1)'], {}), '(y_true + 1)\n', (118, 130), True, 'import numpy as np\n'), ((373, 387), 'numpy.max', 'np.max', (['y_true'], {}), '(y_true)\n', (379, 387), True, 'import numpy as np\n'), ((701, 745), 'numpy.ndarray.flatten', 'np.ndarray.flatten', (['ids_batch[:, :N_support]'], {}), '(ids_batch[:, :N_support])\n', (719, 745), True, 'import numpy as np\n'), ((767, 811), 'numpy.ndarray.flatten', 'np.ndarray.flatten', (['ids_batch[:, N_support:]'], {}), '(ids_batch[:, N_support:])\n', (785, 811), True, 'import numpy as np\n'), ((1177, 1188), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (1185, 1188), True, 'import numpy as np\n'), ((1270, 1281), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (1278, 1281), True, 'import numpy as np\n'), ((1451, 1482), 'numpy.reshape', 'np.reshape', (['q', '[-1, Hnew, Wnew]'], {}), '(q, [-1, Hnew, Wnew])\n', (1461, 1482), True, 'import numpy as np\n'), ((1515, 1526), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (1523, 1526), True, 'import numpy as np\n'), ((1536, 1561), 'numpy.reshape', 'np.reshape', (['X', '[N, H * W]'], {}), '(X, [N, H * W])\n', (1546, 1561), True, 'import numpy as np\n'), ((1570, 1604), 'numpy.mean', 'np.mean', (['Xf'], {'axis': '(1)', 'keepdims': '(True)'}), '(Xf, axis=1, keepdims=True)\n', (1577, 1604), True, 'import numpy as np\n'), ((1940, 1966), 'numpy.concatenate', 'np.concatenate', (['Xs'], {'axis': '(0)'}), '(Xs, axis=0)\n', (1954, 1966), True, 'import numpy as np\n'), ((1977, 2003), 'numpy.concatenate', 'np.concatenate', (['ys'], {'axis': '(0)'}), '(ys, axis=0)\n', (1991, 2003), True, 'import numpy as np\n'), ((856, 896), 'numpy.random.permutation', 'np.random.permutation', (['ids_batch_support'], {}), '(ids_batch_support)\n', (877, 896), True, 'import numpy as np\n'), ((923, 961), 'numpy.random.permutation', 'np.random.permutation', (['ids_batch_query'], {}), '(ids_batch_query)\n', (944, 961), True, 'import numpy as np\n'), ((1623, 1657), 'numpy.mean', 
'np.mean', (['Xf'], {'axis': '(1)', 'keepdims': '(True)'}), '(Xf, axis=1, keepdims=True)\n', (1630, 1657), True, 'import numpy as np\n'), ((1789, 1798), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (1795, 1798), True, 'import numpy as np\n'), ((184, 200), 'numpy.shape', 'np.shape', (['y_true'], {}), '(y_true)\n', (192, 200), True, 'import numpy as np\n'), ((562, 647), 'numpy.random.choice', 'np.random.choice', (['ids_by_class_train[c]'], {'size': '(N_support + N_query)', 'replace': '(False)'}), '(ids_by_class_train[c], size=N_support + N_query, replace=False\n )\n', (578, 647), True, 'import numpy as np\n'), ((1207, 1232), 'numpy.reshape', 'np.reshape', (['x', '[-1, H, W]'], {}), '(x, [-1, H, W])\n', (1217, 1232), True, 'import numpy as np\n'), ((1355, 1367), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1365, 1367), True, 'import tensorflow as tf\n'), ((1395, 1429), 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, Hold, Wold, 1]'], {}), '(x, [-1, Hold, Wold, 1])\n', (1405, 1429), True, 'import tensorflow as tf\n'), ((1687, 1698), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (1695, 1698), True, 'import numpy as np\n'), ((1849, 1878), 'numpy.rot90', 'np.rot90', (['X'], {'k': 'k', 'axes': '(1, 2)'}), '(X, k=k, axes=(1, 2))\n', (1857, 1878), True, 'import numpy as np\n'), ((1901, 1912), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (1909, 1912), True, 'import numpy as np\n')]
|
import sys
from time import time
import click
import pyhecdss
from vtools.functions import filter
import pandas as pd
import numpy as np
from pydsm.ptm_animator import ptm_animate
from pydsm.hydro_slicer import slice_hydro
from pydsm.postpro import load_location_file, load_location_table
from pydsm.functions import tsmath
@click.group()
def main():
pass
def _build_column(columns, cpart_append, epart_replace=None):
'''
builds column name based on /A/B/C/D/E/F/ DSS pathname and
replacing the cpart with existing cpart + cpart_append value
'''
def append_cpart(name):
parts = name.split('/')
parts[3] = parts[3]+cpart_append
if epart_replace:
parts[5] = epart_replace
return '/'.join(parts)
return [append_cpart(name) for name in columns]
def _restart_console_line():
sys.stdout.write('\r')
sys.stdout.flush()
def _extract_processing(df, godin_filter, daily_average, daily_max, daily_min, monthly_average):
results = df
results_monthly = None
if godin_filter:
results = filter.godin_filter(results)
if daily_average: # if godin filtered then replace that with daily averaged values
tdf = results.resample('1D', closed='right', label='right').mean()
tdf.columns = _build_column(df.columns, '-MEAN', '1DAY')
results = tdf
if daily_max:
tdf = df.resample('1D', closed='right', label='right').max()
tdf.columns = _build_column(df.columns, '-MAX', '1DAY')
results = results.join(tdf, how='outer')
if daily_min:
tdf = df.resample('1D', closed='right', label='right').min()
tdf.columns = _build_column(df.columns, '-MIN', '1DAY')
results = results.join(tdf, how='outer')
if monthly_average:
results_monthly = df.resample('M', closed='right', label='right').mean()
results_monthly.columns = _build_column(df.columns, '-MONTHLY-AVG', '1MON')
return results, results_monthly
def _write_to_dss(od, rtg_daily, rtg_monthly, units, ptype='PER-VAL'):
for i in range(len(rtg_daily.columns)):
r = rtg_daily.iloc[:, i].to_frame()
od.write_rts(r.columns[0], r, units, ptype)
try:
r = rtg_monthly.iloc[:, 0].to_frame()
od.write_rts(r.columns[0], r, units, ptype)
except Exception:
pass
def _build_column(columns, cpart_append, epart_replace=None):
'''
builds column name based on /A/B/C/D/E/F/ DSS pathname and
replacing the cpart with existing cpart + cpart_append value
'''
def append_cpart(name):
parts = name.split('/')
parts[3] = parts[3]+cpart_append
if epart_replace:
parts[5] = epart_replace
return '/'.join(parts)
return [append_cpart(name) for name in columns]
@click.command()
@click.option("-o", "--outfile", default="out.gz", help="path to output file (ends in .zip, .gz, .bz2 for compression), (.h5 for hdf5), (.dss for dss)")
@click.option("--cpart", help="filter by cpart string match (e.g. EC for only loading EC)")
@click.option("-godin", "--godin-filter", is_flag=True, default=False, help="apply godin filter before writing out")
@click.option("-davg", "--daily-average", is_flag=True, default=False, help="average to daily values")
@click.option("-dmax", "--daily-max", is_flag=True, default=False, help="maximum daily value")
@click.option("-dmin", "--daily-min", is_flag=True, default=False, help="minimum daily value")
@click.option("-mavg", "--monthly-average", is_flag=True, default=False, help="monthly average value")
@click.argument("dssfile", type=click.Path(exists=True))
def extract_dss(dssfile, outfile, cpart, godin_filter, daily_average, daily_max, daily_min, monthly_average):
'''
Extract data from DSS file, optionally filtering it and writing to a pickle for quick future loads
'''
pyhecdss.set_message_level(0)
d = pyhecdss.DSSFile(dssfile)
od = None
if outfile.endswith('dss'):
od = pyhecdss.DSSFile(outfile)
catdf = d.read_catalog()
catec = catdf[catdf.C == cpart]
plist = d.get_pathnames(catec)
if len(plist) == 0:
print("No pathnames found in dssfile: %s for cpart=%s" %
(dssfile, cpart))
sys.stdout.write('@ %d / %d ==> Processing: %s' % (0, len(plist), plist[0]))
r, u, p = d.read_rts(plist[0])
results_daily, results_monthly = [], []
rtg_daily, rtg_monthly = _extract_processing(
r, godin_filter, daily_average, daily_max, daily_min, monthly_average)
if od:
_write_to_dss(od, rtg_daily, rtg_monthly, u)
else:
results_daily.append(rtg_daily)
results_monthly.append(rtg_monthly)
for index, p in enumerate(plist, start=1):
_restart_console_line()
sys.stdout.write('@ %d / %d ==> Processing: %s' % (index, len(plist), p))
r, u, p = d.read_rts(p)
rtg_daily, rtg_monthly = _extract_processing(
r, godin_filter, daily_average, daily_max, daily_min, monthly_average)
if od:
_write_to_dss(od, rtg_daily, rtg_monthly, u)
else:
results_daily.append(rtg_daily)
results_monthly.append(rtg_monthly)
if od:
print('Done writing to DSS: %s' % outfile)
od.close()
else:
all_daily = pd.concat(results_daily, axis=1)
all_monthly = pd.concat(results_monthly, axis=1)
if outfile.endswith('zip') or outfile.endswith('bz2') or outfile.endswith('gzip'):
all_daily.to_csv(outfile)
all_monthly.to_csv(outfile)
elif outfile.endswith('.h5'):
all_daily.to_hdf(outfile, 'daily')
all_monthly.to_hdf(outfile, 'monthly')
elif outfile.endswith('dss'):
od.close()
else:
print('Unknown type of file ending: %s' % outfile)
all_daily.to_pickle(outfile)
all_monthly.to_pickle(outfile)
@click.command()
@click.option("--cpart", help="filter by cpart string match (e.g. EC for only loading EC)")
@click.option("--threshold", default=1e-3, help="Threshold to check for mean squared error")
@click.option('--threshold-metric', default='rmse',
type=click.Choice(['mean_error', 'mse', 'rmse', 'nash_sutcliffe', 'percent_bias'], case_sensitive=False))
@click.option("--metricsfile", default="compare_dss_metrics_diff.csv", help="name of file to write out metrics differnce")
@click.option("--time-window", default=None, help='ddMMMyyyy [HHmm] - ddMMMyyyy [HHmm], e.g. "01JAN1990 - 01OCT1991" (quoted on command line)')
@click.argument("dssfile1", type=click.Path(exists=True))
@click.argument("dssfile2", type=click.Path(exists=True))
def compare_dss(dssfile1, dssfile2, threshold=1e-3, threshold_metric='rmse', time_window=None, cpart=None, godin=False, metricsfile='compare_dss_metrics_diff.csv'):
'''
Compares the dss files for common pathnames (B and C parts) and writes out various metrics to file
Filtering for matching c parts
and compare values with tolerance (default of 3 digits)
'''
pyhecdss.set_message_level(0)
with pyhecdss.DSSFile(dssfile1) as d1, pyhecdss.DSSFile(dssfile2) as d2:
dc1 = d1.read_catalog()
dc2 = d2.read_catalog()
if cpart != None:
dc1 = dc1[dc1.C == cpart]
dc2 = dc2[dc2.C == cpart]
# common B and C
cc = dc1.merge(dc2, on=['B', 'C'])
metrics = []
sdate, edate = (None, None)
if time_window:
sdate, edate = (f.strip() for f in time_window.split("-"))
for index, row in cc.iterrows():
rowid = '%s/%s' % (row.loc['B'], row.loc['C'])
print('Comparing %s' % rowid)
p1 = d1.get_pathnames(dc1[(dc1.B == row.loc['B']) & (dc1.C == row.loc['C'])])
df1, u1, p1 = d1.read_rts(p1[0], sdate, edate)
p2 = d2.get_pathnames(dc2[(dc2.B == row.loc['B']) & (dc2.C == row.loc['C'])])
df2, u2, p2 = d2.read_rts(p2[0], sdate, edate)
series1 = df1.iloc[:, 0]
series2 = df2.iloc[:, 0]
metrics.append((rowid, tsmath.mean_error(series1, series2),
tsmath.mse(series1, series2),
tsmath.rmse(series1, series2),
tsmath.nash_sutcliffe(series1, series2),
tsmath.percent_bias(series1, series2)))
dfmetrics = pd.DataFrame.from_records(
metrics, columns=['name', 'mean_error', 'mse', 'rmse', 'nash_sutcliffe', 'percent_bias'])
# -- display missing or unmatched pathnames
missingc1 = dc1[(~dc1.B.isin(cc.B)) & (~dc1.C.isin(cc.C))]
missingc2 = dc2[(~dc2.B.isin(cc.B)) & (~dc2.C.isin(cc.C))]
if not missingc1.empty:
print('No matches for FILE1: %s' % dssfile1)
print(missingc1)
if not missingc2.empty:
print('No matches for FILE2: %s' % dssfile2)
print(missingc2)
print(dfmetrics)
print('Writing out metrics to file: ', metricsfile)
dfmetrics.to_csv(metricsfile)
threshold_cond = dfmetrics[threshold_metric.lower()] > threshold
if threshold_cond.any():
print(f'Threshold {threshold} exceeded! See exceeded rows below')
print(dfmetrics[threshold_cond])
# adding sub commands to main
main.add_command(extract_dss)
main.add_command(compare_dss)
main.add_command(ptm_animate)
main.add_command(slice_hydro)
if __name__ == "__main__":
sys.exit(main())
|
[
"sys.stdout.write",
"pyhecdss.set_message_level",
"pydsm.functions.tsmath.rmse",
"pydsm.functions.tsmath.percent_bias",
"pydsm.functions.tsmath.mse",
"click.option",
"pydsm.functions.tsmath.mean_error",
"click.command",
"vtools.functions.filter.godin_filter",
"click.Choice",
"sys.stdout.flush",
"pandas.DataFrame.from_records",
"click.Path",
"pydsm.functions.tsmath.nash_sutcliffe",
"click.group",
"pandas.concat",
"pyhecdss.DSSFile"
] |
[((342, 355), 'click.group', 'click.group', ([], {}), '()\n', (353, 355), False, 'import click\n'), ((2882, 2897), 'click.command', 'click.command', ([], {}), '()\n', (2895, 2897), False, 'import click\n'), ((2900, 3061), 'click.option', 'click.option', (['"""-o"""', '"""--outfile"""'], {'default': '"""out.gz"""', 'help': '"""path to output file (ends in .zip, .gz, .bz2 for compression), (.h5 for hdf5), (.dss for dss)"""'}), "('-o', '--outfile', default='out.gz', help=\n 'path to output file (ends in .zip, .gz, .bz2 for compression), (.h5 for hdf5), (.dss for dss)'\n )\n", (2912, 3061), False, 'import click\n'), ((3054, 3149), 'click.option', 'click.option', (['"""--cpart"""'], {'help': '"""filter by cpart string match (e.g. EC for only loading EC)"""'}), "('--cpart', help=\n 'filter by cpart string match (e.g. EC for only loading EC)')\n", (3066, 3149), False, 'import click\n'), ((3147, 3267), 'click.option', 'click.option', (['"""-godin"""', '"""--godin-filter"""'], {'is_flag': '(True)', 'default': '(False)', 'help': '"""apply godin filter before writing out"""'}), "('-godin', '--godin-filter', is_flag=True, default=False, help=\n 'apply godin filter before writing out')\n", (3159, 3267), False, 'import click\n'), ((3265, 3371), 'click.option', 'click.option', (['"""-davg"""', '"""--daily-average"""'], {'is_flag': '(True)', 'default': '(False)', 'help': '"""average to daily values"""'}), "('-davg', '--daily-average', is_flag=True, default=False, help=\n 'average to daily values')\n", (3277, 3371), False, 'import click\n'), ((3369, 3467), 'click.option', 'click.option', (['"""-dmax"""', '"""--daily-max"""'], {'is_flag': '(True)', 'default': '(False)', 'help': '"""maximum daily value"""'}), "('-dmax', '--daily-max', is_flag=True, default=False, help=\n 'maximum daily value')\n", (3381, 3467), False, 'import click\n'), ((3465, 3563), 'click.option', 'click.option', (['"""-dmin"""', '"""--daily-min"""'], {'is_flag': '(True)', 'default': '(False)', 'help': '"""minimum 
daily value"""'}), "('-dmin', '--daily-min', is_flag=True, default=False, help=\n 'minimum daily value')\n", (3477, 3563), False, 'import click\n'), ((3561, 3666), 'click.option', 'click.option', (['"""-mavg"""', '"""--monthly-average"""'], {'is_flag': '(True)', 'default': '(False)', 'help': '"""monthly average value"""'}), "('-mavg', '--monthly-average', is_flag=True, default=False,\n help='monthly average value')\n", (3573, 3666), False, 'import click\n'), ((6073, 6088), 'click.command', 'click.command', ([], {}), '()\n', (6086, 6088), False, 'import click\n'), ((6091, 6186), 'click.option', 'click.option', (['"""--cpart"""'], {'help': '"""filter by cpart string match (e.g. EC for only loading EC)"""'}), "('--cpart', help=\n 'filter by cpart string match (e.g. EC for only loading EC)')\n", (6103, 6186), False, 'import click\n'), ((6184, 6281), 'click.option', 'click.option', (['"""--threshold"""'], {'default': '(0.001)', 'help': '"""Threshold to check for mean squared error"""'}), "('--threshold', default=0.001, help=\n 'Threshold to check for mean squared error')\n", (6196, 6281), False, 'import click\n'), ((6452, 6578), 'click.option', 'click.option', (['"""--metricsfile"""'], {'default': '"""compare_dss_metrics_diff.csv"""', 'help': '"""name of file to write out metrics differnce"""'}), "('--metricsfile', default='compare_dss_metrics_diff.csv', help=\n 'name of file to write out metrics differnce')\n", (6464, 6578), False, 'import click\n'), ((6576, 6728), 'click.option', 'click.option', (['"""--time-window"""'], {'default': 'None', 'help': '"""ddMMMyyyy [HHmm] - ddMMMyyyy [HHmm], e.g. "01JAN1990 - 01OCT1991" (quoted on command line)"""'}), '(\'--time-window\', default=None, help=\n \'ddMMMyyyy [HHmm] - ddMMMyyyy [HHmm], e.g. 
"01JAN1990 - 01OCT1991" (quoted on command line)\'\n )\n', (6588, 6728), False, 'import click\n'), ((885, 907), 'sys.stdout.write', 'sys.stdout.write', (["'\\r'"], {}), "('\\r')\n", (901, 907), False, 'import sys\n'), ((913, 931), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (929, 931), False, 'import sys\n'), ((3959, 3988), 'pyhecdss.set_message_level', 'pyhecdss.set_message_level', (['(0)'], {}), '(0)\n', (3985, 3988), False, 'import pyhecdss\n'), ((3998, 4023), 'pyhecdss.DSSFile', 'pyhecdss.DSSFile', (['dssfile'], {}), '(dssfile)\n', (4014, 4023), False, 'import pyhecdss\n'), ((7227, 7256), 'pyhecdss.set_message_level', 'pyhecdss.set_message_level', (['(0)'], {}), '(0)\n', (7253, 7256), False, 'import pyhecdss\n'), ((1121, 1149), 'vtools.functions.filter.godin_filter', 'filter.godin_filter', (['results'], {}), '(results)\n', (1140, 1149), False, 'from vtools.functions import filter\n'), ((4086, 4111), 'pyhecdss.DSSFile', 'pyhecdss.DSSFile', (['outfile'], {}), '(outfile)\n', (4102, 4111), False, 'import pyhecdss\n'), ((5437, 5469), 'pandas.concat', 'pd.concat', (['results_daily'], {'axis': '(1)'}), '(results_daily, axis=1)\n', (5446, 5469), True, 'import pandas as pd\n'), ((5493, 5527), 'pandas.concat', 'pd.concat', (['results_monthly'], {'axis': '(1)'}), '(results_monthly, axis=1)\n', (5502, 5527), True, 'import pandas as pd\n'), ((3696, 3719), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (3706, 3719), False, 'import click\n'), ((7267, 7293), 'pyhecdss.DSSFile', 'pyhecdss.DSSFile', (['dssfile1'], {}), '(dssfile1)\n', (7283, 7293), False, 'import pyhecdss\n'), ((7301, 7327), 'pyhecdss.DSSFile', 'pyhecdss.DSSFile', (['dssfile2'], {}), '(dssfile2)\n', (7317, 7327), False, 'import pyhecdss\n'), ((8607, 8726), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['metrics'], {'columns': "['name', 'mean_error', 'mse', 'rmse', 'nash_sutcliffe', 'percent_bias']"}), "(metrics, columns=['name', 'mean_error', 'mse',\n 
'rmse', 'nash_sutcliffe', 'percent_bias'])\n", (8632, 8726), True, 'import pandas as pd\n'), ((6349, 6453), 'click.Choice', 'click.Choice', (["['mean_error', 'mse', 'rmse', 'nash_sutcliffe', 'percent_bias']"], {'case_sensitive': '(False)'}), "(['mean_error', 'mse', 'rmse', 'nash_sutcliffe', 'percent_bias'\n ], case_sensitive=False)\n", (6361, 6453), False, 'import click\n'), ((6753, 6776), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (6763, 6776), False, 'import click\n'), ((6812, 6835), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (6822, 6835), False, 'import click\n'), ((8291, 8326), 'pydsm.functions.tsmath.mean_error', 'tsmath.mean_error', (['series1', 'series2'], {}), '(series1, series2)\n', (8308, 8326), False, 'from pydsm.functions import tsmath\n'), ((8357, 8385), 'pydsm.functions.tsmath.mse', 'tsmath.mse', (['series1', 'series2'], {}), '(series1, series2)\n', (8367, 8385), False, 'from pydsm.functions import tsmath\n'), ((8416, 8445), 'pydsm.functions.tsmath.rmse', 'tsmath.rmse', (['series1', 'series2'], {}), '(series1, series2)\n', (8427, 8445), False, 'from pydsm.functions import tsmath\n'), ((8476, 8515), 'pydsm.functions.tsmath.nash_sutcliffe', 'tsmath.nash_sutcliffe', (['series1', 'series2'], {}), '(series1, series2)\n', (8497, 8515), False, 'from pydsm.functions import tsmath\n'), ((8546, 8583), 'pydsm.functions.tsmath.percent_bias', 'tsmath.percent_bias', (['series1', 'series2'], {}), '(series1, series2)\n', (8565, 8583), False, 'from pydsm.functions import tsmath\n')]
|
#!/usr/bin/env python
# coding: utf-8
# # 02__trans_motifs
#
# in this notebook, i find motifs that are associated w/ trans effects using linear models and our RNA-seq data
# In[1]:
import warnings
warnings.filterwarnings('ignore')
import itertools
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
import statsmodels.formula.api as smf
import sys
from itertools import combinations
from scipy.stats import boxcox
from scipy.stats import linregress
from scipy.stats import spearmanr
from scipy.stats import pearsonr
from statsmodels.stats.anova import anova_lm
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import NearestNeighbors
# import utils
# NOTE(review): the star import below presumably supplies PAPER_PRESET,
# PAPER_FONTSIZE, mimic_r_boxplot, annotate_pval and the bare `stats`
# name used later -- confirm against ../../../utils/plotting_utils.py
sys.path.append("../../../utils")
from plotting_utils import *
get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'svg'")
mpl.rcParams['figure.autolayout'] = False
# In[2]:
sns.set(**PAPER_PRESET)
fontsize = PAPER_FONTSIZE
# In[3]:
# fix the RNG seed so any stochastic steps below are reproducible
np.random.seed(2019)
# In[4]:
# FDR threshold used to call a sequence "active" in a given cell type
QUANT_ALPHA = 0.05
# ## functions
# In[5]:
def calculate_gc(row, col):
    """Return the GC fraction (0..1) of the sequence stored in row[col].

    row : any mapping (e.g. pandas Series) supporting item access
    col : key under which the nucleotide sequence string is stored

    Returns 0.0 for an empty sequence instead of raising ZeroDivisionError.
    """
    seq = row[col]
    if len(seq) == 0:
        # guard: an empty sequence previously crashed with ZeroDivisionError
        return 0.0
    gc_count = seq.count("C") + seq.count("G")
    return gc_count / len(seq)
# In[6]:
def calculate_cpg(row, col):
    """Return the CpG dinucleotide fraction of the sequence in row[col].

    Counts non-overlapping "CG" occurrences divided by sequence length
    (CG cannot overlap itself, so this is the exact CpG count).

    Returns 0.0 for an empty sequence instead of raising ZeroDivisionError.
    """
    seq = row[col]
    if len(seq) == 0:
        # guard: an empty sequence previously crashed with ZeroDivisionError
        return 0.0
    return seq.count("CG") / len(seq)
# In[7]:
def sig_status(row):
    """Label a row "sig" when its trans-effect FDR (padj_trans) is below 0.05."""
    return "sig" if row.padj_trans < 0.05 else "not sig"
# In[8]:
def neg_odds(row):
    """Signed odds ratio for plotting on one axis.

    hESC-significant rows get a negative sign, mESC-significant rows stay
    positive; anything else is NaN.
    """
    status = row["sig_status"]
    if status == "sig hESC":
        return -row["hESC_odds"]
    if status == "sig mESC":
        return row["mESC_odds"]
    return np.nan
# In[9]:
def direction_match(row):
    """Check whether a motif's regression beta agrees with its TF's expression change.

    For an activating TF, the trans beta and the RNA-seq logFC must share the
    same (strict) sign to count as a "match"; for a repressing TF they must
    have strictly opposite signs. Zero betas/logFCs never match. TFs that are
    neither activating nor repressing are "unclear".
    """
    beta = row.beta_trans
    fc = row.logFC
    same_sign = (beta < 0 and fc < 0) or (beta > 0 and fc > 0)
    opposite_sign = (beta < 0 and fc > 0) or (beta > 0 and fc < 0)
    kind = row.activ_or_repr
    if kind == "activating":
        return "match" if same_sign else "no match"
    elif kind == "repressing":
        return "match" if opposite_sign else "no match"
    else:
        return "unclear"
# ## variables
# All paths are relative to this notebook's location inside the repo.
# In[10]:
human_motifs_f = "../../../data/04__mapped_motifs/human_motifs_filtered.txt.gz"
mouse_motifs_f = "../../../data/04__mapped_motifs/mouse_motifs_filtered.txt.gz"
# In[11]:
motif_info_dir = "../../../misc/01__motif_info"
motif_map_f = "%s/00__lambert_et_al_files/00__metadata/curated_motif_map.txt" % motif_info_dir
motif_info_f = "%s/00__lambert_et_al_files/00__metadata/motif_info.txt" % motif_info_dir
# In[12]:
sig_motifs_f = "../../../data/04__mapped_motifs/sig_motifs.txt"
# In[13]:
tss_map_f = "../../../data/01__design/01__mpra_list/mpra_tss.with_ids.RECLASSIFIED_WITH_MAX.txt"
# In[14]:
index_f = "../../../data/01__design/02__index/TWIST_pool4_v8_final.with_element_id.txt.gz"
# In[15]:
data_f = "../../../data/02__mpra/03__results/all_processed_results.txt"
# In[16]:
expr_dir = "../../../data/03__rna_seq/04__TF_expr"
orth_expr_f = "%s/orth_TF_expression.txt" % expr_dir
human_expr_f = "%s/hESC_TF_expression.txt" % expr_dir
mouse_expr_f = "%s/mESC_TF_expression.txt" % expr_dir
# In[17]:
orth_f = "../../../misc/00__ensembl_orthologs/ensembl96_human_mouse_orths.txt.gz"
# ## 1. import data
# In[18]:
index = pd.read_table(index_f, sep="\t")
index_elem = index[["element", "tile_type", "element_id", "name", "tile_number", "chrom", "strand", "actual_start",
                    "actual_end", "dupe_info"]]
index_elem = index_elem.drop_duplicates()
# In[19]:
tss_map = pd.read_table(tss_map_f, sep="\t")
tss_map.head()
# In[20]:
# this file is already filtered to correct tile nums
human_motifs = pd.read_table(human_motifs_f, sep="\t")
human_motifs.head()
# In[21]:
# this file is already filtered to correct tile nums
mouse_motifs = pd.read_table(mouse_motifs_f, sep="\t")
mouse_motifs.head()
# In[22]:
motif_info = pd.read_table(motif_info_f, sep="\t")
motif_info.head()
# In[23]:
# keep only motifs already called significant for activity (FDR < 0.05)
sig_motifs = pd.read_table(sig_motifs_f)
sig_motifs = sig_motifs[sig_motifs["padj"] < 0.05]
print(len(sig_motifs))
sig_motifs.head()
# In[24]:
data = pd.read_table(data_f)
data.head()
# In[25]:
orth_expr = pd.read_table(orth_expr_f, sep="\t")
orth_expr.head()
# In[26]:
human_expr = pd.read_table(human_expr_f, sep="\t")
human_expr.head()
# In[27]:
mouse_expr = pd.read_table(mouse_expr_f, sep="\t")
mouse_expr.head()
# In[28]:
orth = pd.read_table(orth_f, sep="\t")
orth.head()
# ## 2. merge data to build model
# In[29]:
# keep only the evolution-design tiles
index_elem = index_elem[index_elem["name"].str.contains("EVO")]
index_elem.head()
# In[30]:
# tile names look like <prefix>__<tss_id>__<tile_num>__...
index_elem["tss_id"] = index_elem["name"].str.split("__", expand=True)[1]
index_elem["tss_tile_num"] = index_elem["name"].str.split("__", expand=True)[2]
index_elem.sample(5)
# In[31]:
index_human = index_elem[index_elem["name"].str.contains("HUMAN")]
index_mouse = index_elem[index_elem["name"].str.contains("MOUSE")]
index_mouse.sample(5)
# In[32]:
# attach the human and mouse element sequences to each TSS pair
print(len(data))
data_elem = data.merge(index_human[["element", "tss_id", "tss_tile_num"]], left_on=["hg19_id", "tss_tile_num"],
                       right_on=["tss_id", "tss_tile_num"])
data_elem = data_elem.merge(index_mouse[["element", "tss_id", "tss_tile_num"]], left_on=["mm9_id", "tss_tile_num"],
                            right_on=["tss_id", "tss_tile_num"], suffixes=("_human", "_mouse"))
data_elem.drop(["tss_id_human", "tss_id_mouse"], axis=1, inplace=True)
# NOTE(review): this prints len(data) again, not len(data_elem) -- it does
# not report how many rows survived the merges; presumably intentional sanity
# echo, but confirm
print(len(data))
data_elem.head()
# In[33]:
# per-sequence GC and CpG content covariates for the linear model
data_elem["gc_human"] = data_elem.apply(calculate_gc, col="element_human", axis=1)
data_elem["gc_mouse"] = data_elem.apply(calculate_gc, col="element_mouse", axis=1)
data_elem["cpg_human"] = data_elem.apply(calculate_cpg, col="element_human", axis=1)
data_elem["cpg_mouse"] = data_elem.apply(calculate_cpg, col="element_mouse", axis=1)
data_elem.sample(5)
# In[34]:
data_elem.columns
# In[35]:
# stack human and mouse rows into one long table with shared column names
data_human = data_elem[["hg19_id", "tss_tile_num", "logFC_trans_human", "gc_human", "cpg_human", "HUES64_padj_hg19", "trans_status_one"]]
data_mouse = data_elem[["mm9_id", "tss_tile_num", "logFC_trans_mouse", "gc_mouse", "cpg_mouse", "mESC_padj_mm9", "trans_status_one"]]
data_human.columns = ["tss_id", "tss_tile_num", "logFC_trans", "gc", "cpg", "padj", "trans_status"]
data_mouse.columns = ["tss_id", "tss_tile_num", "logFC_trans", "gc", "cpg", "padj", "trans_status"]
data_indiv = data_human.append(data_mouse).drop_duplicates()
print(len(data_indiv))
data_indiv.head()
# ## 3. build reduced model
# In[36]:
# z-score the response and the sequence covariates so betas are comparable
scaled_features = StandardScaler().fit_transform(data_indiv[["logFC_trans", "gc", "cpg"]])
data_norm = pd.DataFrame(scaled_features, index=data_indiv.index, columns=["logFC_trans", "gc", "cpg"])
data_norm["padj"] = data_indiv["padj"]
data_norm["tss_id"] = data_indiv["tss_id"]
data_norm["tss_tile_num"] = data_indiv["tss_tile_num"]
data_norm["trans_status"] = data_indiv["trans_status"]
data_norm.head()
# In[37]:
# restrict to sequences active in the native cell type (padj < QUANT_ALPHA)
data_filt = data_norm[data_norm["padj"] < QUANT_ALPHA].drop_duplicates()
print(len(data_filt))
data_filt.head()
# In[38]:
# reduced model: trans effect explained by sequence composition alone
mod = smf.ols(formula='logFC_trans ~ gc + cpg',
              data=data_filt).fit()
# In[39]:
mod.summary()
# In[40]:
res = mod.resid
fig, ax = plt.subplots(figsize=(2.2, 2.2), ncols=1, nrows=1)
sm.qqplot(res, line='s', ax=ax)
ax.set_title("Normal QQ: trans effects model")
# fig.savefig("avg_activ_qq.pdf", dpi="figure", bbox_inches="tight")
# In[41]:
# log-likelihood and R^2 of the reduced model, reused for every motif below
reduced_llf = mod.llf
reduced_llf
# In[42]:
reduced_rsq = mod.rsquared
reduced_rsq
# ## 4. add motifs to model
# In[43]:
# composite key joining a TSS to its tile, matching the motif tables below
data_filt["tss_index"] = data_filt["tss_id"] + "__" + data_filt["tss_tile_num"]
# In[44]:
human_motifs["hg19_index"] = human_motifs["hg19_id"] + "__" + human_motifs["tss_tile_num"]
mouse_motifs["mm9_index"] = mouse_motifs["mm9_id"] + "__" + mouse_motifs["tss_tile_num"]
# In[45]:
# union of all motif ids mapped in either species
uniq_motifs = list(set(list(human_motifs["#pattern name"].unique()) + list(mouse_motifs["#pattern name"].unique())))
len(uniq_motifs)
# In[46]:
def tss_motif(row):
    """True when the motif was mapped in either the human or the mouse sequence."""
    return bool(row.human_motif) or bool(row.mouse_motif)
# In[47]:
# Fit one OLS per motif -- logFC_trans ~ gc + cpg + tss_motif -- recording the
# motif term's beta, its p-value, and the variance explained beyond the
# reduced (gc + cpg only) model.
motif_results = {}
for i, motif_id in enumerate(uniq_motifs):
    tmp = data_filt.copy()
    # determine whether motif is in human or mouse sequence
    human_motifs_sub = human_motifs[human_motifs["#pattern name"] == motif_id]["hg19_index"].unique()
    mouse_motifs_sub = mouse_motifs[mouse_motifs["#pattern name"] == motif_id]["mm9_index"].unique()
    tmp["human_motif"] = tmp["tss_index"].isin(human_motifs_sub)
    tmp["mouse_motif"] = tmp["tss_index"].isin(mouse_motifs_sub)
    tmp["tss_motif"] = tmp.apply(tss_motif, axis=1)
    n_w_motif = tmp["tss_motif"].sum()
    # make full model
    full_mod = smf.ols(formula='logFC_trans ~ gc + cpg + tss_motif',
                       data=tmp).fit()
    full_llf = full_mod.llf
    full_rsq = full_mod.rsquared
    # # perform likelihood ratio test
    # lr, p = lrtest(reduced_llf, full_llf)
    # calculate additional variance explained
    rsq = full_rsq - reduced_rsq
    # record beta
    # NOTE(review): params[1] relies on patsy placing the categorical
    # tss_motif[T.True] term directly after the intercept -- confirm the
    # design-matrix column order before trusting the betas
    beta = list(full_mod.params)[1]
    # beta p
    beta_p = list(full_mod.pvalues)[1]
    # bug fix: report the number of sequences WITH the motif; previously this
    # printed len(tmp), which is the same total row count on every iteration
    print("(#%s) %s: n w/ motif: %s ... p: %s, rsquared: %s" % (i+1, motif_id, n_w_motif, beta_p, rsq))
    motif_results[motif_id] = {"rsq": rsq, "beta": beta, "beta_p": beta_p, "n_w_motif": n_w_motif}
# In[48]:
motif_results = pd.DataFrame.from_dict(motif_results, orient="index").reset_index()
# require >= 10 sequences carrying the motif for a stable estimate
motif_results = motif_results[motif_results["n_w_motif"] >= 10]
print(len(motif_results))
motif_results.head()
# In[49]:
# BH-FDR across all motif p-values. The import is explicit here: the bare
# `multicomp` name used previously is not imported anywhere in this file and
# relied on the `from plotting_utils import *` star import being in scope.
from statsmodels.stats.multitest import multipletests
motif_results["padj"] = multipletests(motif_results["beta_p"], method="fdr_bh")[1]
len(motif_results[motif_results["padj"] < 0.05])
# In[50]:
motif_results.sort_values(by="padj").head(10)
# ## 5. join w/ TF info
# In[51]:
# merge trans-model results with the activity-model motif results
motif_results_mrg = motif_results.merge(sig_motifs, on="index", suffixes=("_trans", "_activ"))
motif_results_mrg.sort_values(by="padj_trans").head()
# In[52]:
sig_results = motif_results_mrg[(motif_results_mrg["padj_trans"] < 0.05)]
sig_results["abs_beta"] = np.abs(sig_results["beta_trans"])
sig_results = sig_results.sort_values(by="abs_beta", ascending=False)
sig_results.head()
# In[53]:
len(sig_results)
# In[54]:
len(sig_results["HGNC symbol"].unique())
# In[55]:
# NOTE: data_filt is re-assigned here from the un-normalized data_elem,
# shadowing the z-scored table used for the regressions above
data_filt = data_elem[((data_elem["HUES64_padj_hg19"] < QUANT_ALPHA) | (data_elem["mESC_padj_mm9"] < QUANT_ALPHA))]
print(len(data_filt))
# In[56]:
data_filt_sp = data_filt.drop("orig_species", axis=1)
data_filt_sp.drop_duplicates(inplace=True)
len(data_filt_sp)
# In[57]:
# long-format table (one row per TSS per species) for the example plots
data_filt_hu = data_filt_sp[["hg19_id", "logFC_trans_one", "trans_status_one"]]
data_filt_hu.columns = ["tss_id", "logFC_trans_one", "trans_status_one"]
data_filt_mo = data_filt_sp[["mm9_id", "logFC_trans_one", "trans_status_one"]]
data_filt_mo.columns = ["tss_id", "logFC_trans_one", "trans_status_one"]
data_filt_plot = data_filt_hu.append(data_filt_mo)
data_filt_plot["abs_logFC_trans"] = np.abs(data_filt_plot["logFC_trans_one"])
data_filt_plot.head()
# In[58]:
# example plots
# plot some examples
# For a handful of example TFs, compare trans effect sizes of sequences with
# vs. without the TF's motif. `mimic_r_boxplot`, `annotate_pval` and `stats`
# presumably come from the plotting_utils star import -- confirm.
examps = ["NFE2", "BACH2", "ARNTL", "BHLHE41", "POU2F3"]
order = [False, True]
pal = {False: sns.color_palette("Set2")[7], True: sns.color_palette("Set2")[2]}
for symb in examps:
    motif_id = sig_results[sig_results["HGNC symbol"] == symb]["index"].iloc[0]
    tmp = data_filt_plot.copy()
    # determine whether motif is in human or mouse sequence
    human_motifs_sub = human_motifs[human_motifs["#pattern name"] == motif_id]["hg19_id"].unique()
    mouse_motifs_sub = mouse_motifs[mouse_motifs["#pattern name"] == motif_id]["mm9_id"].unique()
    tmp["hg19_motif"] = tmp["tss_id"].isin(human_motifs_sub)
    tmp["mm9_motif"] = tmp["tss_id"].isin(mouse_motifs_sub)
    tmp["has_motif"] = tmp[["hg19_motif", "mm9_motif"]].sum(axis=1).astype(bool)
    fig, axarr = plt.subplots(figsize=(2.75, 1.5), nrows=1, ncols=2)
    # left panel: |trans effect| split by motif presence
    ax = axarr[0]
    sns.boxplot(data=tmp, x="has_motif", y="abs_logFC_trans", order=order, palette=pal,
                flierprops = dict(marker='o', markersize=5), ax=ax)
    mimic_r_boxplot(ax)
    ax.set_xticklabels(["no motif", "motif"], rotation=50,
                       ha="right", va="top")
    ax.set_ylabel("trans effect size")
    ax.set_title(symb)
    ax.set_xlabel("")
    for i, label in enumerate(order):
        n = len(tmp[tmp["has_motif"] == bool(label)])
        ax.annotate(str(n), xy=(i, -0.4), xycoords="data", xytext=(0, 0),
                    textcoords="offset pixels", ha='center', va='bottom',
                    color=pal[label], size=fontsize)
    ax.set_ylim((-0.5, 2.5))
    # right panel: signed trans effect split by motif presence
    ax = axarr[1]
    sns.boxplot(data=tmp, x="has_motif", y="logFC_trans_one", order=order, palette=pal,
                flierprops = dict(marker='o', markersize=5), ax=ax)
    ax.set_xticklabels(["no motif", "motif"], rotation=50, ha="right", va="top")
    mimic_r_boxplot(ax)
    ax.set_ylabel("trans effect size")
    ax.set_title(symb)
    ax.set_xlabel("")
    ax.axhline(y=0, linestyle="dashed", color="black", zorder=100)
    for i, label in enumerate(order):
        n = len(tmp[tmp["has_motif"] == bool(label)])
        ax.annotate(str(n), xy=(i, -2.4), xycoords="data", xytext=(0, 0),
                    textcoords="offset pixels", ha='center', va='bottom',
                    color=pal[label], size=fontsize)
    ## annotate pvals
    sub1 = tmp[tmp["has_motif"] == True]
    sub2 = tmp[tmp["has_motif"] == False]
    vals1 = np.asarray(sub1["logFC_trans_one"])
    vals2 = np.asarray(sub2["logFC_trans_one"])
    vals1 = vals1[~np.isnan(vals1)]
    vals2 = vals2[~np.isnan(vals2)]
    u, pval = stats.mannwhitneyu(vals1, vals2, alternative="two-sided", use_continuity=False)
    print(pval)
    annotate_pval(ax, 0.2, 0.8, 1, 0, 1, pval, fontsize-1)
    ax.set_ylim((-2.5, 2))
    plt.subplots_adjust(wspace=0.5)
    if symb == "BACH2":
        fig.savefig("Fig5C_1.pdf", dpi="figure", bbox_inches="tight")
    elif symb == "POU2F3":
        fig.savefig("Fig5C_2.pdf", dpi="figure", bbox_inches="tight")
    plt.show()
# In[59]:
# color TFs by whether the motif is activating or repressing
pal = {"repressing": sns.color_palette("pastel")[3], "activating": sns.color_palette("pastel")[0]}
# In[60]:
full_pal = {}
for i, row in sig_results.iterrows():
    full_pal[row["HGNC symbol"]] = pal[row["activ_or_repr"]]
# In[61]:
sig_results_sub = sig_results.head(50)
# In[62]:
fig = plt.figure(figsize=(4.5, 8))
ax1 = plt.subplot2grid((1, 7), (0, 0), colspan=3)
ax2 = plt.subplot2grid((1, 7), (0, 3), colspan=3)
ax3 = plt.subplot2grid((1, 7), (0, 6), colspan=1)
# assign one y-position per unique HGNC symbol for the dot panel.
# NOTE(review): a repeated symbol is appended the *next* counter value
# (c was already incremented), not the row of its first occurrence -- confirm
# this is the intended placement for duplicate symbols
yvals = []
symbs = []
c = 0
for i, row in sig_results_sub.iterrows():
    symb = row["HGNC symbol"]
    if symb not in symbs:
        yvals.append(c)
        symbs.append(symb)
        c += 1
    else:
        yvals.append(c)
sig_results_sub["yval"] = yvals
sns.barplot(y="HGNC symbol", x="beta_trans", data=sig_results_sub, palette=full_pal, ax=ax1)
ax1.set_ylabel("")
ax1.set_xlabel("effect size of motif disruption")
sns.barplot(y="HGNC symbol", x="rsq_activ", data=sig_results_sub, palette=full_pal, ax=ax2)
ax2.set_ylabel("")
ax2.tick_params(left=False, labelleft=False)
ax2.set_xlabel("additional variance explained")
melt = pd.melt(sig_results_sub, id_vars=["HGNC symbol", "yval"], value_vars=["no_CAGE_enr", "eRNA_enr",
                                                                       "lncRNA_enr", "mRNA_enr"])
ax3.plot(melt["value"], melt["yval"], 'o', color="black")
ax3.set_xlim((-0.5, 3.5))
ax3.set_ylim((np.max(yvals)-0.5, -0.5))
ax3.tick_params(labelleft=False, labelbottom=False, bottom=False, left=False, top=True, labeltop=True)
ax3.xaxis.set_ticks([0, 1, 2, 3])
ax3.set_xticklabels(["no CAGE", "eRNA", "lncRNA", "mRNA"], rotation=60, ha="left", va="bottom")
plt.show()
# fig.savefig("trans_motif_enrichment.pdf", dpi="figure", bbox_inches="tight")
plt.close()
# ## 6. join with expression information
# In[63]:
orth_expr.head()
# In[64]:
# attach hESC/mESC differential-expression results to each motif's TF
trans_orth = motif_results_mrg.merge(orth_expr, left_on="HGNC symbol", right_on="gene_name_human")
len(trans_orth)
# In[65]:
# fisher's exact to see if trans are enriched in DE TFs
trans_ids = trans_orth[trans_orth["padj_trans"] < 0.05]["index"].unique()
no_trans_ids = trans_orth[trans_orth["padj_trans"] >= 0.05]["index"].unique()
DE_ids = trans_orth[trans_orth["sig"] == "sig"]["index"].unique()
trans_w_DE = len([x for x in trans_ids if x in DE_ids])
trans_wo_DE = len([x for x in trans_ids if x not in DE_ids])
no_trans_w_DE = len([x for x in no_trans_ids if x in DE_ids])
no_trans_wo_DE = len([x for x in no_trans_ids if x not in DE_ids])
# fisher's exact test
arr = np.zeros((2, 2))
arr[0, 0] = trans_w_DE
arr[0, 1] = trans_wo_DE
arr[1, 0] = no_trans_w_DE
arr[1, 1] = no_trans_wo_DE
odds, p = stats.fisher_exact(arr)
print(odds)
print(p)
# In[66]:
# motifs with a significant trans association, ordered by |beta|
trans_orth_sig = trans_orth[trans_orth["padj_trans"] < 0.05]
trans_orth_sig["abs_beta"] = np.abs(trans_orth_sig["beta_trans"])
trans_orth_sig = trans_orth_sig.sort_values(by="abs_beta", ascending=False)
len(trans_orth_sig)
# In[67]:
# trans-significant motifs whose TF is also differentially expressed
trans_orth_sub = trans_orth_sig[trans_orth_sig["sig"] == "sig"]
len(trans_orth_sub)
# In[68]:
fig = plt.figure(figsize=(4.5, 9))
ax1 = plt.subplot2grid((1, 7), (0, 0), colspan=3)
ax2 = plt.subplot2grid((1, 7), (0, 3), colspan=3)
ax3 = plt.subplot2grid((1, 7), (0, 6), colspan=1)
# one y-position per unique HGNC symbol (same scheme as the figure above)
yvals = []
symbs = []
c = 0
for i, row in trans_orth_sub.iterrows():
    symb = row["HGNC symbol"]
    if symb not in symbs:
        yvals.append(c)
        symbs.append(symb)
        c += 1
    else:
        yvals.append(c)
trans_orth_sub["yval"] = yvals
sns.barplot(y="HGNC symbol", x="beta_trans", data=trans_orth_sub, palette=full_pal, ax=ax1)
ax1.set_ylabel("")
ax1.set_xlabel("effect size of\nmotif presence")
sns.barplot(y="HGNC symbol", x="logFC", data=trans_orth_sub, palette=full_pal, ax=ax2)
ax2.set_ylabel("")
ax2.tick_params(left=False, labelleft=False)
ax2.set_xlabel("log2(mESC/hESC)")
melt = pd.melt(trans_orth_sub, id_vars=["HGNC symbol", "yval"], value_vars=["no_CAGE_enr", "eRNA_enr",
                                                                      "lncRNA_enr", "mRNA_enr"])
ax3.plot(melt["value"], melt["yval"], 'o', color="black")
ax3.set_xlim((-0.5, 3.5))
ax3.set_ylim((np.max(yvals)-0.5, -0.5))
ax3.tick_params(labelleft=False, labelbottom=False, bottom=False, left=False, top=True, labeltop=True)
ax3.xaxis.set_ticks([0, 1, 2, 3])
ax3.set_xticklabels(["no CAGE", "eRNA", "lncRNA", "mRNA"], rotation=60, ha="left", va="bottom")
plt.show()
fig.savefig("FigS11.pdf", dpi="figure", bbox_inches="tight")
plt.close()
# In[69]:
trans_orth.head()
# In[70]:
# scatter of motif beta vs. TF expression logFC, with Spearman correlation
fig, ax = plt.subplots(figsize=(2.2, 2.2), nrows=1, ncols=1)
ax.scatter(trans_orth["beta_trans"],
           trans_orth["logFC"],
           color=sns.color_palette("Set2")[2], alpha=0.75, s=15,
           linewidths=0.5, edgecolors="white")
#ax.plot([-0.75, 400000], [-0.75, 400000], "k", linestyle="dashed")
#ax.set_xlim((-0.75, 400000))
#ax.set_ylim((-0.75, 400000))
ax.set_xlabel("trans odds ratio")
ax.set_ylabel("RNA-seq logFC([mESC/hESC])")
# annotate corr
no_nan = trans_orth[(~pd.isnull(trans_orth["beta_trans"])) &
                    (~pd.isnull(trans_orth["logFC"]))]
r, p = spearmanr(no_nan["beta_trans"], no_nan["logFC"])
ax.text(0.05, 0.97, "r = {:.2f}".format(r), ha="left", va="top", fontsize=fontsize,
        transform=ax.transAxes)
ax.text(0.05, 0.90, "n = %s" % (len(no_nan)), ha="left", va="top", fontsize=fontsize,
        transform=ax.transAxes)
#fig.savefig("TF_human_v_mouse_scatter.w_sig_outline.pdf", dpi="figure", bbox_inches="tight")
# In[71]:
# filter to those where direction matches
trans_orth_sub["direction_match"] = trans_orth_sub.apply(direction_match, axis=1)
trans_orth_sub.direction_match.value_counts()
# In[72]:
trans_orth_match = trans_orth_sub[trans_orth_sub["direction_match"] == "match"]
# In[73]:
# split direction-consistent TFs by their regulatory mode
match_activ = trans_orth_match[trans_orth_match["activ_or_repr"] == "activating"]
match_repr = trans_orth_match[trans_orth_match["activ_or_repr"] == "repressing"]
# In[74]:
# Fig 5B: direction-consistent ACTIVATING TFs -- motif beta vs. expression fold change
fig = plt.figure(figsize=(4, 4.4))
ax1 = plt.subplot2grid((1, 7), (0, 0), colspan=3)
ax2 = plt.subplot2grid((1, 7), (0, 3), colspan=3)
ax3 = plt.subplot2grid((1, 7), (0, 6), colspan=1)
# one y-position per unique HGNC symbol (same scheme as the figures above)
yvals = []
symbs = []
c = 0
for i, row in match_activ.iterrows():
    symb = row["HGNC symbol"]
    if symb not in symbs:
        yvals.append(c)
        symbs.append(symb)
        c += 1
    else:
        yvals.append(c)
match_activ["yval"] = yvals
sns.barplot(y="HGNC symbol", x="beta_trans", data=match_activ, palette=full_pal, ax=ax1)
ax1.set_ylabel("")
ax1.set_xlabel("effect size of\nmotif presence")
ax1.axvline(x=0, linestyle="dashed", color="black")
sns.barplot(y="HGNC symbol", x="logFC", data=match_activ, palette=full_pal, ax=ax2)
ax2.set_ylabel("")
ax2.tick_params(left=False, labelleft=False)
ax2.set_xlabel("log2(mESC/hESC)")
ax2.axvline(x=0, linestyle="dashed", color="black")
melt = pd.melt(match_activ, id_vars=["HGNC symbol", "yval"], value_vars=["no_CAGE_enr", "eRNA_enr",
                                                                   "lncRNA_enr", "mRNA_enr"])
ax3.plot(melt["value"], melt["yval"], 'o', color="black")
ax3.set_xlim((-0.5, 3.5))
ax3.set_ylim((np.max(yvals)-0.5, -0.5))
ax3.tick_params(labelleft=False, labelbottom=False, bottom=False, left=False, top=True, labeltop=True)
ax3.xaxis.set_ticks([0, 1, 2, 3])
ax3.set_xticklabels(["no CAGE", "eRNA", "lncRNA", "mRNA"], rotation=60, ha="left", va="bottom")
plt.show()
fig.savefig("Fig5B.pdf", dpi="figure", bbox_inches="tight")
plt.close()
# In[75]:
# Same layout for direction-consistent REPRESSING TFs (figure not saved)
fig = plt.figure(figsize=(4, 0.5))
ax1 = plt.subplot2grid((1, 7), (0, 0), colspan=3)
ax2 = plt.subplot2grid((1, 7), (0, 3), colspan=3)
ax3 = plt.subplot2grid((1, 7), (0, 6), colspan=1)
# one y-position per unique HGNC symbol (same scheme as the figures above)
yvals = []
symbs = []
c = 0
for i, row in match_repr.iterrows():
    symb = row["HGNC symbol"]
    if symb not in symbs:
        yvals.append(c)
        symbs.append(symb)
        c += 1
    else:
        yvals.append(c)
match_repr["yval"] = yvals
sns.barplot(y="HGNC symbol", x="beta_trans", data=match_repr, palette=full_pal, ax=ax1)
ax1.set_ylabel("")
ax1.set_xlabel("effect size of\nmotif presence")
ax1.axvline(x=0, linestyle="dashed", color="black")
sns.barplot(y="HGNC symbol", x="logFC", data=match_repr, palette=full_pal, ax=ax2)
ax2.set_ylabel("")
ax2.tick_params(left=False, labelleft=False)
ax2.set_xlabel("log2(mESC/hESC)")
ax2.axvline(x=0, linestyle="dashed", color="black")
melt = pd.melt(match_repr, id_vars=["HGNC symbol", "yval"], value_vars=["no_CAGE_enr", "eRNA_enr",
                                                                  "lncRNA_enr", "mRNA_enr"])
ax3.plot(melt["value"], melt["yval"], 'o', color="black")
ax3.set_xlim((-0.5, 3.5))
ax3.set_ylim((np.max(yvals)-0.5, -0.5))
ax3.tick_params(labelleft=False, labelbottom=False, bottom=False, left=False, top=True, labeltop=True)
ax3.xaxis.set_ticks([0, 1, 2, 3])
ax3.set_xticklabels(["no CAGE", "eRNA", "lncRNA", "mRNA"], rotation=60, ha="left", va="bottom")
plt.show()
# fig.savefig("trans_motif_enrichment.with_expr.match_only.repr.pdf", dpi="figure", bbox_inches="tight")
plt.close()
# ## 7. join w/ % similarity information
# In[76]:
orth.columns
# In[77]:
# dN/dS ratio per human-mouse ortholog pair (protein-level divergence)
orth_sub = orth[["Gene name", "Mouse gene name", "dN with Mouse", "dS with Mouse"]]
orth_sub.columns = ["human_gene_name", "mouse_gene_name", "dN", "dS"]
orth_sub["dNdS"] = orth_sub["dN"]/orth_sub["dS"]
# In[78]:
trans_orth = trans_orth.merge(orth_sub, left_on="HGNC symbol", right_on="human_gene_name").drop_duplicates()
print(len(trans_orth))
trans_orth.sample(5)
# In[79]:
trans_orth["abs_l2fc"] = np.abs(trans_orth["logFC"])
# sig_status labels each motif by its trans-model FDR (padj_trans < 0.05)
trans_orth["sig_status"] = trans_orth.apply(sig_status, axis=1)
trans_orth.head()
# In[80]:
trans_orth.sig_status.value_counts()
# In[81]:
order = ["not sig", "sig"]
palette = {"not sig": "gray", "sig": sns.color_palette("Set2")[2]}
# In[82]:
# restrict to trans-significant motifs; the boxplot below then splits these
# by the RNA-seq DE call in the "sig" column
trans_orth_sig = trans_orth[trans_orth["sig_status"] == "sig"]
print(len(trans_orth_sig))
trans_orth_sig.head()
# In[83]:
# dN/dS of trans-associated TFs, split by whether the TF is DE between species
fig = plt.figure(figsize=(1, 1.75))
ax = sns.boxplot(data=trans_orth_sig, x="sig", y="dNdS", palette=palette, order=order,
                 flierprops = dict(marker='o', markersize=5))
mimic_r_boxplot(ax)
ax.set_xticklabels(order, rotation=50, ha='right', va='top')
ax.set_xlabel("")
ax.set_ylabel("dN/dS")
for i, label in enumerate(order):
    n = len(trans_orth_sig[trans_orth_sig["sig"] == label])
    ax.annotate(str(n), xy=(i, -0.07), xycoords="data", xytext=(0, 0),
                textcoords="offset pixels", ha='center', va='bottom',
                color=palette[label], size=fontsize)
ax.set_ylim((-0.09, 0.4))
# calc p-vals b/w dists
dist1 = np.asarray(trans_orth_sig[trans_orth_sig["sig"] == "sig"]["dNdS"])
dist2 = np.asarray(trans_orth_sig[trans_orth_sig["sig"] != "sig"]["dNdS"])
dist1 = dist1[~np.isnan(dist1)]
dist2 = dist2[~np.isnan(dist2)]
u, pval = stats.mannwhitneyu(dist1, dist2, alternative="two-sided", use_continuity=False)
print(pval)
annotate_pval(ax, 0.2, 0.8, 0.2, 0, 0.2, pval, fontsize-1)
plt.show()
# fig.savefig("DE_v_similarity_boxplot.pdf", dpi="figure", bbox_inches="tight")
plt.close()
|
[
"numpy.random.seed",
"numpy.abs",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.subplot2grid",
"numpy.isnan",
"matplotlib.pyplot.figure",
"statsmodels.api.qqplot",
"pandas.read_table",
"sys.path.append",
"pandas.DataFrame",
"matplotlib.pyplot.close",
"numpy.max",
"statsmodels.formula.api.ols",
"matplotlib.pyplot.subplots",
"seaborn.set",
"matplotlib.pyplot.show",
"pandas.DataFrame.from_dict",
"numpy.asarray",
"seaborn.barplot",
"matplotlib.pyplot.subplots_adjust",
"pandas.melt",
"warnings.filterwarnings",
"scipy.stats.spearmanr",
"numpy.zeros",
"pandas.isnull",
"seaborn.color_palette"
] |
[((203, 236), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (226, 236), False, 'import warnings\n'), ((779, 812), 'sys.path.append', 'sys.path.append', (['"""../../../utils"""'], {}), "('../../../utils')\n", (794, 812), False, 'import sys\n'), ((1029, 1052), 'seaborn.set', 'sns.set', ([], {}), '(**PAPER_PRESET)\n', (1036, 1052), True, 'import seaborn as sns\n'), ((1092, 1112), 'numpy.random.seed', 'np.random.seed', (['(2019)'], {}), '(2019)\n', (1106, 1112), True, 'import numpy as np\n'), ((3491, 3523), 'pandas.read_table', 'pd.read_table', (['index_f'], {'sep': '"""\t"""'}), "(index_f, sep='\\t')\n", (3504, 3523), True, 'import pandas as pd\n'), ((3755, 3789), 'pandas.read_table', 'pd.read_table', (['tss_map_f'], {'sep': '"""\t"""'}), "(tss_map_f, sep='\\t')\n", (3768, 3789), True, 'import pandas as pd\n'), ((3887, 3926), 'pandas.read_table', 'pd.read_table', (['human_motifs_f'], {'sep': '"""\t"""'}), "(human_motifs_f, sep='\\t')\n", (3900, 3926), True, 'import pandas as pd\n'), ((4029, 4068), 'pandas.read_table', 'pd.read_table', (['mouse_motifs_f'], {'sep': '"""\t"""'}), "(mouse_motifs_f, sep='\\t')\n", (4042, 4068), True, 'import pandas as pd\n'), ((4116, 4153), 'pandas.read_table', 'pd.read_table', (['motif_info_f'], {'sep': '"""\t"""'}), "(motif_info_f, sep='\\t')\n", (4129, 4153), True, 'import pandas as pd\n'), ((4199, 4226), 'pandas.read_table', 'pd.read_table', (['sig_motifs_f'], {}), '(sig_motifs_f)\n', (4212, 4226), True, 'import pandas as pd\n'), ((4340, 4361), 'pandas.read_table', 'pd.read_table', (['data_f'], {}), '(data_f)\n', (4353, 4361), True, 'import pandas as pd\n'), ((4400, 4436), 'pandas.read_table', 'pd.read_table', (['orth_expr_f'], {'sep': '"""\t"""'}), "(orth_expr_f, sep='\\t')\n", (4413, 4436), True, 'import pandas as pd\n'), ((4481, 4518), 'pandas.read_table', 'pd.read_table', (['human_expr_f'], {'sep': '"""\t"""'}), "(human_expr_f, sep='\\t')\n", (4494, 4518), True, 'import pandas as 
pd\n'), ((4564, 4601), 'pandas.read_table', 'pd.read_table', (['mouse_expr_f'], {'sep': '"""\t"""'}), "(mouse_expr_f, sep='\\t')\n", (4577, 4601), True, 'import pandas as pd\n'), ((4641, 4672), 'pandas.read_table', 'pd.read_table', (['orth_f'], {'sep': '"""\t"""'}), "(orth_f, sep='\\t')\n", (4654, 4672), True, 'import pandas as pd\n'), ((6831, 6927), 'pandas.DataFrame', 'pd.DataFrame', (['scaled_features'], {'index': 'data_indiv.index', 'columns': "['logFC_trans', 'gc', 'cpg']"}), "(scaled_features, index=data_indiv.index, columns=[\n 'logFC_trans', 'gc', 'cpg'])\n", (6843, 6927), True, 'import pandas as pd\n'), ((7426, 7476), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(2.2, 2.2)', 'ncols': '(1)', 'nrows': '(1)'}), '(figsize=(2.2, 2.2), ncols=1, nrows=1)\n', (7438, 7476), True, 'import matplotlib.pyplot as plt\n'), ((7477, 7508), 'statsmodels.api.qqplot', 'sm.qqplot', (['res'], {'line': '"""s"""', 'ax': 'ax'}), "(res, line='s', ax=ax)\n", (7486, 7508), True, 'import statsmodels.api as sm\n'), ((10357, 10390), 'numpy.abs', 'np.abs', (["sig_results['beta_trans']"], {}), "(sig_results['beta_trans'])\n", (10363, 10390), True, 'import numpy as np\n'), ((11253, 11294), 'numpy.abs', 'np.abs', (["data_filt_plot['logFC_trans_one']"], {}), "(data_filt_plot['logFC_trans_one'])\n", (11259, 11294), True, 'import numpy as np\n'), ((14708, 14736), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4.5, 8)'}), '(figsize=(4.5, 8))\n', (14718, 14736), True, 'import matplotlib.pyplot as plt\n'), ((14744, 14787), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 7)', '(0, 0)'], {'colspan': '(3)'}), '((1, 7), (0, 0), colspan=3)\n', (14760, 14787), True, 'import matplotlib.pyplot as plt\n'), ((14794, 14837), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 7)', '(0, 3)'], {'colspan': '(3)'}), '((1, 7), (0, 3), colspan=3)\n', (14810, 14837), True, 'import matplotlib.pyplot as plt\n'), ((14844, 14887), 'matplotlib.pyplot.subplot2grid', 
'plt.subplot2grid', (['(1, 7)', '(0, 6)'], {'colspan': '(1)'}), '((1, 7), (0, 6), colspan=1)\n', (14860, 14887), True, 'import matplotlib.pyplot as plt\n'), ((15148, 15245), 'seaborn.barplot', 'sns.barplot', ([], {'y': '"""HGNC symbol"""', 'x': '"""beta_trans"""', 'data': 'sig_results_sub', 'palette': 'full_pal', 'ax': 'ax1'}), "(y='HGNC symbol', x='beta_trans', data=sig_results_sub, palette=\n full_pal, ax=ax1)\n", (15159, 15245), True, 'import seaborn as sns\n'), ((15311, 15407), 'seaborn.barplot', 'sns.barplot', ([], {'y': '"""HGNC symbol"""', 'x': '"""rsq_activ"""', 'data': 'sig_results_sub', 'palette': 'full_pal', 'ax': 'ax2'}), "(y='HGNC symbol', x='rsq_activ', data=sig_results_sub, palette=\n full_pal, ax=ax2)\n", (15322, 15407), True, 'import seaborn as sns\n'), ((15523, 15651), 'pandas.melt', 'pd.melt', (['sig_results_sub'], {'id_vars': "['HGNC symbol', 'yval']", 'value_vars': "['no_CAGE_enr', 'eRNA_enr', 'lncRNA_enr', 'mRNA_enr']"}), "(sig_results_sub, id_vars=['HGNC symbol', 'yval'], value_vars=[\n 'no_CAGE_enr', 'eRNA_enr', 'lncRNA_enr', 'mRNA_enr'])\n", (15530, 15651), True, 'import pandas as pd\n'), ((16086, 16096), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16094, 16096), True, 'import matplotlib.pyplot as plt\n'), ((16176, 16187), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (16185, 16187), True, 'import matplotlib.pyplot as plt\n'), ((16954, 16970), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (16962, 16970), True, 'import numpy as np\n'), ((17231, 17267), 'numpy.abs', 'np.abs', (["trans_orth_sig['beta_trans']"], {}), "(trans_orth_sig['beta_trans'])\n", (17237, 17267), True, 'import numpy as np\n'), ((17482, 17510), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4.5, 9)'}), '(figsize=(4.5, 9))\n', (17492, 17510), True, 'import matplotlib.pyplot as plt\n'), ((17518, 17561), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 7)', '(0, 0)'], {'colspan': '(3)'}), '((1, 7), (0, 0), 
colspan=3)\n', (17534, 17561), True, 'import matplotlib.pyplot as plt\n'), ((17568, 17611), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 7)', '(0, 3)'], {'colspan': '(3)'}), '((1, 7), (0, 3), colspan=3)\n', (17584, 17611), True, 'import matplotlib.pyplot as plt\n'), ((17618, 17661), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 7)', '(0, 6)'], {'colspan': '(1)'}), '((1, 7), (0, 6), colspan=1)\n', (17634, 17661), True, 'import matplotlib.pyplot as plt\n'), ((17920, 18016), 'seaborn.barplot', 'sns.barplot', ([], {'y': '"""HGNC symbol"""', 'x': '"""beta_trans"""', 'data': 'trans_orth_sub', 'palette': 'full_pal', 'ax': 'ax1'}), "(y='HGNC symbol', x='beta_trans', data=trans_orth_sub, palette=\n full_pal, ax=ax1)\n", (17931, 18016), True, 'import seaborn as sns\n'), ((18081, 18172), 'seaborn.barplot', 'sns.barplot', ([], {'y': '"""HGNC symbol"""', 'x': '"""logFC"""', 'data': 'trans_orth_sub', 'palette': 'full_pal', 'ax': 'ax2'}), "(y='HGNC symbol', x='logFC', data=trans_orth_sub, palette=\n full_pal, ax=ax2)\n", (18092, 18172), True, 'import seaborn as sns\n'), ((18274, 18401), 'pandas.melt', 'pd.melt', (['trans_orth_sub'], {'id_vars': "['HGNC symbol', 'yval']", 'value_vars': "['no_CAGE_enr', 'eRNA_enr', 'lncRNA_enr', 'mRNA_enr']"}), "(trans_orth_sub, id_vars=['HGNC symbol', 'yval'], value_vars=[\n 'no_CAGE_enr', 'eRNA_enr', 'lncRNA_enr', 'mRNA_enr'])\n", (18281, 18401), True, 'import pandas as pd\n'), ((18836, 18846), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18844, 18846), True, 'import matplotlib.pyplot as plt\n'), ((18908, 18919), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (18917, 18919), True, 'import matplotlib.pyplot as plt\n'), ((18976, 19026), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(2.2, 2.2)', 'nrows': '(1)', 'ncols': '(1)'}), '(figsize=(2.2, 2.2), nrows=1, ncols=1)\n', (18988, 19026), True, 'import matplotlib.pyplot as plt\n'), ((19560, 19608), 'scipy.stats.spearmanr', 
'spearmanr', (["no_nan['beta_trans']", "no_nan['logFC']"], {}), "(no_nan['beta_trans'], no_nan['logFC'])\n", (19569, 19608), False, 'from scipy.stats import spearmanr\n'), ((20413, 20441), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 4.4)'}), '(figsize=(4, 4.4))\n', (20423, 20441), True, 'import matplotlib.pyplot as plt\n'), ((20449, 20492), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 7)', '(0, 0)'], {'colspan': '(3)'}), '((1, 7), (0, 0), colspan=3)\n', (20465, 20492), True, 'import matplotlib.pyplot as plt\n'), ((20499, 20542), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 7)', '(0, 3)'], {'colspan': '(3)'}), '((1, 7), (0, 3), colspan=3)\n', (20515, 20542), True, 'import matplotlib.pyplot as plt\n'), ((20549, 20592), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 7)', '(0, 6)'], {'colspan': '(1)'}), '((1, 7), (0, 6), colspan=1)\n', (20565, 20592), True, 'import matplotlib.pyplot as plt\n'), ((20845, 20938), 'seaborn.barplot', 'sns.barplot', ([], {'y': '"""HGNC symbol"""', 'x': '"""beta_trans"""', 'data': 'match_activ', 'palette': 'full_pal', 'ax': 'ax1'}), "(y='HGNC symbol', x='beta_trans', data=match_activ, palette=\n full_pal, ax=ax1)\n", (20856, 20938), True, 'import seaborn as sns\n'), ((21055, 21142), 'seaborn.barplot', 'sns.barplot', ([], {'y': '"""HGNC symbol"""', 'x': '"""logFC"""', 'data': 'match_activ', 'palette': 'full_pal', 'ax': 'ax2'}), "(y='HGNC symbol', x='logFC', data=match_activ, palette=full_pal,\n ax=ax2)\n", (21066, 21142), True, 'import seaborn as sns\n'), ((21297, 21421), 'pandas.melt', 'pd.melt', (['match_activ'], {'id_vars': "['HGNC symbol', 'yval']", 'value_vars': "['no_CAGE_enr', 'eRNA_enr', 'lncRNA_enr', 'mRNA_enr']"}), "(match_activ, id_vars=['HGNC symbol', 'yval'], value_vars=[\n 'no_CAGE_enr', 'eRNA_enr', 'lncRNA_enr', 'mRNA_enr'])\n", (21304, 21421), True, 'import pandas as pd\n'), ((21856, 21866), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21864, 21866), 
True, 'import matplotlib.pyplot as plt\n'), ((21927, 21938), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (21936, 21938), True, 'import matplotlib.pyplot as plt\n'), ((21959, 21987), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 0.5)'}), '(figsize=(4, 0.5))\n', (21969, 21987), True, 'import matplotlib.pyplot as plt\n'), ((21995, 22038), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 7)', '(0, 0)'], {'colspan': '(3)'}), '((1, 7), (0, 0), colspan=3)\n', (22011, 22038), True, 'import matplotlib.pyplot as plt\n'), ((22045, 22088), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 7)', '(0, 3)'], {'colspan': '(3)'}), '((1, 7), (0, 3), colspan=3)\n', (22061, 22088), True, 'import matplotlib.pyplot as plt\n'), ((22095, 22138), 'matplotlib.pyplot.subplot2grid', 'plt.subplot2grid', (['(1, 7)', '(0, 6)'], {'colspan': '(1)'}), '((1, 7), (0, 6), colspan=1)\n', (22111, 22138), True, 'import matplotlib.pyplot as plt\n'), ((22389, 22481), 'seaborn.barplot', 'sns.barplot', ([], {'y': '"""HGNC symbol"""', 'x': '"""beta_trans"""', 'data': 'match_repr', 'palette': 'full_pal', 'ax': 'ax1'}), "(y='HGNC symbol', x='beta_trans', data=match_repr, palette=\n full_pal, ax=ax1)\n", (22400, 22481), True, 'import seaborn as sns\n'), ((22598, 22684), 'seaborn.barplot', 'sns.barplot', ([], {'y': '"""HGNC symbol"""', 'x': '"""logFC"""', 'data': 'match_repr', 'palette': 'full_pal', 'ax': 'ax2'}), "(y='HGNC symbol', x='logFC', data=match_repr, palette=full_pal,\n ax=ax2)\n", (22609, 22684), True, 'import seaborn as sns\n'), ((22839, 22962), 'pandas.melt', 'pd.melt', (['match_repr'], {'id_vars': "['HGNC symbol', 'yval']", 'value_vars': "['no_CAGE_enr', 'eRNA_enr', 'lncRNA_enr', 'mRNA_enr']"}), "(match_repr, id_vars=['HGNC symbol', 'yval'], value_vars=[\n 'no_CAGE_enr', 'eRNA_enr', 'lncRNA_enr', 'mRNA_enr'])\n", (22846, 22962), True, 'import pandas as pd\n'), ((23397, 23407), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (23405, 
23407), True, 'import matplotlib.pyplot as plt\n'), ((23513, 23524), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (23522, 23524), True, 'import matplotlib.pyplot as plt\n'), ((24017, 24044), 'numpy.abs', 'np.abs', (["trans_orth['logFC']"], {}), "(trans_orth['logFC'])\n", (24023, 24044), True, 'import numpy as np\n'), ((24432, 24461), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(1, 1.75)'}), '(figsize=(1, 1.75))\n', (24442, 24461), True, 'import matplotlib.pyplot as plt\n'), ((25085, 25151), 'numpy.asarray', 'np.asarray', (["trans_orth_sig[trans_orth_sig['sig'] == 'sig']['dNdS']"], {}), "(trans_orth_sig[trans_orth_sig['sig'] == 'sig']['dNdS'])\n", (25095, 25151), True, 'import numpy as np\n'), ((25160, 25226), 'numpy.asarray', 'np.asarray', (["trans_orth_sig[trans_orth_sig['sig'] != 'sig']['dNdS']"], {}), "(trans_orth_sig[trans_orth_sig['sig'] != 'sig']['dNdS'])\n", (25170, 25226), True, 'import numpy as np\n'), ((25456, 25466), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (25464, 25466), True, 'import matplotlib.pyplot as plt\n'), ((25547, 25558), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (25556, 25558), True, 'import matplotlib.pyplot as plt\n'), ((12151, 12202), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(2.75, 1.5)', 'nrows': '(1)', 'ncols': '(2)'}), '(figsize=(2.75, 1.5), nrows=1, ncols=2)\n', (12163, 12202), True, 'import matplotlib.pyplot as plt\n'), ((13783, 13818), 'numpy.asarray', 'np.asarray', (["sub1['logFC_trans_one']"], {}), "(sub1['logFC_trans_one'])\n", (13793, 13818), True, 'import numpy as np\n'), ((13831, 13866), 'numpy.asarray', 'np.asarray', (["sub2['logFC_trans_one']"], {}), "(sub2['logFC_trans_one'])\n", (13841, 13866), True, 'import numpy as np\n'), ((14157, 14188), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.5)'}), '(wspace=0.5)\n', (14176, 14188), True, 'import matplotlib.pyplot as plt\n'), ((14384, 14394), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14392, 14394), True, 'import matplotlib.pyplot as plt\n'), ((6746, 6762), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (6760, 6762), False, 'from sklearn.preprocessing import StandardScaler\n'), ((7278, 7335), 'statsmodels.formula.api.ols', 'smf.ols', ([], {'formula': '"""logFC_trans ~ gc + cpg"""', 'data': 'data_filt'}), "(formula='logFC_trans ~ gc + cpg', data=data_filt)\n", (7285, 7335), True, 'import statsmodels.formula.api as smf\n'), ((9660, 9713), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['motif_results'], {'orient': '"""index"""'}), "(motif_results, orient='index')\n", (9682, 9713), True, 'import pandas as pd\n'), ((11461, 11486), 'seaborn.color_palette', 'sns.color_palette', (['"""Set2"""'], {}), "('Set2')\n", (11478, 11486), True, 'import seaborn as sns\n'), ((11497, 11522), 'seaborn.color_palette', 'sns.color_palette', (['"""Set2"""'], {}), "('Set2')\n", (11514, 11522), True, 'import seaborn as sns\n'), ((14430, 14457), 'seaborn.color_palette', 'sns.color_palette', (['"""pastel"""'], {}), "('pastel')\n", (14447, 14457), True, 'import seaborn as sns\n'), ((14476, 14503), 'seaborn.color_palette', 'sns.color_palette', (['"""pastel"""'], {}), "('pastel')\n", (14493, 14503), True, 'import seaborn as sns\n'), ((24256, 24281), 'seaborn.color_palette', 'sns.color_palette', (['"""Set2"""'], {}), "('Set2')\n", (24273, 24281), True, 'import seaborn as sns\n'), ((25243, 25258), 'numpy.isnan', 'np.isnan', (['dist1'], {}), '(dist1)\n', (25251, 25258), True, 'import numpy as np\n'), ((25275, 25290), 'numpy.isnan', 'np.isnan', (['dist2'], {}), '(dist2)\n', (25283, 25290), True, 'import numpy as np\n'), ((8981, 9044), 'statsmodels.formula.api.ols', 'smf.ols', ([], {'formula': '"""logFC_trans ~ gc + cpg + tss_motif"""', 'data': 'tmp'}), "(formula='logFC_trans ~ gc + cpg + tss_motif', data=tmp)\n", (8988, 9044), True, 'import statsmodels.formula.api as smf\n'), ((13886, 
13901), 'numpy.isnan', 'np.isnan', (['vals1'], {}), '(vals1)\n', (13894, 13901), True, 'import numpy as np\n'), ((13922, 13937), 'numpy.isnan', 'np.isnan', (['vals2'], {}), '(vals2)\n', (13930, 13937), True, 'import numpy as np\n'), ((15826, 15839), 'numpy.max', 'np.max', (['yvals'], {}), '(yvals)\n', (15832, 15839), True, 'import numpy as np\n'), ((18576, 18589), 'numpy.max', 'np.max', (['yvals'], {}), '(yvals)\n', (18582, 18589), True, 'import numpy as np\n'), ((19115, 19140), 'seaborn.color_palette', 'sns.color_palette', (['"""Set2"""'], {}), "('Set2')\n", (19132, 19140), True, 'import seaborn as sns\n'), ((19458, 19493), 'pandas.isnull', 'pd.isnull', (["trans_orth['beta_trans']"], {}), "(trans_orth['beta_trans'])\n", (19467, 19493), True, 'import pandas as pd\n'), ((19520, 19550), 'pandas.isnull', 'pd.isnull', (["trans_orth['logFC']"], {}), "(trans_orth['logFC'])\n", (19529, 19550), True, 'import pandas as pd\n'), ((21596, 21609), 'numpy.max', 'np.max', (['yvals'], {}), '(yvals)\n', (21602, 21609), True, 'import numpy as np\n'), ((23137, 23150), 'numpy.max', 'np.max', (['yvals'], {}), '(yvals)\n', (23143, 23150), True, 'import numpy as np\n')]
|
import argparse
import os
from argparse import RawTextHelpFormatter
import hypercane.actions.identify
import hypercane.errors
from hypercane.args import universal_by_cid_gui_required_args, universal_gui_optional_args
from hypercane.actions import get_logger, calculate_loglevel
from hypercane.utils import get_hc_cache_storage
from hypercane.version import __useragent__
# Argument parser is built at module level (not inside a function) because
# Wooey introspects this module to render the GUI form.
parser = argparse.ArgumentParser(
    description="Submit a public web archive collection's ID and Hypercane will generate a file listing all archived page URLs (i.e., mementos, captures, snapshots, URI-Ms).",
    formatter_class=RawTextHelpFormatter
)
# Wooey's install script does not know how to handle functions, so we have to repeat this
required = parser.add_argument_group('required arguments')
for entry in universal_by_cid_gui_required_args:
    flags = entry['flags']
    argument_params = entry['argument_params']
    required.add_argument(*flags, **argument_params)
optional = parser.add_argument_group('optional arguments')
for entry in universal_gui_optional_args:
    flags = entry['flags']
    argument_params = entry['argument_params']
    optional.add_argument(*flags, **argument_params)
if __name__ == '__main__':
    args = parser.parse_args()
    # Fixed file names are injected into the namespace so the downstream
    # hypercane actions (which expect these attributes) find them; the GUI
    # does not expose them as options.
    vars(args)['output_filename'] = "archived-page-urls.txt"
    vars(args)['logfile'] = "hypercane-status.log"
    vars(args)['errorfilename'] = "hypercane-errors.dat"
    vars(args)['cache_storage'] = get_hc_cache_storage()
    vars(args)['input_arguments'] = args.collection_id
    # needed by discover_mementos, but not used
    vars(args)['accept_datetime'] = None
    vars(args)['timegates'] = None
    logger = get_logger(
        __name__,
        calculate_loglevel(verbose=args.verbose, quiet=args.quiet),
        args.logfile
    )
    if args.errorfilename is not None:
        # Route recorded errors to a file instead of the default error store.
        hypercane.errors.errorstore.type = hypercane.errors.FileErrorStore(args.errorfilename)
    print("starting to identify archived page URLs (i.e., mementos, captures, snapshots, URI-Ms) for {} collection ID {}".format(args.input_type, args.collection_id))
    print("in case of an issue, your administrator may need to know that the output of this job is stored in {}".format(os.getcwd()), flush=True)
    print("using cache at location {}".format(args.cache_storage))
    hypercane.actions.identify.discover_mementos(args)
    print("done identifying archived page URLs (i.e., mementos, captures, snapshots, URI-Ms) from {} collection {}, saved list to file {}".format(args.input_type, args.collection_id, args.output_filename))
|
[
"os.getcwd",
"hypercane.utils.get_hc_cache_storage",
"argparse.ArgumentParser",
"hypercane.actions.calculate_loglevel"
] |
[((385, 628), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Submit a public web archive collection\'s ID and Hypercane will generate a file listing all archived page URLs (i.e., mementos, captures, snapshots, URI-Ms)."""', 'formatter_class': 'RawTextHelpFormatter'}), '(description=\n "Submit a public web archive collection\'s ID and Hypercane will generate a file listing all archived page URLs (i.e., mementos, captures, snapshots, URI-Ms)."\n , formatter_class=RawTextHelpFormatter)\n', (408, 628), False, 'import argparse\n'), ((1448, 1470), 'hypercane.utils.get_hc_cache_storage', 'get_hc_cache_storage', ([], {}), '()\n', (1468, 1470), False, 'from hypercane.utils import get_hc_cache_storage\n'), ((1703, 1761), 'hypercane.actions.calculate_loglevel', 'calculate_loglevel', ([], {'verbose': 'args.verbose', 'quiet': 'args.quiet'}), '(verbose=args.verbose, quiet=args.quiet)\n', (1721, 1761), False, 'from hypercane.actions import get_logger, calculate_loglevel\n'), ((2213, 2224), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2222, 2224), False, 'import os\n')]
|
import unittest
import numpy as np
import multipy
################################################################################
################################################################################
####
#### Class: Transform
####
################################################################################
################################################################################
class Transform(unittest.TestCase):
    """Unit tests for ``multipy.Transform.diffusive_flux_mass_molar_to_mass_mass``."""

    def test_Transform__diffusive_flux_mass_molar_to_mass_mass__allowed_calls(self):
        """Valid inputs of shape (n_species, n_observations) produce a
        transformation matrix of shape (n_species-1, n_species-1, n_observations).

        The original version wrapped each call in ``try/except Exception:
        self.assertTrue(False)``, which discards the traceback; calling
        directly lets unittest report any exception as an error with full
        context, and the duplicated cases are collapsed into one loop.
        """
        transform = multipy.Transform()
        for n_species, n_observations in ((5, 100), (2, 100), (2, 1)):
            with self.subTest(n_species=n_species, n_observations=n_observations):
                X = np.random.rand(n_species, n_observations)
                Y = np.random.rand(n_species, n_observations)
                B_ou = transform.diffusive_flux_mass_molar_to_mass_mass(X, Y)
                (n_species_1, n_species_2, n_obs) = np.shape(B_ou)
                self.assertTrue(n_species_1 == n_species - 1)
                self.assertTrue(n_species_2 == n_species - 1)
                self.assertTrue(n_obs == n_observations)

################################################################################
################################################################################

    def test_Transform__diffusive_flux_mass_molar_to_mass_mass__not_allowed_calls(self):
        """Invalid inputs must raise ``ValueError``: a single species,
        mismatched species counts, 1-D arrays, and non-ndarray arguments."""
        transform = multipy.Transform()
        invalid_pairs = [
            (np.random.rand(1, 100), np.random.rand(1, 100)),   # only one species
            (np.random.rand(5, 100), np.random.rand(4, 100)),   # species count mismatch
            (np.random.rand(5, 100), np.random.rand(1, 100)),
            (np.random.rand(1, 100), np.random.rand(5, 100)),
            (np.random.rand(100), np.random.rand(5, 100)),     # 1-D first argument
            (np.random.rand(5, 100), np.random.rand(100)),     # 1-D second argument
            (np.random.rand(5, 100), [1, 2, 3, 4, 5]),          # not an ndarray
            (np.random.rand(5, 100), None),
            ([1, 2, 3, 4, 5], np.random.rand(5, 100)),
            (None, np.random.rand(5, 100)),
        ]
        for X, Y in invalid_pairs:
            with self.assertRaises(ValueError):
                transform.diffusive_flux_mass_molar_to_mass_mass(X, Y)

################################################################################
################################################################################

    def test_Transform__diffusive_flux_mass_molar_to_mass_mass__computation(self):
        # TODO: verify numerical values of the transformation matrix.
        pass

################################################################################
################################################################################

    def test_Transform__diffusive_flux_mass_molar_to_mass_mass__inverses(self):
        # TODO: verify that the forward and inverse transforms compose to identity.
        pass

################################################################################
################################################################################
|
[
"numpy.random.rand",
"multipy.Transform",
"numpy.shape"
] |
[((546, 568), 'numpy.random.rand', 'np.random.rand', (['(5)', '(100)'], {}), '(5, 100)\n', (560, 568), True, 'import numpy as np\n'), ((580, 602), 'numpy.random.rand', 'np.random.rand', (['(5)', '(100)'], {}), '(5, 100)\n', (594, 602), True, 'import numpy as np\n'), ((1015, 1037), 'numpy.random.rand', 'np.random.rand', (['(2)', '(100)'], {}), '(2, 100)\n', (1029, 1037), True, 'import numpy as np\n'), ((1049, 1071), 'numpy.random.rand', 'np.random.rand', (['(2)', '(100)'], {}), '(2, 100)\n', (1063, 1071), True, 'import numpy as np\n'), ((1484, 1504), 'numpy.random.rand', 'np.random.rand', (['(2)', '(1)'], {}), '(2, 1)\n', (1498, 1504), True, 'import numpy as np\n'), ((1516, 1536), 'numpy.random.rand', 'np.random.rand', (['(2)', '(1)'], {}), '(2, 1)\n', (1530, 1536), True, 'import numpy as np\n'), ((2208, 2227), 'multipy.Transform', 'multipy.Transform', ([], {}), '()\n', (2225, 2227), False, 'import multipy\n'), ((2241, 2263), 'numpy.random.rand', 'np.random.rand', (['(1)', '(100)'], {}), '(1, 100)\n', (2255, 2263), True, 'import numpy as np\n'), ((2275, 2297), 'numpy.random.rand', 'np.random.rand', (['(1)', '(100)'], {}), '(1, 100)\n', (2289, 2297), True, 'import numpy as np\n'), ((2429, 2451), 'numpy.random.rand', 'np.random.rand', (['(5)', '(100)'], {}), '(5, 100)\n', (2443, 2451), True, 'import numpy as np\n'), ((2463, 2485), 'numpy.random.rand', 'np.random.rand', (['(4)', '(100)'], {}), '(4, 100)\n', (2477, 2485), True, 'import numpy as np\n'), ((2617, 2639), 'numpy.random.rand', 'np.random.rand', (['(5)', '(100)'], {}), '(5, 100)\n', (2631, 2639), True, 'import numpy as np\n'), ((2651, 2673), 'numpy.random.rand', 'np.random.rand', (['(1)', '(100)'], {}), '(1, 100)\n', (2665, 2673), True, 'import numpy as np\n'), ((2805, 2827), 'numpy.random.rand', 'np.random.rand', (['(1)', '(100)'], {}), '(1, 100)\n', (2819, 2827), True, 'import numpy as np\n'), ((2839, 2861), 'numpy.random.rand', 'np.random.rand', (['(5)', '(100)'], {}), '(5, 100)\n', (2853, 2861), True, 
'import numpy as np\n'), ((2993, 3012), 'numpy.random.rand', 'np.random.rand', (['(100)'], {}), '(100)\n', (3007, 3012), True, 'import numpy as np\n'), ((3025, 3047), 'numpy.random.rand', 'np.random.rand', (['(5)', '(100)'], {}), '(5, 100)\n', (3039, 3047), True, 'import numpy as np\n'), ((3179, 3201), 'numpy.random.rand', 'np.random.rand', (['(5)', '(100)'], {}), '(5, 100)\n', (3193, 3201), True, 'import numpy as np\n'), ((3213, 3232), 'numpy.random.rand', 'np.random.rand', (['(100)'], {}), '(100)\n', (3227, 3232), True, 'import numpy as np\n'), ((3365, 3387), 'numpy.random.rand', 'np.random.rand', (['(5)', '(100)'], {}), '(5, 100)\n', (3379, 3387), True, 'import numpy as np\n'), ((3651, 3673), 'numpy.random.rand', 'np.random.rand', (['(5)', '(100)'], {}), '(5, 100)\n', (3665, 3673), True, 'import numpy as np\n'), ((640, 659), 'multipy.Transform', 'multipy.Transform', ([], {}), '()\n', (657, 659), False, 'import multipy\n'), ((789, 803), 'numpy.shape', 'np.shape', (['B_ou'], {}), '(B_ou)\n', (797, 803), True, 'import numpy as np\n'), ((1109, 1128), 'multipy.Transform', 'multipy.Transform', ([], {}), '()\n', (1126, 1128), False, 'import multipy\n'), ((1258, 1272), 'numpy.shape', 'np.shape', (['B_ou'], {}), '(B_ou)\n', (1266, 1272), True, 'import numpy as np\n'), ((1574, 1593), 'multipy.Transform', 'multipy.Transform', ([], {}), '()\n', (1591, 1593), False, 'import multipy\n'), ((1723, 1737), 'numpy.shape', 'np.shape', (['B_ou'], {}), '(B_ou)\n', (1731, 1737), True, 'import numpy as np\n')]
|
# Generated by Django 2.0.7 on 2018-09-28 13:23
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``answered`` counter and the ``interests`` many-to-many
    relation to the ``profile`` model."""

    dependencies = [
        ('interest', '0001_initial'),
        ('users', '0003_auto_20180916_1903'),
    ]

    operations = [
        migrations.AddField(
            model_name='profile',
            name='answered',
            # Integer counter; existing rows get 0.
            field=models.IntegerField(default=0, blank=True),
        ),
        migrations.AddField(
            model_name='profile',
            name='interests',
            field=models.ManyToManyField(to='interest.Interest'),
        ),
    ]
|
[
"django.db.models.IntegerField",
"django.db.models.ManyToManyField"
] |
[((373, 415), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'default': '(0)'}), '(blank=True, default=0)\n', (392, 415), False, 'from django.db import migrations, models\n'), ((539, 585), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'to': '"""interest.Interest"""'}), "(to='interest.Interest')\n", (561, 585), False, 'from django.db import migrations, models\n')]
|
# SPDX-FileCopyrightText: 2022 UChicago Argonne, LLC
# SPDX-License-Identifier: MIT
from datetime import datetime
from pathlib import Path
import shutil
from typing import List, Optional
import numpy as np
import pandas as pd
from .fileutils import PathLike, run as run_proc
from .parameters import Parameters
from .plugin import TemplatePlugin
from .results import Results
class ResultsMOOSE(Results):
    """MOOSE simulation results

    Parameters
    ----------
    params
        Parameters used to generate inputs
    name
        Name of workflow producing results
    time
        Time at which workflow was run
    inputs
        List of input files
    outputs
        List of output files

    Attributes
    ----------
    stdout
        Standard output from MOOSE run
    csv_data
        Dictionary with data from .csv files
    """
    def __init__(self, params: Parameters, name: str, time: datetime,
                 inputs: List[PathLike], outputs: List[PathLike]):
        super().__init__('MOOSE', params, name, time, inputs, outputs)
        self.csv_data = self._save_MOOSE_csv()

    @property
    def stdout(self) -> str:
        """Standard output captured from the MOOSE run."""
        return (self.base_path / "MOOSE_log.txt").read_text()

    def _save_MOOSE_csv(self) -> dict:
        """Read all MOOSE '.csv' files and return results in a dictionary

        Returns
        -------
        Results from MOOSE .csv files
        """
        input_file = self.inputs[0]
        csv_file = input_file.with_name(f"{input_file.stem}_csv.csv")

        # Save MOOSE's main output '.csv' file: one array per column.
        csv_data = {}
        if csv_file.exists():
            csv_file_df = pd.read_csv(csv_file)
            for column_name in csv_file_df.columns:
                csv_data[column_name] = np.array(csv_file_df[column_name])

        # Read MOOSE's vector postprocessor '.csv' files and save the
        # parameters as individual arrays.  Files ending in "_0000.csv"
        # are skipped.
        coordinate_columns = {"id", "x", "y", "z"}
        for output in self.outputs:
            if (output.name.startswith(f"{input_file.stem}_csv_") and
                    not output.name.endswith("_0000.csv")):
                vector_csv_df = pd.read_csv(output)
                # BUGFIX: choose the data column deterministically, in file
                # order.  The previous `list(set(columns) - {...})[0]`
                # selected it in arbitrary set order, so with more than one
                # non-coordinate column the result could differ between runs
                # (hash randomization).
                csv_param = [c for c in vector_csv_df.columns
                             if c not in coordinate_columns]
                csv_data[output.stem] = np.array(vector_csv_df[csv_param[0]], dtype=float)
                # Store the coordinate columns once per file series; the
                # slice [:-8] drops the trailing "NNNN.csv" counter suffix
                # so all time steps of a series share one coordinate entry.
                for name in ("id", "x", "y", "z"):
                    new_name = output.name[:-8] + name
                    if new_name not in csv_data:
                        csv_data[new_name] = np.array(vector_csv_df[name], dtype=float)
        return csv_data
class PluginMOOSE(TemplatePlugin):
    """Plugin for running MOOSE

    Parameters
    ----------
    template_file
        Templated MOOSE input
    n_cpu
        Number of processors to be used to run MOOSE application
    extra_inputs
        List of extra (non-templated) input files that are needed
    extra_template_inputs
        Extra templated input files
    show_stdout
        Whether to display output from stdout when MOOSE is run
    show_stderr
        Whether to display output from stderr when MOOSE is run

    Attributes
    ----------
    moose_exec
        Path to MOOSE executable
    """
    def __init__(self, template_file: str, n_cpu: int = 1,
                 extra_inputs: Optional[List[str]] = None,
                 extra_template_inputs: Optional[List[PathLike]] = None,
                 show_stdout: bool = False, show_stderr: bool = False):
        super().__init__(template_file, extra_inputs, extra_template_inputs,
                         show_stdout, show_stderr)
        self._moose_exec = Path('moose-opt')  # default; override via `options`
        self.input_name = "MOOSE.i"
        # Reject non-positive CPU counts up front.
        if n_cpu < 1:
            raise RuntimeError("The CPU number used to run MOOSE app must be a natural number.")
        self.n_cpu = n_cpu

    @property
    def moose_exec(self) -> Path:
        """Path to the MOOSE executable."""
        return self._moose_exec

    @moose_exec.setter
    def moose_exec(self, exe: PathLike):
        # Only accept executables resolvable on PATH (or an explicit path).
        if shutil.which(exe) is None:
            raise RuntimeError(f"MOOSE executable '{exe}' is missing.")
        self._moose_exec = Path(exe)

    def options(self, moose_exec):
        """Input MOOSE user-specified options

        Parameters
        ----------
        moose_exec
            Path to MOOSE executable
        """
        self.moose_exec = moose_exec

    def run(self):
        """Run MOOSE under mpiexec with the configured CPU count."""
        mpi_command = ["mpiexec", "-n", str(self.n_cpu),
                       self.moose_exec, "-i", self.input_name]
        run_proc(mpi_command)

    def postrun(self, params: Parameters, name: str) -> ResultsMOOSE:
        """Read MOOSE results and create results object

        Parameters
        ----------
        params
            Parameters used to create MOOSE model
        name
            Name of the workflow

        Returns
        -------
        MOOSE results object
        """
        run_time, input_files, output_files = self._get_result_input(self.input_name)
        return ResultsMOOSE(params, name, run_time, input_files, output_files)
|
[
"pandas.read_csv",
"pathlib.Path",
"numpy.array",
"shutil.which"
] |
[((3600, 3617), 'pathlib.Path', 'Path', (['"""moose-opt"""'], {}), "('moose-opt')\n", (3604, 3617), False, 'from pathlib import Path\n'), ((4083, 4092), 'pathlib.Path', 'Path', (['exe'], {}), '(exe)\n', (4087, 4092), False, 'from pathlib import Path\n'), ((1644, 1665), 'pandas.read_csv', 'pd.read_csv', (['csv_file'], {}), '(csv_file)\n', (1655, 1665), True, 'import pandas as pd\n'), ((3957, 3974), 'shutil.which', 'shutil.which', (['exe'], {}), '(exe)\n', (3969, 3974), False, 'import shutil\n'), ((1758, 1792), 'numpy.array', 'np.array', (['csv_file_df[column_name]'], {}), '(csv_file_df[column_name])\n', (1766, 1792), True, 'import numpy as np\n'), ((2100, 2119), 'pandas.read_csv', 'pd.read_csv', (['output'], {}), '(output)\n', (2111, 2119), True, 'import pandas as pd\n'), ((2245, 2295), 'numpy.array', 'np.array', (['vector_csv_df[csv_param[0]]'], {'dtype': 'float'}), '(vector_csv_df[csv_param[0]], dtype=float)\n', (2253, 2295), True, 'import numpy as np\n'), ((2497, 2539), 'numpy.array', 'np.array', (['vector_csv_df[name]'], {'dtype': 'float'}), '(vector_csv_df[name], dtype=float)\n', (2505, 2539), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
© <NAME>, <NAME>, 2017
Script for resuming from saved checkpoint
"""
# ----------------------------------------------------------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------------------------------------------------------
import os
import sys
import signal
import subprocess
import argparse
import shlex
from TeLL.config import Config
from TeLL.utility.misc import extract_to_tmp, rmdir, extract_named_args, extract_unnamed_args
# ----------------------------------------------------------------------------------------------------------------------
# Globals
# ----------------------------------------------------------------------------------------------------------------------
# Handle of the resumed sub-process; set in resume(), read by sigint_handler.
process_handle = None
# Temp directory the archived script is extracted into; removed after the run.
working_dir = None
# Maximum number of attempts to terminate the sub-process on SIGINT.
kill_retry_max = 10
# Attempts made so far; compared against kill_retry_max in sigint_handler.
kill_retry_count = 0
# ----------------------------------------------------------------------------------------------------------------------
# Functions
# ----------------------------------------------------------------------------------------------------------------------
def read_config(config: str, epochs: int = None, gpu: str = None):
    """Get config either from file or use default config.

    BUGFIX: the annotation previously declared ``config: Config``, but the
    value is a filesystem path (it is passed to ``os.path.isfile``), so the
    annotation is corrected to ``str``.

    Parameters
    ----------
    config : str
        Path to a config file.  If it does not exist, a default ``Config``
        is created and the ``n_epochs`` / ``cuda_gpu`` overrides are applied.
    epochs : int, optional
        If given, forces ``config.n_epochs`` regardless of the file contents.
    gpu : str, optional
        GPU override; only applied when falling back to the default config.

    Returns
    -------
    Config
        The resulting configuration object.
    """
    if os.path.isfile(config):
        config = Config.from_file(config)
    else:
        config = Config()
        config.override("n_epochs", epochs)
        config.override("cuda_gpu", gpu)
    # Explicit epochs always wins, even over a file-based config.
    if epochs is not None:
        config.n_epochs = epochs
    return config
def parse_args():
    """Parse the mandatory working-directory path; extra args are passed through.

    Prints usage and exits with status 1 when no path was supplied.
    Returns (known_args, unknown_args).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "path",
        type=str,
        default=None,
        help="Path to previous working directory of run that should be resumed")
    known, unknown = parser.parse_known_args()
    if known.path is None:
        parser.print_help()
        sys.exit(1)
    # --
    print("Resuming {}".format(known.path))
    return known, unknown
def parse_and_merge_args(command, override_args):
    """Merge the named args of a stored shell ``command`` with ``override_args``.

    Overrides win on conflicting names; the original command's unnamed args
    are appended at the end. Returns the rebuilt command as a single string.
    """
    tokens = shlex.split(command)
    merged = {**extract_named_args(tokens[1:]), **extract_named_args(override_args)}
    rebuilt = [tokens[0]]
    for name, value in merged.items():
        # flag-style arguments carry no value
        rebuilt.extend([name] if value is None else [name, value])
    rebuilt.extend(extract_unnamed_args(tokens[1:]))
    return " ".join(rebuilt)
def resume(directory: str, unknown_args: str = None):
    """Resume a previous run from its saved working directory.

    Extracts the archived script ("00-script.zip") to a temp directory,
    re-reads the original command line from "00-INFO", merges in any
    ``unknown_args``, then re-runs the command with ``--restore`` pointing at
    ``directory``. Cleans up the temp directory once the sub-process exits.
    """
    if os.path.isdir(directory):
        results = os.path.join(directory, "results")
        archive = os.path.join(directory, "00-script.zip")
        if os.path.exists(archive) and os.path.exists(results):
            global working_dir
            working_dir = extract_to_tmp(archive)
            # parse used config
            with open(os.path.join(working_dir, "00-INFO")) as f:
                command = f.readline().strip()
            command = parse_and_merge_args(command, unknown_args)
            # start
            cmd_sep = " &&" if sys.platform == "win32" else "; "
            cmd = ["cd \"{}\"{}".format(working_dir, cmd_sep),
                   '"{}"'.format(sys.executable),
                   command,
                   "--restore \"{}\"".format(directory)]
            cmd = " ".join(cmd)
            print("Resuming with command '{}' in directory '{}'".format(cmd, working_dir))
            initial_working_dir = os.getcwd()
            os.chdir(working_dir)
            global process_handle
            process_handle = subprocess.Popen(cmd, cwd=working_dir, shell=True, start_new_session=True)
            process_handle.wait()
            # clean up
            print("Cleaning up temp directory...")
            os.chdir(initial_working_dir)
            rmdir(working_dir)
            print("Done!")
        else:
            # BUGFIX: previously this case failed silently when the archive or
            # results folder was missing inside an existing run directory.
            print("Can't resume from {}".format(directory))
    else:
        print("Can't resume from {}".format(directory))
def sigint_handler(sig, frame):
    """SIGINT handler: terminate the resumed sub-process group, clean up, exit.

    Retries SIGTERM up to ``kill_retry_max`` times while the child has not
    reported a return code, then removes the temp working directory.
    """
    print("Killing sub-process...")
    if process_handle is not None:
        global kill_retry_count
        while process_handle.returncode is None and kill_retry_count < kill_retry_max:
            kill_retry_count += 1
            print("Killing sub-process ({})...".format(kill_retry_count))
            try:
                # signal the whole process group: the child was started with
                # start_new_session=True, so it leads its own group
                os.killpg(os.getpgid(process_handle.pid), signal.SIGTERM)
                os.waitpid(process_handle.pid, os.WNOHANG)
            except ProcessLookupError:
                # child is already gone
                break
            try:
                # give the child up to 1s to exit before retrying
                process_handle.wait(1)
            except subprocess.TimeoutExpired:
                pass
    if working_dir is not None:
        rmdir(working_dir)
    sys.exit(0)
def main():
    """Entry point: install the SIGINT handler and resume the requested run."""
    signal.signal(signal.SIGINT, sigint_handler)
    parsed, passthrough = parse_args()
    # Resume from the snapshot directory when one was supplied.
    if parsed.path is not None:
        resume(parsed.path, passthrough)


if __name__ == "__main__":
    main()
|
[
"argparse.ArgumentParser",
"TeLL.utility.misc.extract_to_tmp",
"os.path.isfile",
"TeLL.config.Config.from_file",
"os.path.join",
"os.chdir",
"os.waitpid",
"shlex.split",
"os.path.exists",
"subprocess.Popen",
"TeLL.utility.misc.extract_named_args",
"signal.signal",
"TeLL.config.Config",
"sys.exit",
"os.getpgid",
"TeLL.utility.misc.extract_unnamed_args",
"os.path.isdir",
"os.getcwd",
"TeLL.utility.misc.rmdir"
] |
[((1289, 1311), 'os.path.isfile', 'os.path.isfile', (['config'], {}), '(config)\n', (1303, 1311), False, 'import os\n'), ((1594, 1619), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1617, 1619), False, 'import argparse\n'), ((2034, 2054), 'shlex.split', 'shlex.split', (['command'], {}), '(command)\n', (2045, 2054), False, 'import shlex\n'), ((2095, 2124), 'TeLL.utility.misc.extract_named_args', 'extract_named_args', (['parts[1:]'], {}), '(parts[1:])\n', (2113, 2124), False, 'from TeLL.utility.misc import extract_to_tmp, rmdir, extract_named_args, extract_unnamed_args\n'), ((2145, 2178), 'TeLL.utility.misc.extract_named_args', 'extract_named_args', (['override_args'], {}), '(override_args)\n', (2163, 2178), False, 'from TeLL.utility.misc import extract_to_tmp, rmdir, extract_named_args, extract_unnamed_args\n'), ((2496, 2520), 'os.path.isdir', 'os.path.isdir', (['directory'], {}), '(directory)\n', (2509, 2520), False, 'import os\n'), ((4672, 4683), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4680, 4683), False, 'import sys\n'), ((4702, 4746), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'sigint_handler'], {}), '(signal.SIGINT, sigint_handler)\n', (4715, 4746), False, 'import signal\n'), ((1330, 1354), 'TeLL.config.Config.from_file', 'Config.from_file', (['config'], {}), '(config)\n', (1346, 1354), False, 'from TeLL.config import Config\n'), ((1382, 1390), 'TeLL.config.Config', 'Config', ([], {}), '()\n', (1388, 1390), False, 'from TeLL.config import Config\n'), ((1881, 1892), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1889, 1892), False, 'import sys\n'), ((2372, 2403), 'TeLL.utility.misc.extract_unnamed_args', 'extract_unnamed_args', (['parts[1:]'], {}), '(parts[1:])\n', (2392, 2403), False, 'from TeLL.utility.misc import extract_to_tmp, rmdir, extract_named_args, extract_unnamed_args\n'), ((2540, 2574), 'os.path.join', 'os.path.join', (['directory', '"""results"""'], {}), "(directory, 'results')\n", (2552, 2574), False, 
'import os\n'), ((2593, 2633), 'os.path.join', 'os.path.join', (['directory', '"""00-script.zip"""'], {}), "(directory, '00-script.zip')\n", (2605, 2633), False, 'import os\n'), ((4644, 4662), 'TeLL.utility.misc.rmdir', 'rmdir', (['working_dir'], {}), '(working_dir)\n', (4649, 4662), False, 'from TeLL.utility.misc import extract_to_tmp, rmdir, extract_named_args, extract_unnamed_args\n'), ((2645, 2668), 'os.path.exists', 'os.path.exists', (['archive'], {}), '(archive)\n', (2659, 2668), False, 'import os\n'), ((2673, 2696), 'os.path.exists', 'os.path.exists', (['results'], {}), '(results)\n', (2687, 2696), False, 'import os\n'), ((2755, 2778), 'TeLL.utility.misc.extract_to_tmp', 'extract_to_tmp', (['archive'], {}), '(archive)\n', (2769, 2778), False, 'from TeLL.utility.misc import extract_to_tmp, rmdir, extract_named_args, extract_unnamed_args\n'), ((3462, 3473), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3471, 3473), False, 'import os\n'), ((3486, 3507), 'os.chdir', 'os.chdir', (['working_dir'], {}), '(working_dir)\n', (3494, 3507), False, 'import os\n'), ((3571, 3645), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'cwd': 'working_dir', 'shell': '(True)', 'start_new_session': '(True)'}), '(cmd, cwd=working_dir, shell=True, start_new_session=True)\n', (3587, 3645), False, 'import subprocess\n'), ((3766, 3795), 'os.chdir', 'os.chdir', (['initial_working_dir'], {}), '(initial_working_dir)\n', (3774, 3795), False, 'import os\n'), ((3808, 3826), 'TeLL.utility.misc.rmdir', 'rmdir', (['working_dir'], {}), '(working_dir)\n', (3813, 3826), False, 'from TeLL.utility.misc import extract_to_tmp, rmdir, extract_named_args, extract_unnamed_args\n'), ((4359, 4401), 'os.waitpid', 'os.waitpid', (['process_handle.pid', 'os.WNOHANG'], {}), '(process_handle.pid, os.WNOHANG)\n', (4369, 4401), False, 'import os\n'), ((2833, 2869), 'os.path.join', 'os.path.join', (['working_dir', '"""00-INFO"""'], {}), "(working_dir, '00-INFO')\n", (2845, 2869), False, 'import os\n'), ((4295, 
4325), 'os.getpgid', 'os.getpgid', (['process_handle.pid'], {}), '(process_handle.pid)\n', (4305, 4325), False, 'import os\n')]
|
import urllib3
import csv
import os
import json
import arin
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# TODO: Add in reverse lookup
# Map each RIR to the (lower-cased) country names it administers, read from
# the bundled CSV (column 0 = country name, column 3 = RIR name).
rir = {'AFRINIC': [],
       'APNIC': [],
       'ARIN': [],
       'RIPE NCC': [],
       'LACNIC': []
       }
# BUGFIX: the CSV file handle was previously opened and never closed; use a
# context manager so it is released as soon as the table is built.
with open('countries_by_rir.csv', 'r') as csvfile:
    for row in csv.reader(csvfile, delimiter=','):
        key = row[3]
        if key in rir:
            rir[key].append(row[0].lower())
def checkIPcountry(ip_addr):
    """Geolocate ``ip_addr`` via geoip-db.com and return the decoded JSON dict."""
    pool = urllib3.PoolManager()
    raw = pool.request('GET', 'https://geoip-db.com/jsonp/' + ip_addr).data.decode()
    # the endpoint answers with JSONP ("callback({...})"); strip the wrapper
    return json.loads(raw.strip("callback").strip('(').strip(')'))
def _arin_clean_token(lines, field, index):
    """Pull a whois handle token (Parent/Organization) out of the raw ARIN
    record ``lines``; mirrors the original ad-hoc string surgery."""
    matches = str([s for s in lines if field in s])
    token = matches.split()[index]
    return token.replace("'", "").replace("]", "").strip("(").strip(")")


def _print_record(http, url):
    """Fetch a whois text record and print every non-blank, non-comment line.

    Returns the record's lines on HTTP 200, otherwise None.
    """
    page = http.request('GET', url)
    if page.status != 200:
        return None
    lines = page.data.decode().split("\n")
    for line in lines:
        if line == '' or '#' in line:
            continue
        print(line)
    return lines


def main():
    """Interactive whois lookup loop.

    Fixes relative to the original version:
    - ``register`` (a NameError) corrected to ``registry`` in the AFRINIC branch.
    - ``registry`` is initialised, so a country that appears in no RIR table no
      longer raises UnboundLocalError.
    - removed an unused, never-closed duplicate read of countries_by_rir.csv
      (the module-level ``rir`` table is used instead).
    """
    http = urllib3.PoolManager()
    running = True
    while running:
        print("\nSimple WhoIs Lookup Program")
        print("1) Lookup IP")
        print("2) Resolve FQDN")
        selection = input("Selection> ")
        if selection == "quit":
            running = False
        elif selection == "1":
            print("\nWhat is the IP you want to look up?")
            ip_addr = input("IP> ")
            if ip_addr == "quit":
                running = False
                continue
            country = checkIPcountry(ip_addr)['country_name']
            registry = None
            for key, value in rir.items():
                if country.lower() in value:
                    registry = key
            if registry == "ARIN":
                page = http.request('GET', 'http://whois.arin.net/rest/ip/' + ip_addr + '.txt')
                data = page.data.decode().split("\n")
                if page.status == 200:
                    for line in data:
                        if line == '' or '#' in line:
                            continue
                        print(line)
                parent = _arin_clean_token(data, "Parent", 2)
                organization = _arin_clean_token(data, "Organization", 3)
                moreinformation = True
                while moreinformation:
                    print("\nWould you like to find more information?")
                    print("1) Parent Information")
                    print("2) Organization Information")
                    selection = input("Selection> ")
                    if selection == "quit":
                        moreinformation = False
                        running = False
                    elif selection == "1":
                        print()
                        _print_record(http, 'https://whois.arin.net/rest/net/' + parent + '.txt')
                    elif selection == "2":
                        _print_record(http, 'https://whois.arin.net/rest/org/' + organization + '.txt')
            elif registry == "AFRINIC":
                print("test")
            elif registry is None:
                print("Could not match {} to a regional registry".format(country))


if __name__ == "__main__":
    main()
|
[
"urllib3.PoolManager",
"csv.reader",
"urllib3.disable_warnings",
"json.loads"
] |
[((60, 127), 'urllib3.disable_warnings', 'urllib3.disable_warnings', (['urllib3.exceptions.InsecureRequestWarning'], {}), '(urllib3.exceptions.InsecureRequestWarning)\n', (84, 127), False, 'import urllib3\n'), ((214, 248), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (224, 248), False, 'import csv\n'), ((504, 525), 'urllib3.PoolManager', 'urllib3.PoolManager', ([], {}), '()\n', (523, 525), False, 'import urllib3\n'), ((663, 679), 'json.loads', 'json.loads', (['page'], {}), '(page)\n', (673, 679), False, 'import json\n'), ((755, 789), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (765, 789), False, 'import csv\n'), ((801, 822), 'urllib3.PoolManager', 'urllib3.PoolManager', ([], {}), '()\n', (820, 822), False, 'import urllib3\n')]
|
"""
:author: <NAME>
:id: 30632749
:assignment: FIT1045 Assignment 1, Task 1 (Semester 1 2019)
:purpose: Performs and compares algorithms that approximate pi.
:created: 2019-11-13 (remade within two hours)
:updated: 2019-11-17 (documentation)
This assignment task has since been replaced as of 2019-11-17.
"""
from math import pi, sqrt # relevant modules to compare pi and get square root
def incomplete(x, precision):
    """Return True while the approximation ``x`` is still at least
    ``precision`` away from pi, i.e. iteration should continue."""
    assert type(precision) == float
    delta = pi - x
    return not (abs(delta) < precision)
def odd(x):
    """Return the ``x``-th positive odd number: 1, 3, 5, ... for x = 1, 2, 3."""
    assert x > 0
    return 2 * x - 1
def basel(precision: int):
    """
    Approximate pi via the Basel series: pi**2 / 6 = sum(1 / n**2).

    :param precision: represents how far the final result should be from pi
    :returns: tuple (x, n) where x is the approximation of pi and n is
              the number of terms used to reach x.
    :pre-condition: precision must be a float
    :raises: AssertionError, when the pre-condition is not met
    """
    approx, terms, partial_sum = 0, 0, 0
    # keep adding reciprocal squares until sqrt(6 * sum) is close enough to pi
    while incomplete(approx, precision):
        terms += 1
        partial_sum += terms ** -2
        approx = sqrt(6 * partial_sum)
    return approx, terms
def taylor(precision: int):
    """
    Approximate pi via the Taylor (Leibniz) series of arctan(1):
    pi / 4 = 1/1 - 1/3 + 1/5 - 1/7 + ...

    :param precision: represents how far the final result should be from pi
    :returns: tuple (x, n) where x is the approximation of pi and n is
              the number of terms used to reach x.
    :pre-condition: precision must be a float
    :raises: AssertionError, when the pre-condition is not met
    """
    approx, terms = 0, 0
    sign = 1  # alternates +1 / -1 across terms
    while incomplete(approx, precision):
        terms += 1
        approx += 4 * sign * odd(terms) ** -1
        sign = -sign
    return approx, terms
def wallis(precision: int):
    """
    Approximate pi via the Wallis product.
    With L the nth even number and M, N the nth and (n+1)th odd numbers:
    pi / 2 = prod(L**2 / (M * N))

    :param precision: represents how far the final result should be from pi
    :returns: tuple (x, n) where x is the approximation of pi and n is
              the number of factors used to reach x.
    :pre-condition: precision must be a float
    :raises: AssertionError, when the pre-condition is not met
    """
    approx, terms = 0, 0
    while incomplete(approx, precision):
        if approx == 0:
            approx = 2  # seed with the leading factor of 2
        terms += 1
        numerator = (terms * 2) ** 2
        denominator = odd(terms) * odd(terms + 1)
        approx *= numerator / denominator
    return approx, terms
def spigot(precision: int):
    """
    Approximate pi using the spigot series
    pi = 2 * (1 + 1/3 + (1*2)/(3*5) + (1*2*3)/(3*5*7) + ...).

    :param precision: represents how far the final result should be from pi
    :returns: tuple (x, n) where x is the approximation of pi and n is
              the number of terms used to reach x.
    :pre-condition: precision must be a float
    :raises: AssertionError, when the pre-condition is not met
    """
    x = 0
    n = 0
    # running product of the series term (1*2*...*(n-1)) / (3*5*...*odd(n))
    product = 0
    while incomplete(x, precision):
        n += 1
        if x == 0:
            # seed: the leading "2 *" factor; the running product starts at 1
            x = 2
            product = 1
        else:
            # BUGFIX: the "x += 2 * product" step must only run for n >= 2.
            # Previously it also ran on the seeding iteration, which started
            # the sum at 4 and made the loop diverge (never terminate).
            product *= ((n - 1) / odd(n))
            x += 2 * product
    return x, n
def race(precision, algorithms):
    """
    Run each approximation algorithm at ``precision`` and rank them by the
    number of steps each one needed.

    :param precision: represents how far the final result should be from pi
    :param algorithms: list of functions returning (approximation, steps)
    :returns: list of (i, n) pairs — 1-based algorithm index and its step
              count — sorted ascending by n
    :pre-condition: precision must be a float; algorithm functions must be
                    resolvable in the program's namespace
    :raises: AssertionError / NameError when the pre-conditions are not met
    """
    steps = [(index, algorithm(precision)[1])
             for index, algorithm in enumerate(algorithms, start=1)]
    return sorted(steps, key=lambda entry: entry[1])
def print_results(results):
    """
    Print one human-readable line per (algorithm, steps) result tuple.

    :param results: output of race() — a list of (index, steps) tuples
    :returns: None, output goes to stdout
    :pre-condition: must be a list of 2-tuples (the output shape of race())
    :raises: TypeError/IndexError, when the pre-condition is not met
    """
    template = "Algorithm {0[0]} finished in {0[1]} steps"
    for entry in results:
        print(template.format(entry))
if __name__=="__main__":
assert basel(0.1)[1] == 10
assert taylor(0.2)[1] == 5
assert wallis(0.2)[1] == 4
assert spigot(0.1)[1] == 4
assert race(0.01, [taylor, wallis, basel]) == [(2, 78), (3, 96), (1, 100)]
print_results([(2, 78), (3, 96), (1, 100)])
print("No errors. All assert test cases were successful.")
|
[
"math.sqrt"
] |
[((1505, 1520), 'math.sqrt', 'sqrt', (['(6 * sub_x)'], {}), '(6 * sub_x)\n', (1509, 1520), False, 'from math import pi, sqrt\n')]
|
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import logging
import numpy as np
import copy
import coremltools
from coremltools import converters as converter
from coremltools.converters.mil import converter as _converter
from coremltools.converters.mil.mil import Program, Function
from coremltools.converters.mil.mil.passes.pass_registry import PASS_REGISTRY
from coremltools._deps import _IS_MACOS
import PIL.Image
# Re-bind the imported converter modules to module-level names.
# NOTE(review): presumably so other modules can reach/patch them through this
# utility module — confirm before removing.
converter = converter
_converter = _converter
def assert_op_count_match(program, expect, op=None, verbose=False):
    """
    Assert the number of ops matches the expected number. If ``op`` is not
    specified, the total op count across all functions is compared against
    ``expect``; otherwise only ops whose type matches ``op`` (case-insensitive)
    are counted.
    """
    if verbose:
        print(program)
    total = 0
    for _, func in program.functions.items():
        for operation in func.operations:
            if not op or operation.op_type.lower() == op.lower():
                total += 1
    np.testing.assert_equal(total, expect)
def assert_model_is_valid(
    program, inputs, backend="nn_proto", verbose=True, expected_output_shapes=None
):
    """
    Assert Core ML model is valid.

    Inputs:

    - inputs: str -> shape tuple. All program input names need to appear in
      the dict; shape tuples can only contain positive integers.
    """
    # random feed dict matching the declared input shapes
    feed = {name: np.random.rand(*shape) for name, shape in inputs.items()}
    proto = _converter._convert(program, convert_from="mil", convert_to=backend)
    if verbose:
        from coremltools.models.neural_network.printer import print_network_spec
        print_network_spec(proto, style="coding")
    model = coremltools.models.MLModel(proto)
    assert model is not None
    # prediction is only available on macOS
    if _IS_MACOS:
        prediction = model.predict(feed, useCPUOnly=True)
        assert prediction is not None
        if expected_output_shapes is not None:
            for out_name, out_shape in expected_output_shapes.items():
                assert out_name in prediction
                assert out_shape == prediction[out_name].shape
def assert_same_output_names(prog1, prog2, func_name="main"):
    """Assert both programs expose identical output names, in the same order."""
    names1 = [out.name for out in prog1[func_name].outputs]
    names2 = [out.name for out in prog2[func_name].outputs]
    assert names1 == names2
def assert_same_output_shapes(prog1, prog2, func_name="main"):
    """Assert both programs expose identical output shapes, in the same order."""
    shapes1 = [out.shape for out in prog1[func_name].outputs]
    shapes2 = [out.shape for out in prog2[func_name].outputs]
    assert shapes1 == shapes2
def get_op_types_in_program(prog, func_name="main", skip_const_ops=True):
    """
    Return the operation types in prog[func_name],
    in the same (topological) order as they are stored.
    `const` ops are omitted unless ``skip_const_ops`` is False.
    """
    return [
        op.op_type
        for op in prog[func_name].operations
        if not (skip_const_ops and op.op_type == "const")
    ]
def random_gen(
    shape,
    rand_min=0.0,
    rand_max=1.0,
    eps_from_int=0.0,
    allow_duplicate=True,
    dtype=np.float32,
):
    """
    Generate a random array of shape `shape`.

    Generated values lie in [rand_min, rand_max) and are at least
    `eps_from_int` away from any integer. If allow_duplicate is set to False,
    it is guaranteed that the values generated are all different.
    Default data type is np.float32.
    """
    # BUGFIX: `np.prod(shape).astype(np.int)` relied on the `np.int` alias,
    # which was removed in NumPy 1.24; use the builtin int instead.
    elem = int(np.prod(shape))
    ret = []
    for _ in range(elem):
        while True:
            r = dtype((rand_max - rand_min) * np.random.random() + rand_min)
            if not allow_duplicate and r in ret:
                continue
            # integer dtypes are accepted as-is; floats must keep their
            # distance from the nearest integer
            if np.issubdtype(dtype, np.integer) or np.fabs(np.round(r) - r) > eps_from_int:
                ret.append(r)
                break
    ret = np.array(ret).reshape(shape)
    return ret.astype(dtype)
def ssa_fn(func):
    """
    Deprecated: use @mb.program()
    """
    def _invoke(*args, **kwargs):
        prog = Program()  # kept (unused) for parity with the historical behavior
        with Function({}) as ssa_func:
            func(*args, **kwargs)
    return _invoke
def to_tuple(v):
    """Return ``v`` as a tuple; scalars are wrapped in a 1-tuple."""
    if isinstance(v, (list, tuple)):
        return tuple(v)
    return (v,)
def is_close(expected, actual, atol=1e-04, rtol=1e-05):
    """
    expected, actual: np.array or python primitive (scalar)
    rtol: relative tolerance. See numpy.isclose.

    Returns True when all entries are close; otherwise logs the differing
    entries and returns False.
    """
    close = np.isclose(expected, actual, atol=atol, rtol=rtol)
    if np.all(close):
        return True
    diff = expected - actual
    num_not_close = np.sum(~close)
    msg = "Values differ by L1 norm: {}. Num entries not close: {}/{}"
    logging.error(msg.format(np.sum(np.abs(diff)), num_not_close, expected.size))
    # only dump the individual mismatches when the list stays readable
    if num_not_close < 30:
        logging.error("Differing entries:")
        logging.error("Expected: {}".format(expected[~close]))
        logging.error("Actual: {}".format(actual[~close]))
        logging.error("Delta: {}".format(diff[~close]))
    return False
def run_core_ml_predict(proto, input_key_values, use_cpu_only=False):
    """Build an MLModel from ``proto`` and run prediction on the given feed.

    Non-image inputs are coerced to float32 in place (scalars become
    1-element arrays); PIL images are passed through untouched.
    """
    model = coremltools.models.MLModel(proto, useCPUOnly=use_cpu_only)
    for key, value in input_key_values.items():
        if isinstance(value, PIL.Image.Image):
            continue
        if np.isscalar(value) or value.shape == ():
            input_key_values[key] = np.array([value], dtype=np.float32)
        else:
            input_key_values[key] = value.astype(np.float32)
    return model.predict(input_key_values, useCPUOnly=use_cpu_only)
def compare_backend(
    proto,
    input_key_values,
    expected_outputs,
    use_cpu_only=False,
    atol=1e-04,
    rtol=1e-05,
    also_compare_shapes=True,
):
    """
    Inputs:
        - proto: MLModel proto.
        - input_key_values: str -> np.array. Keys must match those in
          input_placeholders.
        - expected_outputs: dict[str, np.array]. Required iff
          frontend_only == False
        - use_cpu_only: True/False.
    """
    # Prediction is only possible on macOS; elsewhere this check is a no-op.
    if _IS_MACOS:
        pred = run_core_ml_predict(proto, input_key_values, use_cpu_only=use_cpu_only)
        if also_compare_shapes:
            compare_shapes(
                proto,
                input_key_values,
                expected_outputs,
                use_cpu_only=use_cpu_only,
                pred=pred,
            )
        if not use_cpu_only:
            # non-CPU numerics are less precise: relax tolerances substantially
            atol = max(atol * 100.0, 5e-1)
            rtol = max(rtol * 100.0, 5e-2)
        for o, expected in expected_outputs.items():
            msg = (
                "Output {} differs. useCPUOnly={}.\nInput={}, "
                + "\nExpected={}, \nOutput={}\n"
            )
            assert is_close(expected, pred[o], atol, rtol), msg.format(
                o, use_cpu_only, input_key_values, expected, pred[o]
            )
def compare_shapes(
    proto, input_key_values, expected_outputs, use_cpu_only=False, pred=None
):
    """
    Inputs:
        - proto: MLModel proto.
        - input_key_values: str -> np.array or PIL.Image. Keys must match those in
          input_placeholders.
        - expected_outputs: dict[str, np.array].
        - use_cpu_only: True/False.
        - pred: Prediction to use, if it has already been computed.
    """
    # shape comparison requires running the model, which only works on macOS
    if not _IS_MACOS:
        return
    if not pred:
        pred = run_core_ml_predict(proto, input_key_values, use_cpu_only)
    for name, expected in expected_outputs.items():
        actual_shape = pred[name].shape
        # Core ML does not support scalar as output: a () expectation may come
        # back as (1,). Remove this special case when support is added.
        if expected.shape == () and actual_shape == (1,):
            continue
        msg = "Output: {}. expected shape {} != actual shape {}".format(
            name, expected.shape, actual_shape
        )
        assert actual_shape == expected.shape, msg
def get_core_ml_prediction(
    build, input_placeholders, input_values, use_cpu_only=False, backend="nn_proto"
):
    """
    Build a single-function MIL program from ``build``, convert it to the
    requested backend and return the model's predictions for ``input_values``.
    """
    program = Program()
    with Function(input_placeholders) as ssa_func:
        outputs = build(**ssa_func.inputs)
        # normalize the builder's return value to a list of output vars
        if isinstance(outputs, tuple):
            outputs = list(outputs)
        elif not isinstance(outputs, list):
            outputs = [outputs]
        ssa_func.set_outputs(outputs)
        program.add_function("main", ssa_func)
    proto = _converter._convert(program, convert_from="mil", convert_to=backend)
    model = coremltools.models.MLModel(proto, use_cpu_only)
    return model.predict(input_values, useCPUOnly=use_cpu_only)
def apply_pass_and_basic_check(prog, pass_name):
    """
    Apply the named pass to ``prog`` in place and verify the transformed
    program still has the same output names and shapes as before.

    Returns (prev_prog, prev_block, block): a deep copy taken before the
    pass, plus the "main" functions of the copy and of the mutated program.
    """
    prev_prog = copy.deepcopy(prog)
    PASS_REGISTRY[pass_name](prog)
    block = prog.functions["main"]
    prev_block = prev_prog.functions["main"]
    assert_same_output_names(prev_prog, prog)
    assert_same_output_shapes(prev_prog, prog)
    return prev_prog, prev_block, block
|
[
"numpy.sum",
"numpy.abs",
"numpy.isclose",
"numpy.round",
"numpy.prod",
"logging.error",
"numpy.testing.assert_equal",
"coremltools.converters.mil.mil.Program",
"copy.deepcopy",
"coremltools.models.neural_network.printer.print_network_spec",
"numpy.issubdtype",
"numpy.all",
"coremltools.converters.mil.converter._convert",
"numpy.isscalar",
"coremltools.converters.mil.mil.Function",
"numpy.random.random",
"numpy.array",
"numpy.random.rand",
"coremltools.models.MLModel"
] |
[((1597, 1665), 'coremltools.converters.mil.converter._convert', '_converter._convert', (['program'], {'convert_from': '"""mil"""', 'convert_to': 'backend'}), "(program, convert_from='mil', convert_to=backend)\n", (1616, 1665), True, 'from coremltools.converters.mil import converter as _converter\n'), ((1827, 1860), 'coremltools.models.MLModel', 'coremltools.models.MLModel', (['proto'], {}), '(proto)\n', (1853, 1860), False, 'import coremltools\n'), ((4669, 4719), 'numpy.isclose', 'np.isclose', (['expected', 'actual'], {'atol': 'atol', 'rtol': 'rtol'}), '(expected, actual, atol=atol, rtol=rtol)\n', (4679, 4719), True, 'import numpy as np\n'), ((5369, 5427), 'coremltools.models.MLModel', 'coremltools.models.MLModel', (['proto'], {'useCPUOnly': 'use_cpu_only'}), '(proto, useCPUOnly=use_cpu_only)\n', (5395, 5427), False, 'import coremltools\n'), ((8257, 8266), 'coremltools.converters.mil.mil.Program', 'Program', ([], {}), '()\n', (8264, 8266), False, 'from coremltools.converters.mil.mil import Program, Function\n'), ((8642, 8710), 'coremltools.converters.mil.converter._convert', '_converter._convert', (['program'], {'convert_from': '"""mil"""', 'convert_to': 'backend'}), "(program, convert_from='mil', convert_to=backend)\n", (8661, 8710), True, 'from coremltools.converters.mil import converter as _converter\n'), ((8723, 8770), 'coremltools.models.MLModel', 'coremltools.models.MLModel', (['proto', 'use_cpu_only'], {}), '(proto, use_cpu_only)\n', (8749, 8770), False, 'import coremltools\n'), ((8948, 8967), 'copy.deepcopy', 'copy.deepcopy', (['prog'], {}), '(prog)\n', (8961, 8967), False, 'import copy\n'), ((1119, 1157), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['count', 'expect'], {}), '(count, expect)\n', (1142, 1157), True, 'import numpy as np\n'), ((1562, 1584), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (1576, 1584), True, 'import numpy as np\n'), ((1772, 1813), 
'coremltools.models.neural_network.printer.print_network_spec', 'print_network_spec', (['proto'], {'style': '"""coding"""'}), "(proto, style='coding')\n", (1790, 1813), False, 'from coremltools.models.neural_network.printer import print_network_spec\n'), ((4264, 4273), 'coremltools.converters.mil.mil.Program', 'Program', ([], {}), '()\n', (4271, 4273), False, 'from coremltools.converters.mil.mil import Program, Function\n'), ((4731, 4744), 'numpy.all', 'np.all', (['close'], {}), '(close)\n', (4737, 4744), True, 'import numpy as np\n'), ((4803, 4817), 'numpy.sum', 'np.sum', (['(~close)'], {}), '(~close)\n', (4809, 4817), True, 'import numpy as np\n'), ((8276, 8304), 'coremltools.converters.mil.mil.Function', 'Function', (['input_placeholders'], {}), '(input_placeholders)\n', (8284, 8304), False, 'from coremltools.converters.mil.mil import Program, Function\n'), ((3692, 3706), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (3699, 3706), True, 'import numpy as np\n'), ((4086, 4099), 'numpy.array', 'np.array', (['ret'], {}), '(ret)\n', (4094, 4099), True, 'import numpy as np\n'), ((4287, 4299), 'coremltools.converters.mil.mil.Function', 'Function', (['{}'], {}), '({})\n', (4295, 4299), False, 'from coremltools.converters.mil.mil import Program, Function\n'), ((5022, 5057), 'logging.error', 'logging.error', (['"""Differing entries:"""'], {}), "('Differing entries:')\n", (5035, 5057), False, 'import logging\n'), ((3947, 3979), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.integer'], {}), '(dtype, np.integer)\n', (3960, 3979), True, 'import numpy as np\n'), ((5692, 5723), 'numpy.array', 'np.array', (['[v]'], {'dtype': 'np.float32'}), '([v], dtype=np.float32)\n', (5700, 5723), True, 'import numpy as np\n'), ((4933, 4945), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (4939, 4945), True, 'import numpy as np\n'), ((5551, 5565), 'numpy.isscalar', 'np.isscalar', (['v'], {}), '(v)\n', (5562, 5565), True, 'import numpy as np\n'), ((3827, 3845), 
'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3843, 3845), True, 'import numpy as np\n'), ((3991, 4002), 'numpy.round', 'np.round', (['r'], {}), '(r)\n', (3999, 4002), True, 'import numpy as np\n')]
|
import logging
import json
from src.bot import bot
from src.constants import Client
# Route the 'discord' library's DEBUG output to devil.log.
log = logging.getLogger('discord')
log.setLevel(logging.DEBUG)
handler = logging.FileHandler(filename='devil.log', encoding='utf-8', mode='w')
formatter = logging.Formatter('%(asctime)s | %(name)s | %(levelname)s | %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
# extensions loading: read the cog list from the bundled JSON manifest
with open("src/resource/_cogs.json", "rb") as manifest:
    cog_names = json.load(manifest)["cogs"]
    bot.load_extensions(cog_names)
bot.run(Client.TOKEN)
|
[
"json.load",
"logging.FileHandler",
"src.bot.bot.load_extensions",
"logging.Formatter",
"src.bot.bot.run",
"logging.getLogger"
] |
[((111, 139), 'logging.getLogger', 'logging.getLogger', (['"""discord"""'], {}), "('discord')\n", (128, 139), False, 'import logging\n'), ((178, 247), 'logging.FileHandler', 'logging.FileHandler', ([], {'filename': '"""devil.log"""', 'encoding': '"""utf-8"""', 'mode': '"""w"""'}), "(filename='devil.log', encoding='utf-8', mode='w')\n", (197, 247), False, 'import logging\n'), ((496, 521), 'src.bot.bot.load_extensions', 'bot.load_extensions', (['_cog'], {}), '(_cog)\n', (515, 521), False, 'from src.bot import bot\n'), ((523, 544), 'src.bot.bot.run', 'bot.run', (['Client.TOKEN'], {}), '(Client.TOKEN)\n', (530, 544), False, 'from src.bot import bot\n'), ((269, 342), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s | %(name)s | %(levelname)s | %(message)s"""'], {}), "('%(asctime)s | %(name)s | %(levelname)s | %(message)s')\n", (286, 342), False, 'import logging\n'), ((454, 469), 'json.load', 'json.load', (['_cog'], {}), '(_cog)\n', (463, 469), False, 'import json\n')]
|
import os
import flopy
import pandas as pd
import numpy as np
def hdobj2data(hdsobj):
    """Convert a usg hdsobj to an array of shape (nper, nnodes).

    One row per stress period, built by concatenating the per-layer arrays
    returned by ``hdsobj.get_data``.
    """
    rows = []
    for kstpkper in hdsobj.get_kstpkper():
        layers = hdsobj.get_data(kstpkper=kstpkper)
        row = []
        for layer in layers:
            row.extend(layer.tolist())
        rows.append(row)
    return np.array(rows)
def get_sim_hds(model_ws='.'):
    """Extract simulated heads at the observation nodes and write obs.csv.

    Reads the node list from Freyberg/misc/obs_nodes.dat, pulls heads for
    every stress period from freyberg.usg.hds in ``model_ws`` and writes one
    (head, name, node, sp) row per node/stress-period pair to obs.csv.
    """
    node_df = pd.read_csv(os.path.join("Freyberg", "misc", "obs_nodes.dat"), delim_whitespace=True)
    hdsobj = flopy.utils.HeadUFile(os.path.join(model_ws, "freyberg.usg.hds"))
    hds = hdobj2data(hdsobj)
    nper, nnodes = hds.shape
    records = []
    for _, dfrow in node_df.iterrows():
        name, node = dfrow['name'], dfrow['node']
        for sp in range(nper):
            # nodes in the file are 1-based; array columns are 0-based
            records.append([hds[sp, node - 1], name, node, sp])
    obs_df = pd.DataFrame(records, columns=['head', 'name', 'node', 'sp'])
    obs_df.to_csv(os.path.join('obs.csv'), index=False)


if __name__ == '__main__':
    get_sim_hds(model_ws='template')
|
[
"pandas.DataFrame",
"numpy.array",
"os.path.join"
] |
[((412, 425), 'numpy.array', 'np.array', (['hds'], {}), '(hds)\n', (420, 425), True, 'import numpy as np\n'), ((951, 1009), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['head', 'name', 'node', 'sp']"}), "(data, columns=['head', 'name', 'node', 'sp'])\n", (963, 1009), True, 'import pandas as pd\n'), ((484, 533), 'os.path.join', 'os.path.join', (['"""Freyberg"""', '"""misc"""', '"""obs_nodes.dat"""'], {}), "('Freyberg', 'misc', 'obs_nodes.dat')\n", (496, 533), False, 'import os\n'), ((590, 632), 'os.path.join', 'os.path.join', (['model_ws', '"""freyberg.usg.hds"""'], {}), "(model_ws, 'freyberg.usg.hds')\n", (602, 632), False, 'import os\n'), ((1024, 1047), 'os.path.join', 'os.path.join', (['"""obs.csv"""'], {}), "('obs.csv')\n", (1036, 1047), False, 'import os\n')]
|
import io
import logging
import os
import re
import sys
import traceback
from logging.handlers import RotatingFileHandler
from jgutils.config import AZURE_WEB
# Prefer colored console output when the color packages are installed;
# fall back to plain logging.Formatter otherwise (eg on Azure).
try:
    import colored_traceback
    import colorlog
    import pygments.lexers
    from colored_traceback import Colorizer
    # color tracebacks in terminal - uncaught exceptions in scripts only, not logging
    colored_traceback.add_hook(style='jayme', always=True)
    Formatter = colorlog.ColoredFormatter
except ModuleNotFoundError:
    # running on azure
    Formatter = logging.Formatter
# simple colors - ANSI SGR codes for each name
_palette = dict(
    black=30,
    red=31,
    green=32,
    yellow=33,
    blue=34,
    cyan=36,
    white=37,
    underline=4,
    reset=0)
# create full color codes as a dict comp, eg '\033[34m' for blue
palette = {k: f'\033[{color_code}m' for k, color_code in _palette.items()}
class ColoredFormatter(Formatter):
    """Custom logging Formatter to print colored tracebacks and log level messages

    NOTE(review): the base class is ``colorlog.ColoredFormatter`` when the
    color packages import succeeded, else ``logging.Formatter``; in the
    latter case ``super().__init__`` would reject ``log_colors`` - confirm
    this class is only instantiated on the colored path.
    """
    def __init__(self, fmt: str, *args, **kw) -> None:
        # per-level colors consumed by colorlog via the %(log_color)s field
        log_colors = dict(
            DEBUG='cyan',
            INFO='green',
            WARNING='yellow',
            ERROR='red',
            CRITICAL='red,bg_white')
        # always add log_color before format
        fmt = f'%(log_color)s{fmt}'
        super().__init__(fmt, log_colors=log_colors, *args, **kw)
        self.colorizer = Colorizer(style='jayme')
    def colorize_traceback(self, type, value, tb) -> str:
        """
        Copied from colored_traceback Colorizer
        - just return and print to io instead of write to stderr so logging message prints first
        """
        # NOTE: parameter names shadow builtins (type) to mirror sys.excepthook's signature
        tb_text = ''.join(traceback.format_exception(type, value, tb))
        lexer = pygments.lexers.get_lexer_by_name('pytb', stripall=True)
        tb_colored = pygments.highlight(
            tb_text, lexer, self.colorizer.formatter)
        # self.stream.write(tb_colored)
        return tb_colored
    def formatException(self, ei) -> str:
        """Return the exception info colorized, with the trailing newline stripped."""
        sio = io.StringIO()
        tb_colored = self.colorize_traceback(*ei)
        print(tb_colored, file=sio)
        s = sio.getvalue()
        sio.close()
        if s[-1:] == '\n':
            s = s[:-1]
        return s
    def formatMessage(self, record: logging.LogRecord) -> str:
        """Format the record normally, then colorize any filepaths/urls in it."""
        message = super().formatMessage(record)
        return highlight_filepath(message)
    def format(self, record: logging.LogRecord) -> str:
        """Disable caching of exception text
        - Lets StreamHandler and FileHandler have different traceback formats
        https://stackoverflow.com/questions/5875225/
        weird-logger-only-uses-the-formatter-of-the-first-handler-for-exceptions
        """
        # temporarily clear exc_text so logging.Formatter re-renders the traceback
        backup = record.exc_text
        record.exc_text = None
        s = logging.Formatter.format(self, record)
        record.exc_text = backup
        return s
if not AZURE_WEB:
    # local app, use colored formatter
    StreamFormatter = ColoredFormatter
else:
    StreamFormatter = logging.Formatter
# Console/stream handler - shared by every logger created via getlog()
_fmt_stream = '%(levelname)-7s %(lineno)-4d %(name)-20s %(message)s'
stream_formatter = StreamFormatter(_fmt_stream)
sh = logging.StreamHandler(stream=sys.stdout)
sh.setFormatter(stream_formatter)
# set file logger if path set and not azure
log_path = os.getenv('file_log_path', None)
fh = None
# idiom fix: was "not log_path is None" - same behavior, PEP 8 spelling
if log_path is not None and not AZURE_WEB:
    _fmt_file = '%(asctime)s %(levelname)-7s %(lineno)-4d %(name)-20s %(message)s'
    fmt_file = logging.Formatter(_fmt_file, datefmt='%m-%d %H:%M:%S')
    fh = RotatingFileHandler(log_path, maxBytes=100000, backupCount=0)
    fh.setLevel(logging.DEBUG)
    fh.setFormatter(fmt_file)
# NOTE could do logging.basicConfig(handlers=[sh, fh]) to catch everything
# logging.basicConfig(handlers=[sh, fh], level=logging.DEBUG)
def getlog(name: str) -> logging.Logger:
    """Return a module logger wired to the shared stream/file handlers.

    Created through ``logging.getLogger`` so it inherits from azure's root
    logger when running there.

    Parameters
    ----------
    name : str
        module __name__

    Returns
    -------
    logging.Logger

    Examples
    --------
    >>> from jambot.logger import getlog
    >>> log = getlog(__name__)
    """
    # strip azure's __app__ prefix, then drop the package-root component
    short_name = '.'.join(name.replace('__app__.', '').split('.')[1:])
    # an empty name would return the ROOT logger - use 'base' instead
    log = logging.getLogger(short_name or 'base')
    log.setLevel(logging.DEBUG)
    if not AZURE_WEB:
        # prevents duplicate outputs (eg for pytest)
        log.propagate = False
    if not log.handlers:
        log.addHandler(sh)
        if fh is not None:
            log.addHandler(fh)
    return log
def highlight_filepath(s: str, color: str = 'blue') -> str:
    """Highlight filepath/url substrings for terminal output.

    Parameters
    ----------
    s : str
        string to search for filepaths
    color : str, optional
        default 'blue'

    Returns
    -------
    str
        input string with filepaths colored
    """
    text = str(s)
    # reuse any ANSI color code already present in the string as the "reset",
    # so highlighting doesn't clobber the surrounding color
    existing = re.search(r'\x1b\[\d+m', text)
    reset_code = existing[0] if existing else palette['reset']
    # color anything that looks like a url or an absolute path
    # (path match stops at the first backslash, ie the next color code)
    path_expr = r'(http|https.*|\/.*\/[^\s\\]*)'
    return re.sub(path_expr, f'{palette[color]}\\1{reset_code}', text)
def get_stacktrace() -> str:
    """Return the current call stack as a newline-joined string (this frame excluded)."""
    frames = traceback.format_stack()[:-1]  # drop this function's own frame
    return '\n'.join(frames)
def save_stacktrace(fname: str = 'traceback') -> None:
    """Write the current stacktrace to ``<fname>.txt``."""
    with open(f'{fname}.txt', 'w') as fp:
        fp.write(get_stacktrace())
def print_stacktrace():
    """Print the current stacktrace, syntax-highlighted for the terminal."""
    highlighter = Colorizer(style='jayme')
    pytb_lexer = pygments.lexers.get_lexer_by_name('pytb', stripall=True)
    print(pygments.highlight(get_stacktrace(), pytb_lexer, highlighter.formatter))
|
[
"os.getenv",
"logging.Formatter.format",
"io.StringIO",
"traceback.format_exception",
"logging.StreamHandler",
"logging.Formatter",
"colored_traceback.Colorizer",
"traceback.format_stack",
"colored_traceback.add_hook",
"logging.handlers.RotatingFileHandler",
"logging.getLogger"
] |
[((3171, 3211), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (3192, 3211), False, 'import logging\n'), ((3302, 3334), 'os.getenv', 'os.getenv', (['"""file_log_path"""', 'None'], {}), "('file_log_path', None)\n", (3311, 3334), False, 'import os\n'), ((377, 431), 'colored_traceback.add_hook', 'colored_traceback.add_hook', ([], {'style': '"""jayme"""', 'always': '(True)'}), "(style='jayme', always=True)\n", (403, 431), False, 'import colored_traceback\n'), ((3488, 3542), 'logging.Formatter', 'logging.Formatter', (['_fmt_file'], {'datefmt': '"""%m-%d %H:%M:%S"""'}), "(_fmt_file, datefmt='%m-%d %H:%M:%S')\n", (3505, 3542), False, 'import logging\n'), ((3553, 3614), 'logging.handlers.RotatingFileHandler', 'RotatingFileHandler', (['log_path'], {'maxBytes': '(100000)', 'backupCount': '(0)'}), '(log_path, maxBytes=100000, backupCount=0)\n', (3572, 3614), False, 'from logging.handlers import RotatingFileHandler\n'), ((4468, 4491), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (4485, 4491), False, 'import logging\n'), ((5755, 5779), 'colored_traceback.Colorizer', 'Colorizer', ([], {'style': '"""jayme"""'}), "(style='jayme')\n", (5764, 5779), False, 'from colored_traceback import Colorizer\n'), ((1364, 1388), 'colored_traceback.Colorizer', 'Colorizer', ([], {'style': '"""jayme"""'}), "(style='jayme')\n", (1373, 1388), False, 'from colored_traceback import Colorizer\n'), ((2013, 2026), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (2024, 2026), False, 'import io\n'), ((2789, 2827), 'logging.Formatter.format', 'logging.Formatter.format', (['self', 'record'], {}), '(self, record)\n', (2813, 2827), False, 'import logging\n'), ((1677, 1720), 'traceback.format_exception', 'traceback.format_exception', (['type', 'value', 'tb'], {}), '(type, value, tb)\n', (1703, 1720), False, 'import traceback\n'), ((5498, 5522), 'traceback.format_stack', 'traceback.format_stack', ([], {}), '()\n', (5520, 5522), 
False, 'import traceback\n')]
|
from flask import Flask, request #import main Flask class and request object
from eth_keys import keys
from web3 import Web3
import dataset
import sys
app = Flask(__name__)  # module-level Flask application shared by all routes
@app.route('/transaction', methods = ['POST'])
def transaction():
    """Update a user's balance and signature from a JSON payload.

    Expects a JSON body ``{"address": ..., "bal": ..., "signature": ...}``
    and persists the new balance/signature for that address.
    Returns a small HTML confirmation containing the new balance.
    """
    db = dataset.connect('sqlite:///database/users.db')
    table = db['users']
    content = request.get_json()
    user = content['address']
    new_balance = content['bal']
    signature = content['signature']
    # persist the new balance, then the signature, keyed by address
    balance_update = dict(address=user, bal=new_balance)
    table.update(balance_update, ['address'])
    signature_update = dict(address=user, signature=signature)
    table.update(signature_update, ['address'])
    ''' STILL NEED TO IMPLEMENT VERIFICATION
    nft_value = request.args.get('a')
    hashed_message = Web3.keccak(int(new_balance))
    print (hashed_message.hex())
    print ('0xe71fac6fb785942cc6c6404a423f94f32a28ae66d69ff41494c38bfd4788b2f8')
    print (nft_value)
    message = request.args.get('signature')
    signature_bytes = bytes.fromhex(message)
    signature = keys.Signature(signature_bytes)
    print (signature)
    message_key = signature.recover_public_key_from_msg(hashed_message)
    ### STILL NEED VERIFICATION ###
    #print (message_key)
    #print (message_key == user_public_key)
    #print (signature.verify_msg(hashed_message, user_public_key))
    ### STILL NEED VERIFICATION ###
    '''
    # BUG FIX: previous version returned the undefined name `nft_value`
    # (only defined inside the commented-out block above), raising a
    # NameError on every request; report the stored balance instead.
    return '''<h1>Received tokens: {}</h1>'''.format(new_balance)
@app.route('/balance', methods = ['GET'])
def balance():
    """Return the stored balance for the address given in the query string."""
    users = dataset.connect('sqlite:///database/users.db')['users']
    address = request.args.get('address')
    print (address)
    # look up the single row for this address
    record = users.find_one(address=address)
    print (record)
    return str(record['bal'])
if __name__=="__main__":
    # NOTE(review): debug=True combined with host 0.0.0.0 exposes the
    # Werkzeug debugger to the network - disable debug in production.
    app.run(debug=True, host="0.0.0.0", port=5000) #run app in debug mode on port 5000
|
[
"dataset.connect",
"flask.Flask",
"flask.request.get_json",
"flask.request.args.get"
] |
[((158, 173), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (163, 173), False, 'from flask import Flask, request\n'), ((272, 318), 'dataset.connect', 'dataset.connect', (['"""sqlite:///database/users.db"""'], {}), "('sqlite:///database/users.db')\n", (287, 318), False, 'import dataset\n'), ((357, 375), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (373, 375), False, 'from flask import Flask, request\n'), ((1538, 1584), 'dataset.connect', 'dataset.connect', (['"""sqlite:///database/users.db"""'], {}), "('sqlite:///database/users.db')\n", (1553, 1584), False, 'import dataset\n'), ((1623, 1650), 'flask.request.args.get', 'request.args.get', (['"""address"""'], {}), "('address')\n", (1639, 1650), False, 'from flask import Flask, request\n')]
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Tests that HTML generation is awesome."""
import datetime
import glob
import json
import os
import os.path
import re
import sys
import coverage
from coverage.backward import unicode_class
from coverage import env
from coverage.files import flat_rootname
import coverage.html
from coverage.misc import CoverageException, NotPython, NoSource
from tests.coveragetest import CoverageTest, TESTS_DIR
from tests.goldtest import gold_path
from tests.goldtest import change_dir, compare, contains, doesnt_contain, contains_any
class HtmlTestHelpers(CoverageTest):
    """Methods that help with HTML tests."""
    def create_initial_files(self):
        """Create the source files we need to run these tests."""
        self.make_file("main_file.py", """\
            import helper1, helper2
            helper1.func1(12)
            helper2.func2(12)
            """)
        self.make_file("helper1.py", """\
            def func1(x):
                if x % 2:
                    print("odd")
            """)
        self.make_file("helper2.py", """\
            def func2(x):
                print("x is %d" % x)
            """)
    def run_coverage(self, covargs=None, htmlargs=None):
        """Run coverage.py on main_file.py, and create an HTML report.

        Returns the total coverage percentage from ``html_report``.
        """
        self.clean_local_file_imports()
        cov = coverage.Coverage(**(covargs or {}))
        self.start_import_stop(cov, "main_file")
        return cov.html_report(**(htmlargs or {}))
    def remove_html_files(self):
        """Remove the HTML files created as part of the HTML report."""
        os.remove("htmlcov/index.html")
        os.remove("htmlcov/main_file_py.html")
        os.remove("htmlcov/helper1_py.html")
        os.remove("htmlcov/helper2_py.html")
    def get_html_report_content(self, module):
        """Return the content of the HTML report for `module`."""
        # "sub/mod.py" -> "htmlcov/sub_mod_py.html"
        filename = module.replace(".", "_").replace("/", "_") + ".html"
        filename = os.path.join("htmlcov", filename)
        with open(filename) as f:
            return f.read()
    def get_html_index_content(self):
        """Return the content of index.html.

        Timestamps are replaced with a placeholder so that clocks don't matter.
        """
        with open("htmlcov/index.html") as f:
            index = f.read()
        index = re.sub(
            r"created at \d{4}-\d{2}-\d{2} \d{2}:\d{2}",
            r"created at YYYY-MM-DD HH:MM",
            index,
        )
        return index
    def assert_correct_timestamp(self, html):
        """Extract the timestamp from `html`, and assert it is recent."""
        timestamp_pat = r"created at (\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2})"
        m = re.search(timestamp_pat, html)
        self.assertTrue(m, "Didn't find a timestamp!")
        timestamp = datetime.datetime(*map(int, m.groups()))
        # The timestamp only records the minute, so the delta could be from
        # 12:00 to 12:01:59, or two minutes.
        self.assert_recent_datetime(
            timestamp,
            seconds=120,
            msg="Timestamp is wrong: {0}".format(timestamp),
        )
class HtmlDeltaTest(HtmlTestHelpers, CoverageTest):
    """Tests of the HTML delta speed-ups."""
    def setUp(self):
        super(HtmlDeltaTest, self).setUp()
        # At least one of our tests monkey-patches the version of coverage.py,
        # so grab it here to restore it later.
        self.real_coverage_version = coverage.__version__
        self.addCleanup(setattr, coverage, "__version__", self.real_coverage_version)
    def run_coverage(self, covargs=None, htmlargs=None):
        """For the delta tests, we always want source=. """
        covargs = covargs or {}
        covargs['source'] = "."
        # FIX: propagate the total-coverage percentage returned by the base
        # implementation instead of silently discarding it, so this override
        # keeps the base class's contract.
        return super(HtmlDeltaTest, self).run_coverage(covargs=covargs, htmlargs=htmlargs)
    def test_html_created(self):
        # Test basic HTML generation: files should be created.
        self.create_initial_files()
        self.run_coverage()
        self.assert_exists("htmlcov/index.html")
        self.assert_exists("htmlcov/main_file_py.html")
        self.assert_exists("htmlcov/helper1_py.html")
        self.assert_exists("htmlcov/helper2_py.html")
        self.assert_exists("htmlcov/style.css")
        self.assert_exists("htmlcov/coverage_html.js")
    def test_html_delta_from_source_change(self):
        # HTML generation can create only the files that have changed.
        # In this case, helper1 changes because its source is different.
        self.create_initial_files()
        self.run_coverage()
        index1 = self.get_html_index_content()
        self.remove_html_files()
        # Now change a file and do it again
        self.make_file("helper1.py", """\
            def func1(x):    # A nice function
                if x % 2:
                    print("odd")
            """)
        self.run_coverage()
        # Only the changed files should have been created.
        self.assert_exists("htmlcov/index.html")
        self.assert_exists("htmlcov/helper1_py.html")
        self.assert_doesnt_exist("htmlcov/main_file_py.html")
        self.assert_doesnt_exist("htmlcov/helper2_py.html")
        index2 = self.get_html_index_content()
        self.assertMultiLineEqual(index1, index2)
    def test_html_delta_from_coverage_change(self):
        # HTML generation can create only the files that have changed.
        # In this case, helper1 changes because its coverage is different.
        self.create_initial_files()
        self.run_coverage()
        self.remove_html_files()
        # Now change a file and do it again
        self.make_file("main_file.py", """\
            import helper1, helper2
            helper1.func1(23)
            helper2.func2(23)
            """)
        self.run_coverage()
        # Only the changed files should have been created.
        self.assert_exists("htmlcov/index.html")
        self.assert_exists("htmlcov/helper1_py.html")
        self.assert_exists("htmlcov/main_file_py.html")
        self.assert_doesnt_exist("htmlcov/helper2_py.html")
    def test_html_delta_from_settings_change(self):
        # HTML generation can create only the files that have changed.
        # In this case, everything changes because the coverage.py settings
        # have changed.
        self.create_initial_files()
        self.run_coverage(covargs=dict(omit=[]))
        index1 = self.get_html_index_content()
        self.remove_html_files()
        self.run_coverage(covargs=dict(omit=['xyzzy*']))
        # All the files have been reported again.
        self.assert_exists("htmlcov/index.html")
        self.assert_exists("htmlcov/helper1_py.html")
        self.assert_exists("htmlcov/main_file_py.html")
        self.assert_exists("htmlcov/helper2_py.html")
        index2 = self.get_html_index_content()
        self.assertMultiLineEqual(index1, index2)
    def test_html_delta_from_coverage_version_change(self):
        # HTML generation can create only the files that have changed.
        # In this case, everything changes because the coverage.py version has
        # changed.
        self.create_initial_files()
        self.run_coverage()
        index1 = self.get_html_index_content()
        self.remove_html_files()
        # "Upgrade" coverage.py!
        coverage.__version__ = "XYZZY"
        self.run_coverage()
        # All the files have been reported again.
        self.assert_exists("htmlcov/index.html")
        self.assert_exists("htmlcov/helper1_py.html")
        self.assert_exists("htmlcov/main_file_py.html")
        self.assert_exists("htmlcov/helper2_py.html")
        index2 = self.get_html_index_content()
        fixed_index2 = index2.replace("XYZZY", self.real_coverage_version)
        self.assertMultiLineEqual(index1, fixed_index2)
    def test_file_becomes_100(self):
        self.create_initial_files()
        self.run_coverage()
        # Now change a file and do it again
        self.make_file("main_file.py", """\
            import helper1, helper2
            # helper1 is now 100%
            helper1.func1(12)
            helper1.func1(23)
            """)
        self.run_coverage(htmlargs=dict(skip_covered=True))
        # The 100% file, skipped, shouldn't be here.
        self.assert_doesnt_exist("htmlcov/helper1_py.html")
    def test_status_format_change(self):
        self.create_initial_files()
        self.run_coverage()
        self.remove_html_files()
        # Rewrite status.json with an unknown format number; the delta
        # machinery must then regenerate everything.
        with open("htmlcov/status.json") as status_json:
            status_data = json.load(status_json)
        self.assertEqual(status_data['format'], 1)
        status_data['format'] = 2
        with open("htmlcov/status.json", "w") as status_json:
            json.dump(status_data, status_json)
        self.run_coverage()
        # All the files have been reported again.
        self.assert_exists("htmlcov/index.html")
        self.assert_exists("htmlcov/helper1_py.html")
        self.assert_exists("htmlcov/main_file_py.html")
        self.assert_exists("htmlcov/helper2_py.html")
class HtmlTitleTest(HtmlTestHelpers, CoverageTest):
    """Tests of the HTML title support."""
    def test_default_title(self):
        self.create_initial_files()
        self.run_coverage()
        index = self.get_html_index_content()
        self.assertIn("<title>Coverage report</title>", index)
        self.assertIn("<h1>Coverage report:", index)
    def test_title_set_in_config_file(self):
        self.create_initial_files()
        self.make_file(".coveragerc", "[html]\ntitle = Metrics & stuff!\n")
        self.run_coverage()
        index = self.get_html_index_content()
        # the ampersand must come out HTML-escaped
        self.assertIn("<title>Metrics &amp; stuff!</title>", index)
        self.assertIn("<h1>Metrics &amp; stuff!:", index)
    def test_non_ascii_title_set_in_config_file(self):
        self.create_initial_files()
        self.make_file(".coveragerc", "[html]\ntitle = «ταБЬℓσ» numbers")
        self.run_coverage()
        index = self.get_html_index_content()
        # non-ascii characters are emitted as numeric character references
        self.assertIn(
            "<title>&#171;&#964;&#945;&#1041;&#1068;&#8467;&#963;&#187;"
            " numbers", index
        )
        self.assertIn(
            "<h1>&#171;&#964;&#945;&#1041;&#1068;&#8467;&#963;&#187;"
            " numbers", index
        )
    def test_title_set_in_args(self):
        self.create_initial_files()
        # the command-line/API title must win over the config-file title
        self.make_file(".coveragerc", "[html]\ntitle = Good title\n")
        self.run_coverage(htmlargs=dict(title="«ταБЬℓσ» & stüff!"))
        index = self.get_html_index_content()
        self.assertIn(
            "<title>&#171;&#964;&#945;&#1041;&#1068;&#8467;&#963;&#187;"
            " &amp; st&#252;ff!</title>", index
        )
        self.assertIn(
            "<h1>&#171;&#964;&#945;&#1041;&#1068;&#8467;&#963;&#187;"
            " &amp; st&#252;ff!:", index
        )
class HtmlWithUnparsableFilesTest(HtmlTestHelpers, CoverageTest):
    """Test the behavior when measuring unparsable files."""
    def test_dotpy_not_python(self):
        # A .py file that turns out not to be Python raises NotPython.
        self.make_file("main.py", "import innocuous")
        self.make_file("innocuous.py", "a = 1")
        cov = coverage.Coverage()
        self.start_import_stop(cov, "main")
        self.make_file("innocuous.py", "<h1>This isn't python!</h1>")
        msg = "Couldn't parse '.*innocuous.py' as Python source: .* at line 1"
        with self.assertRaisesRegex(NotPython, msg):
            cov.html_report()
    def test_dotpy_not_python_ignored(self):
        # With ignore_errors, the unparsable file only produces a warning.
        self.make_file("main.py", "import innocuous")
        self.make_file("innocuous.py", "a = 2")
        cov = coverage.Coverage()
        self.start_import_stop(cov, "main")
        self.make_file("innocuous.py", "<h1>This isn't python!</h1>")
        cov.html_report(ignore_errors=True)
        self.assertEqual(
            len(cov._warnings),
            1,
            "Expected a warning to be thrown when an invalid python file is parsed")
        self.assertIn(
            "Could not parse Python file",
            cov._warnings[0],
            "Warning message should be in 'invalid file' warning"
        )
        self.assertIn(
            "innocuous.py",
            cov._warnings[0],
            "Filename should be in 'invalid file' warning"
        )
        self.assert_exists("htmlcov/index.html")
        # This would be better as a glob, if the HTML layout changes:
        self.assert_doesnt_exist("htmlcov/innocuous.html")
    def test_dothtml_not_python(self):
        # We run a .html file, and when reporting, we can't parse it as
        # Python.  Since it wasn't .py, no error is reported.
        # Run an "HTML" file
        self.make_file("innocuous.html", "a = 3")
        self.run_command("coverage run --source=. innocuous.html")
        # Before reporting, change it to be an HTML file.
        self.make_file("innocuous.html", "<h1>This isn't python at all!</h1>")
        output = self.run_command("coverage html")
        self.assertEqual(output.strip(), "No data to report.")
    def test_execed_liar_ignored(self):
        # Jinja2 sets __file__ to be a non-Python file, and then execs code.
        # If that file contains non-Python code, a TokenError shouldn't
        # have been raised when writing the HTML report.
        source = "exec(compile('','','exec'), {'__file__': 'liar.html'})"
        self.make_file("liar.py", source)
        self.make_file("liar.html", "{# Whoops, not python code #}")
        cov = coverage.Coverage()
        self.start_import_stop(cov, "liar")
        cov.html_report()
        self.assert_exists("htmlcov/index.html")
    def test_execed_liar_ignored_indentation_error(self):
        # Jinja2 sets __file__ to be a non-Python file, and then execs code.
        # If that file contains untokenizable code, we shouldn't get an
        # exception.
        source = "exec(compile('','','exec'), {'__file__': 'liar.html'})"
        self.make_file("liar.py", source)
        # Tokenize will raise an IndentationError if it can't dedent.
        self.make_file("liar.html", "0\n  2\n 1\n")
        cov = coverage.Coverage()
        self.start_import_stop(cov, "liar")
        cov.html_report()
        self.assert_exists("htmlcov/index.html")
    def test_decode_error(self):
        # https://bitbucket.org/ned/coveragepy/issue/351/files-with-incorrect-encoding-are-ignored
        # imp.load_module won't load a file with an undecodable character
        # in a comment, though Python will run them.  So we'll change the
        # file after running.
        self.make_file("main.py", "import sub.not_ascii")
        self.make_file("sub/__init__.py")
        self.make_file("sub/not_ascii.py", """\
            # coding: utf-8
            a = 1  # Isn't this great?!
            """)
        cov = coverage.Coverage()
        self.start_import_stop(cov, "main")
        # Create the undecodable version of the file. make_file is too helpful,
        # so get down and dirty with bytes.
        with open("sub/not_ascii.py", "wb") as f:
            f.write(b"# coding: utf-8\na = 1  # Isn't this great?\xcb!\n")
        with open("sub/not_ascii.py", "rb") as f:
            undecodable = f.read()
        self.assertIn(b"?\xcb!", undecodable)
        cov.html_report()
        # the undecodable byte shows up as the U+FFFD replacement character
        html_report = self.get_html_report_content("sub/not_ascii.py")
        expected = "# Isn't this great?&#65533;!"
        self.assertIn(expected, html_report)
    def test_formfeeds(self):
        # https://bitbucket.org/ned/coveragepy/issue/360/html-reports-get-confused-by-l-in-the-code
        self.make_file("formfeed.py", "line_one = 1\n\f\nline_two = 2\n")
        cov = coverage.Coverage()
        self.start_import_stop(cov, "formfeed")
        cov.html_report()
        formfeed_html = self.get_html_report_content("formfeed.py")
        self.assertIn("line_two", formfeed_html)
class HtmlTest(HtmlTestHelpers, CoverageTest):
    """Moar HTML tests."""
    def test_missing_source_file_incorrect_message(self):
        # https://bitbucket.org/ned/coveragepy/issue/60
        self.make_file("thefile.py", "import sub.another\n")
        self.make_file("sub/__init__.py", "")
        self.make_file("sub/another.py", "print('another')\n")
        cov = coverage.Coverage()
        self.start_import_stop(cov, 'thefile')
        # delete a measured file before reporting: NoSource should name it
        os.remove("sub/another.py")
        missing_file = os.path.join(self.temp_dir, "sub", "another.py")
        missing_file = os.path.realpath(missing_file)
        msg = "(?i)No source for code: '%s'" % re.escape(missing_file)
        with self.assertRaisesRegex(NoSource, msg):
            cov.html_report()
    def test_extensionless_file_collides_with_extension(self):
        # It used to be that "program" and "program.py" would both be reported
        # to "program.html".  Now they are not.
        # https://bitbucket.org/ned/coveragepy/issue/69
        self.make_file("program", "import program\n")
        self.make_file("program.py", "a = 1\n")
        self.run_command("coverage run program")
        self.run_command("coverage html")
        self.assert_exists("htmlcov/index.html")
        self.assert_exists("htmlcov/program.html")
        self.assert_exists("htmlcov/program_py.html")
    def test_has_date_stamp_in_files(self):
        self.create_initial_files()
        self.run_coverage()
        with open("htmlcov/index.html") as f:
            self.assert_correct_timestamp(f.read())
        with open("htmlcov/main_file_py.html") as f:
            self.assert_correct_timestamp(f.read())
    def test_reporting_on_unmeasured_file(self):
        # It should be ok to ask for an HTML report on a file that wasn't even
        # measured at all.  https://bitbucket.org/ned/coveragepy/issues/403
        self.create_initial_files()
        self.make_file("other.py", "a = 1\n")
        self.run_coverage(htmlargs=dict(morfs=['other.py']))
        self.assert_exists("htmlcov/index.html")
        self.assert_exists("htmlcov/other_py.html")
    def test_shining_panda_fix(self):
        # The ShiningPanda plugin looks for "status.dat" to find HTML reports.
        # Accommodate them, but only if we are running under Jenkins.
        self.set_environ("JENKINS_URL", "Something or other")
        self.create_initial_files()
        self.run_coverage()
        self.assert_exists("htmlcov/status.dat")
    def test_report_skip_covered_no_branches(self):
        self.make_file("main_file.py", """
            import not_covered
            def normal():
                print("z")
            normal()
            """)
        self.make_file("not_covered.py", """
            def not_covered():
                print("n")
            """)
        # skip_covered: fully-covered files get no per-file page
        self.run_coverage(htmlargs=dict(skip_covered=True))
        self.assert_exists("htmlcov/index.html")
        self.assert_doesnt_exist("htmlcov/main_file_py.html")
        self.assert_exists("htmlcov/not_covered_py.html")
    def test_report_skip_covered_100(self):
        self.make_file("main_file.py", """
            def normal():
                print("z")
            normal()
            """)
        res = self.run_coverage(covargs=dict(source="."), htmlargs=dict(skip_covered=True))
        self.assertEqual(res, 100.0)
        self.assert_doesnt_exist("htmlcov/main_file_py.html")
    def test_report_skip_covered_branches(self):
        self.make_file("main_file.py", """
            import not_covered
            def normal():
                print("z")
            normal()
            """)
        self.make_file("not_covered.py", """
            def not_covered():
                print("n")
            """)
        self.run_coverage(covargs=dict(branch=True), htmlargs=dict(skip_covered=True))
        self.assert_exists("htmlcov/index.html")
        self.assert_doesnt_exist("htmlcov/main_file_py.html")
        self.assert_exists("htmlcov/not_covered_py.html")
class HtmlStaticFileTest(CoverageTest):
    """Tests of the static file copying for the HTML report."""
    def setUp(self):
        super(HtmlStaticFileTest, self).setUp()
        # restore the global STATIC_PATH after each test mutates it
        original_path = list(coverage.html.STATIC_PATH)
        self.addCleanup(setattr, coverage.html, 'STATIC_PATH', original_path)
    def test_copying_static_files_from_system(self):
        # Make a new place for static files.
        self.make_file("static_here/jquery.min.js", "Not Really JQuery!")
        coverage.html.STATIC_PATH.insert(0, "static_here")
        self.make_file("main.py", "print(17)")
        cov = coverage.Coverage()
        self.start_import_stop(cov, "main")
        cov.html_report()
        # the report should have copied our fake file, not a real one
        with open("htmlcov/jquery.min.js") as f:
            jquery = f.read()
        self.assertEqual(jquery, "Not Really JQuery!")
    def test_copying_static_files_from_system_in_dir(self):
        # Make a new place for static files.
        INSTALLED = [
            "jquery/jquery.min.js",
            "jquery-hotkeys/jquery.hotkeys.js",
            "jquery-isonscreen/jquery.isonscreen.js",
            "jquery-tablesorter/jquery.tablesorter.min.js",
        ]
        for fpath in INSTALLED:
            self.make_file(os.path.join("static_here", fpath), "Not real.")
        coverage.html.STATIC_PATH.insert(0, "static_here")
        self.make_file("main.py", "print(17)")
        cov = coverage.Coverage()
        self.start_import_stop(cov, "main")
        cov.html_report()
        # each installed file is flattened to its basename in htmlcov
        for fpath in INSTALLED:
            the_file = os.path.basename(fpath)
            with open(os.path.join("htmlcov", the_file)) as f:
                contents = f.read()
            self.assertEqual(contents, "Not real.")
    def test_cant_find_static_files(self):
        # Make the path point to useless places.
        coverage.html.STATIC_PATH = ["/xyzzy"]
        self.make_file("main.py", "print(17)")
        cov = coverage.Coverage()
        self.start_import_stop(cov, "main")
        msg = "Couldn't find static file u?'.*'"
        with self.assertRaisesRegex(CoverageException, msg):
            cov.html_report()
def filepath_to_regex(path):
    """Create a regex for scrubbing a file path."""
    # escape the path, then let a literal backslash match either separator
    pattern = re.escape(path).replace(r"\\", r"[\\/]")
    if env.WINDOWS:
        # Windows filesystems are case-insensitive
        pattern = "(?i)" + pattern
    return pattern
def compare_html(expected, actual):
    """Specialized compare function for our HTML files.

    Scrubs volatile content (versions, dates, absolute paths) from both
    sides before delegating to ``compare``.
    """
    scrubs = [
        (r'/coverage.readthedocs.io/?[-.\w/]*', '/coverage.readthedocs.io/VER'),
        (r'coverage.py v[\d.abc]+', 'coverage.py vVER'),
        (r'created at \d\d\d\d-\d\d-\d\d \d\d:\d\d', 'created at DATE'),
        # Some words are identifiers in one version, keywords in another.
        (r'<span class="(nam|key)">(print|True|False)</span>', r'<span class="nam">\2</span>'),
        # Occasionally an absolute path is in the HTML report.
        (filepath_to_regex(TESTS_DIR), 'TESTS_DIR'),
        (r'/Users/ned/coverage/trunk/tests', 'TESTS_DIR'),
        (filepath_to_regex(flat_rootname(unicode_class(TESTS_DIR))), '_TESTS_DIR'),
        (flat_rootname(u'/Users/ned/coverage/trunk/tests'), '_TESTS_DIR'),
        # The temp dir the tests make.
        (filepath_to_regex(os.getcwd()), 'TEST_TMPDIR'),
        (filepath_to_regex(flat_rootname(unicode_class(os.getcwd()))), '_TEST_TMPDIR'),
        (r'/private/var/folders/[\w/]{35}/coverage_test/tests_test_html_\w+_\d{8}', 'TEST_TMPDIR'),
        (r'_private_var_folders_\w{35}_coverage_test_tests_test_html_\w+_\d{8}', '_TEST_TMPDIR'),
    ]
    if env.WINDOWS:
        # For file paths, allow either slash direction.
        scrubs += [(r"\\", "/")]
    compare(expected, actual, file_pattern="*.html", scrubs=scrubs)
class HtmlGoldTests(CoverageTest):
"""Tests of HTML reporting that use gold files."""
def test_a(self):
self.make_file("a.py", """\
if 1 < 2:
# Needed a < to look at HTML entities.
a = 3
else:
a = 4
""")
cov = coverage.Coverage()
cov.start()
import a # pragma: nested # pylint: disable=import-error
cov.stop() # pragma: nested
cov.html_report(a, directory='out')
compare_html(gold_path("html/gold_a"), "out")
contains(
"out/a_py.html",
('<span class="key">if</span> <span class="num">1</span> '
'<span class="op"><</span> <span class="num">2</span>'),
(' <span class="nam">a</span> '
'<span class="op">=</span> <span class="num">3</span>'),
'<span class="pc_cov">67%</span>',
)
contains(
"out/index.html",
'<a href="a_py.html">a.py</a>',
'<span class="pc_cov">67%</span>',
'<td class="right" data-ratio="2 3">67%</td>',
)
def test_b_branch(self):
self.make_file("b.py", """\
def one(x):
# This will be a branch that misses the else.
if x < 2:
a = 3
else:
a = 4
one(1)
def two(x):
# A missed else that branches to "exit"
if x:
a = 5
two(1)
def three():
try:
# This if has two branches, *neither* one taken.
if name_error_this_variable_doesnt_exist:
a = 1
else:
a = 2
except:
pass
three()
""")
cov = coverage.Coverage(branch=True)
cov.start()
import b # pragma: nested # pylint: disable=import-error
cov.stop() # pragma: nested
cov.html_report(b, directory="out")
compare_html(gold_path("html/gold_b_branch"), "out")
contains(
"out/b_py.html",
('<span class="key">if</span> <span class="nam">x</span> '
'<span class="op"><</span> <span class="num">2</span>'),
(' <span class="nam">a</span> <span class="op">=</span> '
'<span class="num">3</span>'),
'<span class="pc_cov">70%</span>',
('<span class="annotate short">3 ↛ 6</span>'
'<span class="annotate long">line 3 didn\'t jump to line 6, '
'because the condition on line 3 was never false</span>'),
('<span class="annotate short">12 ↛ exit</span>'
'<span class="annotate long">line 12 didn\'t return from function \'two\', '
'because the condition on line 12 was never false</span>'),
('<span class="annotate short">20 ↛ 21, '
'20 ↛ 23</span>'
'<span class="annotate long">2 missed branches: '
'1) line 20 didn\'t jump to line 21, '
'because the condition on line 20 was never true, '
'2) line 20 didn\'t jump to line 23, '
'because the condition on line 20 was never false</span>'),
)
contains(
"out/index.html",
'<a href="b_py.html">b.py</a>',
'<span class="pc_cov">70%</span>',
'<td class="right" data-ratio="16 23">70%</td>',
)
def test_bom(self):
self.make_file("bom.py", bytes=b"""\
\xef\xbb\xbf# A Python source file in utf-8, with BOM.
math = "3\xc3\x974 = 12, \xc3\xb72 = 6\xc2\xb10"
import sys
if sys.version_info >= (3, 0):
assert len(math) == 18
assert len(math.encode('utf-8')) == 21
else:
assert len(math) == 21
assert len(math.decode('utf-8')) == 18
""".replace(b"\n", b"\r\n"))
# It's important that the source file really have a BOM, which can
# get lost, so check that it's really there, and that we have \r\n
# line endings.
with open("bom.py", "rb") as f:
data = f.read()
assert data[:3] == b"\xef\xbb\xbf"
assert data.count(b"\r\n") == 11
cov = coverage.Coverage()
cov.start()
import bom # pragma: nested # pylint: disable=import-error
cov.stop() # pragma: nested
cov.html_report(bom, directory="out")
compare_html(gold_path("html/gold_bom"), "out")
contains(
"out/bom_py.html",
'<span class="str">"3×4 = 12, ÷2 = 6±0"</span>',
)
def test_isolatin1(self):
self.make_file("isolatin1.py", bytes=b"""\
# -*- coding: iso8859-1 -*-
# A Python source file in another encoding.
math = "3\xd74 = 12, \xf72 = 6\xb10"
assert len(math) == 18
""")
cov = coverage.Coverage()
cov.start()
import isolatin1 # pragma: nested # pylint: disable=import-error
cov.stop() # pragma: nested
cov.html_report(isolatin1, directory="out")
compare_html(gold_path("html/gold_isolatin1"), "out")
contains(
"out/isolatin1_py.html",
'<span class="str">"3×4 = 12, ÷2 = 6±0"</span>',
)
def make_main_etc(self):
"""Make main.py and m1-m3.py for other tests."""
self.make_file("main.py", """\
import m1
import m2
import m3
a = 5
b = 6
assert m1.m1a == 1
assert m2.m2a == 1
assert m3.m3a == 1
""")
self.make_file("m1.py", """\
m1a = 1
m1b = 2
""")
self.make_file("m2.py", """\
m2a = 1
m2b = 2
""")
self.make_file("m3.py", """\
m3a = 1
m3b = 2
""")
def test_omit_1(self):
self.make_main_etc()
cov = coverage.Coverage(include=["./*"])
cov.start()
import main # pragma: nested # pylint: disable=unused-variable, import-error
cov.stop() # pragma: nested
cov.html_report(directory="out")
compare_html(gold_path("html/gold_omit_1"), "out")
def test_omit_2(self):
self.make_main_etc()
cov = coverage.Coverage(include=["./*"])
cov.start()
import main # pragma: nested # pylint: disable=unused-variable, import-error
cov.stop() # pragma: nested
cov.html_report(directory="out", omit=["m1.py"])
compare_html(gold_path("html/gold_omit_2"), "out")
def test_omit_3(self):
self.make_main_etc()
cov = coverage.Coverage(include=["./*"])
cov.start()
import main # pragma: nested # pylint: disable=unused-variable, import-error
cov.stop() # pragma: nested
cov.html_report(directory="out", omit=["m1.py", "m2.py"])
compare_html(gold_path("html/gold_omit_3"), "out")
def test_omit_4(self):
self.make_main_etc()
self.make_file("omit4.ini", """\
[report]
omit = m2.py
""")
cov = coverage.Coverage(config_file="omit4.ini", include=["./*"])
cov.start()
import main # pragma: nested # pylint: disable=unused-variable, import-error
cov.stop() # pragma: nested
cov.html_report(directory="out")
compare_html(gold_path("html/gold_omit_4"), "out")
def test_omit_5(self):
self.make_main_etc()
self.make_file("omit5.ini", """\
[report]
omit =
fooey
gooey, m[23]*, kablooey
helloworld
[html]
directory = out/omit_5
""")
cov = coverage.Coverage(config_file="omit5.ini", include=["./*"])
cov.start()
import main # pragma: nested # pylint: disable=unused-variable, import-error
cov.stop() # pragma: nested
cov.html_report()
compare_html(gold_path("html/gold_omit_5"), "out/omit_5")
def test_other(self):
self.make_file("src/here.py", """\
import other
if 1 < 2:
h = 3
else:
h = 4
""")
self.make_file("othersrc/other.py", """\
# A file in another directory. We're checking that it ends up in the
# HTML report.
print("This is the other src!")
""")
with change_dir("src"):
sys.path.insert(0, "") # pytest sometimes has this, sometimes not!?
sys.path.insert(0, "../othersrc")
cov = coverage.Coverage(include=["./*", "../othersrc/*"])
cov.start()
import here # pragma: nested # pylint: disable=unused-variable, import-error
cov.stop() # pragma: nested
cov.html_report(directory="../out")
# Different platforms will name the "other" file differently. Rename it
for p in glob.glob("out/*_other_py.html"):
os.rename(p, "out/blah_blah_other_py.html")
compare_html(gold_path("html/gold_other"), "out")
contains(
"out/index.html",
'<a href="here_py.html">here.py</a>',
'other_py.html">', 'other.py</a>',
)
def test_partial(self):
self.make_file("partial.py", """\
# partial branches and excluded lines
a = 6
while "no peephole".upper(): # t4
break
while a: # pragma: no branch
break
if 0:
never_happen()
if 1:
a = 21
if a == 23:
raise AssertionError("Can't")
""")
self.make_file("partial.ini", """\
[run]
branch = True
[report]
exclude_lines =
raise AssertionError
""")
cov = coverage.Coverage(config_file="partial.ini")
cov.start()
import partial # pragma: nested # pylint: disable=import-error
cov.stop() # pragma: nested
cov.html_report(partial, directory="out")
compare_html(gold_path("html/gold_partial"), "out")
contains(
"out/partial_py.html",
'<p id="t4" class="stm par run hide_run">',
'<p id="t7" class="stm run hide_run">',
# The "if 0" and "if 1" statements are optimized away.
'<p id="t10" class="pln">',
# The "raise AssertionError" is excluded by regex in the .ini.
'<p id="t17" class="exc">',
)
contains(
"out/index.html",
'<a href="partial_py.html">partial.py</a>',
)
contains(
"out/index.html",
'<span class="pc_cov">91%</span>'
)
def test_styled(self):
self.make_file("a.py", """\
if 1 < 2:
# Needed a < to look at HTML entities.
a = 3
else:
a = 4
""")
self.make_file("extra.css", "/* Doesn't matter what goes in here, it gets copied. */")
cov = coverage.Coverage()
cov.start()
import a # pragma: nested # pylint: disable=import-error
cov.stop() # pragma: nested
cov.html_report(a, directory="out", extra_css="extra.css")
compare_html(gold_path("html/gold_styled"), "out")
compare(gold_path("html/gold_styled"), "out", size_within=10, file_pattern="*.css")
contains(
"out/a_py.html",
'<link rel="stylesheet" href="extra.css" type="text/css">',
('<span class="key">if</span> <span class="num">1</span> '
'<span class="op"><</span> <span class="num">2</span>'),
(' <span class="nam">a</span> <span class="op">=</span> '
'<span class="num">3</span>'),
'<span class="pc_cov">67%</span>'
)
contains(
"out/index.html",
'<link rel="stylesheet" href="extra.css" type="text/css">',
'<a href="a_py.html">a.py</a>',
'<span class="pc_cov">67%</span>'
)
def test_tabbed(self):
# The file contents would look like this with 8-space tabs:
# x = 1
# if x:
# a = "tabbed" # aligned comments
# if x: # look nice
# b = "no spaces" # when they
# c = "done" # line up.
self.make_file("tabbed.py", """\
x = 1
if x:
\ta = "Tabbed"\t\t\t\t# Aligned comments
\tif x:\t\t\t\t\t# look nice
\t\tb = "No spaces"\t\t\t# when they
\tc = "Done"\t\t\t\t# line up.
""")
cov = coverage.Coverage()
cov.start()
import tabbed # pragma: nested # pylint: disable=import-error
cov.stop() # pragma: nested
cov.html_report(tabbed, directory="out")
# Editors like to change things, make sure our source file still has tabs.
contains("tabbed.py", "\tif x:\t\t\t\t\t# look nice")
contains(
"out/tabbed_py.html",
'> <span class="key">if</span> '
'<span class="nam">x</span><span class="op">:</span>'
' '
'<span class="com"># look nice</span>'
)
doesnt_contain("out/tabbed_py.html", "\t")
def test_unicode(self):
self.make_file("unicode.py", """\
# -*- coding: utf-8 -*-
# A Python source file with exotic characters.
upside_down = "ʎd˙ǝbɐɹǝʌoɔ"
surrogate = "db40,dd00: x󠄀"
""")
# pylint: disable=import-error, redefined-builtin
cov = coverage.Coverage()
cov.start()
import unicode # pragma: nested
cov.stop() # pragma: nested
cov.html_report(unicode, directory="out")
compare_html(gold_path("html/gold_unicode"), "out")
contains(
"out/unicode_py.html",
'<span class="str">"ʎd˙ǝbɐɹǝʌoɔ"</span>',
)
contains_any(
"out/unicode_py.html",
'<span class="str">"db40,dd00: x��"</span>',
'<span class="str">"db40,dd00: x󠄀"</span>',
)
|
[
"os.remove",
"coverage.files.flat_rootname",
"tests.goldtest.contains_any",
"glob.glob",
"os.path.join",
"tests.goldtest.compare",
"re.escape",
"tests.goldtest.gold_path",
"re.search",
"re.sub",
"json.dump",
"tests.goldtest.doesnt_contain",
"os.path.basename",
"os.path.realpath",
"os.rename",
"tests.goldtest.contains",
"coverage.backward.unicode_class",
"json.load",
"coverage.html.STATIC_PATH.insert",
"os.getcwd",
"tests.goldtest.change_dir",
"sys.path.insert",
"coverage.Coverage"
] |
[((22170, 22185), 're.escape', 're.escape', (['path'], {}), '(path)\n', (22179, 22185), False, 'import re\n'), ((23654, 23717), 'tests.goldtest.compare', 'compare', (['expected', 'actual'], {'file_pattern': '"""*.html"""', 'scrubs': 'scrubs'}), "(expected, actual, file_pattern='*.html', scrubs=scrubs)\n", (23661, 23717), False, 'from tests.goldtest import change_dir, compare, contains, doesnt_contain, contains_any\n'), ((1501, 1535), 'coverage.Coverage', 'coverage.Coverage', ([], {}), '(**covargs or {})\n', (1518, 1535), False, 'import coverage\n'), ((1752, 1783), 'os.remove', 'os.remove', (['"""htmlcov/index.html"""'], {}), "('htmlcov/index.html')\n", (1761, 1783), False, 'import os\n'), ((1792, 1830), 'os.remove', 'os.remove', (['"""htmlcov/main_file_py.html"""'], {}), "('htmlcov/main_file_py.html')\n", (1801, 1830), False, 'import os\n'), ((1839, 1875), 'os.remove', 'os.remove', (['"""htmlcov/helper1_py.html"""'], {}), "('htmlcov/helper1_py.html')\n", (1848, 1875), False, 'import os\n'), ((1884, 1920), 'os.remove', 'os.remove', (['"""htmlcov/helper2_py.html"""'], {}), "('htmlcov/helper2_py.html')\n", (1893, 1920), False, 'import os\n'), ((2126, 2159), 'os.path.join', 'os.path.join', (['"""htmlcov"""', 'filename'], {}), "('htmlcov', filename)\n", (2138, 2159), False, 'import os\n'), ((2491, 2588), 're.sub', 're.sub', (['"""created at \\\\d{4}-\\\\d{2}-\\\\d{2} \\\\d{2}:\\\\d{2}"""', '"""created at YYYY-MM-DD HH:MM"""', 'index'], {}), "('created at \\\\d{4}-\\\\d{2}-\\\\d{2} \\\\d{2}:\\\\d{2}',\n 'created at YYYY-MM-DD HH:MM', index)\n", (2497, 2588), False, 'import re\n'), ((2861, 2891), 're.search', 're.search', (['timestamp_pat', 'html'], {}), '(timestamp_pat, html)\n', (2870, 2891), False, 'import re\n'), ((11268, 11287), 'coverage.Coverage', 'coverage.Coverage', ([], {}), '()\n', (11285, 11287), False, 'import coverage\n'), ((11726, 11745), 'coverage.Coverage', 'coverage.Coverage', ([], {}), '()\n', (11743, 11745), False, 'import coverage\n'), ((13580, 
13599), 'coverage.Coverage', 'coverage.Coverage', ([], {}), '()\n', (13597, 13599), False, 'import coverage\n'), ((14200, 14219), 'coverage.Coverage', 'coverage.Coverage', ([], {}), '()\n', (14217, 14219), False, 'import coverage\n'), ((14897, 14916), 'coverage.Coverage', 'coverage.Coverage', ([], {}), '()\n', (14914, 14916), False, 'import coverage\n'), ((15756, 15775), 'coverage.Coverage', 'coverage.Coverage', ([], {}), '()\n', (15773, 15775), False, 'import coverage\n'), ((16343, 16362), 'coverage.Coverage', 'coverage.Coverage', ([], {}), '()\n', (16360, 16362), False, 'import coverage\n'), ((16418, 16445), 'os.remove', 'os.remove', (['"""sub/another.py"""'], {}), "('sub/another.py')\n", (16427, 16445), False, 'import os\n'), ((16470, 16518), 'os.path.join', 'os.path.join', (['self.temp_dir', '"""sub"""', '"""another.py"""'], {}), "(self.temp_dir, 'sub', 'another.py')\n", (16482, 16518), False, 'import os\n'), ((16542, 16572), 'os.path.realpath', 'os.path.realpath', (['missing_file'], {}), '(missing_file)\n', (16558, 16572), False, 'import os\n'), ((20446, 20496), 'coverage.html.STATIC_PATH.insert', 'coverage.html.STATIC_PATH.insert', (['(0)', '"""static_here"""'], {}), "(0, 'static_here')\n", (20478, 20496), False, 'import coverage\n'), ((20559, 20578), 'coverage.Coverage', 'coverage.Coverage', ([], {}), '()\n', (20576, 20578), False, 'import coverage\n'), ((21236, 21286), 'coverage.html.STATIC_PATH.insert', 'coverage.html.STATIC_PATH.insert', (['(0)', '"""static_here"""'], {}), "(0, 'static_here')\n", (21268, 21286), False, 'import coverage\n'), ((21349, 21368), 'coverage.Coverage', 'coverage.Coverage', ([], {}), '()\n', (21366, 21368), False, 'import coverage\n'), ((21872, 21891), 'coverage.Coverage', 'coverage.Coverage', ([], {}), '()\n', (21889, 21891), False, 'import coverage\n'), ((24040, 24059), 'coverage.Coverage', 'coverage.Coverage', ([], {}), '()\n', (24057, 24059), False, 'import coverage\n'), ((24308, 24587), 'tests.goldtest.contains', 'contains', 
(['"""out/a_py.html"""', '"""<span class="key">if</span> <span class="num">1</span> <span class="op"><</span> <span class="num">2</span>"""', '""" <span class="nam">a</span> <span class="op">=</span> <span class="num">3</span>"""', '"""<span class="pc_cov">67%</span>"""'], {}), '(\'out/a_py.html\',\n \'<span class="key">if</span> <span class="num">1</span> <span class="op"><</span> <span class="num">2</span>\'\n ,\n \' <span class="nam">a</span> <span class="op">=</span> <span class="num">3</span>\'\n , \'<span class="pc_cov">67%</span>\')\n', (24316, 24587), False, 'from tests.goldtest import change_dir, compare, contains, doesnt_contain, contains_any\n'), ((24673, 24821), 'tests.goldtest.contains', 'contains', (['"""out/index.html"""', '"""<a href="a_py.html">a.py</a>"""', '"""<span class="pc_cov">67%</span>"""', '"""<td class="right" data-ratio="2 3">67%</td>"""'], {}), '(\'out/index.html\', \'<a href="a_py.html">a.py</a>\',\n \'<span class="pc_cov">67%</span>\',\n \'<td class="right" data-ratio="2 3">67%</td>\')\n', (24681, 24821), False, 'from tests.goldtest import change_dir, compare, contains, doesnt_contain, contains_any\n'), ((25660, 25690), 'coverage.Coverage', 'coverage.Coverage', ([], {'branch': '(True)'}), '(branch=True)\n', (25677, 25690), False, 'import coverage\n'), ((25946, 26963), 'tests.goldtest.contains', 'contains', (['"""out/b_py.html"""', '"""<span class="key">if</span> <span class="nam">x</span> <span class="op"><</span> <span class="num">2</span>"""', '""" <span class="nam">a</span> <span class="op">=</span> <span class="num">3</span>"""', '"""<span class="pc_cov">70%</span>"""', '"""<span class="annotate short">3 ↛ 6</span><span class="annotate long">line 3 didn\'t jump to line 6, because the condition on line 3 was never false</span>"""', '"""<span class="annotate short">12 ↛ exit</span><span class="annotate long">line 12 didn\'t return from function \'two\', because the condition on line 12 was never false</span>"""', '"""<span 
class="annotate short">20 ↛ 21, 20 ↛ 23</span><span class="annotate long">2 missed branches: 1) line 20 didn\'t jump to line 21, because the condition on line 20 was never true, 2) line 20 didn\'t jump to line 23, because the condition on line 20 was never false</span>"""'], {}), '(\'out/b_py.html\',\n \'<span class="key">if</span> <span class="nam">x</span> <span class="op"><</span> <span class="num">2</span>\'\n ,\n \' <span class="nam">a</span> <span class="op">=</span> <span class="num">3</span>\'\n , \'<span class="pc_cov">70%</span>\',\n \'<span class="annotate short">3 ↛ 6</span><span class="annotate long">line 3 didn\\\'t jump to line 6, because the condition on line 3 was never false</span>\'\n ,\n \'<span class="annotate short">12 ↛ exit</span><span class="annotate long">line 12 didn\\\'t return from function \\\'two\\\', because the condition on line 12 was never false</span>\'\n ,\n \'<span class="annotate short">20 ↛ 21, 20 ↛ 23</span><span class="annotate long">2 missed branches: 1) line 20 didn\\\'t jump to line 21, because the condition on line 20 was never true, 2) line 20 didn\\\'t jump to line 23, because the condition on line 20 was never false</span>\'\n )\n', (25954, 26963), False, 'from tests.goldtest import change_dir, compare, contains, doesnt_contain, contains_any\n'), ((27338, 27488), 'tests.goldtest.contains', 'contains', (['"""out/index.html"""', '"""<a href="b_py.html">b.py</a>"""', '"""<span class="pc_cov">70%</span>"""', '"""<td class="right" data-ratio="16 23">70%</td>"""'], {}), '(\'out/index.html\', \'<a href="b_py.html">b.py</a>\',\n \'<span class="pc_cov">70%</span>\',\n \'<td class="right" data-ratio="16 23">70%</td>\')\n', (27346, 27488), False, 'from tests.goldtest import change_dir, compare, contains, doesnt_contain, contains_any\n'), ((28283, 28302), 'coverage.Coverage', 'coverage.Coverage', ([], {}), '()\n', (28300, 28302), False, 'import coverage\n'), ((28555, 28650), 'tests.goldtest.contains', 'contains', 
(['"""out/bom_py.html"""', '"""<span class="str">"3×4 = 12, ÷2 = 6±0"</span>"""'], {}), '(\'out/bom_py.html\',\n \'<span class="str">"3×4 = 12, ÷2 = 6±0"</span>\')\n', (28563, 28650), False, 'from tests.goldtest import change_dir, compare, contains, doesnt_contain, contains_any\n'), ((28917, 28936), 'coverage.Coverage', 'coverage.Coverage', ([], {}), '()\n', (28934, 28936), False, 'import coverage\n'), ((29217, 29318), 'tests.goldtest.contains', 'contains', (['"""out/isolatin1_py.html"""', '"""<span class="str">"3×4 = 12, ÷2 = 6±0"</span>"""'], {}), '(\'out/isolatin1_py.html\',\n \'<span class="str">"3×4 = 12, ÷2 = 6±0"</span>\')\n', (29225, 29318), False, 'from tests.goldtest import change_dir, compare, contains, doesnt_contain, contains_any\n'), ((30044, 30078), 'coverage.Coverage', 'coverage.Coverage', ([], {'include': "['./*']"}), "(include=['./*'])\n", (30061, 30078), False, 'import coverage\n'), ((30410, 30444), 'coverage.Coverage', 'coverage.Coverage', ([], {'include': "['./*']"}), "(include=['./*'])\n", (30427, 30444), False, 'import coverage\n'), ((30792, 30826), 'coverage.Coverage', 'coverage.Coverage', ([], {'include': "['./*']"}), "(include=['./*'])\n", (30809, 30826), False, 'import coverage\n'), ((31287, 31346), 'coverage.Coverage', 'coverage.Coverage', ([], {'config_file': '"""omit4.ini"""', 'include': "['./*']"}), "(config_file='omit4.ini', include=['./*'])\n", (31304, 31346), False, 'import coverage\n'), ((31920, 31979), 'coverage.Coverage', 'coverage.Coverage', ([], {'config_file': '"""omit5.ini"""', 'include': "['./*']"}), "(config_file='omit5.ini', include=['./*'])\n", (31937, 31979), False, 'import coverage\n'), ((33202, 33234), 'glob.glob', 'glob.glob', (['"""out/*_other_py.html"""'], {}), "('out/*_other_py.html')\n", (33211, 33234), False, 'import glob\n'), ((33359, 33462), 'tests.goldtest.contains', 'contains', (['"""out/index.html"""', '"""<a href="here_py.html">here.py</a>"""', '"""other_py.html">"""', '"""other.py</a>"""'], {}), 
'(\'out/index.html\', \'<a href="here_py.html">here.py</a>\',\n \'other_py.html">\', \'other.py</a>\')\n', (33367, 33462), False, 'from tests.goldtest import change_dir, compare, contains, doesnt_contain, contains_any\n'), ((34178, 34222), 'coverage.Coverage', 'coverage.Coverage', ([], {'config_file': '"""partial.ini"""'}), "(config_file='partial.ini')\n", (34195, 34222), False, 'import coverage\n'), ((34491, 34670), 'tests.goldtest.contains', 'contains', (['"""out/partial_py.html"""', '"""<p id="t4" class="stm par run hide_run">"""', '"""<p id="t7" class="stm run hide_run">"""', '"""<p id="t10" class="pln">"""', '"""<p id="t17" class="exc">"""'], {}), '(\'out/partial_py.html\', \'<p id="t4" class="stm par run hide_run">\',\n \'<p id="t7" class="stm run hide_run">\', \'<p id="t10" class="pln">\',\n \'<p id="t17" class="exc">\')\n', (34499, 34670), False, 'from tests.goldtest import change_dir, compare, contains, doesnt_contain, contains_any\n'), ((34884, 34954), 'tests.goldtest.contains', 'contains', (['"""out/index.html"""', '"""<a href="partial_py.html">partial.py</a>"""'], {}), '(\'out/index.html\', \'<a href="partial_py.html">partial.py</a>\')\n', (34892, 34954), False, 'from tests.goldtest import change_dir, compare, contains, doesnt_contain, contains_any\n'), ((34998, 35059), 'tests.goldtest.contains', 'contains', (['"""out/index.html"""', '"""<span class="pc_cov">91%</span>"""'], {}), '(\'out/index.html\', \'<span class="pc_cov">91%</span>\')\n', (35006, 35059), False, 'from tests.goldtest import change_dir, compare, contains, doesnt_contain, contains_any\n'), ((35425, 35444), 'coverage.Coverage', 'coverage.Coverage', ([], {}), '()\n', (35442, 35444), False, 'import coverage\n'), ((35813, 36156), 'tests.goldtest.contains', 'contains', (['"""out/a_py.html"""', '"""<link rel="stylesheet" href="extra.css" type="text/css">"""', '"""<span class="key">if</span> <span class="num">1</span> <span class="op"><</span> <span class="num">2</span>"""', '""" <span 
class="nam">a</span> <span class="op">=</span> <span class="num">3</span>"""', '"""<span class="pc_cov">67%</span>"""'], {}), '(\'out/a_py.html\',\n \'<link rel="stylesheet" href="extra.css" type="text/css">\',\n \'<span class="key">if</span> <span class="num">1</span> <span class="op"><</span> <span class="num">2</span>\'\n ,\n \' <span class="nam">a</span> <span class="op">=</span> <span class="num">3</span>\'\n , \'<span class="pc_cov">67%</span>\')\n', (35821, 36156), False, 'from tests.goldtest import change_dir, compare, contains, doesnt_contain, contains_any\n'), ((36249, 36410), 'tests.goldtest.contains', 'contains', (['"""out/index.html"""', '"""<link rel="stylesheet" href="extra.css" type="text/css">"""', '"""<a href="a_py.html">a.py</a>"""', '"""<span class="pc_cov">67%</span>"""'], {}), '(\'out/index.html\',\n \'<link rel="stylesheet" href="extra.css" type="text/css">\',\n \'<a href="a_py.html">a.py</a>\', \'<span class="pc_cov">67%</span>\')\n', (36257, 36410), False, 'from tests.goldtest import change_dir, compare, contains, doesnt_contain, contains_any\n'), ((37182, 37201), 'coverage.Coverage', 'coverage.Coverage', ([], {}), '()\n', (37199, 37201), False, 'import coverage\n'), ((37492, 37545), 'tests.goldtest.contains', 'contains', (['"""tabbed.py"""', '"""\tif x:\t\t\t\t\t# look nice"""'], {}), "('tabbed.py', '\\tif x:\\t\\t\\t\\t\\t# look nice')\n", (37500, 37545), False, 'from tests.goldtest import change_dir, compare, contains, doesnt_contain, contains_any\n'), ((37555, 37757), 'tests.goldtest.contains', 'contains', (['"""out/tabbed_py.html"""', '"""> <span class="key">if</span> <span class="nam">x</span><span class="op">:</span> <span class="com"># look nice</span>"""'], {}), '(\'out/tabbed_py.html\',\n \'> <span class="key">if</span> <span class="nam">x</span><span class="op">:</span> <span class="com"># look nice</span>\'\n )\n', (37563, 37757), False, 'from tests.goldtest import change_dir, compare, contains, doesnt_contain, contains_any\n'), 
((37837, 37879), 'tests.goldtest.doesnt_contain', 'doesnt_contain', (['"""out/tabbed_py.html"""', '"""\t"""'], {}), "('out/tabbed_py.html', '\\t')\n", (37851, 37879), False, 'from tests.goldtest import change_dir, compare, contains, doesnt_contain, contains_any\n'), ((38217, 38236), 'coverage.Coverage', 'coverage.Coverage', ([], {}), '()\n', (38234, 38236), False, 'import coverage\n'), ((38474, 38596), 'tests.goldtest.contains', 'contains', (['"""out/unicode_py.html"""', '"""<span class="str">"ʎd˙ǝbɐɹǝʌoɔ"</span>"""'], {}), '(\'out/unicode_py.html\',\n \'<span class="str">"ʎd˙ǝbɐɹǝʌoɔ"</span>\'\n )\n', (38482, 38596), False, 'from tests.goldtest import change_dir, compare, contains, doesnt_contain, contains_any\n'), ((38632, 38786), 'tests.goldtest.contains_any', 'contains_any', (['"""out/unicode_py.html"""', '"""<span class="str">"db40,dd00: x��"</span>"""', '"""<span class="str">"db40,dd00: x󠄀"</span>"""'], {}), '(\'out/unicode_py.html\',\n \'<span class="str">"db40,dd00: x��"</span>\',\n \'<span class="str">"db40,dd00: x󠄀"</span>\')\n', (38644, 38786), False, 'from tests.goldtest import change_dir, compare, contains, doesnt_contain, contains_any\n'), ((8691, 8713), 'json.load', 'json.load', (['status_json'], {}), '(status_json)\n', (8700, 8713), False, 'import json\n'), ((8874, 8909), 'json.dump', 'json.dump', (['status_data', 'status_json'], {}), '(status_data, status_json)\n', (8883, 8909), False, 'import json\n'), ((16620, 16643), 're.escape', 're.escape', (['missing_file'], {}), '(missing_file)\n', (16629, 16643), False, 'import re\n'), ((21495, 21518), 'os.path.basename', 'os.path.basename', (['fpath'], {}), '(fpath)\n', (21511, 21518), False, 'import os\n'), ((23115, 23164), 'coverage.files.flat_rootname', 'flat_rootname', (['u"""/Users/ned/coverage/trunk/tests"""'], {}), "(u'/Users/ned/coverage/trunk/tests')\n", (23128, 23164), False, 'from coverage.files import flat_rootname\n'), ((24267, 24291), 'tests.goldtest.gold_path', 'gold_path', 
(['"""html/gold_a"""'], {}), "('html/gold_a')\n", (24276, 24291), False, 'from tests.goldtest import gold_path\n'), ((25898, 25929), 'tests.goldtest.gold_path', 'gold_path', (['"""html/gold_b_branch"""'], {}), "('html/gold_b_branch')\n", (25907, 25929), False, 'from tests.goldtest import gold_path\n'), ((28512, 28538), 'tests.goldtest.gold_path', 'gold_path', (['"""html/gold_bom"""'], {}), "('html/gold_bom')\n", (28521, 28538), False, 'from tests.goldtest import gold_path\n'), ((29168, 29200), 'tests.goldtest.gold_path', 'gold_path', (['"""html/gold_isolatin1"""'], {}), "('html/gold_isolatin1')\n", (29177, 29200), False, 'from tests.goldtest import gold_path\n'), ((30300, 30329), 'tests.goldtest.gold_path', 'gold_path', (['"""html/gold_omit_1"""'], {}), "('html/gold_omit_1')\n", (30309, 30329), False, 'from tests.goldtest import gold_path\n'), ((30682, 30711), 'tests.goldtest.gold_path', 'gold_path', (['"""html/gold_omit_2"""'], {}), "('html/gold_omit_2')\n", (30691, 30711), False, 'from tests.goldtest import gold_path\n'), ((31073, 31102), 'tests.goldtest.gold_path', 'gold_path', (['"""html/gold_omit_3"""'], {}), "('html/gold_omit_3')\n", (31082, 31102), False, 'from tests.goldtest import gold_path\n'), ((31568, 31597), 'tests.goldtest.gold_path', 'gold_path', (['"""html/gold_omit_4"""'], {}), "('html/gold_omit_4')\n", (31577, 31597), False, 'from tests.goldtest import gold_path\n'), ((32186, 32215), 'tests.goldtest.gold_path', 'gold_path', (['"""html/gold_omit_5"""'], {}), "('html/gold_omit_5')\n", (32195, 32215), False, 'from tests.goldtest import gold_path\n'), ((32662, 32679), 'tests.goldtest.change_dir', 'change_dir', (['"""src"""'], {}), "('src')\n", (32672, 32679), False, 'from tests.goldtest import change_dir, compare, contains, doesnt_contain, contains_any\n'), ((32693, 32715), 'sys.path.insert', 'sys.path.insert', (['(0)', '""""""'], {}), "(0, '')\n", (32708, 32715), False, 'import sys\n'), ((32782, 32815), 'sys.path.insert', 'sys.path.insert', (['(0)', 
'"""../othersrc"""'], {}), "(0, '../othersrc')\n", (32797, 32815), False, 'import sys\n'), ((32834, 32885), 'coverage.Coverage', 'coverage.Coverage', ([], {'include': "['./*', '../othersrc/*']"}), "(include=['./*', '../othersrc/*'])\n", (32851, 32885), False, 'import coverage\n'), ((33248, 33291), 'os.rename', 'os.rename', (['p', '"""out/blah_blah_other_py.html"""'], {}), "(p, 'out/blah_blah_other_py.html')\n", (33257, 33291), False, 'import os\n'), ((33314, 33342), 'tests.goldtest.gold_path', 'gold_path', (['"""html/gold_other"""'], {}), "('html/gold_other')\n", (33323, 33342), False, 'from tests.goldtest import gold_path\n'), ((34444, 34474), 'tests.goldtest.gold_path', 'gold_path', (['"""html/gold_partial"""'], {}), "('html/gold_partial')\n", (34453, 34474), False, 'from tests.goldtest import gold_path\n'), ((35675, 35704), 'tests.goldtest.gold_path', 'gold_path', (['"""html/gold_styled"""'], {}), "('html/gold_styled')\n", (35684, 35704), False, 'from tests.goldtest import gold_path\n'), ((35729, 35758), 'tests.goldtest.gold_path', 'gold_path', (['"""html/gold_styled"""'], {}), "('html/gold_styled')\n", (35738, 35758), False, 'from tests.goldtest import gold_path\n'), ((38427, 38457), 'tests.goldtest.gold_path', 'gold_path', (['"""html/gold_unicode"""'], {}), "('html/gold_unicode')\n", (38436, 38457), False, 'from tests.goldtest import gold_path\n'), ((21179, 21213), 'os.path.join', 'os.path.join', (['"""static_here"""', 'fpath'], {}), "('static_here', fpath)\n", (21191, 21213), False, 'import os\n'), ((23247, 23258), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (23256, 23258), False, 'import os\n'), ((21541, 21574), 'os.path.join', 'os.path.join', (['"""htmlcov"""', 'the_file'], {}), "('htmlcov', the_file)\n", (21553, 21574), False, 'import os\n'), ((23063, 23087), 'coverage.backward.unicode_class', 'unicode_class', (['TESTS_DIR'], {}), '(TESTS_DIR)\n', (23076, 23087), False, 'from coverage.backward import unicode_class\n'), ((23332, 23343), 'os.getcwd', 
'os.getcwd', ([], {}), '()\n', (23341, 23343), False, 'import os\n')]
|
"""Functions to get OSC types from datagrams and vice versa"""
import struct
from spiegelib.network.osc import ntp
from datetime import datetime, timedelta, date
from typing import Union, Tuple
class ParseError(Exception):
    """Raised when an incoming OSC datagram cannot be decoded."""
class BuildError(Exception):
    """Raised when an outgoing OSC datagram cannot be built."""
# Constant for special ntp datagram sequences that represent an immediate time.
IMMEDIATELY = 0
# Datagram length in bytes for types that have a fixed size.
_INT_DGRAM_LEN = 4  # 32-bit big-endian two's-complement integer ('i')
_UINT64_DGRAM_LEN = 8  # 64-bit big-endian unsigned integer
_FLOAT_DGRAM_LEN = 4  # 32-bit big-endian IEEE 754 float ('f')
_DOUBLE_DGRAM_LEN = 8  # 64-bit big-endian IEEE 754 double ('d')
_TIMETAG_DGRAM_LEN = 8  # 64-bit NTP-style OSC time tag ('t')
# Strings and blob dgram length is always a multiple of 4 bytes.
_STRING_DGRAM_PAD = 4
_BLOB_DGRAM_PAD = 4
# The shortest possible OSC string datagram: an empty string is encoded
# as four null bytes (terminator plus padding to a 4-byte boundary).
_EMPTY_STR_DGRAM = b'\x00\x00\x00\x00'
def write_string(val: str) -> bytes:
    """Returns the OSC string equivalent of the given python string.

    An OSC string is the UTF-8 encoding of the value, null terminated and
    padded with null bytes so the total length is a multiple of 4.

    Args:
      val: The python string to encode.

    Returns:
      The OSC datagram bytes for the string.

    Raises:
      BuildError: if the string could not be encoded.
    """
    try:
        dgram = val.encode('utf-8')  # Default, but better be explicit.
    except (UnicodeEncodeError, AttributeError) as e:
        # Chain the original error so the root cause stays visible (PEP 3134).
        raise BuildError('Incorrect string, could not encode {}'.format(e)) from e
    # Pad with 1-4 null bytes: always at least one terminator, and enough
    # extra to reach the next multiple of _STRING_DGRAM_PAD bytes.
    diff = _STRING_DGRAM_PAD - (len(dgram) % _STRING_DGRAM_PAD)
    dgram += (b'\x00' * diff)
    return dgram
def get_string(dgram: bytes, start_index: int) -> Tuple[str, int]:
    """Get a python string from the datagram, starting at pos start_index.

    According to the specifications, a string is:
    "A sequence of non-null ASCII characters followed by a null,
    followed by 0-3 additional null characters to make the total number
    of bits a multiple of 32".

    Args:
      dgram: A datagram packet.
      start_index: An index where the string starts in the datagram.

    Returns:
      A tuple containing the string and the new end index.

    Raises:
      ParseError: if the datagram could not be parsed.
    """
    if start_index < 0:
        raise ParseError('start_index < 0')
    offset = 0
    try:
        # Fast path for an empty OSC string (four null bytes).  Note: the
        # previous code compared a single indexed byte (an int in Python 3)
        # against the 4-byte _EMPTY_STR_DGRAM, which was always False; the
        # comparison must be done on the 4-byte slice.
        if (len(dgram) > start_index + _STRING_DGRAM_PAD
                and dgram[start_index:start_index + _STRING_DGRAM_PAD] == _EMPTY_STR_DGRAM):
            return '', start_index + _STRING_DGRAM_PAD
        while dgram[start_index + offset] != 0:
            offset += 1
        # Align to a byte word: the terminating null is part of the padding,
        # so advance to the next multiple of _STRING_DGRAM_PAD.
        if (offset) % _STRING_DGRAM_PAD == 0:
            offset += _STRING_DGRAM_PAD
        else:
            offset += (-offset % _STRING_DGRAM_PAD)
        # Python slices do not raise an IndexError past the last index,
        # do it ourselves.
        if offset > len(dgram[start_index:]):
            raise ParseError('Datagram is too short')
        data_str = dgram[start_index:start_index + offset]
        return data_str.replace(b'\x00', b'').decode('utf-8'), start_index + offset
    except IndexError as ie:
        raise ParseError('Could not parse datagram %s' % ie) from ie
    except TypeError as te:
        raise ParseError('Could not parse datagram %s' % te) from te
def write_int(val: int) -> bytes:
    """Returns the datagram for the given integer parameter value.

    Args:
      val: The integer to encode; must fit in a signed 32-bit value.

    Returns:
      Four bytes: the value as a big-endian two's-complement integer.

    Raises:
      BuildError: if the int could not be converted.
    """
    try:
        return struct.pack('>i', val)
    except struct.error as e:
        # Chain the struct error so the root cause stays visible (PEP 3134).
        raise BuildError('Wrong argument value passed: {}'.format(e)) from e
def get_int(dgram: bytes, start_index: int) -> Tuple[int, int]:
    """Get a 32-bit big-endian two's complement integer from the datagram.

    Args:
      dgram: A datagram packet.
      start_index: An index where the integer starts in the datagram.

    Returns:
      A tuple containing the integer and the new end index.

    Raises:
      ParseError: if the datagram could not be parsed.
    """
    try:
        # Slicing never raises past the end, so length-check explicitly.
        if len(dgram[start_index:]) < _INT_DGRAM_LEN:
            raise ParseError('Datagram is too short')
        return (
            struct.unpack('>i',
                          dgram[start_index:start_index + _INT_DGRAM_LEN])[0],
            start_index + _INT_DGRAM_LEN)
    except (struct.error, TypeError) as e:
        # Chain the low-level error so the root cause stays visible (PEP 3134).
        raise ParseError('Could not parse datagram %s' % e) from e
def get_uint64(dgram: bytes, start_index: int) -> Tuple[int, int]:
    """Read a 64-bit big-endian unsigned integer from *dgram*.

    Args:
      dgram: A datagram packet.
      start_index: An index where the integer starts in the datagram.

    Returns:
      A tuple containing the integer and the new end index.

    Raises:
      ParseError if the datagram could not be parsed.
    """
    try:
        if len(dgram[start_index:]) < _UINT64_DGRAM_LEN:
            raise ParseError('Datagram is too short')
        end_index = start_index + _UINT64_DGRAM_LEN
        value = struct.unpack('>Q', dgram[start_index:end_index])[0]
        return value, end_index
    except (struct.error, TypeError) as e:
        raise ParseError('Could not parse datagram %s' % e)
def get_timetag(dgram: bytes, start_index: int) -> Tuple[datetime, int]:
    """Read a 64-bit OSC time tag from *dgram*.

    Args:
      dgram: A datagram packet.
      start_index: An index where the osc time tag starts in the datagram.

    Returns:
      A tuple of ((utc datetime, fraction of the current second), new end index).

    Raises:
      ParseError if the datagram could not be parsed.
    """
    try:
        if len(dgram[start_index:]) < _TIMETAG_DGRAM_LEN:
            raise ParseError('Datagram is too short')
        timetag, _ = get_uint64(dgram, start_index)
        seconds, fraction = ntp.parse_timestamp(timetag)
        # Split the NTP seconds into an h/m/s offset from the NTP epoch.
        hours, seconds = divmod(seconds, 3600)
        minutes, seconds = divmod(seconds, 60)
        utc = (datetime.combine(ntp._NTP_EPOCH, datetime.min.time()) +
               timedelta(hours=hours, minutes=minutes, seconds=seconds))
        return (utc, fraction), start_index + _TIMETAG_DGRAM_LEN
    except (struct.error, TypeError) as e:
        raise ParseError('Could not parse datagram %s' % e)
def write_float(val: float) -> bytes:
    """Pack *val* as a 32-bit big-endian IEEE 754 float OSC datagram.

    Raises:
      - BuildError if the float could not be converted.
    """
    try:
        packed = struct.pack('>f', val)
    except struct.error as packing_error:
        raise BuildError('Wrong argument value passed: {}'.format(packing_error))
    return packed
def get_float(dgram: bytes, start_index: int) -> Tuple[float, int]:
    """Read a 32-bit big-endian IEEE 754 float from *dgram*.

    Args:
      dgram: A datagram packet.
      start_index: An index where the float starts in the datagram.

    Returns:
      A tuple containing the float and the new end index.

    Raises:
      ParseError if the datagram could not be parsed.
    """
    try:
        shortfall = _FLOAT_DGRAM_LEN - len(dgram[start_index:])
        if shortfall > 0:
            # Reaktor sometimes omits the trailing \x00 bytes needed for a
            # complete float representation, so zero-pad instead of raising.
            dgram = dgram + b'\x00' * shortfall
        end_index = start_index + _FLOAT_DGRAM_LEN
        return struct.unpack('>f', dgram[start_index:end_index])[0], end_index
    except (struct.error, TypeError) as e:
        raise ParseError('Could not parse datagram %s' % e)
def write_double(val: float) -> bytes:
    """Pack *val* as a 64-bit big-endian IEEE 754 double OSC datagram.

    Raises:
      - BuildError if the double could not be converted.
    """
    try:
        packed = struct.pack('>d', val)
    except struct.error as packing_error:
        raise BuildError('Wrong argument value passed: {}'.format(packing_error))
    return packed
def get_double(dgram: bytes, start_index: int) -> Tuple[float, int]:
    """Read a 64-bit big-endian IEEE 754 double from *dgram*.

    Args:
      dgram: A datagram packet.
      start_index: An index where the double starts in the datagram.

    Returns:
      A tuple containing the double and the new end index.

    Raises:
      ParseError if the datagram could not be parsed.
    """
    try:
        if len(dgram[start_index:]) < _DOUBLE_DGRAM_LEN:
            raise ParseError('Datagram is too short')
        end_index = start_index + _DOUBLE_DGRAM_LEN
        value = struct.unpack('>d', dgram[start_index:end_index])[0]
        return value, end_index
    except (struct.error, TypeError) as e:
        raise ParseError('Could not parse datagram {}'.format(e))
def get_blob(dgram: bytes, start_index: int) -> Tuple[bytes, int]:
    """Read an OSC blob from *dgram*.

    According to the specifications, a blob is made of
    "an int32 size count, followed by that many 8-bit bytes of arbitrary
    binary data, followed by 0-3 additional zero bytes to make the total
    number of bits a multiple of 32".

    Args:
      dgram: A datagram packet.
      start_index: An index where the blob starts in the datagram.

    Returns:
      A tuple containing the blob and the new end index.

    Raises:
      ParseError if the datagram could not be parsed.
    """
    size, data_start = get_int(dgram, start_index)
    # Advance past the trailing zero bytes so the next read is 32-bit aligned.
    padded_size = size + (-size % _BLOB_DGRAM_PAD)
    data_end = data_start + size
    if data_end - start_index > len(dgram[start_index:]):
        raise ParseError('Datagram is too short.')
    return dgram[data_start:data_end], data_start + padded_size
def write_blob(val: bytes) -> bytes:
    """Build the OSC datagram for the given blob parameter value.

    Raises:
      - BuildError if the value was empty or if its size didn't fit an OSC int.
    """
    if not val:
        raise BuildError('Blob value cannot be empty')
    # int32 length prefix, then the payload, then zero-padding to a
    # 32-bit boundary.
    dgram = write_int(len(val)) + val
    while len(dgram) % _BLOB_DGRAM_PAD:
        dgram += b'\x00'
    return dgram
def get_date(dgram: bytes, start_index: int) -> Tuple[float, int]:
    """Read a 64-bit big-endian fixed-point time tag as a date from *dgram*.

    According to the specifications, a date is represented as is:
    "the first 32 bits specify the number of seconds since midnight on
    January 1, 1900, and the last 32 bits specify fractional parts of a second
    to a precision of about 200 picoseconds".

    Args:
      dgram: A datagram packet.
      start_index: An index where the date starts in the datagram.

    Returns:
      A tuple containing the system date and the new end index.
      returns osc_immediately (0) if the corresponding OSC sequence was found.

    Raises:
      ParseError if the datagram could not be parsed.
    """
    # Special case: the OSC "immediately" sentinel.
    if dgram[start_index:start_index + _TIMETAG_DGRAM_LEN] == ntp.IMMEDIATELY:
        return IMMEDIATELY, start_index + _TIMETAG_DGRAM_LEN
    if len(dgram[start_index:]) < _TIMETAG_DGRAM_LEN:
        raise ParseError('Datagram is too short')
    timetag, start_index = get_uint64(dgram, start_index)
    system_time = ntp.ntp_time_to_system_epoch(timetag * ntp._NTP_TIMESTAMP_TO_SECONDS)
    return system_time, start_index
def write_date(system_time: Union[int, float]) -> bytes:
    """Build an OSC timetag datagram from a system time in epoch seconds.

    Raises:
      - BuildError if the time could not be converted by the NTP helpers.
    """
    if system_time == IMMEDIATELY:
        return ntp.IMMEDIATELY
    try:
        return ntp.system_time_to_ntp(system_time)
    except ntp.NtpError as ntp_error:
        raise BuildError(ntp_error)
def write_rgba(val: bytes) -> bytes:
    """Pack *val* as a 32-bit big-endian unsigned (rgba32) OSC datagram.

    Raises:
      - BuildError if the int could not be converted.
    """
    try:
        packed = struct.pack('>I', val)
    except struct.error as packing_error:
        raise BuildError('Wrong argument value passed: {}'.format(packing_error))
    return packed
def get_rgba(dgram: bytes, start_index: int) -> Tuple[bytes, int]:
    """Read an rgba32 value (32-bit big-endian unsigned) from *dgram*.

    Args:
      dgram: A datagram packet.
      start_index: An index where the value starts in the datagram.

    Returns:
      A tuple containing the integer and the new end index.

    Raises:
      ParseError if the datagram could not be parsed.
    """
    try:
        if len(dgram[start_index:]) < _INT_DGRAM_LEN:
            raise ParseError('Datagram is too short')
        end_index = start_index + _INT_DGRAM_LEN
        value = struct.unpack('>I', dgram[start_index:end_index])[0]
        return value, end_index
    except (struct.error, TypeError) as e:
        raise ParseError('Could not parse datagram %s' % e)
def write_midi(val: Tuple[Tuple[int, int, int, int], int]) -> bytes:
    """Build the OSC datagram for a MIDI message parameter value.

    A valid MIDI message: (port id, status byte, data1, data2).

    Raises:
      - BuildError if the MIDI message could not be converted.
    """
    if len(val) != 4:
        raise BuildError('MIDI message length is invalid')
    # Fold the four bytes, most significant first, into one 32-bit word.
    word = 0
    for byte in val:
        word = (word << 8) | (byte & 0xFF)
    try:
        return struct.pack('>I', word)
    except struct.error as e:
        raise BuildError('Wrong argument value passed: {}'.format(e))
def get_midi(dgram: bytes, start_index: int) -> Tuple[Tuple[int, int, int, int], int]:
    """Read a MIDI message (port id, status byte, data1, data2) from *dgram*.

    Args:
      dgram: A datagram packet.
      start_index: An index where the MIDI message starts in the datagram.

    Returns:
      A tuple containing the MIDI message and the new end index.

    Raises:
      ParseError if the datagram could not be parsed.
    """
    try:
        if len(dgram[start_index:]) < _INT_DGRAM_LEN:
            raise ParseError('Datagram is too short')
        word = struct.unpack('>I',
                            dgram[start_index:start_index + _INT_DGRAM_LEN])[0]
        # Unpack the 32-bit word into four bytes, most significant first.
        midi_msg = tuple((word >> 8 * shift) & 0xFF for shift in (3, 2, 1, 0))
        return midi_msg, start_index + _INT_DGRAM_LEN
    except (struct.error, TypeError) as e:
        raise ParseError('Could not parse datagram %s' % e)
|
[
"struct.unpack",
"struct.pack",
"spiegelib.network.osc.ntp.ntp_time_to_system_epoch",
"datetime.timedelta",
"datetime.datetime.min.time",
"spiegelib.network.osc.ntp.parse_timestamp",
"spiegelib.network.osc.ntp.system_time_to_ntp"
] |
[((3199, 3221), 'struct.pack', 'struct.pack', (['""">i"""', 'val'], {}), "('>i', val)\n", (3210, 3221), False, 'import struct\n'), ((5589, 5617), 'spiegelib.network.osc.ntp.parse_timestamp', 'ntp.parse_timestamp', (['timetag'], {}), '(timetag)\n', (5608, 5617), False, 'from spiegelib.network.osc import ntp\n'), ((6250, 6272), 'struct.pack', 'struct.pack', (['""">f"""', 'val'], {}), "('>f', val)\n", (6261, 6272), False, 'import struct\n'), ((7617, 7639), 'struct.pack', 'struct.pack', (['""">d"""', 'val'], {}), "('>d', val)\n", (7628, 7639), False, 'import struct\n'), ((11094, 11131), 'spiegelib.network.osc.ntp.ntp_time_to_system_epoch', 'ntp.ntp_time_to_system_epoch', (['seconds'], {}), '(seconds)\n', (11122, 11131), False, 'from spiegelib.network.osc import ntp\n'), ((11295, 11330), 'spiegelib.network.osc.ntp.system_time_to_ntp', 'ntp.system_time_to_ntp', (['system_time'], {}), '(system_time)\n', (11317, 11330), False, 'from spiegelib.network.osc import ntp\n'), ((11598, 11620), 'struct.pack', 'struct.pack', (['""">I"""', 'val'], {}), "('>I', val)\n", (11609, 11620), False, 'import struct\n'), ((12976, 13000), 'struct.pack', 'struct.pack', (['""">I"""', 'value'], {}), "('>I', value)\n", (12987, 13000), False, 'import struct\n'), ((5818, 5874), 'datetime.timedelta', 'timedelta', ([], {'hours': 'hours', 'minutes': 'minutes', 'seconds': 'seconds'}), '(hours=hours, minutes=minutes, seconds=seconds)\n', (5827, 5874), False, 'from datetime import datetime, timedelta, date\n'), ((13675, 13743), 'struct.unpack', 'struct.unpack', (['""">I"""', 'dgram[start_index:start_index + _INT_DGRAM_LEN]'], {}), "('>I', dgram[start_index:start_index + _INT_DGRAM_LEN])\n", (13688, 13743), False, 'import struct\n'), ((3871, 3939), 'struct.unpack', 'struct.unpack', (['""">i"""', 'dgram[start_index:start_index + _INT_DGRAM_LEN]'], {}), "('>i', dgram[start_index:start_index + _INT_DGRAM_LEN])\n", (3884, 3939), False, 'import struct\n'), ((4662, 4733), 'struct.unpack', 'struct.unpack', 
(['""">Q"""', 'dgram[start_index:start_index + _UINT64_DGRAM_LEN]'], {}), "('>Q', dgram[start_index:start_index + _UINT64_DGRAM_LEN])\n", (4675, 4733), False, 'import struct\n'), ((5780, 5799), 'datetime.datetime.min.time', 'datetime.min.time', ([], {}), '()\n', (5797, 5799), False, 'from datetime import datetime, timedelta, date\n'), ((7161, 7231), 'struct.unpack', 'struct.unpack', (['""">f"""', 'dgram[start_index:start_index + _FLOAT_DGRAM_LEN]'], {}), "('>f', dgram[start_index:start_index + _FLOAT_DGRAM_LEN])\n", (7174, 7231), False, 'import struct\n'), ((8301, 8372), 'struct.unpack', 'struct.unpack', (['""">d"""', 'dgram[start_index:start_index + _DOUBLE_DGRAM_LEN]'], {}), "('>d', dgram[start_index:start_index + _DOUBLE_DGRAM_LEN])\n", (8314, 8372), False, 'import struct\n'), ((12246, 12314), 'struct.unpack', 'struct.unpack', (['""">I"""', 'dgram[start_index:start_index + _INT_DGRAM_LEN]'], {}), "('>I', dgram[start_index:start_index + _INT_DGRAM_LEN])\n", (12259, 12314), False, 'import struct\n')]
|
import grpc
import service_pb2
import service_pb2_grpc
# Address and port of the SayHello gRPC server to connect to.
_HOST = "127.0.0.1"
_PORT = "41005"
def main():
    """Open an insecure channel to the server, send a SayHello RPC, and print the reply."""
    with grpc.insecure_channel("{0}:{1}".format(_HOST, _PORT)) as channel:
        client = service_pb2_grpc.SayHelloServiceStub(channel=channel)
        # NOTE(review): "<NAME>" looks like an unfilled template placeholder --
        # confirm the intended request name.
        response = client.SayHello(service_pb2.SayHelloRequest(name="<NAME>"))
        print("received: " + response.result)
if __name__ == '__main__':
    main()
|
[
"service_pb2.SayHelloRequest",
"service_pb2_grpc.SayHelloServiceStub"
] |
[((197, 250), 'service_pb2_grpc.SayHelloServiceStub', 'service_pb2_grpc.SayHelloServiceStub', ([], {'channel': 'channel'}), '(channel=channel)\n', (233, 250), False, 'import service_pb2_grpc\n'), ((286, 328), 'service_pb2.SayHelloRequest', 'service_pb2.SayHelloRequest', ([], {'name': '"""<NAME>"""'}), "(name='<NAME>')\n", (313, 328), False, 'import service_pb2\n')]
|
# coding=utf-8
from django.shortcuts import render_to_response, render
from django.views import generic
from course.models import App, AppCategory
class IndexView(generic.ListView):
    """Landing page showing a small sample of apps."""
    template_name = 'index.html'

    def get_queryset(self):
        """Return the first six apps ordered by id."""
        ordered = App.objects.order_by("id")
        return ordered[:6]
class AppView(generic.ListView):
    """App listing with optional title-search and category filtering."""
    template_name = 'course.html'

    def get_queryset(self):
        """Filter by ?search= substring first, then ?category_id=, else all apps."""
        search_term = self.request.GET.get("search")
        if search_term:
            return App.objects.filter(title__icontains=search_term)
        category = self.request.GET.get("category_id")
        if category:
            return App.objects.filter(category_id=category)
        return App.objects.all()

    def get_context_data(self, **kwargs):
        """Add the full category list to the template context."""
        context = super(AppView, self).get_context_data(**kwargs)
        context['categories'] = AppCategory.objects.all()
        return context
def get_id(request, pk):
    """Render the detail page for the app with primary key *pk*.

    Raises App.DoesNotExist (a 500 unless handled) when no such app exists.
    """
    # Renamed from `list`, which shadowed the builtin; the template still
    # receives the object under the historical 'list' context key.
    app = App.objects.get(id=pk)
    context = {'list': app}
    return render(request, 'course-intro.html', context)
class ContactView(generic.View):
    """Static contact page."""

    def get(self, *args, **kwargs):
        # NOTE(review): render_to_response is deprecated since Django 2.0 and
        # removed in 3.0 -- consider render(request, 'contact.html') when upgrading.
        return render_to_response(template_name='contact.html')
|
[
"django.shortcuts.render_to_response",
"course.models.App.objects.all",
"course.models.AppCategory.objects.all",
"course.models.App.objects.order_by",
"course.models.App.objects.filter",
"django.shortcuts.render",
"course.models.App.objects.get"
] |
[((982, 1004), 'course.models.App.objects.get', 'App.objects.get', ([], {'id': 'pk'}), '(id=pk)\n', (997, 1004), False, 'from course.models import App, AppCategory\n'), ((1059, 1104), 'django.shortcuts.render', 'render', (['request', '"""course-intro.html"""', 'context'], {}), "(request, 'course-intro.html', context)\n", (1065, 1104), False, 'from django.shortcuts import render_to_response, render\n'), ((737, 754), 'course.models.App.objects.all', 'App.objects.all', ([], {}), '()\n', (752, 754), False, 'from course.models import App, AppCategory\n'), ((896, 921), 'course.models.AppCategory.objects.all', 'AppCategory.objects.all', ([], {}), '()\n', (919, 921), False, 'from course.models import App, AppCategory\n'), ((1189, 1237), 'django.shortcuts.render_to_response', 'render_to_response', ([], {'template_name': '"""contact.html"""'}), "(template_name='contact.html')\n", (1207, 1237), False, 'from django.shortcuts import render_to_response, render\n'), ((318, 344), 'course.models.App.objects.order_by', 'App.objects.order_by', (['"""id"""'], {}), "('id')\n", (338, 344), False, 'from course.models import App, AppCategory\n'), ((533, 576), 'course.models.App.objects.filter', 'App.objects.filter', ([], {'title__icontains': 'search'}), '(title__icontains=search)\n', (551, 576), False, 'from course.models import App, AppCategory\n'), ((678, 721), 'course.models.App.objects.filter', 'App.objects.filter', ([], {'category_id': 'category_id'}), '(category_id=category_id)\n', (696, 721), False, 'from course.models import App, AppCategory\n')]
|
import argparse
import network_utils
import helper_utils
import torch
import json
import numpy as np
def get_args():
    """Build and parse the command-line arguments for flower prediction."""
    arg_parser = argparse.ArgumentParser(
        description="Predict flower classification with DNN")
    arg_parser.add_argument('input', default='./flowers/test/17/image_03911.jpg',
                            type=str, help="input flower image to predict")
    arg_parser.add_argument('checkpoint', type=str,
                            help='pre-trained model path')
    arg_parser.add_argument('--top_k', default=3, type=int,
                            help='default top_k results')
    arg_parser.add_argument('--category_names', default='./cat_to_name.json',
                            type=str, help='default category file')
    arg_parser.add_argument('--gpu', default='False', type=str,
                            help='If GPU should be enabled')
    return arg_parser.parse_args()
def predict(image, model, use_gpu, topk):
    """Predict the top-k classes of a preprocessed image with a trained model.

    Args:
        image: Preprocessed image as a numpy array (from helper_utils.process_image).
        model: Trained torch model; must expose a `class_to_idx` mapping.
        use_gpu: String flag parsed by helper_utils.str_to_bool.
        topk: Number of top predictions to return.

    Returns:
        (probs, topk_classes): top-k probabilities and their class labels.
    """
    # Move both the input tensor and the model to the same device.
    if helper_utils.str_to_bool(use_gpu) and torch.cuda.is_available():
        image = torch.from_numpy(image).type(torch.cuda.FloatTensor) # Convert numpy to tensor
        model.cuda()
        print("GPU active")
    else:
        image = torch.from_numpy(image).type(torch.FloatTensor) # Convert numpy to tensor
        model.cpu()
        print("CPU active: Either cuda is not available or gpu option has been turn off")
    model.eval() # set model to evaluation mode
    image = torch.unsqueeze(image, dim=0) # add a batch dimension of 1
    with torch.no_grad (): # Turn off gradient tracking for inference
        output = model.forward(image)
        # NOTE(review): exp() implies the model outputs log-probabilities
        # (log-softmax) -- confirm against the training code.
        preds, classes = torch.exp(output).topk(topk) # top-k scores and class indices
    probs = preds.cpu().numpy().tolist()[0]
    classes = classes.cpu().numpy().tolist()[0]
    # Invert class_to_idx to map model output indices back to class labels.
    idx_to_class = {model.class_to_idx[k]: k for k in model.class_to_idx}
    topk_classes = [idx_to_class[i] for i in classes]
    return probs, topk_classes
def main():
    """CLI entry point: load a checkpoint, predict top-k flower classes, show results."""
    args = get_args()
    processed_image = helper_utils.process_image(args.input) # Process the image to numpy array
    model = network_utils.loading_model(args.checkpoint)
    probs, topk_classes = predict(processed_image, model, args.gpu, args.top_k)
    cat_to_name = helper_utils.load_cat_to_name(args.category_names)
    class_names = [cat_to_name [item] for item in topk_classes]
    # Pick the single most probable class.
    max_prob_idx = np.argmax(probs)
    max_class_nb = topk_classes[max_prob_idx]
    # NOTE(review): predicted_class is only consumed by the commented-out
    # display_image call below.
    predicted_class = cat_to_name[max_class_nb]
    #helper_utils.display_image(args.input,predicted_class)
    helper_utils.display_result(class_names, probs)
if __name__ == '__main__':
    main()
|
[
"helper_utils.str_to_bool",
"argparse.ArgumentParser",
"numpy.argmax",
"torch.exp",
"network_utils.loading_model",
"helper_utils.load_cat_to_name",
"torch.cuda.is_available",
"helper_utils.display_result",
"torch.unsqueeze",
"torch.no_grad",
"helper_utils.process_image",
"torch.from_numpy"
] |
[((132, 209), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Predict flower classification with DNN"""'}), "(description='Predict flower classification with DNN')\n", (155, 209), False, 'import argparse\n'), ((1382, 1411), 'torch.unsqueeze', 'torch.unsqueeze', (['image'], {'dim': '(0)'}), '(image, dim=0)\n', (1397, 1411), False, 'import torch\n'), ((1996, 2034), 'helper_utils.process_image', 'helper_utils.process_image', (['args.input'], {}), '(args.input)\n', (2022, 2034), False, 'import helper_utils\n'), ((2082, 2126), 'network_utils.loading_model', 'network_utils.loading_model', (['args.checkpoint'], {}), '(args.checkpoint)\n', (2109, 2126), False, 'import network_utils\n'), ((2226, 2276), 'helper_utils.load_cat_to_name', 'helper_utils.load_cat_to_name', (['args.category_names'], {}), '(args.category_names)\n', (2255, 2276), False, 'import helper_utils\n'), ((2360, 2376), 'numpy.argmax', 'np.argmax', (['probs'], {}), '(probs)\n', (2369, 2376), True, 'import numpy as np\n'), ((2536, 2583), 'helper_utils.display_result', 'helper_utils.display_result', (['class_names', 'probs'], {}), '(class_names, probs)\n', (2563, 2583), False, 'import helper_utils\n'), ((879, 912), 'helper_utils.str_to_bool', 'helper_utils.str_to_bool', (['use_gpu'], {}), '(use_gpu)\n', (903, 912), False, 'import helper_utils\n'), ((917, 942), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (940, 942), False, 'import torch\n'), ((1463, 1478), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1476, 1478), False, 'import torch\n'), ((1594, 1611), 'torch.exp', 'torch.exp', (['output'], {}), '(output)\n', (1603, 1611), False, 'import torch\n'), ((960, 983), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (976, 983), False, 'import torch\n'), ((1115, 1138), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (1131, 1138), False, 'import torch\n')]
|
from PIL import Image
from torchvision import transforms
def image_transforms(load_size):
    """Training transform: random crop to *load_size*, to tensor, normalize to [-1, 1]."""
    random_crop = transforms.RandomCrop(size=load_size)
    to_tensor = transforms.ToTensor()
    # Per-channel (x - 0.5) / 0.5 maps [0, 1] tensors into [-1, 1].
    normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    # transforms.CenterCrop(size=(178, 178)) was used here for CelebA.
    return transforms.Compose([random_crop, to_tensor, normalize])
def mask_transforms(load_size):
    """Mask transform: bilinear resize to *load_size*, then to tensor (no normalization)."""
    resize = transforms.Resize(size=load_size, interpolation=Image.BILINEAR)
    return transforms.Compose([resize, transforms.ToTensor()])
|
[
"torchvision.transforms.Normalize",
"torchvision.transforms.Resize",
"torchvision.transforms.RandomCrop",
"torchvision.transforms.ToTensor"
] |
[((198, 235), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', ([], {'size': 'load_size'}), '(size=load_size)\n', (219, 235), False, 'from torchvision import transforms\n'), ((248, 269), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (267, 269), False, 'from torchvision import transforms\n'), ((279, 333), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (299, 333), False, 'from torchvision import transforms\n'), ((416, 479), 'torchvision.transforms.Resize', 'transforms.Resize', ([], {'size': 'load_size', 'interpolation': 'Image.BILINEAR'}), '(size=load_size, interpolation=Image.BILINEAR)\n', (433, 479), False, 'from torchvision import transforms\n'), ((489, 510), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (508, 510), False, 'from torchvision import transforms\n')]
|
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnabla as nn
import nnabla.logger as logger
import nnabla.functions as F
import nnabla.parametric_functions as PF
import nnabla.solver as S
import nnabla.utils.save as save
from _checkpoint_nnp_util import save_checkpoint, load_checkpoint, save_nnp
from args import get_args
from mnist_data import data_iterator_mnist
import os
def categorical_error(pred, label):
    """
    Compute categorical error given score vectors and labels as
    numpy.ndarray.
    """
    predicted = pred.argmax(1)
    mismatches = (predicted != label.flat)
    return mismatches.mean()
# Binary Connect Model
def mnist_binary_connect_lenet_prediction(image, test=False):
    """
    Construct LeNet for MNIST (BinaryNet version).

    Args:
        image: Input variable (the caller scales pixels by 1/255).
        test: If True, batch normalization uses accumulated statistics
            instead of per-batch statistics.

    Returns:
        Unnormalized 10-class score (logit) variable.
    """
    with nn.parameter_scope("conv1"):
        # 16 binary-connect 5x5 filters, BN, then ELU over 2x2 average pooling.
        c1 = PF.binary_connect_convolution(image, 16, (5, 5))
        c1 = PF.batch_normalization(c1, batch_stat=not test)
        c1 = F.elu(F.average_pooling(c1, (2, 2)))
    with nn.parameter_scope("conv2"):
        c2 = PF.binary_connect_convolution(c1, 16, (5, 5))
        c2 = PF.batch_normalization(c2, batch_stat=not test)
        c2 = F.elu(F.average_pooling(c2, (2, 2)))
    with nn.parameter_scope("fc3"):
        # Fully-connected layers with binarized weights.
        c3 = PF.binary_connect_affine(c2, 50)
        c3 = PF.batch_normalization(c3, batch_stat=not test)
        c3 = F.elu(c3)
    with nn.parameter_scope("fc4"):
        c4 = PF.binary_connect_affine(c3, 10)
        c4 = PF.batch_normalization(c4, batch_stat=not test)
    return c4
def mnist_binary_connect_resnet_prediction(image, test=False):
    """
    Construct ResNet for MNIST (BinaryNet version).

    Args:
        image: Input variable (the caller scales pixels by 1/255).
        test: If True, batch normalization uses accumulated statistics.

    Returns:
        Unnormalized 10-class score (logit) variable.
    """
    def bn(x):
        # Batch normalization; per-batch statistics during training only.
        return PF.batch_normalization(x, batch_stat=not test)
    def res_unit(x, scope):
        # Bottleneck residual unit: 1x1 -> 3x3 -> 1x1 binarized convolutions
        # with an identity skip connection and a final ELU.
        # NOTE(review): C / 2 is a float under Python 3 -- presumably the PF
        # call casts it to int; confirm.
        C = x.shape[1]
        with nn.parameter_scope(scope):
            with nn.parameter_scope('conv1'):
                h = F.elu(bn(PF.binary_connect_convolution(
                    x, C / 2, (1, 1), with_bias=False)))
            with nn.parameter_scope('conv2'):
                h = F.elu(
                    bn(PF.binary_connect_convolution(h, C / 2, (3, 3), pad=(1, 1), with_bias=False)))
            with nn.parameter_scope('conv3'):
                h = bn(PF.binary_connect_convolution(
                    h, C, (1, 1), with_bias=False))
        return F.elu(x + h)
    # NOTE(review): the "64 x 32 x 32" size comments below look copied from a
    # 32x32 (CIFAR-style) template; MNIST inputs are 28x28 -- confirm.
    # Conv1 --> 64 x 32 x 32
    with nn.parameter_scope("conv1"):
        c1 = F.elu(
            bn(PF.binary_connect_convolution(image, 64, (3, 3), pad=(3, 3), with_bias=False)))
    # Conv2 --> 64 x 16 x 16
    c2 = F.max_pooling(res_unit(c1, "conv2"), (2, 2))
    # Conv3 --> 64 x 8 x 8
    c3 = F.max_pooling(res_unit(c2, "conv3"), (2, 2))
    # Conv4 --> 64 x 8 x 8
    c4 = res_unit(c3, "conv4")
    # Conv5 --> 64 x 4 x 4
    c5 = F.max_pooling(res_unit(c4, "conv5"), (2, 2))
    # Conv5 --> 64 x 4 x 4
    c6 = res_unit(c5, "conv6")
    pl = F.average_pooling(c6, (4, 4))
    with nn.parameter_scope("classifier"):
        y = bn(PF.binary_connect_affine(pl, 10))
    return y
# Binary Net Model
def mnist_binary_net_lenet_prediction(image, test=False):
    """
    Construct LeNet for MNIST (BinaryNet version).

    Same topology as the BinaryConnect LeNet, but activations are binarized
    with binary_tanh instead of ELU.

    Args:
        image: Input variable (the caller scales pixels by 1/255).
        test: If True, batch normalization uses accumulated statistics.

    Returns:
        Unnormalized 10-class score (logit) variable.
    """
    with nn.parameter_scope("conv1"):
        c1 = PF.binary_connect_convolution(image, 16, (5, 5))
        c1 = PF.batch_normalization(c1, batch_stat=not test)
        c1 = F.binary_tanh(F.average_pooling(c1, (2, 2)))
    with nn.parameter_scope("conv2"):
        c2 = PF.binary_connect_convolution(c1, 16, (5, 5))
        c2 = PF.batch_normalization(c2, batch_stat=not test)
        c2 = F.binary_tanh(F.average_pooling(c2, (2, 2)))
    with nn.parameter_scope("fc3"):
        c3 = PF.binary_connect_affine(c2, 50)
        c3 = PF.batch_normalization(c3, batch_stat=not test)
        c3 = F.binary_tanh(c3)
    with nn.parameter_scope("fc4"):
        c4 = PF.binary_connect_affine(c3, 10)
        c4 = PF.batch_normalization(c4, batch_stat=not test)
    return c4
def mnist_binary_net_resnet_prediction(image, test=False):
    """
    Construct ResNet for MNIST (BinaryNet version).

    Same topology as the BinaryConnect ResNet, but activations are binarized
    with binary_tanh instead of ELU.

    Args:
        image: Input variable (the caller scales pixels by 1/255).
        test: If True, batch normalization uses accumulated statistics.

    Returns:
        Unnormalized 10-class score (logit) variable.
    """
    def bn(x):
        # Batch normalization; per-batch statistics during training only.
        return PF.batch_normalization(x, batch_stat=not test)
    def res_unit(x, scope):
        # Bottleneck residual unit: 1x1 -> 3x3 -> 1x1 binarized convolutions
        # with an identity skip connection and binary_tanh activations.
        C = x.shape[1]
        with nn.parameter_scope(scope):
            with nn.parameter_scope('conv1'):
                h = F.binary_tanh(bn(PF.binary_connect_convolution(
                    x, C / 2, (1, 1), with_bias=False)))
            with nn.parameter_scope('conv2'):
                h = F.binary_tanh(
                    bn(PF.binary_connect_convolution(h, C / 2, (3, 3), pad=(1, 1), with_bias=False)))
            with nn.parameter_scope('conv3'):
                h = bn(PF.binary_connect_convolution(
                    h, C, (1, 1), with_bias=False))
        return F.binary_tanh(x + h)
    # NOTE(review): the "64 x 32 x 32" size comments below look copied from a
    # 32x32 (CIFAR-style) template; MNIST inputs are 28x28 -- confirm.
    # Conv1 --> 64 x 32 x 32
    with nn.parameter_scope("conv1"):
        c1 = F.binary_tanh(
            bn(PF.binary_connect_convolution(image, 64, (3, 3), pad=(3, 3), with_bias=False)))
    # Conv2 --> 64 x 16 x 16
    c2 = F.max_pooling(res_unit(c1, "conv2"), (2, 2))
    # Conv3 --> 64 x 8 x 8
    c3 = F.max_pooling(res_unit(c2, "conv3"), (2, 2))
    # Conv4 --> 64 x 8 x 8
    c4 = res_unit(c3, "conv4")
    # Conv5 --> 64 x 4 x 4
    c5 = F.max_pooling(res_unit(c4, "conv5"), (2, 2))
    # Conv5 --> 64 x 4 x 4
    c6 = res_unit(c5, "conv6")
    pl = F.average_pooling(c6, (4, 4))
    with nn.parameter_scope("classifier"):
        y = bn(PF.binary_connect_affine(pl, 10))
    return y
# Binary Weight Model
def mnist_binary_weight_lenet_prediction(image, test=False):
    """
    Construct LeNet for MNIST (Binary Weight Network version).

    Args:
        image: Input variable (the caller scales pixels by 1/255).
        test: Accepted for API symmetry with the other builders but unused
            here (this variant has no batch normalization layers).

    Returns:
        Unnormalized 10-class score (logit) variable.
    """
    with nn.parameter_scope("conv1"):
        # 16 binary-weight 5x5 filters, ELU over 2x2 average pooling.
        c1 = PF.binary_weight_convolution(image, 16, (5, 5))
        c1 = F.elu(F.average_pooling(c1, (2, 2)))
    with nn.parameter_scope("conv2"):
        c2 = PF.binary_weight_convolution(c1, 16, (5, 5))
        c2 = F.elu(F.average_pooling(c2, (2, 2)))
    with nn.parameter_scope("fc3"):
        c3 = F.elu(PF.binary_weight_affine(c2, 50))
    with nn.parameter_scope("fc4"):
        c4 = PF.binary_weight_affine(c3, 10)
    return c4
def mnist_binary_weight_resnet_prediction(image, test=False):
    """
    Construct ResNet for MNIST (Binary Weight Network version).

    Args:
        image: Input variable (the caller scales pixels by 1/255).
        test: If True, batch normalization uses accumulated statistics.

    Returns:
        Unnormalized 10-class score (logit) variable.
    """
    def bn(x):
        # Batch normalization; per-batch statistics during training only.
        return PF.batch_normalization(x, batch_stat=not test)
    def res_unit(x, scope):
        # Bottleneck residual unit: 1x1 -> 3x3 -> 1x1 binary-weight
        # convolutions with an identity skip connection and a final ELU.
        C = x.shape[1]
        with nn.parameter_scope(scope):
            with nn.parameter_scope('conv1'):
                h = F.elu(bn(PF.binary_weight_convolution(
                    x, C / 2, (1, 1), with_bias=False)))
            with nn.parameter_scope('conv2'):
                h = F.elu(
                    bn(PF.binary_weight_convolution(h, C / 2, (3, 3), pad=(1, 1), with_bias=False)))
            with nn.parameter_scope('conv3'):
                h = bn(PF.binary_weight_convolution(
                    h, C, (1, 1), with_bias=False))
        return F.elu(x + h)
    # NOTE(review): the "64 x 32 x 32" size comments below look copied from a
    # 32x32 (CIFAR-style) template; MNIST inputs are 28x28 -- confirm.
    # Conv1 --> 64 x 32 x 32
    with nn.parameter_scope("conv1"):
        c1 = F.elu(
            bn(PF.binary_weight_convolution(image, 64, (3, 3), pad=(3, 3), with_bias=False)))
    # Conv2 --> 64 x 16 x 16
    c2 = F.max_pooling(res_unit(c1, "conv2"), (2, 2))
    # Conv3 --> 64 x 8 x 8
    c3 = F.max_pooling(res_unit(c2, "conv3"), (2, 2))
    # Conv4 --> 64 x 8 x 8
    c4 = res_unit(c3, "conv4")
    # Conv5 --> 64 x 4 x 4
    c5 = F.max_pooling(res_unit(c4, "conv5"), (2, 2))
    # Conv5 --> 64 x 4 x 4
    c6 = res_unit(c5, "conv6")
    pl = F.average_pooling(c6, (4, 4))
    with nn.parameter_scope("classifier"):
        y = PF.binary_weight_affine(pl, 10)
    return y
def train():
"""
Main script.
Steps:
* Parse command line arguments.
* Specify a context for computation.
* Initialize DataIterator for MNIST.
* Construct a computation graph for training and validation.
* Initialize a solver and set parameter variables to it.
* Create monitor instances for saving and displaying training stats.
* Training loop
* Computate error rate for validation data (periodically)
* Get a next minibatch.
* Set parameter gradients zero
* Execute forwardprop on the training graph.
* Execute backprop.
* Solver updates parameters by using gradients computed by backprop.
* Compute training error
"""
args = get_args(monitor_path='tmp.monitor.bnn')
# Get context.
from nnabla.ext_utils import get_extension_context
logger.info("Running in %s" % args.context)
ctx = get_extension_context(
args.context, device_id=args.device_id, type_config=args.type_config)
nn.set_default_context(ctx)
# Initialize DataIterator for MNIST.
data = data_iterator_mnist(args.batch_size, True)
vdata = data_iterator_mnist(args.batch_size, False)
# Create CNN network for both training and testing.
mnist_cnn_prediction = mnist_binary_connect_lenet_prediction
if args.net == 'bincon':
mnist_cnn_prediction = mnist_binary_connect_lenet_prediction
elif args.net == 'binnet':
mnist_cnn_prediction = mnist_binary_net_lenet_prediction
elif args.net == 'bwn':
mnist_cnn_prediction = mnist_binary_weight_lenet_prediction
elif args.net == 'bincon_resnet':
mnist_cnn_prediction = mnist_binary_connect_resnet_prediction
elif args.net == 'binnet_resnet':
mnist_cnn_prediction = mnist_binary_net_resnet_prediction
elif args.net == 'bwn_resnet':
mnist_cnn_prediction = mnist_binary_weight_resnet_prediction
# TRAIN
# Create input variables.
image = nn.Variable([args.batch_size, 1, 28, 28])
label = nn.Variable([args.batch_size, 1])
# Create prediction graph.
pred = mnist_cnn_prediction(image / 255, test=False)
pred.persistent = True
# Create loss function.
loss = F.mean(F.softmax_cross_entropy(pred, label))
# TEST
# Create input variables.
vimage = nn.Variable([args.batch_size, 1, 28, 28])
vlabel = nn.Variable([args.batch_size, 1])
# Create prediction graph.
vpred = mnist_cnn_prediction(vimage / 255, test=True)
# Create Solver.
solver = S.Adam(args.learning_rate)
solver.set_parameters(nn.get_parameters())
start_point = 0
if args.checkpoint is not None:
# load weights and solver state info from specified checkpoint file.
start_point = load_checkpoint(args.checkpoint, solver)
# Create monitor.
import nnabla.monitor as M
monitor = M.Monitor(args.monitor_path)
monitor_loss = M.MonitorSeries("Training loss", monitor, interval=10)
monitor_err = M.MonitorSeries("Training error", monitor, interval=10)
monitor_time = M.MonitorTimeElapsed("Training time", monitor, interval=100)
monitor_verr = M.MonitorSeries("Test error", monitor, interval=10)
# save_nnp
contents = save_nnp({'x': vimage}, {'y': vpred}, args.batch_size)
save.save(os.path.join(args.model_save_path,
'{}_result_epoch0.nnp'.format(args.net)), contents)
# Training loop.
for i in range(start_point, args.max_iter):
if i % args.val_interval == 0:
# Validation
ve = 0.0
for j in range(args.val_iter):
vimage.d, vlabel.d = vdata.next()
vpred.forward(clear_buffer=True)
ve += categorical_error(vpred.d, vlabel.d)
monitor_verr.add(i, ve / args.val_iter)
if i % args.model_save_interval == 0:
# save checkpoint file
save_checkpoint(args.model_save_path, i, solver)
# Training forward
image.d, label.d = data.next()
solver.zero_grad()
loss.forward(clear_no_need_grad=True)
# Training backward & update
loss.backward(clear_buffer=True)
solver.weight_decay(args.weight_decay)
solver.update()
# Monitor
e = categorical_error(pred.d, label.d)
monitor_loss.add(i, loss.d.copy())
monitor_err.add(i, e)
monitor_time.add(i)
parameter_file = os.path.join(
args.model_save_path, 'params_%06d.h5' % args.max_iter)
nn.save_parameters(parameter_file)
# save_nnp_lastepoch
contents = save_nnp({'x': vimage}, {'y': vpred}, args.batch_size)
save.save(os.path.join(args.model_save_path,
'{}_result.nnp'.format(args.net)), contents)
# Script entry point: run the training loop defined above.
if __name__ == '__main__':
    train()
|
[
"nnabla.ext_utils.get_extension_context",
"nnabla.parametric_functions.binary_weight_affine",
"nnabla.get_parameters",
"nnabla.parametric_functions.batch_normalization",
"_checkpoint_nnp_util.load_checkpoint",
"os.path.join",
"nnabla.parametric_functions.binary_connect_affine",
"nnabla.parametric_functions.binary_connect_convolution",
"nnabla.save_parameters",
"_checkpoint_nnp_util.save_nnp",
"_checkpoint_nnp_util.save_checkpoint",
"nnabla.solver.Adam",
"nnabla.monitor.MonitorTimeElapsed",
"nnabla.logger.info",
"nnabla.functions.average_pooling",
"nnabla.monitor.MonitorSeries",
"nnabla.Variable",
"nnabla.set_default_context",
"nnabla.functions.binary_tanh",
"mnist_data.data_iterator_mnist",
"nnabla.monitor.Monitor",
"nnabla.functions.softmax_cross_entropy",
"args.get_args",
"nnabla.parameter_scope",
"nnabla.functions.elu",
"nnabla.parametric_functions.binary_weight_convolution"
] |
[((3426, 3455), 'nnabla.functions.average_pooling', 'F.average_pooling', (['c6', '(4, 4)'], {}), '(c6, (4, 4))\n', (3443, 3455), True, 'import nnabla.functions as F\n'), ((5874, 5903), 'nnabla.functions.average_pooling', 'F.average_pooling', (['c6', '(4, 4)'], {}), '(c6, (4, 4))\n', (5891, 5903), True, 'import nnabla.functions as F\n'), ((8031, 8060), 'nnabla.functions.average_pooling', 'F.average_pooling', (['c6', '(4, 4)'], {}), '(c6, (4, 4))\n', (8048, 8060), True, 'import nnabla.functions as F\n'), ((8884, 8924), 'args.get_args', 'get_args', ([], {'monitor_path': '"""tmp.monitor.bnn"""'}), "(monitor_path='tmp.monitor.bnn')\n", (8892, 8924), False, 'from args import get_args\n'), ((9004, 9047), 'nnabla.logger.info', 'logger.info', (["('Running in %s' % args.context)"], {}), "('Running in %s' % args.context)\n", (9015, 9047), True, 'import nnabla.logger as logger\n'), ((9058, 9154), 'nnabla.ext_utils.get_extension_context', 'get_extension_context', (['args.context'], {'device_id': 'args.device_id', 'type_config': 'args.type_config'}), '(args.context, device_id=args.device_id, type_config=\n args.type_config)\n', (9079, 9154), False, 'from nnabla.ext_utils import get_extension_context\n'), ((9163, 9190), 'nnabla.set_default_context', 'nn.set_default_context', (['ctx'], {}), '(ctx)\n', (9185, 9190), True, 'import nnabla as nn\n'), ((9244, 9286), 'mnist_data.data_iterator_mnist', 'data_iterator_mnist', (['args.batch_size', '(True)'], {}), '(args.batch_size, True)\n', (9263, 9286), False, 'from mnist_data import data_iterator_mnist\n'), ((9299, 9342), 'mnist_data.data_iterator_mnist', 'data_iterator_mnist', (['args.batch_size', '(False)'], {}), '(args.batch_size, False)\n', (9318, 9342), False, 'from mnist_data import data_iterator_mnist\n'), ((10126, 10167), 'nnabla.Variable', 'nn.Variable', (['[args.batch_size, 1, 28, 28]'], {}), '([args.batch_size, 1, 28, 28])\n', (10137, 10167), True, 'import nnabla as nn\n'), ((10180, 10213), 'nnabla.Variable', 'nn.Variable', 
(['[args.batch_size, 1]'], {}), '([args.batch_size, 1])\n', (10191, 10213), True, 'import nnabla as nn\n'), ((10468, 10509), 'nnabla.Variable', 'nn.Variable', (['[args.batch_size, 1, 28, 28]'], {}), '([args.batch_size, 1, 28, 28])\n', (10479, 10509), True, 'import nnabla as nn\n'), ((10523, 10556), 'nnabla.Variable', 'nn.Variable', (['[args.batch_size, 1]'], {}), '([args.batch_size, 1])\n', (10534, 10556), True, 'import nnabla as nn\n'), ((10681, 10707), 'nnabla.solver.Adam', 'S.Adam', (['args.learning_rate'], {}), '(args.learning_rate)\n', (10687, 10707), True, 'import nnabla.solver as S\n'), ((11020, 11048), 'nnabla.monitor.Monitor', 'M.Monitor', (['args.monitor_path'], {}), '(args.monitor_path)\n', (11029, 11048), True, 'import nnabla.monitor as M\n'), ((11068, 11122), 'nnabla.monitor.MonitorSeries', 'M.MonitorSeries', (['"""Training loss"""', 'monitor'], {'interval': '(10)'}), "('Training loss', monitor, interval=10)\n", (11083, 11122), True, 'import nnabla.monitor as M\n'), ((11141, 11196), 'nnabla.monitor.MonitorSeries', 'M.MonitorSeries', (['"""Training error"""', 'monitor'], {'interval': '(10)'}), "('Training error', monitor, interval=10)\n", (11156, 11196), True, 'import nnabla.monitor as M\n'), ((11216, 11276), 'nnabla.monitor.MonitorTimeElapsed', 'M.MonitorTimeElapsed', (['"""Training time"""', 'monitor'], {'interval': '(100)'}), "('Training time', monitor, interval=100)\n", (11236, 11276), True, 'import nnabla.monitor as M\n'), ((11296, 11347), 'nnabla.monitor.MonitorSeries', 'M.MonitorSeries', (['"""Test error"""', 'monitor'], {'interval': '(10)'}), "('Test error', monitor, interval=10)\n", (11311, 11347), True, 'import nnabla.monitor as M\n'), ((11379, 11433), '_checkpoint_nnp_util.save_nnp', 'save_nnp', (["{'x': vimage}", "{'y': vpred}", 'args.batch_size'], {}), "({'x': vimage}, {'y': vpred}, args.batch_size)\n", (11387, 11433), False, 'from _checkpoint_nnp_util import save_checkpoint, load_checkpoint, save_nnp\n'), ((12588, 12656), 'os.path.join', 
'os.path.join', (['args.model_save_path', "('params_%06d.h5' % args.max_iter)"], {}), "(args.model_save_path, 'params_%06d.h5' % args.max_iter)\n", (12600, 12656), False, 'import os\n'), ((12670, 12704), 'nnabla.save_parameters', 'nn.save_parameters', (['parameter_file'], {}), '(parameter_file)\n', (12688, 12704), True, 'import nnabla as nn\n'), ((12746, 12800), '_checkpoint_nnp_util.save_nnp', 'save_nnp', (["{'x': vimage}", "{'y': vpred}", 'args.batch_size'], {}), "({'x': vimage}, {'y': vpred}, args.batch_size)\n", (12754, 12800), False, 'from _checkpoint_nnp_util import save_checkpoint, load_checkpoint, save_nnp\n'), ((1321, 1348), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""conv1"""'], {}), "('conv1')\n", (1339, 1348), True, 'import nnabla as nn\n'), ((1363, 1411), 'nnabla.parametric_functions.binary_connect_convolution', 'PF.binary_connect_convolution', (['image', '(16)', '(5, 5)'], {}), '(image, 16, (5, 5))\n', (1392, 1411), True, 'import nnabla.parametric_functions as PF\n'), ((1425, 1472), 'nnabla.parametric_functions.batch_normalization', 'PF.batch_normalization', (['c1'], {'batch_stat': '(not test)'}), '(c1, batch_stat=not test)\n', (1447, 1472), True, 'import nnabla.parametric_functions as PF\n'), ((1532, 1559), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""conv2"""'], {}), "('conv2')\n", (1550, 1559), True, 'import nnabla as nn\n'), ((1574, 1619), 'nnabla.parametric_functions.binary_connect_convolution', 'PF.binary_connect_convolution', (['c1', '(16)', '(5, 5)'], {}), '(c1, 16, (5, 5))\n', (1603, 1619), True, 'import nnabla.parametric_functions as PF\n'), ((1633, 1680), 'nnabla.parametric_functions.batch_normalization', 'PF.batch_normalization', (['c2'], {'batch_stat': '(not test)'}), '(c2, batch_stat=not test)\n', (1655, 1680), True, 'import nnabla.parametric_functions as PF\n'), ((1740, 1765), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""fc3"""'], {}), "('fc3')\n", (1758, 1765), True, 'import nnabla as nn\n'), ((1780, 1812), 
'nnabla.parametric_functions.binary_connect_affine', 'PF.binary_connect_affine', (['c2', '(50)'], {}), '(c2, 50)\n', (1804, 1812), True, 'import nnabla.parametric_functions as PF\n'), ((1826, 1873), 'nnabla.parametric_functions.batch_normalization', 'PF.batch_normalization', (['c3'], {'batch_stat': '(not test)'}), '(c3, batch_stat=not test)\n', (1848, 1873), True, 'import nnabla.parametric_functions as PF\n'), ((1887, 1896), 'nnabla.functions.elu', 'F.elu', (['c3'], {}), '(c3)\n', (1892, 1896), True, 'import nnabla.functions as F\n'), ((1906, 1931), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""fc4"""'], {}), "('fc4')\n", (1924, 1931), True, 'import nnabla as nn\n'), ((1946, 1978), 'nnabla.parametric_functions.binary_connect_affine', 'PF.binary_connect_affine', (['c3', '(10)'], {}), '(c3, 10)\n', (1970, 1978), True, 'import nnabla.parametric_functions as PF\n'), ((1992, 2039), 'nnabla.parametric_functions.batch_normalization', 'PF.batch_normalization', (['c4'], {'batch_stat': '(not test)'}), '(c4, batch_stat=not test)\n', (2014, 2039), True, 'import nnabla.parametric_functions as PF\n'), ((2217, 2263), 'nnabla.parametric_functions.batch_normalization', 'PF.batch_normalization', (['x'], {'batch_stat': '(not test)'}), '(x, batch_stat=not test)\n', (2239, 2263), True, 'import nnabla.parametric_functions as PF\n'), ((2861, 2873), 'nnabla.functions.elu', 'F.elu', (['(x + h)'], {}), '(x + h)\n', (2866, 2873), True, 'import nnabla.functions as F\n'), ((2912, 2939), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""conv1"""'], {}), "('conv1')\n", (2930, 2939), True, 'import nnabla as nn\n'), ((3465, 3497), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""classifier"""'], {}), "('classifier')\n", (3483, 3497), True, 'import nnabla as nn\n'), ((3717, 3744), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""conv1"""'], {}), "('conv1')\n", (3735, 3744), True, 'import nnabla as nn\n'), ((3759, 3807), 'nnabla.parametric_functions.binary_connect_convolution', 
'PF.binary_connect_convolution', (['image', '(16)', '(5, 5)'], {}), '(image, 16, (5, 5))\n', (3788, 3807), True, 'import nnabla.parametric_functions as PF\n'), ((3821, 3868), 'nnabla.parametric_functions.batch_normalization', 'PF.batch_normalization', (['c1'], {'batch_stat': '(not test)'}), '(c1, batch_stat=not test)\n', (3843, 3868), True, 'import nnabla.parametric_functions as PF\n'), ((3936, 3963), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""conv2"""'], {}), "('conv2')\n", (3954, 3963), True, 'import nnabla as nn\n'), ((3978, 4023), 'nnabla.parametric_functions.binary_connect_convolution', 'PF.binary_connect_convolution', (['c1', '(16)', '(5, 5)'], {}), '(c1, 16, (5, 5))\n', (4007, 4023), True, 'import nnabla.parametric_functions as PF\n'), ((4037, 4084), 'nnabla.parametric_functions.batch_normalization', 'PF.batch_normalization', (['c2'], {'batch_stat': '(not test)'}), '(c2, batch_stat=not test)\n', (4059, 4084), True, 'import nnabla.parametric_functions as PF\n'), ((4152, 4177), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""fc3"""'], {}), "('fc3')\n", (4170, 4177), True, 'import nnabla as nn\n'), ((4192, 4224), 'nnabla.parametric_functions.binary_connect_affine', 'PF.binary_connect_affine', (['c2', '(50)'], {}), '(c2, 50)\n', (4216, 4224), True, 'import nnabla.parametric_functions as PF\n'), ((4238, 4285), 'nnabla.parametric_functions.batch_normalization', 'PF.batch_normalization', (['c3'], {'batch_stat': '(not test)'}), '(c3, batch_stat=not test)\n', (4260, 4285), True, 'import nnabla.parametric_functions as PF\n'), ((4299, 4316), 'nnabla.functions.binary_tanh', 'F.binary_tanh', (['c3'], {}), '(c3)\n', (4312, 4316), True, 'import nnabla.functions as F\n'), ((4326, 4351), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""fc4"""'], {}), "('fc4')\n", (4344, 4351), True, 'import nnabla as nn\n'), ((4366, 4398), 'nnabla.parametric_functions.binary_connect_affine', 'PF.binary_connect_affine', (['c3', '(10)'], {}), '(c3, 10)\n', (4390, 4398), 
True, 'import nnabla.parametric_functions as PF\n'), ((4412, 4459), 'nnabla.parametric_functions.batch_normalization', 'PF.batch_normalization', (['c4'], {'batch_stat': '(not test)'}), '(c4, batch_stat=not test)\n', (4434, 4459), True, 'import nnabla.parametric_functions as PF\n'), ((4633, 4679), 'nnabla.parametric_functions.batch_normalization', 'PF.batch_normalization', (['x'], {'batch_stat': '(not test)'}), '(x, batch_stat=not test)\n', (4655, 4679), True, 'import nnabla.parametric_functions as PF\n'), ((5293, 5313), 'nnabla.functions.binary_tanh', 'F.binary_tanh', (['(x + h)'], {}), '(x + h)\n', (5306, 5313), True, 'import nnabla.functions as F\n'), ((5352, 5379), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""conv1"""'], {}), "('conv1')\n", (5370, 5379), True, 'import nnabla as nn\n'), ((5913, 5945), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""classifier"""'], {}), "('classifier')\n", (5931, 5945), True, 'import nnabla as nn\n'), ((6183, 6210), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""conv1"""'], {}), "('conv1')\n", (6201, 6210), True, 'import nnabla as nn\n'), ((6225, 6272), 'nnabla.parametric_functions.binary_weight_convolution', 'PF.binary_weight_convolution', (['image', '(16)', '(5, 5)'], {}), '(image, 16, (5, 5))\n', (6253, 6272), True, 'import nnabla.parametric_functions as PF\n'), ((6332, 6359), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""conv2"""'], {}), "('conv2')\n", (6350, 6359), True, 'import nnabla as nn\n'), ((6374, 6418), 'nnabla.parametric_functions.binary_weight_convolution', 'PF.binary_weight_convolution', (['c1', '(16)', '(5, 5)'], {}), '(c1, 16, (5, 5))\n', (6402, 6418), True, 'import nnabla.parametric_functions as PF\n'), ((6478, 6503), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""fc3"""'], {}), "('fc3')\n", (6496, 6503), True, 'import nnabla as nn\n'), ((6566, 6591), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""fc4"""'], {}), "('fc4')\n", (6584, 6591), True, 'import nnabla as 
nn\n'), ((6606, 6637), 'nnabla.parametric_functions.binary_weight_affine', 'PF.binary_weight_affine', (['c3', '(10)'], {}), '(c3, 10)\n', (6629, 6637), True, 'import nnabla.parametric_functions as PF\n'), ((6826, 6872), 'nnabla.parametric_functions.batch_normalization', 'PF.batch_normalization', (['x'], {'batch_stat': '(not test)'}), '(x, batch_stat=not test)\n', (6848, 6872), True, 'import nnabla.parametric_functions as PF\n'), ((7467, 7479), 'nnabla.functions.elu', 'F.elu', (['(x + h)'], {}), '(x + h)\n', (7472, 7479), True, 'import nnabla.functions as F\n'), ((7518, 7545), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""conv1"""'], {}), "('conv1')\n", (7536, 7545), True, 'import nnabla as nn\n'), ((8070, 8102), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""classifier"""'], {}), "('classifier')\n", (8088, 8102), True, 'import nnabla as nn\n'), ((8116, 8147), 'nnabla.parametric_functions.binary_weight_affine', 'PF.binary_weight_affine', (['pl', '(10)'], {}), '(pl, 10)\n', (8139, 8147), True, 'import nnabla.parametric_functions as PF\n'), ((10375, 10411), 'nnabla.functions.softmax_cross_entropy', 'F.softmax_cross_entropy', (['pred', 'label'], {}), '(pred, label)\n', (10398, 10411), True, 'import nnabla.functions as F\n'), ((10734, 10753), 'nnabla.get_parameters', 'nn.get_parameters', ([], {}), '()\n', (10751, 10753), True, 'import nnabla as nn\n'), ((10911, 10951), '_checkpoint_nnp_util.load_checkpoint', 'load_checkpoint', (['args.checkpoint', 'solver'], {}), '(args.checkpoint, solver)\n', (10926, 10951), False, 'from _checkpoint_nnp_util import save_checkpoint, load_checkpoint, save_nnp\n'), ((1492, 1521), 'nnabla.functions.average_pooling', 'F.average_pooling', (['c1', '(2, 2)'], {}), '(c1, (2, 2))\n', (1509, 1521), True, 'import nnabla.functions as F\n'), ((1700, 1729), 'nnabla.functions.average_pooling', 'F.average_pooling', (['c2', '(2, 2)'], {}), '(c2, (2, 2))\n', (1717, 1729), True, 'import nnabla.functions as F\n'), ((2329, 2354), 
'nnabla.parameter_scope', 'nn.parameter_scope', (['scope'], {}), '(scope)\n', (2347, 2354), True, 'import nnabla as nn\n'), ((3514, 3546), 'nnabla.parametric_functions.binary_connect_affine', 'PF.binary_connect_affine', (['pl', '(10)'], {}), '(pl, 10)\n', (3538, 3546), True, 'import nnabla.parametric_functions as PF\n'), ((3896, 3925), 'nnabla.functions.average_pooling', 'F.average_pooling', (['c1', '(2, 2)'], {}), '(c1, (2, 2))\n', (3913, 3925), True, 'import nnabla.functions as F\n'), ((4112, 4141), 'nnabla.functions.average_pooling', 'F.average_pooling', (['c2', '(2, 2)'], {}), '(c2, (2, 2))\n', (4129, 4141), True, 'import nnabla.functions as F\n'), ((4745, 4770), 'nnabla.parameter_scope', 'nn.parameter_scope', (['scope'], {}), '(scope)\n', (4763, 4770), True, 'import nnabla as nn\n'), ((5962, 5994), 'nnabla.parametric_functions.binary_connect_affine', 'PF.binary_connect_affine', (['pl', '(10)'], {}), '(pl, 10)\n', (5986, 5994), True, 'import nnabla.parametric_functions as PF\n'), ((6292, 6321), 'nnabla.functions.average_pooling', 'F.average_pooling', (['c1', '(2, 2)'], {}), '(c1, (2, 2))\n', (6309, 6321), True, 'import nnabla.functions as F\n'), ((6438, 6467), 'nnabla.functions.average_pooling', 'F.average_pooling', (['c2', '(2, 2)'], {}), '(c2, (2, 2))\n', (6455, 6467), True, 'import nnabla.functions as F\n'), ((6524, 6555), 'nnabla.parametric_functions.binary_weight_affine', 'PF.binary_weight_affine', (['c2', '(50)'], {}), '(c2, 50)\n', (6547, 6555), True, 'import nnabla.parametric_functions as PF\n'), ((6938, 6963), 'nnabla.parameter_scope', 'nn.parameter_scope', (['scope'], {}), '(scope)\n', (6956, 6963), True, 'import nnabla as nn\n'), ((12063, 12111), '_checkpoint_nnp_util.save_checkpoint', 'save_checkpoint', (['args.model_save_path', 'i', 'solver'], {}), '(args.model_save_path, i, solver)\n', (12078, 12111), False, 'from _checkpoint_nnp_util import save_checkpoint, load_checkpoint, save_nnp\n'), ((2373, 2400), 'nnabla.parameter_scope', 
'nn.parameter_scope', (['"""conv1"""'], {}), "('conv1')\n", (2391, 2400), True, 'import nnabla as nn\n'), ((2536, 2563), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""conv2"""'], {}), "('conv2')\n", (2554, 2563), True, 'import nnabla as nn\n'), ((2711, 2738), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""conv3"""'], {}), "('conv3')\n", (2729, 2738), True, 'import nnabla as nn\n'), ((2976, 3053), 'nnabla.parametric_functions.binary_connect_convolution', 'PF.binary_connect_convolution', (['image', '(64)', '(3, 3)'], {'pad': '(3, 3)', 'with_bias': '(False)'}), '(image, 64, (3, 3), pad=(3, 3), with_bias=False)\n', (3005, 3053), True, 'import nnabla.parametric_functions as PF\n'), ((4789, 4816), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""conv1"""'], {}), "('conv1')\n", (4807, 4816), True, 'import nnabla as nn\n'), ((4960, 4987), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""conv2"""'], {}), "('conv2')\n", (4978, 4987), True, 'import nnabla as nn\n'), ((5143, 5170), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""conv3"""'], {}), "('conv3')\n", (5161, 5170), True, 'import nnabla as nn\n'), ((5424, 5501), 'nnabla.parametric_functions.binary_connect_convolution', 'PF.binary_connect_convolution', (['image', '(64)', '(3, 3)'], {'pad': '(3, 3)', 'with_bias': '(False)'}), '(image, 64, (3, 3), pad=(3, 3), with_bias=False)\n', (5453, 5501), True, 'import nnabla.parametric_functions as PF\n'), ((6982, 7009), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""conv1"""'], {}), "('conv1')\n", (7000, 7009), True, 'import nnabla as nn\n'), ((7144, 7171), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""conv2"""'], {}), "('conv2')\n", (7162, 7171), True, 'import nnabla as nn\n'), ((7318, 7345), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""conv3"""'], {}), "('conv3')\n", (7336, 7345), True, 'import nnabla as nn\n'), ((7582, 7658), 'nnabla.parametric_functions.binary_weight_convolution', 'PF.binary_weight_convolution', (['image', 
'(64)', '(3, 3)'], {'pad': '(3, 3)', 'with_bias': '(False)'}), '(image, 64, (3, 3), pad=(3, 3), with_bias=False)\n', (7610, 7658), True, 'import nnabla.parametric_functions as PF\n'), ((2763, 2823), 'nnabla.parametric_functions.binary_connect_convolution', 'PF.binary_connect_convolution', (['h', 'C', '(1, 1)'], {'with_bias': '(False)'}), '(h, C, (1, 1), with_bias=False)\n', (2792, 2823), True, 'import nnabla.parametric_functions as PF\n'), ((5195, 5255), 'nnabla.parametric_functions.binary_connect_convolution', 'PF.binary_connect_convolution', (['h', 'C', '(1, 1)'], {'with_bias': '(False)'}), '(h, C, (1, 1), with_bias=False)\n', (5224, 5255), True, 'import nnabla.parametric_functions as PF\n'), ((7370, 7429), 'nnabla.parametric_functions.binary_weight_convolution', 'PF.binary_weight_convolution', (['h', 'C', '(1, 1)'], {'with_bias': '(False)'}), '(h, C, (1, 1), with_bias=False)\n', (7398, 7429), True, 'import nnabla.parametric_functions as PF\n'), ((2431, 2495), 'nnabla.parametric_functions.binary_connect_convolution', 'PF.binary_connect_convolution', (['x', '(C / 2)', '(1, 1)'], {'with_bias': '(False)'}), '(x, C / 2, (1, 1), with_bias=False)\n', (2460, 2495), True, 'import nnabla.parametric_functions as PF\n'), ((2615, 2691), 'nnabla.parametric_functions.binary_connect_convolution', 'PF.binary_connect_convolution', (['h', '(C / 2)', '(3, 3)'], {'pad': '(1, 1)', 'with_bias': '(False)'}), '(h, C / 2, (3, 3), pad=(1, 1), with_bias=False)\n', (2644, 2691), True, 'import nnabla.parametric_functions as PF\n'), ((4855, 4919), 'nnabla.parametric_functions.binary_connect_convolution', 'PF.binary_connect_convolution', (['x', '(C / 2)', '(1, 1)'], {'with_bias': '(False)'}), '(x, C / 2, (1, 1), with_bias=False)\n', (4884, 4919), True, 'import nnabla.parametric_functions as PF\n'), ((5047, 5123), 'nnabla.parametric_functions.binary_connect_convolution', 'PF.binary_connect_convolution', (['h', '(C / 2)', '(3, 3)'], {'pad': '(1, 1)', 'with_bias': '(False)'}), '(h, C / 2, (3, 3), 
pad=(1, 1), with_bias=False)\n', (5076, 5123), True, 'import nnabla.parametric_functions as PF\n'), ((7040, 7103), 'nnabla.parametric_functions.binary_weight_convolution', 'PF.binary_weight_convolution', (['x', '(C / 2)', '(1, 1)'], {'with_bias': '(False)'}), '(x, C / 2, (1, 1), with_bias=False)\n', (7068, 7103), True, 'import nnabla.parametric_functions as PF\n'), ((7223, 7298), 'nnabla.parametric_functions.binary_weight_convolution', 'PF.binary_weight_convolution', (['h', '(C / 2)', '(3, 3)'], {'pad': '(1, 1)', 'with_bias': '(False)'}), '(h, C / 2, (3, 3), pad=(1, 1), with_bias=False)\n', (7251, 7298), True, 'import nnabla.parametric_functions as PF\n')]
|
import numpy as np
import pandas as pd
from glob import glob
import matplotlib.pyplot as plt
'''
turbine-08_helihoist-1_tom_acc-vel-pos_hammerhead_2019-10-14-07-55-52_2019-10-15-06-10-33
turbine-08_helihoist-1_tom_geometry_hammerhead_2019-10-14-07-55-52_2019-10-15-06-10-33
turbine-08_sbitroot_tom_acc-vel-pos_hammerhead_2019-10-14-10-49-53_2019-10-15-06-18-48
turbine-08_sbittip_tom_acc-vel-pos_hammerhead_2019-10-14-10-45-39_2019-10-15-06-08-27
turbine-08_helihoist-1_tom_acc-vel-pos_sbi1_2019-10-15-06-10-33_2019-10-15-07-30-26
turbine-08_sbitroot_tom_acc-vel-pos_sbi1_2019-10-15-06-18-48_2019-10-15-07-40-56
turbine-08_sbittip_tom_acc-vel-pos_sbi1_2019-10-15-06-08-27_2019-10-15-07-57-03
turbine-08_helihoist-1_tom_acc-vel-pos_sbi2_2019-10-15-14-21-36_2019-10-15-15-13-04
turbine-08_sbitroot_tom_acc-vel-pos_sbi2_2019-10-15-14-10-47_2019-10-15-15-05-07
turbine-08_sbittip_tom_acc-vel-pos_sbi2_2019-10-15-14-17-49_2019-10-15-15-09-25
turbine-08_helihoist-1_tom_acc-vel-pos_tnhb1_2019-10-15-07-30-26_2019-10-15-14-21-36
turbine-08_helihoist-1_tom_geometry_tnhb1_2019-10-15-07-30-26_2019-10-15-14-21-36
turbine-08_sbitroot_tom_acc-vel-pos_tnhb1_2019-10-15-07-40-56_2019-10-15-14-10-47
turbine-08_sbittip_tom_acc-vel-pos_tnhb1_2019-10-15-07-57-03_2019-10-15-14-17-49
turbine-08_helihoist-1_tom_acc-vel-pos_tnhb2_2019-10-15-15-13-04_2019-10-15-22-19-59
wmb-sued-2019-10-14
wmb-sued-2019-10-15
lidar_2019_10_14
lidar_2019_10_15
komplett
'''
# Collect the per-stage turbine-08 sensor CSV paths (sorted for a stable,
# reproducible order) and start filling `data` with all sensor dataframes.
hammerhead = sorted(glob('Daten/hammerhead/hammerhead/turbine-08**.csv'))
sbi1 = sorted(glob('Daten/sbi1/sbi1/turbine-08**.csv'))
sbi2 = sorted(glob('Daten/sbi2/sbi2/turbine-08**.csv'))
tnhb1 = sorted(glob('Daten/tnhb1/tnhb1/turbine-08**.csv'))
tnhb2 = sorted(glob('Daten/tnhb2/tnhb2/turbine-08**.csv'))
#wmb = "wmb-sued-2019-9-22"
#lidar = "lidar_2019_09_22"
data = []
# Hammerhead stage: helihoist telemetry and geometry plus SBIT root/tip.
helihoist_tele_hammerhead = pd.read_csv(hammerhead[0], delimiter=',')
helihoist_geo_hammerhead = pd.read_csv(hammerhead[1], delimiter=',')
sbitroot_hammerhead = pd.read_csv(hammerhead[2], delimiter=',')
sbitip_hammerhead = pd.read_csv(hammerhead[3], delimiter=',')
data.extend([helihoist_tele_hammerhead, helihoist_geo_hammerhead,
             sbitroot_hammerhead, sbitip_hammerhead])
# sbi1 stage: helihoist plus SBIT root/tip.
helihoist_sbi1 = pd.read_csv(sbi1[0], delimiter=',')
sbiroot_sbi1 = pd.read_csv(sbi1[1], delimiter=',')
sbitip_sbi1 = pd.read_csv(sbi1[2], delimiter=',')
data.extend([helihoist_sbi1, sbiroot_sbi1, sbitip_sbi1])
# sbi2 stage: helihoist plus SBIT root/tip.
helihoist_sbi2 = pd.read_csv(sbi2[0], delimiter=',')
sbiroot_sbi2 = pd.read_csv(sbi2[1], delimiter=',')
sbitip_sbi2 = pd.read_csv(sbi2[2], delimiter=',')
data.extend([helihoist_sbi2, sbiroot_sbi2, sbitip_sbi2])
# tnhb1 stage: includes an extra helihoist geometry file.
helihoist_tnhb1 = pd.read_csv(tnhb1[0], delimiter=',')
helihoist_geo_tnhb1 = pd.read_csv(tnhb1[1], delimiter=',')
sbiroot_tnhb1 = pd.read_csv(tnhb1[2], delimiter=',')
sbitip_tnhb1 = pd.read_csv(tnhb1[3], delimiter=',')
data.extend([helihoist_tnhb1, helihoist_geo_tnhb1, sbiroot_tnhb1, sbitip_tnhb1])
# tnhb2 stage: helihoist only.
helihoist_tnhb2 = pd.read_csv(tnhb2[0], delimiter=',')
data.append(helihoist_tnhb2)
# Wave-buoy (wmb) records, whitespace-delimited, one file per day.
wmb1 = pd.read_csv('environment/environment/waves/wmb-sued/wmb-sued_2019-10-14.csv', delimiter=' ')
wmb2 = pd.read_csv('environment/environment/waves/wmb-sued/wmb-sued_2019-10-15.csv', delimiter=' ')
wmb_all = [wmb1, wmb2]
# Lidar wind records, one file per day.
lidar1 = pd.read_csv('environment/environment/wind/lidar/lidar_2019-10-14.csv', delimiter=' ')
lidar2 = pd.read_csv('environment/environment/wind/lidar/lidar_2019-10-15.csv', delimiter=' ')
# NOTE(review): the raw lidar frames are also pushed into `data`, so they
# additionally receive the generic per-frame timestamp/resample treatment
# further below — confirm this double handling is intended.
data.append(lidar1), data.append(lidar2)
# Fixed: the original initialized and filled `lidar_all` twice in a row
# (an exact duplicate of the init/append pair with no net effect); the
# dead duplicate has been removed.
lidar_all = [lidar1, lidar2]
# Wave-buoy column names (the raw daily files ship without a usable header).
WMB_COLUMNS = (
    'epoch', 'Tp', 'Dirp', 'Sprp', 'Tz', 'Hm0', 'TI', 'T1', 'Tc', 'Tdw2', 'Tdw1', 'Tpc', 'nu', 'eps', 'QP', 'Ss',
    'TRef', 'TSea', 'Bat', 'Percentage', 'Hmax', 'Tmax', 'H(1/10)', 'T(1/10)', 'H(1/3)', 'T(1/3)', 'Hav', 'Tav', 'Eps',
    '#Waves')
# Rename every daily frame *before* concatenation so the frames align on
# identical column labels.
buffer1 = []
for daily_wmb in wmb_all:
    daily_wmb.columns = WMB_COLUMNS
    buffer1.append(daily_wmb)
# Stack the daily frames into one continuous wave record.  The original
# re-assigned the identical column names a second time after the concat;
# that redundant assignment has been removed.
wmb = pd.concat(buffer1, axis=0)
# Lidar column layout: epoch, then (speed, direction, corrected direction,
# height) for measurement levels 0..10, then the device heading.
LIDAR_COLUMNS = ('epoch', 'wind_speed_0', 'wind_dir_0', 'wind_dir_0_corr', 'height_0', 'wind_speed_1', 'wind_dir_1',
                 'wind_dir_1_corr', 'height_1', 'wind_speed_2', 'wind_dir_2', 'wind_dir_2_corr', 'height_2',
                 'wind_speed_3', 'wind_dir_3', 'wind_dir_3_corr', 'height_3', 'wind_speed_4', 'wind_dir_4',
                 'wind_dir_4_corr', 'height_4', 'wind_speed_5', 'wind_dir_5', 'wind_dir_5_corr', 'height_5',
                 'wind_speed_6', 'wind_dir_6', 'wind_dir_6_corr', 'height_6', 'wind_speed_7', 'wind_dir_7',
                 'wind_dir_7_corr', 'height_7', 'wind_speed_8', 'wind_dir_8', 'wind_dir_8_corr', 'height_8',
                 'wind_speed_9', 'wind_dir_9', 'wind_dir_9_corr', 'height_9', 'wind_speed_10', 'wind_dir_10',
                 'wind_dir_10_corr', 'height_10', 'heading')
# Rename every daily frame *before* concatenation so the frames align.
buffer2 = []
for daily_lidar in lidar_all:
    daily_lidar.columns = LIDAR_COLUMNS
    buffer2.append(daily_lidar)
# Stack the daily lidar frames.  The original re-assigned the identical
# column names again after the concat; that redundant assignment has been
# removed.
lidar = pd.concat(buffer2, axis=0)
# Index the wave record by wall-clock time and resample to a 3-second grid.
# pd.Timestamp.fromtimestamp interprets raw epoch seconds in the *local*
# timezone — same behaviour as the original index loop.
wmb['epoch'] = [pd.Timestamp.fromtimestamp(t) for t in wmb['epoch']]
wmb.index = wmb['epoch']
del wmb['epoch']
# Left-labelled 3 s bins, mean-aggregated, forward-filled.
# NOTE(review): the trailing "/ 1800" divides *every* wave column by 1800 —
# presumably a unit/scaling conversion; confirm it is intentional.
# (The original's trailing no-op statement "wmb = wmb" has been dropped.)
wmb = wmb.resample('3S', label='left').mean().pad() / 1800
# Index the lidar record by wall-clock time and resample to a 3-second grid.
# pd.Timestamp.fromtimestamp interprets raw epoch seconds in the *local*
# timezone — same behaviour as the original index loop.
lidar['epoch'] = [pd.Timestamp.fromtimestamp(t) for t in lidar['epoch']]
lidar.index = lidar['epoch']
del lidar['epoch']
# Left-labelled 3 s bins, mean-aggregated, forward-filled.
# (The original's trailing no-op statement "lidar = lidar" has been dropped.)
lidar = lidar.resample('3S', label='left').mean().pad()
# Generate timestamps for every sensor dataframe and resample each one to the
# common 3-second grid (replaces the original manual `counter` bookkeeping
# with enumerate, and the index loop with a comprehension).
for frame_idx, df in enumerate(data):
    # The first column holds raw epoch seconds; convert to local-time
    # Timestamps (pd.Timestamp.fromtimestamp uses the local timezone, as the
    # original loop did) and promote them to the index.
    df['epoch'] = [pd.Timestamp.fromtimestamp(t) for t in df.iloc[:, 0]]
    df.index = df['epoch']
    del df['epoch']
    data[frame_idx] = df.resample('3S', label='left').mean().pad()
'''
plt.plot(wmb.index, wmb['#Waves'])
plt.title('#Waves')
plt.xlabel('time')
plt.xticks(rotation= 90)
plt.show()
plt.plot(lidar.index, lidar['wind_speed_7'])
plt.title('wind_speed_7')
plt.xlabel('time')
plt.xticks(rotation= 90)
plt.show()
plt.plot(lidar.index, lidar['wind_dir_7_corr'])
plt.title('wind_dir_7_corr')
plt.xlabel('time')
plt.xticks(rotation= 90)
plt.show()
'''
'''
# generating hammerhead file
#10:49:53 06:08:27
for i in range(4):
data[i] = data[i]['2019-10-14 10:49:53': '2019-10-15 06:08:27']
transition_wmb =wmb['2019-10-14 10:49:53': '2019-10-15 06:08:27']
transition_lidar = lidar['2019-10-14 10:49:53': '2019-10-15 06:08:27']
result =pd.concat([data[0],data[1],data[2],data[3], transition_lidar, transition_wmb], axis=1 )
result.to_csv('Results_preprocessing/turbine08/hammerhead_turbine08.csv')
#generating sbi1 file
#06:18:48 07:30:26
for i in range(4,7):
data[i] = data[i]['2019-10-15 06:18:48': '2019-10-15 07:30:26']
transition_wmb =wmb['2019-10-15 06:18:48': '2019-10-15 07:30:26']
transition_lidar = lidar['2019-10-15 06:18:48': '2019-10-15 07:30:26']
result =pd.concat([data[4],data[5],data[6], transition_lidar, transition_wmb], axis=1 )
result.to_csv('Results_preprocessing/turbine08/sbi1_turbine08.csv')
#generating sbi2 file
#14:21:36 15:05:07
for i in range(7,10):
data[i] = data[i]['2019-10-15 14:21:36': '2019-10-15 15:05:07']
transition_wmb =wmb['2019-10-15 14:21:36': '2019-10-15 15:05:07']
transition_lidar = lidar['2019-10-15 14:21:36': '2019-10-15 15:05:07']
result =pd.concat([data[7],data[8],data[9], transition_lidar, transition_wmb], axis=1 )
result.to_csv('Results_preprocessing/turbine08/sbi2_turbine08.csv')
#generating tnhb1 file
#07:57:03 14:10:47
for i in range(10,14):
data[i] = data[i]['2019-10-15 07:57:03': '2019-10-15 14:10:47']
transition_wmb =wmb['2019-10-15 07:57:03': '2019-10-15 14:10:47']
transition_lidar = lidar['2019-10-15 07:57:03': '2019-10-15 14:10:47']
result =pd.concat([data[10],data[11],data[12],data[13], transition_lidar, transition_wmb], axis=1 )
result.to_csv('Results_preprocessing/turbine08/tnhb1_turbine08.csv')
#generating tnhb2 file
#15:13:04 22:19:59
for i in range(14,15):
data[i] = data[i]['2019-10-15 15:13:04': '2019-10-15 22:19:59']
transition_wmb =wmb['2019-10-15 15:13:04': '2019-10-15 22:19:59']
transition_lidar = lidar['2019-10-15 15:13:04': '2019-10-15 22:19:59']
result =pd.concat([data[14], transition_lidar, transition_wmb], axis=1 )
result.to_csv('Results_preprocessing/turbine08/tnhb2_turbine08.csv')
'''
'''
files to extract
14.10.2019 07:55:52 15.10.2019 06:10:33
15.10.2019 07:30:26 15.10.2019 14:21:36
'''
print(data[1].index[0])
print(data[1].index[-1])
data[1] = data[1]['2019-10-14 09:55:53': '2019-10-15 08:10:27']
transition_wmb =wmb['2019-10-14 09:55:53': '2019-10-15 08:10:27']
transition_lidar = lidar['2019-10-14 09:55:53': '2019-10-15 08:10:27']
result = pd.concat([data[1], transition_lidar, transition_wmb], axis=1)
del result['max_deflection_i']
del result['ddt_max_deflection']
del result['eccentricity']
del result['ddt_axis_ratio']
del result['ddt_eccentricity']
del result['axis_angle_signed']
del result['axis_angle_unsigned']
del result['axis_azimuth']
del result['ddt_axis_angle_signed']
del result['ddt_axis_angle_unsigned']
del result['p2p_angle_unsigned']
del result['p2p_angle_signed']
del result['p2p_azimuth']
del result['ddt_p2p_azimuth_unwrapped']
del result['ddt_p2p_azimuth']
del result['ddt_p2p_angle_unsigned']
del result['ddt_p2p_angle_signed']
del result['wind_speed_0']
del result['wind_dir_0']
del result['wind_dir_0_corr']
del result['height_0']
del result['wind_speed_1']
del result['wind_dir_1']
del result['wind_dir_1_corr']
del result['height_1']
del result['wind_speed_2']
del result['wind_dir_2']
del result['wind_dir_2_corr']
del result['height_2']
del result['wind_dir_3']
del result['height_3']
del result['wind_speed_4']
del result['wind_dir_4']
del result['wind_dir_4_corr']
del result['height_4']
del result['wind_speed_5']
del result['wind_dir_5']
del result['wind_dir_5_corr']
del result['height_5']
del result['wind_speed_6']
del result['wind_dir_6']
del result['wind_dir_6_corr']
del result['height_6']
del result['wind_speed_7']
del result['wind_dir_7']
del result['wind_dir_7_corr']
del result['height_7']
del result['wind_speed_8']
del result['wind_dir_8']
del result['wind_dir_8_corr']
del result['height_8']
del result['wind_speed_9']
del result['wind_dir_9']
del result['wind_dir_9_corr']
del result['height_9']
del result['wind_speed_10']
del result['wind_dir_10']
del result['wind_dir_10_corr']
del result['height_10']
del result['heading']
del result['Tp']
del result['Sprp']
del result['Tz']
del result['Hm0']
del result['TI']
del result['T1']
del result['Tc']
del result['Tdw2']
del result['Tdw1']
del result['Tpc']
del result['nu']
del result['eps']
del result['QP']
del result['Ss']
del result['TRef']
del result['Bat']
del result['Percentage']
del result['H(1/10)']
del result['T(1/10)']
del result['H(1/3)']
del result['T(1/3)']
del result['Eps']
del result['#Waves']
result.to_csv('Results_preprocessing/geometry_files/hammerhead_turbine08.csv')
print(data[11].index[0])
print(data[11].index[-1])
data[11] = data[11]['2019-10-15 09:30:27': '2019-10-15 16:21:31']
transition_wmb =wmb['2019-10-15 09:30:27': '2019-10-15 16:21:31']
transition_lidar = lidar['2019-10-15 09:30:27': '2019-10-15 16:21:31']
result = pd.concat([data[11], transition_lidar, transition_wmb], axis=1)
del result['max_deflection_i']
del result['ddt_max_deflection']
del result['eccentricity']
del result['ddt_axis_ratio']
del result['ddt_eccentricity']
del result['axis_angle_signed']
del result['axis_angle_unsigned']
del result['axis_azimuth']
del result['ddt_axis_angle_signed']
del result['ddt_axis_angle_unsigned']
del result['p2p_angle_unsigned']
del result['p2p_angle_signed']
del result['p2p_azimuth']
del result['ddt_p2p_azimuth_unwrapped']
del result['ddt_p2p_azimuth']
del result['ddt_p2p_angle_unsigned']
del result['ddt_p2p_angle_signed']
del result['wind_speed_0']
del result['wind_dir_0']
del result['wind_dir_0_corr']
del result['height_0']
del result['wind_speed_1']
del result['wind_dir_1']
del result['wind_dir_1_corr']
del result['height_1']
del result['wind_speed_2']
del result['wind_dir_2']
del result['wind_dir_2_corr']
del result['height_2']
del result['wind_dir_3']
del result['height_3']
del result['wind_speed_4']
del result['wind_dir_4']
del result['wind_dir_4_corr']
del result['height_4']
del result['wind_speed_5']
del result['wind_dir_5']
del result['wind_dir_5_corr']
del result['height_5']
del result['wind_speed_6']
del result['wind_dir_6']
del result['wind_dir_6_corr']
del result['height_6']
del result['wind_speed_7']
del result['wind_dir_7']
del result['wind_dir_7_corr']
del result['height_7']
del result['wind_speed_8']
del result['wind_dir_8']
del result['wind_dir_8_corr']
del result['height_8']
del result['wind_speed_9']
del result['wind_dir_9']
del result['wind_dir_9_corr']
del result['height_9']
del result['wind_speed_10']
del result['wind_dir_10']
del result['wind_dir_10_corr']
del result['height_10']
del result['heading']
del result['Tp']
del result['Sprp']
del result['Tz']
del result['Hm0']
del result['TI']
del result['T1']
del result['Tc']
del result['Tdw2']
del result['Tdw1']
del result['Tpc']
del result['nu']
del result['eps']
del result['QP']
del result['Ss']
del result['TRef']
del result['Bat']
del result['Percentage']
del result['H(1/10)']
del result['T(1/10)']
del result['H(1/3)']
del result['T(1/3)']
del result['Eps']
del result['#Waves']
result.to_csv('Results_preprocessing/geometry_files/tnhb1_turbine08.csv')
|
[
"pandas.read_csv",
"pandas.Timestamp.fromtimestamp",
"pandas.concat",
"glob.glob"
] |
[((1910, 1951), 'pandas.read_csv', 'pd.read_csv', (['hammerhead[0]'], {'delimiter': '""","""'}), "(hammerhead[0], delimiter=',')\n", (1921, 1951), True, 'import pandas as pd\n'), ((1981, 2022), 'pandas.read_csv', 'pd.read_csv', (['hammerhead[1]'], {'delimiter': '""","""'}), "(hammerhead[1], delimiter=',')\n", (1992, 2022), True, 'import pandas as pd\n'), ((2047, 2088), 'pandas.read_csv', 'pd.read_csv', (['hammerhead[2]'], {'delimiter': '""","""'}), "(hammerhead[2], delimiter=',')\n", (2058, 2088), True, 'import pandas as pd\n'), ((2111, 2152), 'pandas.read_csv', 'pd.read_csv', (['hammerhead[3]'], {'delimiter': '""","""'}), "(hammerhead[3], delimiter=',')\n", (2122, 2152), True, 'import pandas as pd\n'), ((2319, 2354), 'pandas.read_csv', 'pd.read_csv', (['sbi1[0]'], {'delimiter': '""","""'}), "(sbi1[0], delimiter=',')\n", (2330, 2354), True, 'import pandas as pd\n'), ((2371, 2406), 'pandas.read_csv', 'pd.read_csv', (['sbi1[1]'], {'delimiter': '""","""'}), "(sbi1[1], delimiter=',')\n", (2382, 2406), True, 'import pandas as pd\n'), ((2423, 2458), 'pandas.read_csv', 'pd.read_csv', (['sbi1[2]'], {'delimiter': '""","""'}), "(sbi1[2], delimiter=',')\n", (2434, 2458), True, 'import pandas as pd\n'), ((2561, 2596), 'pandas.read_csv', 'pd.read_csv', (['sbi2[0]'], {'delimiter': '""","""'}), "(sbi2[0], delimiter=',')\n", (2572, 2596), True, 'import pandas as pd\n'), ((2614, 2649), 'pandas.read_csv', 'pd.read_csv', (['sbi2[1]'], {'delimiter': '""","""'}), "(sbi2[1], delimiter=',')\n", (2625, 2649), True, 'import pandas as pd\n'), ((2666, 2701), 'pandas.read_csv', 'pd.read_csv', (['sbi2[2]'], {'delimiter': '""","""'}), "(sbi2[2], delimiter=',')\n", (2677, 2701), True, 'import pandas as pd\n'), ((2805, 2841), 'pandas.read_csv', 'pd.read_csv', (['tnhb1[0]'], {'delimiter': '""","""'}), "(tnhb1[0], delimiter=',')\n", (2816, 2841), True, 'import pandas as pd\n'), ((2866, 2902), 'pandas.read_csv', 'pd.read_csv', (['tnhb1[1]'], {'delimiter': '""","""'}), "(tnhb1[1], delimiter=',')\n", 
(2877, 2902), True, 'import pandas as pd\n'), ((2921, 2957), 'pandas.read_csv', 'pd.read_csv', (['tnhb1[2]'], {'delimiter': '""","""'}), "(tnhb1[2], delimiter=',')\n", (2932, 2957), True, 'import pandas as pd\n'), ((2975, 3011), 'pandas.read_csv', 'pd.read_csv', (['tnhb1[3]'], {'delimiter': '""","""'}), "(tnhb1[3], delimiter=',')\n", (2986, 3011), True, 'import pandas as pd\n'), ((3151, 3187), 'pandas.read_csv', 'pd.read_csv', (['tnhb2[0]'], {'delimiter': '""","""'}), "(tnhb2[0], delimiter=',')\n", (3162, 3187), True, 'import pandas as pd\n'), ((3228, 3324), 'pandas.read_csv', 'pd.read_csv', (['"""environment/environment/waves/wmb-sued/wmb-sued_2019-10-14.csv"""'], {'delimiter': '""" """'}), "('environment/environment/waves/wmb-sued/wmb-sued_2019-10-14.csv',\n delimiter=' ')\n", (3239, 3324), True, 'import pandas as pd\n'), ((3329, 3425), 'pandas.read_csv', 'pd.read_csv', (['"""environment/environment/waves/wmb-sued/wmb-sued_2019-10-15.csv"""'], {'delimiter': '""" """'}), "('environment/environment/waves/wmb-sued/wmb-sued_2019-10-15.csv',\n delimiter=' ')\n", (3340, 3425), True, 'import pandas as pd\n'), ((3490, 3579), 'pandas.read_csv', 'pd.read_csv', (['"""environment/environment/wind/lidar/lidar_2019-10-14.csv"""'], {'delimiter': '""" """'}), "('environment/environment/wind/lidar/lidar_2019-10-14.csv',\n delimiter=' ')\n", (3501, 3579), True, 'import pandas as pd\n'), ((3586, 3675), 'pandas.read_csv', 'pd.read_csv', (['"""environment/environment/wind/lidar/lidar_2019-10-15.csv"""'], {'delimiter': '""" """'}), "('environment/environment/wind/lidar/lidar_2019-10-15.csv',\n delimiter=' ')\n", (3597, 3675), True, 'import pandas as pd\n'), ((9994, 10056), 'pandas.concat', 'pd.concat', (['[data[1], transition_lidar, transition_wmb]'], {'axis': '(1)'}), '([data[1], transition_lidar, transition_wmb], axis=1)\n', (10003, 10056), True, 'import pandas as pd\n'), ((12507, 12570), 'pandas.concat', 'pd.concat', (['[data[11], transition_lidar, transition_wmb]'], {'axis': 
'(1)'}), '([data[11], transition_lidar, transition_wmb], axis=1)\n', (12516, 12570), True, 'import pandas as pd\n'), ((1530, 1582), 'glob.glob', 'glob', (['"""Daten/hammerhead/hammerhead/turbine-08**.csv"""'], {}), "('Daten/hammerhead/hammerhead/turbine-08**.csv')\n", (1534, 1582), False, 'from glob import glob\n'), ((1598, 1638), 'glob.glob', 'glob', (['"""Daten/sbi1/sbi1/turbine-08**.csv"""'], {}), "('Daten/sbi1/sbi1/turbine-08**.csv')\n", (1602, 1638), False, 'from glob import glob\n'), ((1654, 1694), 'glob.glob', 'glob', (['"""Daten/sbi2/sbi2/turbine-08**.csv"""'], {}), "('Daten/sbi2/sbi2/turbine-08**.csv')\n", (1658, 1694), False, 'from glob import glob\n'), ((1711, 1753), 'glob.glob', 'glob', (['"""Daten/tnhb1/tnhb1/turbine-08**.csv"""'], {}), "('Daten/tnhb1/tnhb1/turbine-08**.csv')\n", (1715, 1753), False, 'from glob import glob\n'), ((1770, 1812), 'glob.glob', 'glob', (['"""Daten/tnhb2/tnhb2/turbine-08**.csv"""'], {}), "('Daten/tnhb2/tnhb2/turbine-08**.csv')\n", (1774, 1812), False, 'from glob import glob\n'), ((4181, 4207), 'pandas.concat', 'pd.concat', (['buffer1'], {'axis': '(0)'}), '(buffer1, axis=0)\n', (4190, 4207), True, 'import pandas as pd\n'), ((5396, 5422), 'pandas.concat', 'pd.concat', (['buffer2'], {'axis': '(0)'}), '(buffer2, axis=0)\n', (5405, 5422), True, 'import pandas as pd\n'), ((6330, 6372), 'pandas.Timestamp.fromtimestamp', 'pd.Timestamp.fromtimestamp', (['wmb.iloc[k, 0]'], {}), '(wmb.iloc[k, 0])\n', (6356, 6372), True, 'import pandas as pd\n'), ((6558, 6602), 'pandas.Timestamp.fromtimestamp', 'pd.Timestamp.fromtimestamp', (['lidar.iloc[k, 0]'], {}), '(lidar.iloc[k, 0])\n', (6584, 6602), True, 'import pandas as pd\n'), ((6875, 6916), 'pandas.Timestamp.fromtimestamp', 'pd.Timestamp.fromtimestamp', (['df.iloc[k, 0]'], {}), '(df.iloc[k, 0])\n', (6901, 6916), True, 'import pandas as pd\n')]
|
import numpy as np
from eb_gridmaker import dtb, config
from eb_gridmaker.utils import aux, multiproc
from elisa import SingleSystem, BinarySystem, Observer, settings
from elisa.base.error import LimbDarkeningError, AtmosphereError, MorphologyError
def spotty_single_system_random_sampling(db_name=None, number_of_samples=1e4):
"""
Producing sample of spotty single system models generated randomly in given parameter space.
:param db_name: str;
:param number_of_samples: int;
:return: None;
"""
if db_name is not None:
config.DATABASE_NAME = db_name
phases = np.linspace(0, 1.0, num=config.N_POINTS, endpoint=False)
# generating IDs of each possible combination
ids = np.arange(0, number_of_samples, dtype=np.int)
dtb.create_ceb_db(config.DATABASE_NAME, config.PARAMETER_COLUMNS_SINGLE, config.PARAMETER_TYPES_SINGLE)
brkpoint = dtb.search_for_breakpoint(config.DATABASE_NAME, ids)
print(f'Breakpoint found {100.0 * brkpoint / number_of_samples:.2f}%: {brkpoint}/{number_of_samples}')
ids = ids[brkpoint:]
args = (phases, number_of_samples, brkpoint, )
multiproc.multiprocess_eval(ids, eval_single_grid_node, args)
def eval_single_grid_node(iden, counter, phases, maxiter, start_index):
"""
Evaluating randomly generated spotty single system model.
:param iden: str; node ID
:param counter: int; current number of already calculeted nodes
:param phases: numpy.array; desired phases of observations
:param maxiter: int; total number of nodes in this batch
:param start_index: int; number of iterations already calculated before interruption
:return: None
"""
aug_counter = counter + start_index
print(f'Processing node: {aug_counter}/{maxiter}, {100.0 * aug_counter / maxiter:.2f}%')
while True:
params = aux.draw_single_star_params()
try:
s = SingleSystem.from_json(params)
except ValueError as e:
continue
o = Observer(passband=config.PASSBANDS, system=s)
try:
o.lc(phases=phases, normalize=True)
# o.plot.lc()
except (LimbDarkeningError, AtmosphereError) as e:
# print(f'Parameters: {params} produced system outside grid coverage.')
continue
dtb.insert_observation(
config.DATABASE_NAME, o, iden, config.PARAMETER_COLUMNS_SINGLE, config.PARAMETER_TYPES_SINGLE
)
break
def eval_eccentric_random_sample(iden, counter, phases, maxiter, start_index):
np.random.seed()
while True:
args = aux.draw_eccentric_system_params()
params = aux.assign_eccentric_system_params(*args)
try:
bs = BinarySystem.from_json(params)
except MorphologyError as e:
# print(e)
continue
try:
setattr(bs, 'inclination', np.radians(aux.draw_inclination(binary=bs)))
bs.init()
o = Observer(passband=config.PASSBANDS, system=bs)
except Exception as e:
raise ValueError(e)
try:
o.lc(phases=phases, normalize=True)
# o.plot.lc()
except (LimbDarkeningError, AtmosphereError) as e:
# print(f'Parameters: {params} produced system outside grid coverage.')
continue
dtb.insert_observation(
config.DATABASE_NAME, o, iden, config.PARAMETER_COLUMNS_ECCENTRIC, config.PARAMETER_TYPES_ECCENTRIC
)
aug_counter = counter + start_index + 1
print(f'Node processed: {aug_counter}/{maxiter}, {100.0 * aug_counter / maxiter:.2f}%')
break
def eccentric_system_random_sampling(db_name=None, number_of_samples=1e4):
if db_name is not None:
config.DATABASE_NAME = db_name
phases = np.linspace(0, 1.0, num=config.N_POINTS, endpoint=False)
# generating IDs of each possible combination
ids = np.arange(0, number_of_samples, dtype=np.int)
dtb.create_ceb_db(config.DATABASE_NAME, config.PARAMETER_COLUMNS_ECCENTRIC, config.PARAMETER_TYPES_ECCENTRIC)
brkpoint = dtb.search_for_breakpoint(config.DATABASE_NAME, ids)
print(f'Breakpoint found {100.0 * brkpoint / number_of_samples:.2f}%: {brkpoint}/{number_of_samples}')
ids = ids[brkpoint:]
args = (phases, number_of_samples, brkpoint,)
multiproc.multiprocess_eval(ids, eval_eccentric_random_sample, args)
def random_sampling(db_name=None, desired_morphology='all', number_of_samples=1e4):
"""
:param db_name: str; path to the database
:param desired_morphology: string; `all`, `detached` - detached binaries on circular orbit, `overcontact`,
`single_spotty`, `eccentric`
:param number_of_samples: int; number of samples for random sampling
:return:
"""
if desired_morphology in ['detached', 'overcontact', 'circular']:
raise NotImplementedError('Random sampling on circular binaries is not yet implemented. '
'Try grid sampling method.')
elif desired_morphology in ['single_spotty']:
spotty_single_system_random_sampling(db_name, number_of_samples=number_of_samples)
elif desired_morphology in ['eccentric']:
eccentric_system_random_sampling(db_name, number_of_samples=number_of_samples)
else:
raise ValueError(f'Unknown morphology: {desired_morphology}. '
f'List of available morphologies: `all`, `detached` - detached binaries on circular orbit, '
f'`overcontact`, `single_spotty`, `eccentric`')
if __name__ == "__main__":
settings.LOG_CONFIG = 'fit'
config.NUMBER_OF_PROCESSES = 1
# random_sampling('../../random.db', desired_morphology='single_spotty', number_of_samples=10)
random_sampling('../../random.db', desired_morphology='eccentric', number_of_samples=10)
|
[
"eb_gridmaker.dtb.insert_observation",
"eb_gridmaker.dtb.search_for_breakpoint",
"numpy.random.seed",
"eb_gridmaker.utils.aux.draw_single_star_params",
"eb_gridmaker.utils.aux.assign_eccentric_system_params",
"eb_gridmaker.utils.aux.draw_inclination",
"elisa.SingleSystem.from_json",
"eb_gridmaker.utils.multiproc.multiprocess_eval",
"elisa.Observer",
"numpy.arange",
"numpy.linspace",
"eb_gridmaker.dtb.create_ceb_db",
"elisa.BinarySystem.from_json",
"eb_gridmaker.utils.aux.draw_eccentric_system_params"
] |
[((604, 660), 'numpy.linspace', 'np.linspace', (['(0)', '(1.0)'], {'num': 'config.N_POINTS', 'endpoint': '(False)'}), '(0, 1.0, num=config.N_POINTS, endpoint=False)\n', (615, 660), True, 'import numpy as np\n'), ((722, 767), 'numpy.arange', 'np.arange', (['(0)', 'number_of_samples'], {'dtype': 'np.int'}), '(0, number_of_samples, dtype=np.int)\n', (731, 767), True, 'import numpy as np\n'), ((773, 880), 'eb_gridmaker.dtb.create_ceb_db', 'dtb.create_ceb_db', (['config.DATABASE_NAME', 'config.PARAMETER_COLUMNS_SINGLE', 'config.PARAMETER_TYPES_SINGLE'], {}), '(config.DATABASE_NAME, config.PARAMETER_COLUMNS_SINGLE,\n config.PARAMETER_TYPES_SINGLE)\n', (790, 880), False, 'from eb_gridmaker import dtb, config\n'), ((892, 944), 'eb_gridmaker.dtb.search_for_breakpoint', 'dtb.search_for_breakpoint', (['config.DATABASE_NAME', 'ids'], {}), '(config.DATABASE_NAME, ids)\n', (917, 944), False, 'from eb_gridmaker import dtb, config\n'), ((1133, 1194), 'eb_gridmaker.utils.multiproc.multiprocess_eval', 'multiproc.multiprocess_eval', (['ids', 'eval_single_grid_node', 'args'], {}), '(ids, eval_single_grid_node, args)\n', (1160, 1194), False, 'from eb_gridmaker.utils import aux, multiproc\n'), ((2546, 2562), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (2560, 2562), True, 'import numpy as np\n'), ((3801, 3857), 'numpy.linspace', 'np.linspace', (['(0)', '(1.0)'], {'num': 'config.N_POINTS', 'endpoint': '(False)'}), '(0, 1.0, num=config.N_POINTS, endpoint=False)\n', (3812, 3857), True, 'import numpy as np\n'), ((3919, 3964), 'numpy.arange', 'np.arange', (['(0)', 'number_of_samples'], {'dtype': 'np.int'}), '(0, number_of_samples, dtype=np.int)\n', (3928, 3964), True, 'import numpy as np\n'), ((3970, 4083), 'eb_gridmaker.dtb.create_ceb_db', 'dtb.create_ceb_db', (['config.DATABASE_NAME', 'config.PARAMETER_COLUMNS_ECCENTRIC', 'config.PARAMETER_TYPES_ECCENTRIC'], {}), '(config.DATABASE_NAME, config.PARAMETER_COLUMNS_ECCENTRIC,\n config.PARAMETER_TYPES_ECCENTRIC)\n', (3987, 4083), 
False, 'from eb_gridmaker import dtb, config\n'), ((4095, 4147), 'eb_gridmaker.dtb.search_for_breakpoint', 'dtb.search_for_breakpoint', (['config.DATABASE_NAME', 'ids'], {}), '(config.DATABASE_NAME, ids)\n', (4120, 4147), False, 'from eb_gridmaker import dtb, config\n'), ((4335, 4403), 'eb_gridmaker.utils.multiproc.multiprocess_eval', 'multiproc.multiprocess_eval', (['ids', 'eval_eccentric_random_sample', 'args'], {}), '(ids, eval_eccentric_random_sample, args)\n', (4362, 4403), False, 'from eb_gridmaker.utils import aux, multiproc\n'), ((1843, 1872), 'eb_gridmaker.utils.aux.draw_single_star_params', 'aux.draw_single_star_params', ([], {}), '()\n', (1870, 1872), False, 'from eb_gridmaker.utils import aux, multiproc\n'), ((2000, 2045), 'elisa.Observer', 'Observer', ([], {'passband': 'config.PASSBANDS', 'system': 's'}), '(passband=config.PASSBANDS, system=s)\n', (2008, 2045), False, 'from elisa import SingleSystem, BinarySystem, Observer, settings\n'), ((2307, 2429), 'eb_gridmaker.dtb.insert_observation', 'dtb.insert_observation', (['config.DATABASE_NAME', 'o', 'iden', 'config.PARAMETER_COLUMNS_SINGLE', 'config.PARAMETER_TYPES_SINGLE'], {}), '(config.DATABASE_NAME, o, iden, config.\n PARAMETER_COLUMNS_SINGLE, config.PARAMETER_TYPES_SINGLE)\n', (2329, 2429), False, 'from eb_gridmaker import dtb, config\n'), ((2594, 2628), 'eb_gridmaker.utils.aux.draw_eccentric_system_params', 'aux.draw_eccentric_system_params', ([], {}), '()\n', (2626, 2628), False, 'from eb_gridmaker.utils import aux, multiproc\n'), ((2646, 2687), 'eb_gridmaker.utils.aux.assign_eccentric_system_params', 'aux.assign_eccentric_system_params', (['*args'], {}), '(*args)\n', (2680, 2687), False, 'from eb_gridmaker.utils import aux, multiproc\n'), ((3339, 3467), 'eb_gridmaker.dtb.insert_observation', 'dtb.insert_observation', (['config.DATABASE_NAME', 'o', 'iden', 'config.PARAMETER_COLUMNS_ECCENTRIC', 'config.PARAMETER_TYPES_ECCENTRIC'], {}), '(config.DATABASE_NAME, o, iden, config.\n 
PARAMETER_COLUMNS_ECCENTRIC, config.PARAMETER_TYPES_ECCENTRIC)\n', (3361, 3467), False, 'from eb_gridmaker import dtb, config\n'), ((1903, 1933), 'elisa.SingleSystem.from_json', 'SingleSystem.from_json', (['params'], {}), '(params)\n', (1925, 1933), False, 'from elisa import SingleSystem, BinarySystem, Observer, settings\n'), ((2719, 2749), 'elisa.BinarySystem.from_json', 'BinarySystem.from_json', (['params'], {}), '(params)\n', (2741, 2749), False, 'from elisa import SingleSystem, BinarySystem, Observer, settings\n'), ((2968, 3014), 'elisa.Observer', 'Observer', ([], {'passband': 'config.PASSBANDS', 'system': 'bs'}), '(passband=config.PASSBANDS, system=bs)\n', (2976, 3014), False, 'from elisa import SingleSystem, BinarySystem, Observer, settings\n'), ((2895, 2926), 'eb_gridmaker.utils.aux.draw_inclination', 'aux.draw_inclination', ([], {'binary': 'bs'}), '(binary=bs)\n', (2915, 2926), False, 'from eb_gridmaker.utils import aux, multiproc\n')]
|
from urllib.parse import urlparse
from django.http import Http404
from django.urls import resolve
class Referer:
"""
Wrapper for http referer information
"""
def __init__(self, current_path, referer_path):
self.current_path = current_path
self.referer_path = referer_path
self.current_url_name = self.get_url_name(current_path)
if referer_path:
self.referer_url_name = self.get_url_name(referer_path)
else:
self.referer_url_name = None
def get_url_name(self, path):
try:
return resolve(urlparse(path).path).url_name
except Http404:
return None
@property
def path(self):
if self.referer_url_name != self.current_url_name:
return self.referer_path
@property
def url_name(self):
return self.referer_url_name
class RefererMixin:
"""
Allows a view to access the referring url information
"""
_referer = None
def get_context_data(self, **kwargs):
context_data = super().get_context_data(**kwargs)
context_data["referer"] = self.referer
return context_data
def get_referer(self):
if "referer_path" in self.request.POST:
referer_path = self.request.POST.get("referer_path")
elif self.request.META.get("HTTP_REFERER"):
referer_path = urlparse(self.request.META.get("HTTP_REFERER")).path
referer_querystring = urlparse(self.request.META.get("HTTP_REFERER")).query
if referer_querystring:
referer_path = f"{referer_path}?{referer_querystring}"
else:
referer_path = None
return Referer(
current_path=self.request.path_info,
referer_path=referer_path,
)
@property
def referer(self):
if self._referer is None:
self._referer = self.get_referer()
return self._referer
|
[
"urllib.parse.urlparse"
] |
[((595, 609), 'urllib.parse.urlparse', 'urlparse', (['path'], {}), '(path)\n', (603, 609), False, 'from urllib.parse import urlparse\n')]
|
import pygame
from src.weapons.data.weapon import WeaponData
from src.terrain import Terrain
from typing import Tuple
class Weapon(pygame.sprite.Sprite):
def __init__(
self, weapon_type: WeaponData,
pos: Tuple[int, int],
bounds: pygame.Rect, terrain: Terrain
):
"""Initialize the weapon
Args:
pos (Tuple[int, int]): the position of the weapon
terrain ([type]): the terrain of the map
"""
pygame.sprite.Sprite.__init__(self)
self.weapon_type = weapon_type
# Set the position of the weapon
self.rect = pygame.Rect((0, 0), self.weapon_type.size)
self.rect.center = pos
# Set the cooldown
self.cooldown = 0
# Set the terrain
self.terrain = terrain
self.bounds = bounds
# Initialize sounds
self.shoot_sound = pygame.mixer.Sound("src/assets/sounds/shoot.wav")
self.shoot_sound.set_volume(0.1)
def shoot(self):
"""Shoot a bullet
Returns:
Bullet: the bullet that was shot
"""
# Check if the player can shoot
if self.cooldown <= 0:
# Play sound
self.shoot_sound.play()
return self.weapon_type.shoot(self)
def update(self, pos: Tuple[int, int], direction: int):
"""Update the weapon's position
Args:
pos (Tuple[int, int]): the new position of the weapon
direction (int): the direction of the weapon
"""
# Update the position of the weapon
self.rect.center = pos
self.dir = direction
# Update the cooldown
self.cooldown -= 1
def draw(self, screen: pygame.Surface):
"""Draw the weapon
Args:
screen (pygame.Surface): the screen to draw the weapon on
"""
# Set the image
self.image = self.weapon_type.image
self.image = pygame.transform.scale(self.image, (self.rect.width, self.rect.height))
# Draw the weapon
image = pygame.transform.flip(self.image, self.dir != 1, False)
screen.blit(image, self.rect)
|
[
"pygame.transform.flip",
"pygame.Rect",
"pygame.transform.scale",
"pygame.sprite.Sprite.__init__",
"pygame.mixer.Sound"
] |
[((486, 521), 'pygame.sprite.Sprite.__init__', 'pygame.sprite.Sprite.__init__', (['self'], {}), '(self)\n', (515, 521), False, 'import pygame\n'), ((623, 665), 'pygame.Rect', 'pygame.Rect', (['(0, 0)', 'self.weapon_type.size'], {}), '((0, 0), self.weapon_type.size)\n', (634, 665), False, 'import pygame\n'), ((902, 951), 'pygame.mixer.Sound', 'pygame.mixer.Sound', (['"""src/assets/sounds/shoot.wav"""'], {}), "('src/assets/sounds/shoot.wav')\n", (920, 951), False, 'import pygame\n'), ((1968, 2039), 'pygame.transform.scale', 'pygame.transform.scale', (['self.image', '(self.rect.width, self.rect.height)'], {}), '(self.image, (self.rect.width, self.rect.height))\n', (1990, 2039), False, 'import pygame\n'), ((2083, 2138), 'pygame.transform.flip', 'pygame.transform.flip', (['self.image', '(self.dir != 1)', '(False)'], {}), '(self.image, self.dir != 1, False)\n', (2104, 2138), False, 'import pygame\n')]
|
""" CNN cell for architecture search """
import torch
import torch.nn as nn
from models import ops
class SearchCell(nn.Module):
"""
Cell for search
Each edge is mixed and continuous relaxed.
The cell is also simplified.
"""
def __init__(self, n_nodes, C_p, C, bn_momentum):
"""
Args:
n_nodes: # of intermediate n_nodes
C_p : C_out[k-1]
C : C_in[k] (current)
"""
super().__init__()
self.n_nodes = n_nodes
self.preproc = ops.StdConv(C_p, C, 1, 1, 0, affine=False, bn_momentum=bn_momentum)
# generate dag
self.dag = nn.ModuleList()
for i in range(self.n_nodes):
self.dag.append(nn.ModuleList())
for j in range(1 + i): # include 1 input node
op = ops.MixedOp(C, stride=1, bn_momentum=bn_momentum)
self.dag[i].append(op)
def forward(self, s, w_dag):
s = self.preproc(s)
states = [s]
for edges, w_list in zip(self.dag, w_dag):
s_cur = sum(edges[i](s, w) for i, (s, w) in enumerate(zip(states, w_list)))
states.append(s_cur)
s_out = torch.cat(states[1:], dim=1)
return s_out
|
[
"models.ops.StdConv",
"models.ops.MixedOp",
"torch.cat",
"torch.nn.ModuleList"
] |
[((534, 601), 'models.ops.StdConv', 'ops.StdConv', (['C_p', 'C', '(1)', '(1)', '(0)'], {'affine': '(False)', 'bn_momentum': 'bn_momentum'}), '(C_p, C, 1, 1, 0, affine=False, bn_momentum=bn_momentum)\n', (545, 601), False, 'from models import ops\n'), ((645, 660), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (658, 660), True, 'import torch.nn as nn\n'), ((1186, 1214), 'torch.cat', 'torch.cat', (['states[1:]'], {'dim': '(1)'}), '(states[1:], dim=1)\n', (1195, 1214), False, 'import torch\n'), ((727, 742), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (740, 742), True, 'import torch.nn as nn\n'), ((824, 873), 'models.ops.MixedOp', 'ops.MixedOp', (['C'], {'stride': '(1)', 'bn_momentum': 'bn_momentum'}), '(C, stride=1, bn_momentum=bn_momentum)\n', (835, 873), False, 'from models import ops\n')]
|
from typing import TypeVar, Generic, Optional
from typing import Union
import numpy as np
import torch
from PIL.Image import Image as Img
T = TypeVar('T')
class Closure(Generic[T]):
def __init__(self):
self.value: Optional[T] = None
def shape(tensor: Union[Img, torch.Tensor, np.ndarray]) -> str:
if isinstance(tensor, Img):
w, h = tensor.size
prefix = 'image'
dims = [h, w]
elif isinstance(tensor, torch.Tensor):
prefix = 'tensor'
dims = list(tensor.shape)
elif isinstance(tensor, np.ndarray):
prefix = 'ndarray'
dims = list(tensor.shape)
else:
raise NotImplementedError(f'image type: {type(tensor)}')
dims = [str(dim) for dim in dims]
return f'{prefix}({"x".join(dims)})'
|
[
"typing.TypeVar"
] |
[((144, 156), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (151, 156), False, 'from typing import TypeVar, Generic, Optional\n')]
|
import xml.etree.ElementTree as etree
import ast
class Record(object):
pass
class Settings(object):
def __init__(self, filename):
self.filename = filename
try:
self._xml = etree.parse(self.filename)
self._root = self._xml.getroot()
self._parseContent(self._root)
except (IOError, etree.ParseError):
raise IOError('Invalid file specified. XML only. {}')
def _parseContent(self, root):
for tab in root:
if tab.tag == 'tab':
r = Record()
setattr(self, self._readName(tab), r)
for opt in tab:
if opt.tag == 'option':
setattr(r, self._readName(opt), self._parseValue(opt))
def _readName(self, item):
try:
return item.attrib['name'].replace(' ', '_')
except (AttributeError, KeyError):
raise AttributeError('Option name not specified')
def _readValue(self, item):
return item.get('value')
def _readDefault(self, item):
return item.get('default')
def _readType(self, item):
"""returns item type retreived from xml-tree. In case of missing returns string format"""
_type = item.get('type')
if _type:
return _type
else:
return 'str'
def _parseValue(self, item, value=None):
"""return value with proper type parsing from raw string from xml-tree. If type is
not specified returns raw string value."""
vtype = self._readType(item)
if value is None:
value = self._readValue(item)
if value is not None:
if vtype == 'int':
return int(value)
elif vtype == 'str':
return str(value)
elif vtype == 'bool' or vtype == 'list' or vtype == 'dict':
return ast.literal_eval(value)
else:
return value
def _getElement(self, name):
xpath = ".//*[@name='{}']"
element = self._root.find(xpath.format(name))
if element is None:
element = self._root.find(xpath.format(name.replace('_', ' ')))
return element
def reload(self):
self._parseContent(self._root)
def save(self):
for cat_name, cat in self.__dict__.items():
if not cat_name.startswith('_') and isinstance(cat, Record):
for opt_name, opt in cat.__dict__.items():
element = self._getElement(opt_name)
if element is not None:
element.set('value', str(opt))
self._xml.write(self.filename)
def defaults(self):
for tab in self._root:
if tab.tag == 'tab':
r = getattr(self, self._readName(tab))
for opt in tab:
if opt.tag == 'option' and isinstance(r, Record):
setattr(r,
self._readName(opt),
self._parseValue(opt, self._readDefault(opt)))
|
[
"ast.literal_eval",
"xml.etree.ElementTree.parse"
] |
[((213, 239), 'xml.etree.ElementTree.parse', 'etree.parse', (['self.filename'], {}), '(self.filename)\n', (224, 239), True, 'import xml.etree.ElementTree as etree\n'), ((1912, 1935), 'ast.literal_eval', 'ast.literal_eval', (['value'], {}), '(value)\n', (1928, 1935), False, 'import ast\n')]
|
from django.db import migrations, models
def raise_error(apps, schema_editor):
# Test operation in non-atomic migration is not wrapped in transaction
Publisher = apps.get_model('migrations', 'Publisher')
Publisher.objects.create(name='Test Publisher')
raise RuntimeError('Abort migration')
class Migration(migrations.Migration):
atomic = False
operations = [
migrations.CreateModel(
"Publisher",
[
("name", models.CharField(primary_key=True, max_length=255)),
],
),
migrations.RunPython(raise_error),
migrations.CreateModel(
"Book",
[
("title", models.CharField(primary_key=True, max_length=255)),
("publisher", models.ForeignKey("migrations.Publisher", models.SET_NULL, null=True)),
],
),
]
|
[
"django.db.migrations.RunPython",
"django.db.models.CharField",
"django.db.models.ForeignKey"
] |
[((591, 624), 'django.db.migrations.RunPython', 'migrations.RunPython', (['raise_error'], {}), '(raise_error)\n', (611, 624), False, 'from django.db import migrations, models\n'), ((501, 551), 'django.db.models.CharField', 'models.CharField', ([], {'primary_key': '(True)', 'max_length': '(255)'}), '(primary_key=True, max_length=255)\n', (517, 551), False, 'from django.db import migrations, models\n'), ((722, 772), 'django.db.models.CharField', 'models.CharField', ([], {'primary_key': '(True)', 'max_length': '(255)'}), '(primary_key=True, max_length=255)\n', (738, 772), False, 'from django.db import migrations, models\n'), ((806, 875), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""migrations.Publisher"""', 'models.SET_NULL'], {'null': '(True)'}), "('migrations.Publisher', models.SET_NULL, null=True)\n", (823, 875), False, 'from django.db import migrations, models\n')]
|
# -*- coding: utf-8 -*-
"""Management command to create DKIM keys."""
from __future__ import print_function, unicode_literals
import os
from django.core.management.base import BaseCommand
from django.utils.encoding import smart_text
from modoboa.lib import sysutils
from modoboa.parameters import tools as param_tools
from .... import models
from .... import signals
class ManageDKIMKeys(BaseCommand):
"""Command class."""
def create_new_dkim_key(self, domain):
"""Create a new DKIM key."""
storage_dir = param_tools.get_global_parameter("dkim_keys_storage_dir")
pkey_path = os.path.join(storage_dir, "{}.pem".format(domain.name))
key_size = (
domain.dkim_key_length if domain.dkim_key_length
else self.default_key_length)
code, output = sysutils.exec_cmd(
"openssl genrsa -out {} {}".format(pkey_path, key_size))
if code:
print("Failed to generate DKIM private key for domain {}: {}"
.format(domain.name, smart_text(output)))
domain.dkim_private_key_path = pkey_path
code, output = sysutils.exec_cmd(
"openssl rsa -in {} -pubout".format(pkey_path))
if code:
print("Failed to generate DKIM public key for domain {}: {}"
.format(domain.name, smart_text(output)))
public_key = ""
for cpt, line in enumerate(smart_text(output).splitlines()):
if cpt == 0 or line.startswith("-----"):
continue
public_key += line
domain.dkim_public_key = public_key
domain.save(update_fields=["dkim_public_key", "dkim_private_key_path"])
def handle(self, *args, **options):
"""Entry point."""
self.default_key_length = param_tools.get_global_parameter(
"dkim_default_key_length")
qset = models.Domain.objects.filter(
enable_dkim=True, dkim_private_key_path="")
for domain in qset:
self.create_new_dkim_key(domain)
if qset.exists():
signals.new_dkim_keys.send(sender=self.__class__, domains=qset)
|
[
"django.utils.encoding.smart_text",
"modoboa.parameters.tools.get_global_parameter"
] |
[((538, 595), 'modoboa.parameters.tools.get_global_parameter', 'param_tools.get_global_parameter', (['"""dkim_keys_storage_dir"""'], {}), "('dkim_keys_storage_dir')\n", (570, 595), True, 'from modoboa.parameters import tools as param_tools\n'), ((1787, 1846), 'modoboa.parameters.tools.get_global_parameter', 'param_tools.get_global_parameter', (['"""dkim_default_key_length"""'], {}), "('dkim_default_key_length')\n", (1819, 1846), True, 'from modoboa.parameters import tools as param_tools\n'), ((1037, 1055), 'django.utils.encoding.smart_text', 'smart_text', (['output'], {}), '(output)\n', (1047, 1055), False, 'from django.utils.encoding import smart_text\n'), ((1338, 1356), 'django.utils.encoding.smart_text', 'smart_text', (['output'], {}), '(output)\n', (1348, 1356), False, 'from django.utils.encoding import smart_text\n'), ((1418, 1436), 'django.utils.encoding.smart_text', 'smart_text', (['output'], {}), '(output)\n', (1428, 1436), False, 'from django.utils.encoding import smart_text\n')]
|
#!/usr/bin/env python3
"""
This module should introduce you to basic plotting routines
using matplotlib.
We will be plotting quadratic equations since we already
have a module to calculate them.
"""
# Matplotlib is a module with routines to
# plot data and display them on the screen or save them to files.
# The primary plotting module is pyplot, which is conventionally imported
# as plt
import matplotlib.pyplot as plt
import numpy as np
# This is our quadratic_equation class, rename as QE for shortness
from quad_class import quadratic_Equation as QE
def simplePlot(quadEqn, xlim):
"""
This function will plot a quadratic equation, using
the quadratic_Equation module.
inputs:
quadEqn -- instance of QE --
the quadratic equation to plot
xlim -- list of [float, float] or None --
This will define the limits for which x is plotted.
If xlim is None, Matplotlib will auto-scale the axis.
"""
# Enforce that quadEqn MUST be a QE object.
# The isinstance function checks if the variable is of type QE,
# and returns true if and only if it is.
if not isinstance(quadEqn, QE):
# RuntimeError is a built in exception class
raise RuntimeError(msg='provided quadEqn is NOT of type QE')
# Define the x values we are going to plot
# np.arange is a function similiar to range, except
# can use floats as well as integers.
x_vals = np.arange(xlim[0], xlim[1], .01) #go from xlim[0] to xlim[1] in step sizes of .01
# Define the y values to plot. This should be the value of our quadratic equation at each
# value of x.
# NOTE: x_vals and y_vals MUST have the same length to plot
y_vals = [quadEqn(x) for x in x_vals]
# Create a simple plot
plt.plot(x_vals, y_vals)
# Display the plot on the screen
plt.show()
# We are introducing a new python object called None here.
# In python, None represents that nothing is there.
# So in the following definition we are saying that
# by default ylim will be a nothing.
def plotQE(quadEqn, xlim, ylim=None):
"""
This function will plot a quadratic equation, using
the quadratic_Equation module.
inputs:
quadEqn -- instance of QE --
the quadratic equation to plot
xlim -- list of [float, float] or None --
This will define the limits for which x is plotted.
If xlim is None, Matplotlib will auto-scale the axis.
optional inputs:
ylim=None -- list of [float, float] or None --
This will define the limits for which y is plotted.
If ylim is None, Matplotlib will auto-scale the axis.
"""
# Ensure quadEqn is of type QE
if not isinstance(quadEqn, QE):
raise RuntimeError(msg='provided quadEqn is NOT of type QE')
# Define the x values to plot
x_vals = np.arange(xlim[0], xlim[1], .01) #go from xlim[0] to xlim[1] in step sizes of .01
# Define the y values to plot.
y_vals = [quadEqn(x) for x in x_vals]
# Plot the function, but make it red, and only plot the actual data points without a line
plt.plot(x_vals, y_vals, 'ro')
# Set the plot so it only shows the defined x range
plt.xlim(xlim)
# If ylim was provided, set the y limits of the plot
if ylim is not None:
plt.ylim(ylim)
# Label the axes
plt.xlabel('x')
plt.ylabel('y')
# Display the plot on the screen
plt.show()
def plotRoots(quadEqn, xlim=None, ylim=None):
"""
This function will plot a quadratic equation,
along with vertical bars at its roots.
inputs:
quadEqn -- instance of QE --
the quadratic equation to plot
optional inputs:
xlim=None -- list of [float, float] or None --
This will define the limits for which x is plotted.
If xlim is None, will only plot just beyond the roots of the function.
If the roots are not real, an Exception will be raised.
ylim=None -- list of [float, float] or None --
This will define the limits for which y is plotted.
If ylim is None, the limits will be chosen to fit the plot tightly.
"""
# Ensure quadEqn is of type QE
if not isinstance(quadEqn, QE):
raise RuntimeError(msg='provided quadEqn is NOT of type QE')
# find the roots
neg_root = quadEqn.root(False)
pos_root = quadEqn.root(True)
# if xlim not provided, set just further than the roots as the limits
if xlim is None:
# define padding of a tenth of the distance between the roots
pad = pos_root - neg_root
pad *= .1
xlim = [min(neg_root, pos_root) - pad, max(neg_root, pos_root) + pad]
# Define the x values to plot
x_vals = np.arange(xlim[0], xlim[1], .01) #go from xlim[0] to xlim[1] in step sizes of .01
# Define the y values to plot.
y_vals = [quadEqn(x) for x in x_vals]
# Create a plot of the equation, with a solid red line. Give it a label
plt.plot(x_vals, y_vals, linestyle='-', color='red', label='Quad. Eqn.')
# Set the plot so it only shows the defined x range
plt.xlim(xlim)
if ylim is not None:
# If ylim was provided, set the y limits of the plot
plt.ylim(ylim)
else:
# squeeze the y limits to just cover y-values
plt.ylim([min(y_vals), max(y_vals)])
# Plot a blue vertical bar at the the negative root, with a label
plt.axvline(neg_root, color='blue', label='neg. root')
# Plot a purple vertical bar at the the positive root, with a label
plt.axvline(pos_root, color='purple', label='pos. root')
# add a legend to the plot
plt.legend()
# add a title to the plot
plt.title('root plot')
# display the plot
plt.show()
def test_plots():
"""
A simple method to demonstrate the three plotting routines.
"""
myeqn = QE(.8, 3, -2)
simplePlot(myeqn, [-5,3])
plotQE(myeqn, [-5,3])
plotRoots(myeqn)
if __name__ == '__main__':
test_plots()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.legend",
"numpy.arange",
"quad_class.quadratic_Equation",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((1449, 1482), 'numpy.arange', 'np.arange', (['xlim[0]', 'xlim[1]', '(0.01)'], {}), '(xlim[0], xlim[1], 0.01)\n', (1458, 1482), True, 'import numpy as np\n'), ((1785, 1809), 'matplotlib.pyplot.plot', 'plt.plot', (['x_vals', 'y_vals'], {}), '(x_vals, y_vals)\n', (1793, 1809), True, 'import matplotlib.pyplot as plt\n'), ((1852, 1862), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1860, 1862), True, 'import matplotlib.pyplot as plt\n'), ((2876, 2909), 'numpy.arange', 'np.arange', (['xlim[0]', 'xlim[1]', '(0.01)'], {}), '(xlim[0], xlim[1], 0.01)\n', (2885, 2909), True, 'import numpy as np\n'), ((3138, 3168), 'matplotlib.pyplot.plot', 'plt.plot', (['x_vals', 'y_vals', '"""ro"""'], {}), "(x_vals, y_vals, 'ro')\n", (3146, 3168), True, 'import matplotlib.pyplot as plt\n'), ((3230, 3244), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim'], {}), '(xlim)\n', (3238, 3244), True, 'import matplotlib.pyplot as plt\n'), ((3377, 3392), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (3387, 3392), True, 'import matplotlib.pyplot as plt\n'), ((3397, 3412), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (3407, 3412), True, 'import matplotlib.pyplot as plt\n'), ((3455, 3465), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3463, 3465), True, 'import matplotlib.pyplot as plt\n'), ((4781, 4814), 'numpy.arange', 'np.arange', (['xlim[0]', 'xlim[1]', '(0.01)'], {}), '(xlim[0], xlim[1], 0.01)\n', (4790, 4814), True, 'import numpy as np\n'), ((5025, 5097), 'matplotlib.pyplot.plot', 'plt.plot', (['x_vals', 'y_vals'], {'linestyle': '"""-"""', 'color': '"""red"""', 'label': '"""Quad. Eqn."""'}), "(x_vals, y_vals, linestyle='-', color='red', label='Quad. 
Eqn.')\n", (5033, 5097), True, 'import matplotlib.pyplot as plt\n'), ((5159, 5173), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim'], {}), '(xlim)\n', (5167, 5173), True, 'import matplotlib.pyplot as plt\n'), ((5468, 5522), 'matplotlib.pyplot.axvline', 'plt.axvline', (['neg_root'], {'color': '"""blue"""', 'label': '"""neg. root"""'}), "(neg_root, color='blue', label='neg. root')\n", (5479, 5522), True, 'import matplotlib.pyplot as plt\n'), ((5599, 5655), 'matplotlib.pyplot.axvline', 'plt.axvline', (['pos_root'], {'color': '"""purple"""', 'label': '"""pos. root"""'}), "(pos_root, color='purple', label='pos. root')\n", (5610, 5655), True, 'import matplotlib.pyplot as plt\n'), ((5692, 5704), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5702, 5704), True, 'import matplotlib.pyplot as plt\n'), ((5740, 5762), 'matplotlib.pyplot.title', 'plt.title', (['"""root plot"""'], {}), "('root plot')\n", (5749, 5762), True, 'import matplotlib.pyplot as plt\n'), ((5791, 5801), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5799, 5801), True, 'import matplotlib.pyplot as plt\n'), ((5913, 5927), 'quad_class.quadratic_Equation', 'QE', (['(0.8)', '(3)', '(-2)'], {}), '(0.8, 3, -2)\n', (5915, 5927), True, 'from quad_class import quadratic_Equation as QE\n'), ((3336, 3350), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ylim'], {}), '(ylim)\n', (3344, 3350), True, 'import matplotlib.pyplot as plt\n'), ((5269, 5283), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ylim'], {}), '(ylim)\n', (5277, 5283), True, 'import matplotlib.pyplot as plt\n')]
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from configparser import ConfigParser
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from time import sleep
class SearchHarness:
def __init__(self):
self.driver = webdriver.Firefox()
self.driver.maximize_window()
self.stackOverflowDictionary = {}
def execute(self):
self.driver.get("https://www.google.com")
def login(self):
url = "https://www.google.com/accounts/Login"
self.driver.get(url)
wait = WebDriverWait(self.driver, 6)
username, password = self._get_login_details()
self.driver.find_element_by_id("identifierId").send_keys(username)
wait.until(EC.element_to_be_clickable((By.ID, "identifierNext")))
self.driver.find_element_by_id("identifierNext").click()
wait.until(EC.visibility_of_element_located((By.NAME, "password")))
password_element = self.driver.find_element_by_name("password")
password_element.send_keys(password)
wait.until(EC.element_to_be_clickable((By.XPATH, "//*[@id='passwordNext']/span")))
next_element = self.driver.find_element_by_xpath("//*[@id='passwordNext']/span")
next_element.click()
def _get_login_details(self):
config = ConfigParser()
config.read('config.properties')
username = config.get('Account', "username")
password = config.get('Account', "password")
return username, password
def search_questions_from_stack_overflow(self):
wait = WebDriverWait(self.driver, 3)
self.driver.get("https://google.com")
for questionPair in self.stackOverflowDictionary.values():
for question in questionPair:
self.driver.find_element_by_xpath("//div/div/input").clear()
search_element = self.driver.find_element_by_xpath("//div/div/input")
search_element.clear()
search_element.send_keys(question)
search_element.send_keys(Keys.RETURN)
sleep(20)
wait.until(EC.visibility_of_element_located((By.ID, "resultStats")))
def get_stack_overflow_tags(self):
wait = WebDriverWait(self.driver, 6)
self.driver.get("https://stackoverflow.com/tags")
wait.until(EC.visibility_of_element_located((By.ID, "tags-browser")))
tags_browser_element = self.driver.find_element_by_id("tags-browser")
link_elements = tags_browser_element.find_elements_by_xpath("//a[@class='post-tag']")
for tag in link_elements[:2]:
tag_text = tag.text
actions = ActionChains(self.driver)
actions.move_to_element(tag)
actions.key_down(Keys.CONTROL)
actions.key_down(Keys.TAB)
actions.click(tag)
actions.key_up(Keys.CONTROL)
actions.key_up(Keys.TAB)
actions.perform()
sleep(2)
self.driver.switch_to.window(self.driver.window_handles[1])
wait.until(EC.visibility_of_element_located((By.ID, "questions")))
questions_element = self.driver.find_element_by_id("questions")
title_elements = questions_element.find_elements_by_xpath("//h3/a[@class='question-hyperlink']")
for title in title_elements:
if tag_text not in self.stackOverflowDictionary:
self.stackOverflowDictionary[tag_text] = []
self.stackOverflowDictionary[tag_text].append(title.text)
print(tag_text + "," + title.text)
self.driver.close()
self.driver.switch_to.window(self.driver.window_handles[0])
def run(self):
self.login()
self.get_stack_overflow_tags()
self.search_questions_from_stack_overflow()
if __name__ == '__main__':
searchHarness = SearchHarness()
searchHarness.run()
|
[
"selenium.webdriver.support.expected_conditions.element_to_be_clickable",
"selenium.webdriver.Firefox",
"selenium.webdriver.common.action_chains.ActionChains",
"selenium.webdriver.support.expected_conditions.visibility_of_element_located",
"time.sleep",
"configparser.ConfigParser",
"selenium.webdriver.support.ui.WebDriverWait"
] |
[((440, 459), 'selenium.webdriver.Firefox', 'webdriver.Firefox', ([], {}), '()\n', (457, 459), False, 'from selenium import webdriver\n'), ((734, 763), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['self.driver', '(6)'], {}), '(self.driver, 6)\n', (747, 763), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((1489, 1503), 'configparser.ConfigParser', 'ConfigParser', ([], {}), '()\n', (1501, 1503), False, 'from configparser import ConfigParser\n'), ((1753, 1782), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['self.driver', '(3)'], {}), '(self.driver, 3)\n', (1766, 1782), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((2411, 2440), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['self.driver', '(6)'], {}), '(self.driver, 6)\n', (2424, 2440), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((915, 968), 'selenium.webdriver.support.expected_conditions.element_to_be_clickable', 'EC.element_to_be_clickable', (["(By.ID, 'identifierNext')"], {}), "((By.ID, 'identifierNext'))\n", (941, 968), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((1054, 1109), 'selenium.webdriver.support.expected_conditions.visibility_of_element_located', 'EC.visibility_of_element_located', (["(By.NAME, 'password')"], {}), "((By.NAME, 'password'))\n", (1086, 1109), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((1247, 1317), 'selenium.webdriver.support.expected_conditions.element_to_be_clickable', 'EC.element_to_be_clickable', (['(By.XPATH, "//*[@id=\'passwordNext\']/span")'], {}), '((By.XPATH, "//*[@id=\'passwordNext\']/span"))\n', (1273, 1317), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((2519, 2576), 'selenium.webdriver.support.expected_conditions.visibility_of_element_located', 'EC.visibility_of_element_located', (["(By.ID, 'tags-browser')"], {}), "((By.ID, 'tags-browser'))\n", 
(2551, 2576), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((2842, 2867), 'selenium.webdriver.common.action_chains.ActionChains', 'ActionChains', (['self.driver'], {}), '(self.driver)\n', (2854, 2867), False, 'from selenium.webdriver.common.action_chains import ActionChains\n'), ((3142, 3150), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (3147, 3150), False, 'from time import sleep\n'), ((2261, 2270), 'time.sleep', 'sleep', (['(20)'], {}), '(20)\n', (2266, 2270), False, 'from time import sleep\n'), ((3246, 3300), 'selenium.webdriver.support.expected_conditions.visibility_of_element_located', 'EC.visibility_of_element_located', (["(By.ID, 'questions')"], {}), "((By.ID, 'questions'))\n", (3278, 3300), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((2298, 2354), 'selenium.webdriver.support.expected_conditions.visibility_of_element_located', 'EC.visibility_of_element_located', (["(By.ID, 'resultStats')"], {}), "((By.ID, 'resultStats'))\n", (2330, 2354), True, 'from selenium.webdriver.support import expected_conditions as EC\n')]
|
import json
import logging
from thrift.Thrift import TException, TApplicationException
from EOSS.vassar.api import VASSARClient
from EOSS.data import problem_specific
from daphne_context.models import UserInformation
logger = logging.getLogger('EOSS.engineer')
def get_architecture_scores(design_id, designs, context):
port = context["screen"]["vassar_port"]
client = VASSARClient(port)
try:
# Start connection with VASSAR
client.start_connection()
scores = client.get_architecture_score_explanation(context["screen"]["problem"],
designs.get(id=design_id))
# End the connection before return statement
client.end_connection()
return scores
except Exception:
logger.exception('Exception in loading architecture score information')
client.end_connection()
return None
def get_satisfying_data_products(design_id, designs, subobjective, context):
port = context["screen"]["vassar_port"]
client = VASSARClient(port)
try:
# Start connection with VASSAR
client.start_connection()
subobjective_explanation = client.get_subscore_details(context["screen"]["problem"],
designs.get(id=design_id),
subobjective.upper())
satisfying_data_products = [subobjective_explanation.taken_by[i] for i, x in enumerate(subobjective_explanation.scores) if x == 1.0][:5]
# End the connection before return statement
client.end_connection()
return satisfying_data_products
except Exception:
logger.exception('Exception in checking satisfying data products for a subobjective')
client.end_connection()
return None
def get_unsatisfied_justifications(design_id, designs, subobjective, context):
port = context["screen"]["vassar_port"]
client = VASSARClient(port)
try:
# Start connection with VASSAR
client.start_connection()
subobjective_explanation = client.get_subscore_details(context["screen"]["problem"],
designs.get(id=design_id),
subobjective.upper())
if max(subobjective_explanation.scores) < 1.:
unsatisfied_data_products = [subobjective_explanation.taken_by[i] for i, x in enumerate(subobjective_explanation.scores) if x < 1.0]
unsatisfied_justifications = [subobjective_explanation.justifications[i] for i, x in enumerate(subobjective_explanation.scores) if x < 1.0]
# Only show the first 4 explanations
explanations = [
{
"data_product": dp,
"explanations": ", ".join(unsatisfied_justifications[i])
} for i, dp in enumerate(unsatisfied_data_products)
][:5]
else:
unsatisfied_justifications = []
# End the connection before return statement
client.end_connection()
return explanations
except TException:
logger.exception('Exception in justifying not satisfying a subobjective')
client.end_connection()
return None
def get_panel_scores(design_id, designs, panel, context):
port = context["screen"]["vassar_port"]
client = VASSARClient(port)
try:
# Start connection with VASSAR
client.start_connection()
stakeholders_to_excel = {
"atmospheric": "ATM",
"oceanic": "OCE",
"terrestrial": "TER",
"weather": "WEA",
"climate": "CLI",
"land and ecosystems": "ECO",
"water": "WAT",
"human health": "HEA"
}
panel_code = stakeholders_to_excel[panel.lower()]
panel_scores = client.get_panel_score_explanation(context["screen"]["problem"],
designs.get(id=design_id),
panel_code)
# End the connection before return statement
client.end_connection()
return panel_scores
except TException:
logger.exception('Exception in loading panel score information')
client.end_connection()
return None
def get_objective_scores(design_id, designs, objective, context):
port = context["screen"]["vassar_port"]
client = VASSARClient(port)
try:
# Start connection with VASSAR
client.start_connection()
objective_scores = client.get_objective_score_explanation(context["screen"]["problem"],
designs.get(id=design_id),
objective.upper())
# End the connection before return statement
client.end_connection()
return objective_scores
except TException:
logger.exception('Exception in loading objective score information')
client.end_connection()
return None
def get_instruments_for_objective(objective, context):
port = context["screen"]["vassar_port"]
client = VASSARClient(port)
try:
# Start connection with VASSAR
client.start_connection()
instruments = client.get_instruments_for_objective(context["screen"]["problem"], objective.upper())
# End the connection before return statement
client.end_connection()
return instruments
except TException:
logger.exception('Exception in loading related instruments to an objective')
client.end_connection()
return None
def get_instruments_for_stakeholder(stakeholder, context):
port = context["screen"]["vassar_port"]
client = VASSARClient(port)
try:
# Start connection with VASSAR
client.start_connection()
stakeholders_to_excel = {
"atmospheric": "ATM",
"oceanic": "OCE",
"terrestrial": "TER",
"weather": "WEA",
"climate": "CLI",
"land and ecosystems": "ECO",
"water": "WAT",
"human health": "HEA"
}
panel_code = stakeholders_to_excel[stakeholder.lower()]
stakeholder_instruments = client.get_instruments_for_panel(context["screen"]["problem"], panel_code)
# End the connection before return statement
client.end_connection()
return stakeholder_instruments
except TException:
logger.exception('Exception in loading related instruments to a panel')
client.end_connection()
return None
def get_instrument_parameter(vassar_instrument, instrument_parameter, context, new_dialogue_contexts):
new_dialogue_contexts["engineer_context"].vassar_instrument = vassar_instrument
new_dialogue_contexts["engineer_context"].instrument_parameter = instrument_parameter
capabilities_sheet = problem_specific.get_capabilities_sheet(context["screen"]["problem"])
capability_found = False
capability_value = None
for row in capabilities_sheet.itertuples(name='Instrument'):
if row[1].split()[1] == vassar_instrument:
for i in range(2, len(row)):
if row[i].split()[0] == instrument_parameter:
capability_found = True
capability_value = row[i].split()[1]
if capability_found:
return 'The ' + instrument_parameter + ' for ' + vassar_instrument + ' is ' + capability_value
else:
instrument_sheet = problem_specific.get_instrument_sheet(context["screen"]["problem"], vassar_instrument)
for i in range(2, len(instrument_sheet.columns)):
if instrument_sheet[i][0].split()[0] == instrument_parameter:
capability_found = True
if capability_found:
return 'I have found different values for this parameter depending on the measurement. ' \
'Please tell me for which measurement you want this parameter: ' \
+ ', '.join([measurement[11:-1] for measurement in instrument_sheet[1]])
else:
return 'I have not found any information for this measurement.'
def get_instrument_parameter_followup(vassar_instrument, instrument_parameter, instrument_measurement, context):
instrument_sheet = problem_specific.get_instrument_sheet(context["screen"]["problem"], vassar_instrument)
capability_value = None
for row in instrument_sheet.itertuples(index=True, name='Measurement'):
if row[2][11:-1] == instrument_measurement:
for i in range(3, len(row)):
if row[i].split()[0] == instrument_parameter:
capability_value = row[i].split()[1]
return 'The ' + instrument_parameter + ' for instrument ' + vassar_instrument + ' and measurement ' + \
instrument_measurement + ' is ' + capability_value
def get_measurement_requirement(vassar_measurement, instrument_parameter, context, new_dialogue_contexts):
new_dialogue_contexts["engineer_context"].vassar_measurement = vassar_measurement
new_dialogue_contexts["engineer_context"].instrument_parameter = instrument_parameter
requirements_sheet = problem_specific.get_requirements_sheet(context["screen"]["problem"])
requirement_found = False
requirements = []
for row in requirements_sheet.itertuples(name='Requirement'):
if row[2][1:-1] == vassar_measurement and row[3] == instrument_parameter:
requirement_found = True
requirements.append({"subobjective": row[1], "type": row[4], "thresholds": row[5]})
if requirement_found:
if len(requirements) > 1:
return 'I have found different values for this requirement depending on the stakeholder. ' \
'Please tell me for which stakeholder you want this requirement: ' \
+ ', '.join([requirement["subobjective"] for requirement in requirements])
else:
threshold = requirements[0]["thresholds"][1:-1].split(',')[-1]
target_value = requirements[0]["thresholds"][1:-1].split(',')[0]
return 'The threshold for ' + instrument_parameter + ' for ' + vassar_measurement + ' (subobjective ' + \
requirements[0]["subobjective"] + ') is ' + threshold + ' and its target value is ' + \
target_value + '.'
else:
return 'I have not found any information for this requirement.'
def get_measurement_requirement_followup(vassar_measurement, instrument_parameter, subobjective, context):
requirements_sheet = problem_specific.get_requirements_sheet(context["screen"]["problem"])
requirement = None
for row in requirements_sheet.itertuples(name='Requirement'):
if row[1] == subobjective.upper() and row[2][1:-1] == vassar_measurement and row[3] == instrument_parameter:
requirement = {"subobjective": row[1], "type": row[4], "thresholds": row[5]}
threshold = requirement["thresholds"][1:-1].split(',')[-1]
target_value = requirement["thresholds"][1:-1].split(',')[0]
return 'The threshold for ' + instrument_parameter + ' for ' + vassar_measurement + ' for subobjective ' \
+ requirement["subobjective"] + ' is ' + threshold + ' and its target value is ' + target_value + '.'
def get_cost_explanation(design_id, designs, context):
try:
# Start connection with VASSAR
port = context["screen"]["vassar_port"]
client = VASSARClient(port)
client.start_connection()
# Get the correct architecture
arch = designs.get(id=design_id)
problem = context["screen"]["problem"]
cost_explanation = client.get_arch_cost_information(problem, arch)
# End the connection before return statement
client.end_connection()
def budgets_to_json(explanation):
json_list = []
for exp in explanation:
json_exp = {
'orbit_name': exp.orbit_name,
'payload': exp.payload,
'launch_vehicle': exp.launch_vehicle,
'total_mass': exp.total_mass,
'total_power': exp.total_power,
'total_cost': exp.total_cost,
'mass_budget': exp.mass_budget,
'power_budget': exp.power_budget,
'cost_budget': exp.cost_budget
}
json_list.append(json_exp)
return json_list
json_explanation = budgets_to_json(cost_explanation)
for explanation in json_explanation:
explanation["subcosts"] = [type + ": $" + str("%.2f" % round(number, 2)) + 'M' for type, number in explanation["cost_budget"].items()]
return json_explanation
except TApplicationException as exc:
logger.exception('Exception when retrieving information from the current architecture!')
client.end_connection()
raise exc
|
[
"EOSS.vassar.api.VASSARClient",
"EOSS.data.problem_specific.get_capabilities_sheet",
"EOSS.data.problem_specific.get_requirements_sheet",
"EOSS.data.problem_specific.get_instrument_sheet",
"logging.getLogger"
] |
[((229, 263), 'logging.getLogger', 'logging.getLogger', (['"""EOSS.engineer"""'], {}), "('EOSS.engineer')\n", (246, 263), False, 'import logging\n'), ((381, 399), 'EOSS.vassar.api.VASSARClient', 'VASSARClient', (['port'], {}), '(port)\n', (393, 399), False, 'from EOSS.vassar.api import VASSARClient\n'), ((1057, 1075), 'EOSS.vassar.api.VASSARClient', 'VASSARClient', (['port'], {}), '(port)\n', (1069, 1075), False, 'from EOSS.vassar.api import VASSARClient\n'), ((2004, 2022), 'EOSS.vassar.api.VASSARClient', 'VASSARClient', (['port'], {}), '(port)\n', (2016, 2022), False, 'from EOSS.vassar.api import VASSARClient\n'), ((3470, 3488), 'EOSS.vassar.api.VASSARClient', 'VASSARClient', (['port'], {}), '(port)\n', (3482, 3488), False, 'from EOSS.vassar.api import VASSARClient\n'), ((4568, 4586), 'EOSS.vassar.api.VASSARClient', 'VASSARClient', (['port'], {}), '(port)\n', (4580, 4586), False, 'from EOSS.vassar.api import VASSARClient\n'), ((5329, 5347), 'EOSS.vassar.api.VASSARClient', 'VASSARClient', (['port'], {}), '(port)\n', (5341, 5347), False, 'from EOSS.vassar.api import VASSARClient\n'), ((5931, 5949), 'EOSS.vassar.api.VASSARClient', 'VASSARClient', (['port'], {}), '(port)\n', (5943, 5949), False, 'from EOSS.vassar.api import VASSARClient\n'), ((7098, 7167), 'EOSS.data.problem_specific.get_capabilities_sheet', 'problem_specific.get_capabilities_sheet', (["context['screen']['problem']"], {}), "(context['screen']['problem'])\n", (7137, 7167), False, 'from EOSS.data import problem_specific\n'), ((8509, 8599), 'EOSS.data.problem_specific.get_instrument_sheet', 'problem_specific.get_instrument_sheet', (["context['screen']['problem']", 'vassar_instrument'], {}), "(context['screen']['problem'],\n vassar_instrument)\n", (8546, 8599), False, 'from EOSS.data import problem_specific\n'), ((9395, 9464), 'EOSS.data.problem_specific.get_requirements_sheet', 'problem_specific.get_requirements_sheet', (["context['screen']['problem']"], {}), "(context['screen']['problem'])\n", (9434, 
9464), False, 'from EOSS.data import problem_specific\n'), ((10791, 10860), 'EOSS.data.problem_specific.get_requirements_sheet', 'problem_specific.get_requirements_sheet', (["context['screen']['problem']"], {}), "(context['screen']['problem'])\n", (10830, 10860), False, 'from EOSS.data import problem_specific\n'), ((7711, 7801), 'EOSS.data.problem_specific.get_instrument_sheet', 'problem_specific.get_instrument_sheet', (["context['screen']['problem']", 'vassar_instrument'], {}), "(context['screen']['problem'],\n vassar_instrument)\n", (7748, 7801), False, 'from EOSS.data import problem_specific\n'), ((11679, 11697), 'EOSS.vassar.api.VASSARClient', 'VASSARClient', (['port'], {}), '(port)\n', (11691, 11697), False, 'from EOSS.vassar.api import VASSARClient\n')]
|
'''
Joins multiple tsv files and clades into single dataframe.
Inputs are:
--clades (json)
--data (list of tsv files
--source, source tsv is from
--metadata
--output
'''
import argparse
import json
import pandas as pd
def make_df(data, source):
    '''
    Combines multiple data tsv files into one dataframe.

    data -- list of TSV file paths (or file-like objects) readable by pandas
    source -- list of source labels, parallel to `data`
    Returns a dataframe indexed by 'strain' with a 'source' column recording
    which file each row came from.
    '''
    frames = []
    for tsv, src in zip(data, source):
        frame = pd.read_csv(tsv, sep='\t')
        # Tag every row with the file it came from
        frame['source'] = src
        frames.append(frame)
    # DataFrame.append was deprecated and removed in pandas 2.0;
    # pd.concat is the supported way to stack the per-file frames.
    df = pd.concat(frames)
    df = df.set_index('strain')
    return df
def add_clade(clades, df):
    '''
    Adds clades for each strain in dataframe.

    clades -- path to a JSON file mapping strain -> {"clade": ...}
    df -- dataframe indexed by strain; gains a 'clade' column in place
    Strains absent from the JSON are left without a clade (NaN).
    '''
    with open(clades) as jfile:
        clades_dict = json.load(jfile)
    # Membership test directly on the dict (no .keys() needed) and
    # iterate the index directly instead of the slower iterrows().
    for strain in df.index:
        if strain in clades_dict:
            df.loc[strain, 'clade'] = clades_dict[strain]['clade']
    return df
def add_metadata(df, metadata):
    '''
    Adds metadata from ncov build to df.

    metadata -- TSV readable by pandas with strain/date/division/location/clade
    Returns a new dataframe, still indexed by strain.
    '''
    wanted_columns = ['strain', 'date', 'division', 'location', 'clade']
    ncov_meta = pd.read_csv(metadata, sep='\t')
    # Left-join on the strain index so rows without ncov metadata survive
    merged = df.merge(ncov_meta[wanted_columns], how='left',
                      left_index=True, right_on='strain')
    return merged.set_index('strain')
if __name__ == '__main__':
    arg_parser = argparse.ArgumentParser(
        description="Subsamples strains over time",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    arg_parser.add_argument('--clades', type=str, required=True, help='JSON with clade & strain')
    arg_parser.add_argument('--data', type=str, nargs='+', required=True, help='TSV metadata for UW sequences')
    arg_parser.add_argument('--source', type=str, nargs='+', required=True, help='Source of metadata')
    arg_parser.add_argument('--output', type=str, required=True, help='location of output json')
    arg_parser.add_argument('--metadata', type=str, required=True, help='Metadata from ncov build')
    args = arg_parser.parse_args()

    # Combine the per-source TSVs into one dataframe, annotate it with
    # clades and with the ncov-build metadata, then write the result.
    combined = make_df(args.data, args.source)
    combined = add_clade(args.clades, combined)
    combined_with_meta = add_metadata(combined, args.metadata)
    combined_with_meta.to_csv(args.output, sep='\t')
|
[
"pandas.DataFrame",
"json.load",
"pandas.read_csv",
"argparse.ArgumentParser"
] |
[((346, 360), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (358, 360), True, 'import pandas as pd\n'), ((983, 1014), 'pandas.read_csv', 'pd.read_csv', (['metadata'], {'sep': '"""\t"""'}), "(metadata, sep='\\t')\n", (994, 1014), True, 'import pandas as pd\n'), ((1241, 1368), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Subsamples strains over time"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Subsamples strains over time',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (1264, 1368), False, 'import argparse\n'), ((412, 436), 'pandas.read_csv', 'pd.read_csv', (['m'], {'sep': '"""\t"""'}), "(m, sep='\\t')\n", (423, 436), True, 'import pandas as pd\n'), ((695, 711), 'json.load', 'json.load', (['jfile'], {}), '(jfile)\n', (704, 711), False, 'import json\n')]
|
# Generated by Django 2.2.7 on 2020-02-12 12:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make Song.artist and Song.name optional, and enforce a unique song_id."""

    dependencies = [
        ('songs', '0003_auto_20200107_0103'),
    ]
    operations = [
        # artist may now be blank in forms and NULL in the database
        migrations.AlterField(
            model_name='song',
            name='artist',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        # name may now be blank in forms and NULL in the database
        migrations.AlterField(
            model_name='song',
            name='name',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
        # song_id must be unique across Song rows
        migrations.AlterField(
            model_name='song',
            name='song_id',
            field=models.IntegerField(unique=True),
        ),
    ]
|
[
"django.db.models.CharField",
"django.db.models.IntegerField"
] |
[((332, 387), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(255)', 'null': '(True)'}), '(blank=True, max_length=255, null=True)\n', (348, 387), False, 'from django.db import migrations, models\n'), ((505, 560), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(255)', 'null': '(True)'}), '(blank=True, max_length=255, null=True)\n', (521, 560), False, 'from django.db import migrations, models\n'), ((681, 713), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'unique': '(True)'}), '(unique=True)\n', (700, 713), False, 'from django.db import migrations, models\n')]
|
import os
import dataclasses
import docutils.parsers.rst
import docutils.statemachine
import docutils.nodes
import sphinx.addnodes
import sphinx.util.docutils
import sphinx.util.nodes
from sphinx_a4doc.settings import GrammarType, OrderSettings, GroupingSettings, EndClass
from sphinx_a4doc.settings import global_namespace, autogrammar_namespace, autorule_namespace
from sphinx_a4doc.domain import Grammar, Rule
from sphinx_a4doc.diagram_directive import RailroadDiagramNode
from sphinx_a4doc.model.model import ModelCache, Model, RuleBase
from sphinx_a4doc.model.reachable_finder import find_reachable_rules
from sphinx_a4doc.model.model_renderer import Renderer, cc_to_dash
from sphinx_a4doc.contrib.marker_nodes import find_or_add_marker
from typing import *
class ModelLoaderMixin:
    """Mixin that loads grammar models and records them as document dependencies."""

    # Models loaded by this directive instance; lazily created on first load.
    used_models: Optional[Set[Model]] = None

    def load_model(self, name: str) -> Model:
        """Resolve ``name`` against the configured base path and load its model."""
        # TODO: use grammar resolver
        settings = global_namespace.load_global_settings(self.env)
        filename = name if name.endswith('.g4') else name + '.g4'
        filename = os.path.normpath(os.path.expanduser(filename))
        full_path = os.path.join(settings.base_path, filename)
        model = ModelCache.instance().from_file(full_path)
        if self.used_models is None:
            self.used_models = set()
        self.used_models.add(model)
        return model

    def register_deps(self):
        """Register every transitively imported on-disk grammar as a file dependency."""
        if self.used_models is None:
            return
        seen = set()
        pending = list(self.used_models)
        while pending:
            current = pending.pop()
            if current in seen:
                continue
            seen.add(current)
            if not current.is_in_memory():
                self.state.document.settings.record_dependencies.add(
                    current.get_path())
            pending.extend(current.get_imports())
class DocsRendererMixin:
    """Mixin that renders extracted documentation comments into docutils nodes."""

    def render_docs(self, path: str, docs: List[Tuple[int, str]], node, titles=False):
        """Parse each ``(line, text)`` doc comment from ``path`` into ``node``.

        When ``titles`` is true, section titles are allowed in the parsed text.
        """
        for start_line, text in (docs or []):
            doc_lines = text.splitlines()
            # Attribute every rendered line back to its position in the
            # grammar file so reported errors point at the right place.
            source_items = [(path, start_line + offset - 1)
                            for offset in range(len(doc_lines))]
            content = docutils.statemachine.StringList(doc_lines, items=source_items)
            with sphinx.util.docutils.switch_source_input(self.state, content):
                if titles:
                    sphinx.util.nodes.nested_parse_with_titles(self.state, content, node)
                else:
                    self.state.nested_parse(content, 0, node)
class AutoGrammar(Grammar, ModelLoaderMixin, DocsRendererMixin):
    """
    Autogrammar directive generates a grammar description from a ``.g4`` file.
    Its only argument, ``name``, should contain path of the grammar file
    relative to the ``a4_base_path``. File extension may be omitted.

    .. TODO: reference to global settings
    .. TODO: mention grammar resolver (once it's implemented).

    Autogrammar will read a ``.g4`` file and extract grammar name (which will
    be used for cross-referencing), grammar-level documentation comments,
    set of production rules, their documentation and contents. It will then
    generate railroad diagrams and render extracted information.

    See more on how to write documentation comments and control look of the
    automatically generated railroad diagrams in the ':ref:`grammar_comments`'
    section.

    Like :rst:dir:`autoclass` and other default autodoc directives,
    ``autogrammar`` can have contents on its own. These contents will
    be merged with the automatically generated description.
    Use :rst:dir:`docstring-marker` and :rst:dir:`members-marker` to control
    merging process.

    **Options:**

    .. rst:option:: name
                    type
                    imports
                    noindex
                    diagram-*

       Inherited from :rst:dir:`a4:grammar` directive.
       If not given, :rst:opt:`:type: <a4:grammar:type>` and
       :rst:opt:`:imports: <a4:grammar:imports>`
       will be extracted from grammar file.

    .. members-marker::

    """

    required_arguments = 1
    has_content = True

    settings = autogrammar_namespace.for_directive()

    def __init__(self, *args, **kwargs):
        # BUGFIX: keyword arguments must be forwarded with ``**kwargs``.
        # The original ``super().__init__(*args, *kwargs)`` unpacked the
        # kwargs dict as positional arguments (i.e. just its keys).
        super().__init__(*args, **kwargs)
        self.root_rule: Optional[RuleBase] = None

    def run(self):
        """Load the grammar file and build its full description node tree."""
        self.name = 'a4:grammar'
        if 'cc-to-dash' in self.options and 'diagram-cc-to-dash' not in self.options:
            self.options['diagram-cc-to-dash'] = self.options['cc-to-dash']
        # Load model from file
        model = self.load_model(self.arguments[0])
        # Early exit
        if model.has_errors():
            self.register_deps()
            return [
                self.state_machine.reporter.error(
                    'unable to document this grammar',
                    line=self.lineno
                )
            ]
        # Update settings from model
        if 'imports' not in self.options:
            self.options['imports'] = [
                i.get_name() for i in model.get_imports() if i.get_name()
            ]
        if 'type' not in self.options and model.get_type():
            self.options['type'] = GrammarType[model.get_type().upper()]
        self.arguments = [model.get_name()]
        self.env.temp_data.setdefault('a4:autogrammar_ctx', []).append(model.get_path())
        try:
            # Create a skeleton of the grammar description
            nodes = super(AutoGrammar, self).run()
            # If user described some rules manually, we want that descriptions
            # to replace ones obtained from the grammar file. We also want to
            # remove all descriptions temporarily to rearrange them according
            # to the `ordering` settings
            desc_content, rule_nodes = self.cut_rule_descriptions(model, nodes)
            # Set proper ref_context
            self.before_content()
            try:
                # Find place where docstring should be rendered
                doc_node = find_or_add_marker(desc_content, 'docstring')
                # Render model docstring
                self.render_docs(model.get_path(), model.get_model_docs(), doc_node)
                # Insert docstring to the document
                doc_node.replace_self(doc_node.children)
                # Find place where to insert rule descriptions
                rules_node = find_or_add_marker(desc_content, 'members')
                # Arrange rules found in the grammar file and render them
                last_section = None
                for rule in self.make_order(model):
                    if (
                        self.settings.honor_sections and
                        self.settings.ordering is OrderSettings.BY_SOURCE and
                        last_section is not rule.section
                    ):
                        last_section = rule.section
                        if last_section is not None:
                            self.render_docs(
                                rule.position.file,
                                last_section.docs,
                                rules_node,
                                True
                            )
                    # Manual description overrides autogenerated description
                    if rule.name in rule_nodes:
                        rules_node.append(rule_nodes.pop(rule.name))
                    else:
                        rules_node.extend(self.make_rule(rule))
                # Add any rule that was described manually but that wasn't found
                # in the grammar file
                for rule in sorted(rule_nodes.values(), key=lambda x: x.line):
                    rules_node.append(rule)
                # Insert rule descriptions to the document
                rules_node.replace_self(rules_node.children)
            finally:
                self.after_content()
            return nodes
        finally:
            self.env.temp_data['a4:autogrammar_ctx'].pop()
            self.register_deps()

    def cut_rule_descriptions(self, model, nodes):
        """Detach manually written rule descriptions from ``nodes``.

        Returns the grammar's ``desc_content`` node and a mapping from rule
        name to the (removed) manually written description node.
        """
        desc_content = None
        rule_nodes = {}
        for node in nodes:
            if not isinstance(node, sphinx.addnodes.desc):
                continue
            for content_node in node.children:
                if isinstance(content_node, sphinx.addnodes.desc_content):
                    desc_content = content_node
                    break
            else:
                raise RuntimeError('no desc_content can be found')
            for rule_node in node.traverse(
                lambda x: (
                    isinstance(x, sphinx.addnodes.desc) and
                    x['domain'] == 'a4' and
                    x['objtype'] == 'rule'
                )
            ):
                sig = rule_node.next_node(sphinx.addnodes.desc_signature)
                if sig is None:
                    continue
                prefix = f'a4.{model.get_name()}.'
                for ident in sig['ids']:
                    if ident.startswith(prefix):
                        rule_nodes[ident[len(prefix):]] = rule_node
                        rule_node.replace_self([])
                        break
        assert desc_content is not None
        return desc_content, rule_nodes

    def make_order(self, model: Model) -> List[RuleBase]:
        """Select and order the rules to document, honoring the directive settings."""
        lexer_rules = []
        if self.settings.lexer_rules:
            lexer_rules = model.get_terminals()
            if not self.settings.fragments:
                lexer_rules = filter(lambda r: not r.is_fragment, lexer_rules)
            if not self.settings.undocumented:
                lexer_rules = filter(lambda r: r.documentation, lexer_rules)
            lexer_rules = list(lexer_rules)
        parser_rules = []
        if self.settings.parser_rules:
            parser_rules = model.get_non_terminals()
            if not self.settings.undocumented:
                parser_rules = filter(lambda r: r.documentation, parser_rules)
            parser_rules = list(parser_rules)
        precedence = {
            OrderSettings.BY_SOURCE: lambda rule: rule.position,
            OrderSettings.BY_NAME: lambda rule: rule.name.lower(),
        }[self.settings.ordering]
        if self.settings.grouping is GroupingSettings.MIXED:
            all_rules = sorted(lexer_rules + parser_rules, key=precedence)
        elif self.settings.grouping is GroupingSettings.LEXER_FIRST:
            all_rules = sorted(lexer_rules, key=precedence) + sorted(parser_rules, key=precedence)
        elif self.settings.grouping is GroupingSettings.PARSER_FIRST:
            all_rules = sorted(parser_rules, key=precedence) + sorted(lexer_rules, key=precedence)
        else:
            raise RuntimeError('invalid grouping parameter')
        if self.settings.only_reachable_from:
            rule_name = self.settings.only_reachable_from
            rule_model = model
            if '.' in rule_name:
                model_name, rule_name = rule_name.split('.', 1)
                rule_model = self.load_model(model_name)
            rule = rule_model.lookup(rule_name)
            self.root_rule = rule
            if rule is None:
                return all_rules
            reachable = find_reachable_rules(rule)
            return [r for r in all_rules if r in reachable]
        return all_rules

    def make_rule(self, rule: RuleBase) -> List[docutils.nodes.Node]:
        """Render a single rule: its description, docs, and railroad diagram."""
        if rule.is_doxygen_nodoc or rule.is_doxygen_inline:
            return []  # implicitly disabled
        if not rule.documentation and rule.content is None:
            return []  # nothing to document
        options = {}
        if 'noindex' in self.options:
            options['noindex'] = None
        if self.settings.cc_to_dash and not rule.display_name:
            options['name'] = cc_to_dash(rule.name)
        elif rule.display_name:
            options['name'] = rule.display_name
        rule_dir = Rule(
            name='a4:rule',
            arguments=[rule.name],
            options=options,
            content=docutils.statemachine.StringList(),
            lineno=self.lineno,
            content_offset=self.content_offset,
            block_text=self.block_text,
            state=self.state,
            state_machine=self.state_machine
        )
        nodes = rule_dir.run()
        for node in nodes:
            if not isinstance(node, sphinx.addnodes.desc):
                continue
            for content_node in node.children:
                if isinstance(content_node, sphinx.addnodes.desc_content):
                    desc_content = content_node
                    break
            else:
                raise RuntimeError('no desc_content can be found')
            if rule.documentation:
                self.render_docs(rule.position.file, rule.documentation[:1], desc_content)
                docs = rule.documentation[1:]
            else:
                docs = rule.documentation
            if not rule.is_doxygen_no_diagram:
                env = self.env
                grammar = env.ref_context.get('a4:grammar', '__default__')
                renderer = Renderer(
                    self.diagram_settings.literal_rendering,
                    self.diagram_settings.cc_to_dash
                )
                dia = renderer.visit(rule.content)
                settings = self.diagram_settings
                # The root rule of a reachability query gets a distinct end marker
                if (
                    self.settings.mark_root_rule and
                    self.root_rule is not None and
                    rule.name == self.root_rule.name and
                    rule.model is self.root_rule.model
                ):
                    settings = dataclasses.replace(settings, end_class=EndClass.COMPLEX)
                desc_content.append(
                    RailroadDiagramNode(dia, settings, grammar)
                )
            self.render_docs(rule.position.file, docs, desc_content)
            break
        return nodes
class AutoRule(Rule, ModelLoaderMixin, DocsRendererMixin):
    """
    Autorule directive renders documentation for a single rule.

    It accepts two arguments, first is a path to the grammar file relative
    to the ``a4_base_path``, second is name of the rule that should
    be documented.

    Note that autorule can only be used when within a grammar definition.
    Name of the current grammar definition must match name of the grammar
    from which the documented rule is imported.

    **Options:**

    .. rst:option:: name
                    noindex
                    diagram-*

       Inherited from :rst:dir:`a4:rule` directive.

    .. members-marker::

    """

    settings = autorule_namespace.for_directive()

    required_arguments = 1
    optional_arguments = 2
    has_content = True

    def run(self):
        """Locate the rule in its grammar model and render its description."""
        self.name = 'a4:rule'
        if len(self.arguments) == 2:
            path, rule_name = self.arguments
        else:
            # Single argument: infer the grammar path from the surrounding
            # autogrammar context or the current grammar ref_context.
            rule_name = self.arguments[0]
            if self.env.temp_data.get('a4:autogrammar_ctx'):
                path = self.env.temp_data['a4:autogrammar_ctx'][-1]
            elif 'a4:grammar' in self.env.ref_context:
                path = self.env.ref_context['a4:grammar']
            else:
                return [
                    self.state_machine.reporter.error(
                        'could not figure out grammar path for autorule directive',
                        line=self.lineno
                    )
                ]
        model = self.load_model(path)
        if model.has_errors():
            self.register_deps()
            return [
                self.state_machine.reporter.error(
                    'unable to document this rule',
                    line=self.lineno
                )
            ]
        if self.env.ref_context.get('a4:grammar') != model.get_name():
            # BUGFIX: error text used to read 'cannot only use autorule ...',
            # which inverted the intended meaning.
            return [
                self.state_machine.reporter.error(
                    'can only use autorule while within a proper '
                    'grammar definition',
                    line=self.lineno
                )
            ]
        rule = model.lookup(rule_name)
        if rule is None:
            self.register_deps()
            return [
                self.state_machine.reporter.error(
                    f'unknown rule {rule_name!r}',
                    line=self.lineno
                )
            ]
        if rule.display_name and 'name' not in self.options:
            self.options['name'] = rule.display_name
        self.arguments = [rule.name]
        self.env.temp_data.setdefault('a4:autogrammar_ctx', []).append(model.get_path())
        try:
            nodes = super(AutoRule, self).run()
            desc_content = self.find_desc_content(nodes)
            self.before_content()
            try:
                doc_node = find_or_add_marker(desc_content, 'docstring')
                if rule.documentation:
                    self.render_docs(rule.position.file, rule.documentation[:1], doc_node)
                    docs = rule.documentation[1:]
                else:
                    docs = rule.documentation
                if not rule.is_doxygen_no_diagram:
                    env = self.env
                    grammar = env.ref_context.get('a4:grammar', '__default__')
                    renderer = Renderer(
                        self.diagram_settings.literal_rendering,
                        self.diagram_settings.cc_to_dash
                    )
                    dia = renderer.visit(rule.content)
                    settings = self.diagram_settings
                    doc_node.append(
                        RailroadDiagramNode(dia, settings, grammar)
                    )
                self.render_docs(rule.position.file, docs, doc_node)
                doc_node.replace_self(doc_node.children)
            finally:
                self.after_content()
            return nodes
        finally:
            self.env.temp_data['a4:autogrammar_ctx'].pop()
            self.register_deps()

    def find_desc_content(self, nodes):
        """Return the ``desc_content`` child of the first desc node in ``nodes``.

        BUGFIX: an unconditional ``break`` in the child loop used to stop the
        scan after the first child, so a ``desc_content`` that was not the
        first child could never be found. All children are now inspected,
        matching the equivalent scan in ``AutoGrammar``.
        """
        for node in nodes:
            if not isinstance(node, sphinx.addnodes.desc):
                continue
            for content_node in node.children:
                if isinstance(content_node, sphinx.addnodes.desc_content):
                    return content_node
        raise RuntimeError('no desc_content can be found')
|
[
"sphinx_a4doc.model.reachable_finder.find_reachable_rules",
"sphinx_a4doc.model.model.ModelCache.instance",
"sphinx_a4doc.settings.autogrammar_namespace.for_directive",
"os.path.join",
"sphinx_a4doc.settings.autorule_namespace.for_directive",
"sphinx_a4doc.model.model_renderer.cc_to_dash",
"sphinx_a4doc.diagram_directive.RailroadDiagramNode",
"sphinx_a4doc.contrib.marker_nodes.find_or_add_marker",
"sphinx_a4doc.model.model_renderer.Renderer",
"sphinx_a4doc.settings.global_namespace.load_global_settings",
"os.path.expanduser",
"dataclasses.replace"
] |
[((4078, 4115), 'sphinx_a4doc.settings.autogrammar_namespace.for_directive', 'autogrammar_namespace.for_directive', ([], {}), '()\n', (4113, 4115), False, 'from sphinx_a4doc.settings import global_namespace, autogrammar_namespace, autorule_namespace\n'), ((14609, 14643), 'sphinx_a4doc.settings.autorule_namespace.for_directive', 'autorule_namespace.for_directive', ([], {}), '()\n', (14641, 14643), False, 'from sphinx_a4doc.settings import global_namespace, autogrammar_namespace, autorule_namespace\n'), ((1134, 1163), 'os.path.join', 'os.path.join', (['base_path', 'name'], {}), '(base_path, name)\n', (1146, 1163), False, 'import os\n'), ((940, 987), 'sphinx_a4doc.settings.global_namespace.load_global_settings', 'global_namespace.load_global_settings', (['self.env'], {}), '(self.env)\n', (977, 987), False, 'from sphinx_a4doc.settings import global_namespace, autogrammar_namespace, autorule_namespace\n'), ((1093, 1117), 'os.path.expanduser', 'os.path.expanduser', (['name'], {}), '(name)\n', (1111, 1117), False, 'import os\n'), ((11187, 11213), 'sphinx_a4doc.model.reachable_finder.find_reachable_rules', 'find_reachable_rules', (['rule'], {}), '(rule)\n', (11207, 11213), False, 'from sphinx_a4doc.model.reachable_finder import find_reachable_rules\n'), ((11772, 11793), 'sphinx_a4doc.model.model_renderer.cc_to_dash', 'cc_to_dash', (['rule.name'], {}), '(rule.name)\n', (11782, 11793), False, 'from sphinx_a4doc.model.model_renderer import Renderer, cc_to_dash\n'), ((1180, 1201), 'sphinx_a4doc.model.model.ModelCache.instance', 'ModelCache.instance', ([], {}), '()\n', (1199, 1201), False, 'from sphinx_a4doc.model.model import ModelCache, Model, RuleBase\n'), ((5963, 6008), 'sphinx_a4doc.contrib.marker_nodes.find_or_add_marker', 'find_or_add_marker', (['desc_content', '"""docstring"""'], {}), "(desc_content, 'docstring')\n", (5981, 6008), False, 'from sphinx_a4doc.contrib.marker_nodes import find_or_add_marker\n'), ((6335, 6378), 
'sphinx_a4doc.contrib.marker_nodes.find_or_add_marker', 'find_or_add_marker', (['desc_content', '"""members"""'], {}), "(desc_content, 'members')\n", (6353, 6378), False, 'from sphinx_a4doc.contrib.marker_nodes import find_or_add_marker\n'), ((13093, 13181), 'sphinx_a4doc.model.model_renderer.Renderer', 'Renderer', (['self.diagram_settings.literal_rendering', 'self.diagram_settings.cc_to_dash'], {}), '(self.diagram_settings.literal_rendering, self.diagram_settings.\n cc_to_dash)\n', (13101, 13181), False, 'from sphinx_a4doc.model.model_renderer import Renderer, cc_to_dash\n'), ((16770, 16815), 'sphinx_a4doc.contrib.marker_nodes.find_or_add_marker', 'find_or_add_marker', (['desc_content', '"""docstring"""'], {}), "(desc_content, 'docstring')\n", (16788, 16815), False, 'from sphinx_a4doc.contrib.marker_nodes import find_or_add_marker\n'), ((13624, 13681), 'dataclasses.replace', 'dataclasses.replace', (['settings'], {'end_class': 'EndClass.COMPLEX'}), '(settings, end_class=EndClass.COMPLEX)\n', (13643, 13681), False, 'import dataclasses\n'), ((13739, 13782), 'sphinx_a4doc.diagram_directive.RailroadDiagramNode', 'RailroadDiagramNode', (['dia', 'settings', 'grammar'], {}), '(dia, settings, grammar)\n', (13758, 13782), False, 'from sphinx_a4doc.diagram_directive import RailroadDiagramNode\n'), ((17262, 17350), 'sphinx_a4doc.model.model_renderer.Renderer', 'Renderer', (['self.diagram_settings.literal_rendering', 'self.diagram_settings.cc_to_dash'], {}), '(self.diagram_settings.literal_rendering, self.diagram_settings.\n cc_to_dash)\n', (17270, 17350), False, 'from sphinx_a4doc.model.model_renderer import Renderer, cc_to_dash\n'), ((17587, 17630), 'sphinx_a4doc.diagram_directive.RailroadDiagramNode', 'RailroadDiagramNode', (['dia', 'settings', 'grammar'], {}), '(dia, settings, grammar)\n', (17606, 17630), False, 'from sphinx_a4doc.diagram_directive import RailroadDiagramNode\n')]
|
import json
import os
import platform
import textwrap
import pytest
from conan.tools.cmake.presets import load_cmake_presets
from conan.tools.microsoft.visual import vcvars_command
from conans.client.tools import replace_in_file
from conans.model.ref import ConanFileReference
from conans.test.assets.cmake import gen_cmakelists
from conans.test.assets.genconanfile import GenConanfile
from conans.test.utils.tools import TestClient, TurboTestClient
from conans.util.files import save, load, rmdir
@pytest.mark.skipif(platform.system() != "Windows", reason="Only for windows")
@pytest.mark.parametrize("compiler, version, update, runtime",
                         [("msvc", "192", None, "dynamic"),
                          ("msvc", "192", "6", "static"),
                          ("msvc", "192", "8", "static")])
def test_cmake_toolchain_win_toolset(compiler, version, update, runtime):
    """conan_toolchain.cmake must pin CMAKE_GENERATOR_TOOLSET to the msvc toolset."""
    client = TestClient(path_with_spaces=False)
    setting_values = {"compiler": compiler,
                      "compiler.version": version,
                      "compiler.update": update,
                      "compiler.cppstd": "17",
                      "compiler.runtime": runtime,
                      "build_type": "Release",
                      "arch": "x86_64"}
    # Turn the dict into "-s key=value" CLI arguments, skipping unset entries
    settings_args = " ".join(f'-s {key}="{value}"'
                             for key, value in setting_values.items() if value)
    conanfile = GenConanfile().with_settings("os", "compiler", "build_type", "arch").\
        with_generator("CMakeToolchain")
    client.save({"conanfile.py": conanfile})
    client.run(f"install . {settings_args}")
    toolchain = client.load("conan_toolchain.cmake")
    # With an explicit update the toolset is the full "version=14.XY" spec,
    # otherwise the short "v14X" alias is expected.
    if update is not None:
        expected = f"version=14.{version[-1]}{update}"
    else:
        expected = f"v14{version[-1]}"
    assert f'set(CMAKE_GENERATOR_TOOLSET "{expected}" CACHE STRING "" FORCE)' in toolchain
def test_cmake_toolchain_user_toolchain():
    """A user_toolchain appended via global conf must be include()-d by the toolchain."""
    client = TestClient(path_with_spaces=False)
    save(client.cache.new_config_path,
         "tools.cmake.cmaketoolchain:user_toolchain+=mytoolchain.cmake")
    recipe = GenConanfile().with_settings("os", "compiler", "build_type", "arch").\
        with_generator("CMakeToolchain")
    client.save({"conanfile.py": recipe})
    client.run("install .")
    generated = client.load("conan_toolchain.cmake")
    assert 'include("mytoolchain.cmake")' in generated
def test_cmake_toolchain_custom_toolchain():
    """A toolchain_file conf replaces conan_toolchain.cmake entirely."""
    client = TestClient(path_with_spaces=False)
    save(client.cache.new_config_path,
         "tools.cmake.cmaketoolchain:toolchain_file=mytoolchain.cmake")
    recipe = GenConanfile().with_settings("os", "compiler", "build_type", "arch").\
        with_generator("CMakeToolchain")
    client.save({"conanfile.py": recipe})
    client.run("install .")
    # No conan_toolchain.cmake is written; presets point at the custom file instead
    assert not os.path.exists(os.path.join(client.current_folder, "conan_toolchain.cmake"))
    presets = load_cmake_presets(client.current_folder)
    assert "mytoolchain.cmake" in presets["configurePresets"][0]["toolchainFile"]
    assert "binaryDir" not in presets["configurePresets"][0]
@pytest.mark.skipif(platform.system() != "Darwin",
                    reason="Single config test, Linux CI still without 3.23")
@pytest.mark.tool_cmake(version="3.23")
@pytest.mark.parametrize("existing_user_presets", [None, "user_provided", "conan_generated"])
def test_cmake_user_presets_load(existing_user_presets):
    """
    Test if the CMakeUserPresets.cmake is generated and use CMake to use it to verify the right
    syntax of generated CMakeUserPresets.cmake and CMakePresets.cmake. If the user already provided
    a CMakeUserPresets.cmake, leave the file untouched, and only generate or modify the file if
    the `conan` object exists in the `vendor` field.
    """
    t = TestClient()
    t.run("new mylib/1.0 --template cmake_lib")
    t.run("create . -s:h build_type=Release")
    t.run("create . -s:h build_type=Debug")
    consumer = textwrap.dedent("""
        from conan import ConanFile
        from conan.tools.cmake import cmake_layout

        class Consumer(ConanFile):
            settings = "build_type", "os", "arch", "compiler"
            requires = "mylib/1.0"
            generators = "CMakeToolchain", "CMakeDeps"

            def layout(self):
                cmake_layout(self)
        """)
    cmakelist = textwrap.dedent("""
        cmake_minimum_required(VERSION 3.1)
        project(PackageTest CXX)
        find_package(mylib REQUIRED CONFIG)
        """)
    # Optionally pre-seed a CMakeUserPresets.json: either a plain user file
    # (must be left untouched) or one previously generated by Conan
    # (identified by the "conan" vendor entry, may be overwritten).
    user_presets = None
    if existing_user_presets == "user_provided":
        user_presets = "{}"
    elif existing_user_presets == "conan_generated":
        user_presets = '{ "vendor": {"conan": {} } }'
    files_to_save = {"conanfile.py": consumer, "CMakeLists.txt": cmakelist}
    if user_presets:
        files_to_save['CMakeUserPresets.json'] = user_presets
    t.save(files_to_save, clean_first=True)
    t.run("install . -s:h build_type=Debug -g CMakeToolchain")
    t.run("install . -s:h build_type=Release -g CMakeToolchain")
    user_presets_path = os.path.join(t.current_folder, "CMakeUserPresets.json")
    assert os.path.exists(user_presets_path)
    user_presets_data = json.loads(load(user_presets_path))
    if existing_user_presets == "user_provided":
        # The user's own file must not have been modified
        assert not user_presets_data
    else:
        assert "include" in user_presets_data.keys()
    # Idiomatic identity check (was "== None")
    if existing_user_presets is None:
        t.run_command("cmake . --preset release")
        assert 'CMAKE_BUILD_TYPE="Release"' in t.out
        t.run_command("cmake . --preset debug")
        assert 'CMAKE_BUILD_TYPE="Debug"' in t.out
def test_cmake_toolchain_user_toolchain_from_dep():
    # A build_require can expose its own CMake toolchain file through the
    # "tools.cmake.cmaketoolchain:user_toolchain" conf; the consumer's
    # generated conan_toolchain.cmake must include() it during the build.
    client = TestClient()
    conanfile = textwrap.dedent("""
        import os
        from conans import ConanFile
        class Pkg(ConanFile):
            exports_sources = "*"
            def package(self):
                self.copy("*")
            def package_info(self):
                f = os.path.join(self.package_folder, "mytoolchain.cmake")
                self.conf_info.append("tools.cmake.cmaketoolchain:user_toolchain", f)
        """)
    # The toolchain package ships mytoolchain.cmake and appends it to the conf
    client.save({"conanfile.py": conanfile,
                 "mytoolchain.cmake": 'message(STATUS "mytoolchain.cmake !!!running!!!")'})
    client.run("create . toolchain/0.1@")
    conanfile = textwrap.dedent("""
        from conans import ConanFile
        from conan.tools.cmake import CMake
        class Pkg(ConanFile):
            settings = "os", "compiler", "arch", "build_type"
            exports_sources = "CMakeLists.txt"
            build_requires = "toolchain/0.1"
            generators = "CMakeToolchain"
            def build(self):
                cmake = CMake(self)
                cmake.configure()
        """)
    client.save({"conanfile.py": conanfile,
                 "CMakeLists.txt": gen_cmakelists()}, clean_first=True)
    client.run("create . pkg/0.1@")
    # The message can only appear if conan_toolchain.cmake included the file
    assert "mytoolchain.cmake !!!running!!!" in client.out
def test_cmake_toolchain_without_build_type():
    # If "build_type" is not defined, toolchain will still be generated, it will not crash
    # Main effect is CMAKE_MSVC_RUNTIME_LIBRARY not being defined
    client = TestClient(path_with_spaces=False)
    recipe = GenConanfile().with_settings("os", "compiler", "arch").\
        with_generator("CMakeToolchain")
    client.save({"conanfile.py": recipe})
    client.run("install .")
    generated = client.load("conan_toolchain.cmake")
    for variable in ("CMAKE_MSVC_RUNTIME_LIBRARY", "CMAKE_BUILD_TYPE"):
        assert variable not in generated
def test_cmake_toolchain_multiple_user_toolchain():
    """ A consumer consuming two packages that declare:
            self.conf_info["tools.cmake.cmaketoolchain:user_toolchain"]
        The consumer wants to use apply both toolchains in the CMakeToolchain.
        There are two ways to customize the CMakeToolchain (parametrized):
            1. Altering the context of the block (with_context = True)
            2. Using the t.blocks["user_toolchain"].user_toolchains = [] (with_context = False)
    """
    client = TestClient()
    conanfile = textwrap.dedent("""
        import os
        from conans import ConanFile
        class Pkg(ConanFile):
            exports_sources = "*"
            def package(self):
                self.copy("*")
            def package_info(self):
                f = os.path.join(self.package_folder, "mytoolchain.cmake")
                self.conf_info.append("tools.cmake.cmaketoolchain:user_toolchain", f)
        """)
    # Two independent tool packages, each appending its own mytoolchain.cmake
    # to the user_toolchain conf
    client.save({"conanfile.py": conanfile,
                 "mytoolchain.cmake": 'message(STATUS "mytoolchain1.cmake !!!running!!!")'})
    client.run("create . toolchain1/0.1@")
    client.save({"conanfile.py": conanfile,
                 "mytoolchain.cmake": 'message(STATUS "mytoolchain2.cmake !!!running!!!")'})
    client.run("create . toolchain2/0.1@")
    conanfile = textwrap.dedent("""
        from conans import ConanFile
        from conan.tools.cmake import CMake
        class Pkg(ConanFile):
            settings = "os", "compiler", "arch", "build_type"
            exports_sources = "CMakeLists.txt"
            tool_requires = "toolchain1/0.1", "toolchain2/0.1"
            generators = "CMakeToolchain"
            def build(self):
                cmake = CMake(self)
                cmake.configure()
        """)
    client.save({"conanfile.py": conanfile,
                 "CMakeLists.txt": gen_cmakelists()}, clean_first=True)
    client.run("create . pkg/0.1@")
    # Both files must have been include()-d by the generated conan_toolchain.cmake
    assert "mytoolchain1.cmake !!!running!!!" in client.out
    assert "mytoolchain2.cmake !!!running!!!" in client.out
@pytest.mark.tool_cmake
def test_cmaketoolchain_no_warnings():
    """Make sure uninitialized variables do not cause any warnings, passing -Werror=dev
    and --warn-uninitialized, calling "cmake" with conan_toolchain.cmake used to fail
    """
    # Issue https://github.com/conan-io/conan/issues/10288
    client = TestClient()
    conanfile = textwrap.dedent("""
        from conans import ConanFile
        class Conan(ConanFile):
            settings = "os", "compiler", "arch", "build_type"
            generators = "CMakeToolchain", "CMakeDeps"
            requires = "dep/0.1"
        """)
    consumer = textwrap.dedent("""
        cmake_minimum_required(VERSION 3.15)
        set(CMAKE_CXX_COMPILER_WORKS 1)
        set(CMAKE_CXX_ABI_COMPILED 1)
        project(MyHello CXX)

        find_package(dep CONFIG REQUIRED)
        """)
    client.save({"dep/conanfile.py": GenConanfile("dep", "0.1"),
                 "conanfile.py": conanfile,
                 "CMakeLists.txt": consumer})
    client.run("create dep")
    client.run("install .")
    build_type = "-DCMAKE_BUILD_TYPE=Release" if platform.system() != "Windows" else ""
    # BUGFIX: a space is required after {} so the build_type flag does not fuse
    # with -Werror=dev (it previously produced "-DCMAKE_BUILD_TYPE=Release-Werror=dev")
    client.run_command("cmake . -DCMAKE_TOOLCHAIN_FILE=./conan_toolchain.cmake {} "
                       "-Werror=dev --warn-uninitialized".format(build_type))
    assert "Using Conan toolchain" in client.out
    # The real test is that there are no errors, it returns successfully
def test_install_output_directories():
    """
    If we change the libdirs of the cpp.package, as we are doing cmake.install, the output directory
    for the libraries is changed
    """
    ref = ConanFileReference.loads("zlib/1.2.11")
    client = TurboTestClient()
    client.run("new zlib/1.2.11 --template cmake_lib")
    cf = client.load("conanfile.py")
    pref = client.create(ref, conanfile=cf)
    p_folder = client.cache.package_layout(pref.ref).package(pref)
    # With the unmodified template, libraries install into the default "lib"
    assert not os.path.exists(os.path.join(p_folder, "mylibs"))
    assert os.path.exists(os.path.join(p_folder, "lib"))
    # Edit the cpp.package.libdirs and check if the library is placed anywhere else
    cf = client.load("conanfile.py")
    cf = cf.replace("cmake_layout(self)",
                    'cmake_layout(self)\n        self.cpp.package.libdirs = ["mylibs"]')
    pref = client.create(ref, conanfile=cf)
    p_folder = client.cache.package_layout(pref.ref).package(pref)
    # Now the library must land in "mylibs" instead of "lib"
    assert os.path.exists(os.path.join(p_folder, "mylibs"))
    assert not os.path.exists(os.path.join(p_folder, "lib"))
    # The generated toolchain must also advertise the overridden install libdir
    b_folder = client.cache.package_layout(pref.ref).build(pref)
    toolchain = client.load(os.path.join(b_folder, "build", "generators", "conan_toolchain.cmake"))
    assert 'set(CMAKE_INSTALL_LIBDIR "mylibs")' in toolchain
@pytest.mark.tool_cmake
def test_cmake_toolchain_definitions_complex_strings():
    # https://github.com/conan-io/conan/issues/11043
    # Definitions containing escaped quotes and spaces must survive both the
    # conf route (tools.build:defines) and the CMakeToolchain
    # preprocessor_definitions API, including per-config release/debug ones.
    client = TestClient(path_with_spaces=False)
    profile = textwrap.dedent(r'''
        include(default)
        [conf]
        tools.build:defines+=["escape=partially \"escaped\""]
        tools.build:defines+=["spaces=me you"]
        tools.build:defines+=["foobar=bazbuz"]
        tools.build:defines+=["answer=42"]
        ''')
    conanfile = textwrap.dedent(r'''
        from conan import ConanFile
        from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
        class Test(ConanFile):
            exports_sources = "CMakeLists.txt", "src/*"
            settings = "os", "compiler", "arch", "build_type"
            def generate(self):
                tc = CMakeToolchain(self)
                tc.preprocessor_definitions["escape2"] = "partially \"escaped\""
                tc.preprocessor_definitions["spaces2"] = "me you"
                tc.preprocessor_definitions["foobar2"] = "bazbuz"
                tc.preprocessor_definitions["answer2"] = 42
                tc.preprocessor_definitions.release["escape_release"] = "release partially \"escaped\""
                tc.preprocessor_definitions.release["spaces_release"] = "release me you"
                tc.preprocessor_definitions.release["foobar_release"] = "release bazbuz"
                tc.preprocessor_definitions.release["answer_release"] = 42
                tc.preprocessor_definitions.debug["escape_debug"] = "debug partially \"escaped\""
                tc.preprocessor_definitions.debug["spaces_debug"] = "debug me you"
                tc.preprocessor_definitions.debug["foobar_debug"] = "debug bazbuz"
                tc.preprocessor_definitions.debug["answer_debug"] = 21
                tc.generate()
            def layout(self):
                cmake_layout(self)
            def build(self):
                cmake = CMake(self)
                cmake.configure()
                cmake.build()
        ''')
    # The C program just prints every define back so the asserts below can
    # check the values round-tripped intact
    main = textwrap.dedent("""
        #include <stdio.h>
        #define STR(x) #x
        #define SHOW_DEFINE(x) printf("%s=%s", #x, STR(x))
        int main(int argc, char *argv[]) {
            SHOW_DEFINE(escape);
            SHOW_DEFINE(spaces);
            SHOW_DEFINE(foobar);
            SHOW_DEFINE(answer);
            SHOW_DEFINE(escape2);
            SHOW_DEFINE(spaces2);
            SHOW_DEFINE(foobar2);
            SHOW_DEFINE(answer2);
        #ifdef NDEBUG
            SHOW_DEFINE(escape_release);
            SHOW_DEFINE(spaces_release);
            SHOW_DEFINE(foobar_release);
            SHOW_DEFINE(answer_release);
        #else
            SHOW_DEFINE(escape_debug);
            SHOW_DEFINE(spaces_debug);
            SHOW_DEFINE(foobar_debug);
            SHOW_DEFINE(answer_debug);
        #endif
            return 0;
        }
        """)
    cmakelists = textwrap.dedent("""
        cmake_minimum_required(VERSION 3.15)
        project(Test CXX)
        set(CMAKE_CXX_STANDARD 11)
        add_executable(example src/main.cpp)
        """)
    client.save({"conanfile.py": conanfile, "profile": profile, "src/main.cpp": main,
                 "CMakeLists.txt": cmakelists}, clean_first=True)
    client.run("install . -pr=./profile -if=install")
    client.run("build . -if=install")
    exe = "build/Release/example" if platform.system() != "Windows" else r"build\Release\example.exe"
    client.run_command(exe)
    assert 'escape=partially "escaped"' in client.out
    assert 'spaces=me you' in client.out
    assert 'foobar=bazbuz' in client.out
    assert 'answer=42' in client.out
    assert 'escape2=partially "escaped"' in client.out
    assert 'spaces2=me you' in client.out
    assert 'foobar2=bazbuz' in client.out
    assert 'answer2=42' in client.out
    assert 'escape_release=release partially "escaped"' in client.out
    assert 'spaces_release=release me you' in client.out
    assert 'foobar_release=release bazbuz' in client.out
    assert 'answer_release=42' in client.out
    # Same check in Debug: the .debug definitions must be the active set
    client.run("install . -pr=./profile -if=install -s build_type=Debug")
    client.run("build . -if=install -s build_type=Debug")
    exe = "build/Debug/example" if platform.system() != "Windows" else r"build\Debug\example.exe"
    client.run_command(exe)
    assert 'escape_debug=debug partially "escaped"' in client.out
    assert 'spaces_debug=debug me you' in client.out
    assert 'foobar_debug=debug bazbuz' in client.out
    assert 'answer_debug=21' in client.out
class TestAutoLinkPragma:
    """MSVC auto-link (#pragma comment(lib, ...)) support via the CMakeDeps
    property cmake_set_interface_link_directories; see issue #10837."""
    # Consumer test_package setting cmake_deps.set_interface_link_directories = True
    test_cf = textwrap.dedent("""
        import os
        from conan import ConanFile
        from conan.tools.cmake import CMake, cmake_layout, CMakeDeps
        from conan.tools.build import cross_building
        class HelloTestConan(ConanFile):
            settings = "os", "compiler", "build_type", "arch"
            generators = "CMakeToolchain", "VirtualBuildEnv", "VirtualRunEnv"
            apply_env = False
            test_type = "explicit"
            def generate(self):
                deps = CMakeDeps(self)
                deps.set_interface_link_directories = True
                deps.generate()
            def requirements(self):
                self.requires(self.tested_reference_str)
            def build(self):
                cmake = CMake(self)
                cmake.configure()
                cmake.build()
            def layout(self):
                cmake_layout(self)
            def test(self):
                if not cross_building(self):
                    cmd = os.path.join(self.cpp.build.bindirs[0], "example")
                    self.run(cmd, env="conanrun")
        """)
    @pytest.mark.skipif(platform.system() != "Windows", reason="Requires Visual Studio")
    @pytest.mark.tool_cmake
    def test_autolink_pragma_components(self):
        """https://github.com/conan-io/conan/issues/10837
        NOTE: At the moment the property cmake_set_interface_link_directories is only read at the
        global cppinfo, not in the components"""
        client = TestClient()
        client.run("new hello/1.0 --template cmake_lib")
        cf = client.load("conanfile.py")
        # Move the lib into a component with empty libs; auto-link relies on
        # the #pragma added to the header below instead
        cf = cf.replace('self.cpp_info.libs = ["hello"]', """
        self.cpp_info.components['my_component'].includedirs.append('include')
        self.cpp_info.components['my_component'].libdirs.append('lib')
        self.cpp_info.components['my_component'].libs = []
        self.cpp_info.set_property("cmake_set_interface_link_directories", True)
        """)
        hello_h = client.load("include/hello.h")
        hello_h = hello_h.replace("#define hello_EXPORT __declspec(dllexport)",
                                  '#define hello_EXPORT __declspec(dllexport)\n'
                                  '#pragma comment(lib, "hello")')
        test_cmakelist = client.load("test_package/CMakeLists.txt")
        test_cmakelist = test_cmakelist.replace("target_link_libraries(example hello::hello)",
                                                "target_link_libraries(example hello::my_component)")
        client.save({"conanfile.py": cf,
                     "include/hello.h": hello_h,
                     "test_package/CMakeLists.txt": test_cmakelist,
                     "test_package/conanfile.py": self.test_cf})
        client.run("create .")
    @pytest.mark.skipif(platform.system() != "Windows", reason="Requires Visual Studio")
    @pytest.mark.tool_cmake
    def test_autolink_pragma_without_components(self):
        """https://github.com/conan-io/conan/issues/10837"""
        client = TestClient()
        client.run("new hello/1.0 --template cmake_lib")
        cf = client.load("conanfile.py")
        # Same as the components test, but on the global cpp_info
        cf = cf.replace('self.cpp_info.libs = ["hello"]', """
        self.cpp_info.includedirs.append('include')
        self.cpp_info.libdirs.append('lib')
        self.cpp_info.libs = []
        self.cpp_info.set_property("cmake_set_interface_link_directories", True)
        """)
        hello_h = client.load("include/hello.h")
        hello_h = hello_h.replace("#define hello_EXPORT __declspec(dllexport)",
                                  '#define hello_EXPORT __declspec(dllexport)\n'
                                  '#pragma comment(lib, "hello")')
        client.save({"conanfile.py": cf,
                     "include/hello.h": hello_h,
                     "test_package/conanfile.py": self.test_cf})
        client.run("create .")
@pytest.mark.skipif(platform.system() != "Windows", reason="Only for windows")
def test_cmake_toolchain_runtime_types():
    """Static debug runtime (MTd) must be honored by the generated toolchain.

    Everything works with the default cmake_minimum_required version 3.15 in
    the template: the built lib carries a LIBCMTD directive.
    """
    t = TestClient(path_with_spaces=False)
    t.run("new hello/0.1 --template=cmake_lib")
    t.run("install . -s compiler.runtime=MTd -s build_type=Debug")
    t.run("build .")
    vc = vcvars_command(version="15", architecture="x64")
    lib_path = os.path.join(t.current_folder, "build", "Debug", "hello.lib")
    # Inspect the produced static lib with dumpbin to read its runtime directive
    t.run_command('{} && dumpbin /directives "{}"'.format(vc, lib_path))
    assert "LIBCMTD" in t.out
@pytest.mark.skipif(platform.system() != "Windows", reason="Only for windows")
def test_cmake_toolchain_runtime_types_cmake_older_than_3_15():
    """Same runtime check but with cmake_minimum_required downgraded to 3.1.

    An older cmake_minimum_required in the CMakeLists would link against the
    default debug runtime (MDd->MSVCRTD), not against MTd->LIBCMTD.
    """
    t = TestClient(path_with_spaces=False)
    t.run("new hello/0.1 --template=cmake_lib")
    # Downgrade the minimum CMake version in the generated project
    replace_in_file(os.path.join(t.current_folder, "CMakeLists.txt"),
                    'cmake_minimum_required(VERSION 3.15)',
                    'cmake_minimum_required(VERSION 3.1)',
                    output=t.out)
    t.run("install . -s compiler.runtime=MTd -s build_type=Debug")
    t.run("build .")
    vc = vcvars_command(version="15", architecture="x64")
    lib_path = os.path.join(t.current_folder, "build", "Debug", "hello.lib")
    t.run_command('{} && dumpbin /directives "{}"'.format(vc, lib_path))
    assert "LIBCMTD" in t.out
@pytest.mark.tool_cmake(version="3.23")
def test_cmake_presets_missing_option():
    """A build_folder_vars entry naming a non-existing option must not break the layout."""
    t = TestClient(path_with_spaces=False)
    t.run("new hello/0.1 --template=cmake_exe")
    conf = ('-c tools.cmake.cmake_layout:build_folder_vars='
            '\'["options.missing"]\'')
    t.run("install . {}".format(conf))
    # The generators folder is still created under the plain "build" dir
    assert os.path.exists(os.path.join(t.current_folder, "build", "generators"))
@pytest.mark.tool_cmake(version="3.23")
def test_cmake_presets_missing_setting():
    """A build_folder_vars entry naming a non-existing setting must not break the layout."""
    t = TestClient(path_with_spaces=False)
    t.run("new hello/0.1 --template=cmake_exe")
    conf = ('-c tools.cmake.cmake_layout:build_folder_vars='
            '\'["settings.missing"]\'')
    t.run("install . {}".format(conf))
    # The generators folder is still created under the plain "build" dir
    assert os.path.exists(os.path.join(t.current_folder, "build", "generators"))
@pytest.mark.tool_cmake(version="3.23")
def test_cmake_presets_multiple_settings_single_config():
    # Layout folders and preset names must be derived from the configured
    # build_folder_vars (compiler, compiler.version, compiler.cppstd), and
    # CMakeUserPresets.json must accumulate one include per distinct config.
    client = TestClient(path_with_spaces=False)
    client.run("new hello/0.1 --template=cmake_exe")
    settings_layout = '-c tools.cmake.cmake_layout:build_folder_vars=' \
                      '\'["settings.compiler", "settings.compiler.version", ' \
                      ' "settings.compiler.cppstd"]\''
    user_presets_path = os.path.join(client.current_folder, "CMakeUserPresets.json")
    # Check that all generated names are expected, both in the layout and in the Presets
    settings = "-s compiler=apple-clang -s compiler.libcxx=libc++ " \
               "-s compiler.version=12.0 -s compiler.cppstd=gnu17"
    client.run("install . {} {}".format(settings, settings_layout))
    assert os.path.exists(os.path.join(client.current_folder, "build", "apple-clang-12.0-gnu17",
                                       "generators"))
    assert os.path.exists(user_presets_path)
    user_presets = json.loads(load(user_presets_path))
    assert len(user_presets["include"]) == 1
    presets = json.loads(load(user_presets["include"][0]))
    assert len(presets["configurePresets"]) == 1
    assert len(presets["buildPresets"]) == 1
    assert presets["configurePresets"][0]["name"] == "apple-clang-12.0-gnu17-release"
    assert presets["buildPresets"][0]["name"] == "apple-clang-12.0-gnu17-release"
    assert presets["buildPresets"][0]["configurePreset"] == "apple-clang-12.0-gnu17-release"
    # If we create the "Debug" one, it has the same toolchain and preset file, that is
    # always multiconfig
    client.run("install . {} -s build_type=Debug {}".format(settings, settings_layout))
    assert os.path.exists(os.path.join(client.current_folder, "build", "apple-clang-12.0-gnu17", "generators"))
    assert os.path.exists(user_presets_path)
    user_presets = json.loads(load(user_presets_path))
    assert len(user_presets["include"]) == 1
    presets = json.loads(load(user_presets["include"][0]))
    # Same include file, but now with both release and debug presets inside
    assert len(presets["configurePresets"]) == 2
    assert len(presets["buildPresets"]) == 2
    assert presets["configurePresets"][0]["name"] == "apple-clang-12.0-gnu17-release"
    assert presets["configurePresets"][1]["name"] == "apple-clang-12.0-gnu17-debug"
    assert presets["buildPresets"][0]["name"] == "apple-clang-12.0-gnu17-release"
    assert presets["buildPresets"][1]["name"] == "apple-clang-12.0-gnu17-debug"
    assert presets["buildPresets"][0]["configurePreset"] == "apple-clang-12.0-gnu17-release"
    assert presets["buildPresets"][1]["configurePreset"] == "apple-clang-12.0-gnu17-debug"
    # But If we change, for example, the cppstd and the compiler version, the toolchain
    # and presets will be different, but it will be appended to the UserPresets.json
    settings = "-s compiler=apple-clang -s compiler.libcxx=libc++ " \
               "-s compiler.version=13 -s compiler.cppstd=gnu20"
    client.run("install . {} {}".format(settings, settings_layout))
    assert os.path.exists(os.path.join(client.current_folder, "build", "apple-clang-13-gnu20",
                                       "generators"))
    assert os.path.exists(user_presets_path)
    user_presets = json.loads(load(user_presets_path))
    # The [0] is the apple-clang 12 the [1] is the apple-clang 13
    assert len(user_presets["include"]) == 2
    presets = json.loads(load(user_presets["include"][1]))
    assert len(presets["configurePresets"]) == 1
    assert len(presets["buildPresets"]) == 1
    assert presets["configurePresets"][0]["name"] == "apple-clang-13-gnu20-release"
    assert presets["buildPresets"][0]["name"] == "apple-clang-13-gnu20-release"
    assert presets["buildPresets"][0]["configurePreset"] == "apple-clang-13-gnu20-release"
    # We can build with cmake manually
    if platform.system() == "Darwin":
        client.run_command("cmake . --preset apple-clang-12.0-gnu17-release")
        client.run_command("cmake --build --preset apple-clang-12.0-gnu17-release")
        client.run_command("./build/apple-clang-12.0-gnu17/Release/hello")
        assert "Hello World Release!" in client.out
        assert "__cplusplus2017" in client.out
        client.run_command("cmake . --preset apple-clang-12.0-gnu17-debug")
        client.run_command("cmake --build --preset apple-clang-12.0-gnu17-debug")
        client.run_command("./build/apple-clang-12.0-gnu17/Debug/hello")
        assert "Hello World Debug!" in client.out
        assert "__cplusplus2017" in client.out
        client.run_command("cmake . --preset apple-clang-13-gnu20-release")
        client.run_command("cmake --build --preset apple-clang-13-gnu20-release")
        client.run_command("./build/apple-clang-13-gnu20/Release/hello")
        assert "Hello World Release!" in client.out
        assert "__cplusplus2020" in client.out
@pytest.mark.parametrize("multiconfig", [True, False])
def test_cmake_presets_duplicated_install(multiconfig):
    """Repeating the same install must not duplicate buildPresets entries.

    Only failed when using a multiconfig generator.
    https://github.com/conan-io/conan/issues/11409
    """
    tc = TestClient(path_with_spaces=False)
    tc.run("new hello/0.1 --template=cmake_exe")
    args = ('-s compiler=gcc -s compiler.version=5 -s compiler.libcxx=libstdc++11 '
            '-c tools.cmake.cmake_layout:build_folder_vars='
            '\'["settings.compiler", "settings.compiler.version"]\' ')
    if multiconfig:
        args += '-c tools.cmake.cmaketoolchain:generator="Multi-Config"'
    # Install twice on purpose: the second run must not append another preset
    for _ in range(2):
        tc.run("install . {}".format(args))
    presets_file = os.path.join(tc.current_folder, "build", "gcc-5", "generators",
                                "CMakePresets.json")
    assert os.path.exists(presets_file)
    assert len(json.loads(load(presets_file))["buildPresets"]) == 1
def test_remove_missing_presets():
    """Deleted build folders must be dropped from CMakeUserPresets.json includes.

    https://github.com/conan-io/conan/issues/11413
    """
    tc = TestClient(path_with_spaces=False)
    tc.run("new hello/0.1 --template=cmake_exe")
    args = ('-s compiler=gcc -s compiler.version=5 -s compiler.libcxx=libstdc++11 '
            '-c tools.cmake.cmake_layout:build_folder_vars='
            '\'["settings.compiler", "settings.compiler.version"]\' ')
    tc.run("install . {}".format(args))
    tc.run("install . {} -s compiler.version=6".format(args))
    gcc5_folder = os.path.join(tc.current_folder, "build", "gcc-5")
    gcc6_folder = os.path.join(tc.current_folder, "build", "gcc-6")
    assert os.path.exists(gcc5_folder)
    assert os.path.exists(gcc6_folder)
    rmdir(gcc5_folder)
    # If we generate another configuration, the missing one (removed) for gcc-5 is not included
    tc.run("install . {} -s compiler.version=11".format(args))
    user_presets_file = os.path.join(tc.current_folder, "CMakeUserPresets.json")
    assert os.path.exists(user_presets_file)
    includes = json.loads(load(user_presets_file))["include"]
    assert len(includes) == 2
    assert "gcc-6" in includes[0]
    assert "gcc-11" in includes[1]
@pytest.mark.tool_cmake(version="3.23")
def test_cmake_presets_options_single_config():
    # build_folder_vars can reference options too: the layout folder and the
    # preset names must include the shared_true/shared_false suffix.
    client = TestClient(path_with_spaces=False)
    client.run("new hello/0.1 --template=cmake_lib")
    conf_layout = '-c tools.cmake.cmake_layout:build_folder_vars=\'["settings.compiler", ' \
                  '"options.shared"]\''
    default_compiler = {"Darwin": "apple-clang",
                        "Windows": "visual studio",  # FIXME: replace it with 'msvc' in develop2
                        "Linux": "gcc"}.get(platform.system())
    for shared in (True, False):
        client.run("install . {} -o shared={}".format(conf_layout, shared))
        shared_str = "shared_true" if shared else "shared_false"
        assert os.path.exists(os.path.join(client.current_folder,
                                           "build", "{}-{}".format(default_compiler, shared_str),
                                           "generators"))
    # Without -o shared, the default option value (shared=False) is used
    client.run("install . {}".format(conf_layout))
    assert os.path.exists(os.path.join(client.current_folder,
                                       "build", "{}-shared_false".format(default_compiler),
                                       "generators"))
    user_presets_path = os.path.join(client.current_folder, "CMakeUserPresets.json")
    assert os.path.exists(user_presets_path)
    # We can build with cmake manually
    if platform.system() == "Darwin":
        for shared in (True, False):
            shared_str = "shared_true" if shared else "shared_false"
            client.run_command("cmake . --preset apple-clang-{}-release".format(shared_str))
            client.run_command("cmake --build --preset apple-clang-{}-release".format(shared_str))
            the_lib = "libhello.a" if not shared else "libhello.dylib"
            path = os.path.join(client.current_folder,
                                "build", "apple-clang-{}".format(shared_str), "release", the_lib)
            assert os.path.exists(path)
@pytest.mark.tool_cmake(version="3.23")
@pytest.mark.skipif(platform.system() != "Windows", reason="Needs windows")
def test_cmake_presets_multiple_settings_multi_config():
    # Multi-config (MSVC) flavor of the presets naming test: one
    # configurePreset per folder, multiple buildPresets per build_type.
    client = TestClient(path_with_spaces=False)
    client.run("new hello/0.1 --template=cmake_exe")
    settings_layout = '-c tools.cmake.cmake_layout:build_folder_vars=' \
                      '\'["settings.compiler.runtime", "settings.compiler.cppstd"]\''
    user_presets_path = os.path.join(client.current_folder, "CMakeUserPresets.json")
    # Check that all generated names are expected, both in the layout and in the Presets
    settings = "-s compiler=msvc -s compiler.version=191 -s compiler.runtime=dynamic " \
               "-s compiler.cppstd=14"
    client.run("install . {} {}".format(settings, settings_layout))
    assert os.path.exists(os.path.join(client.current_folder, "build", "dynamic-14", "generators"))
    assert os.path.exists(user_presets_path)
    user_presets = json.loads(load(user_presets_path))
    assert len(user_presets["include"]) == 1
    presets = json.loads(load(user_presets["include"][0]))
    assert len(presets["configurePresets"]) == 1
    assert len(presets["buildPresets"]) == 1
    assert presets["configurePresets"][0]["name"] == "dynamic-14"
    assert presets["buildPresets"][0]["name"] == "dynamic-14-release"
    assert presets["buildPresets"][0]["configurePreset"] == "dynamic-14"
    # If we create the "Debug" one, it has the same toolchain and preset file, that is
    # always multiconfig
    client.run("install . {} -s build_type=Debug {}".format(settings, settings_layout))
    assert os.path.exists(os.path.join(client.current_folder, "build", "dynamic-14", "generators"))
    assert os.path.exists(user_presets_path)
    user_presets = json.loads(load(user_presets_path))
    assert len(user_presets["include"]) == 1
    presets = json.loads(load(user_presets["include"][0]))
    # Still a single configurePreset (multiconfig), but two buildPresets now
    assert len(presets["configurePresets"]) == 1
    assert len(presets["buildPresets"]) == 2
    assert presets["configurePresets"][0]["name"] == "dynamic-14"
    assert presets["buildPresets"][0]["name"] == "dynamic-14-release"
    assert presets["buildPresets"][1]["name"] == "dynamic-14-debug"
    assert presets["buildPresets"][0]["configurePreset"] == "dynamic-14"
    assert presets["buildPresets"][1]["configurePreset"] == "dynamic-14"
    # But If we change, for example, the cppstd and the compiler version, the toolchain
    # and presets will be different, but it will be appended to the UserPresets.json
    settings = "-s compiler=msvc -s compiler.version=191 -s compiler.runtime=static " \
               "-s compiler.cppstd=17"
    client.run("install . {} {}".format(settings, settings_layout))
    assert os.path.exists(os.path.join(client.current_folder, "build", "static-17", "generators"))
    assert os.path.exists(user_presets_path)
    user_presets = json.loads(load(user_presets_path))
    # The [0] is the msvc dynamic/14 the [1] is the static/17
    assert len(user_presets["include"]) == 2
    presets = json.loads(load(user_presets["include"][1]))
    assert len(presets["configurePresets"]) == 1
    assert len(presets["buildPresets"]) == 1
    assert presets["configurePresets"][0]["name"] == "static-17"
    assert presets["buildPresets"][0]["name"] == "static-17-release"
    assert presets["buildPresets"][0]["configurePreset"] == "static-17"
    # We can build with cmake manually
    client.run_command("cmake . --preset dynamic-14")
    client.run_command("cmake --build --preset dynamic-14-release")
    client.run_command("build\\dynamic-14\\Release\\hello")
    assert "Hello World Release!" in client.out
    assert "MSVC_LANG2014" in client.out
    client.run_command("cmake --build --preset dynamic-14-debug")
    client.run_command("build\\dynamic-14\\Debug\\hello")
    assert "Hello World Debug!" in client.out
    assert "MSVC_LANG2014" in client.out
    client.run_command("cmake . --preset static-17")
    client.run_command("cmake --build --preset static-17-release")
    client.run_command("build\\static-17\\Release\\hello")
    assert "Hello World Release!" in client.out
    assert "MSVC_LANG2017" in client.out
@pytest.mark.tool_cmake
def test_cmaketoolchain_sysroot():
    # CMAKE_SYSROOT must be settable two ways: via the tools.build:sysroot
    # conf, and by writing the "generic_system" toolchain block directly.
    client = TestClient(path_with_spaces=False)
    # The {} placeholder is filled below with extra generate() code (or nothing)
    conanfile = textwrap.dedent("""
        from conan import ConanFile
        from conan.tools.cmake import CMakeToolchain, CMake, cmake_layout
        class AppConan(ConanFile):
            settings = "os", "compiler", "build_type", "arch"
            exports_sources = "CMakeLists.txt"
            def generate(self):
                tc = CMakeToolchain(self)
                {}
                tc.generate()
            def build(self):
                cmake = CMake(self)
                cmake.configure()
                cmake.build()
        """)
    cmakelist = textwrap.dedent("""
        cmake_minimum_required(VERSION 3.15)
        set(CMAKE_CXX_COMPILER_WORKS 1)
        project(app CXX)
        message("sysroot: '${CMAKE_SYSROOT}'")
        message("osx_sysroot: '${CMAKE_OSX_SYSROOT}'")
        """)
    client.save({
        "conanfile.py": conanfile.format(""),
        "CMakeLists.txt": cmakelist
    })
    # Any existing folder works as a fake sysroot; CMake prints it back
    fake_sysroot = client.current_folder
    output_fake_sysroot = fake_sysroot.replace("\\", "/") if platform.system() == "Windows" else fake_sysroot
    client.run("create . app/1.0@ -c tools.build:sysroot='{}'".format(fake_sysroot))
    assert "sysroot: '{}'".format(output_fake_sysroot) in client.out
    # set in a block instead of using conf
    set_sysroot_in_block = 'tc.blocks["generic_system"].values["cmake_sysroot"] = "{}"'.format(output_fake_sysroot)
    client.save({
        "conanfile.py": conanfile.format(set_sysroot_in_block),
    })
    client.run("create . app/1.0@")
    assert "sysroot: '{}'".format(output_fake_sysroot) in client.out
# FIXME: DEVELOP2: @pytest.mark.tool("cmake", "3.23")
@pytest.mark.tool_cmake(version="3.23")
def test_cmake_presets_with_conanfile_txt():
    # Presets generation must also work for a plain conanfile.txt with a
    # [layout] section (no conanfile.py at all).
    c = TestClient()
    # FIXME: DEVELOP 2: c.run("new cmake_exe -d name=foo -d version=1.0")
    c.run("new foo/1.0 --template cmake_exe")
    # Keep only the sources + conanfile.txt; the .py from the template is removed
    os.unlink(os.path.join(c.current_folder, "conanfile.py"))
    c.save({"conanfile.txt": textwrap.dedent("""
        [generators]
        CMakeToolchain
        [layout]
        cmake_layout
        """)})
    c.run("install .")
    c.run("install . -s build_type=Debug")
    assert os.path.exists(os.path.join(c.current_folder, "CMakeUserPresets.json"))
    presets_path = os.path.join(c.current_folder, "build", "generators", "CMakePresets.json")
    assert os.path.exists(presets_path)
    if platform.system() != "Windows":
        c.run_command("cmake --preset debug")
        c.run_command("cmake --build --preset debug")
        c.run_command("./build/Debug/foo")
    else:
        c.run_command("cmake . --preset default")
        c.run_command("cmake --build --preset debug")
        c.run_command("build\\Debug\\foo")
    assert "Hello World Debug!" in c.out
    if platform.system() != "Windows":
        c.run_command("cmake --preset release")
        c.run_command("cmake --build --preset release")
        c.run_command("./build/Release/foo")
    else:
        c.run_command("cmake --build --preset release")
        c.run_command("build\\Release\\foo")
    assert "Hello World Release!" in c.out
def test_cmake_presets_forbidden_build_type():
    """Including 'settings.build_type' in build_folder_vars must be rejected."""
    t = TestClient(path_with_spaces=False)
    t.run("new hello/0.1 --template cmake_exe")
    # client.run("new cmake_exe -d name=hello -d version=0.1")
    conf = ('-c tools.cmake.cmake_layout:build_folder_vars='
            '\'["options.missing", "settings.build_type"]\'')
    t.run("install . {}".format(conf), assert_error=True)
    assert "Error, don't include 'settings.build_type' in the " \
           "'tools.cmake.cmake_layout:build_folder_vars' conf" in t.out
|
[
"textwrap.dedent",
"conans.test.assets.genconanfile.GenConanfile",
"conans.util.files.load",
"conans.model.ref.ConanFileReference.loads",
"conans.test.utils.tools.TurboTestClient",
"os.path.exists",
"conan.tools.microsoft.visual.vcvars_command",
"conans.util.files.rmdir",
"conans.test.assets.cmake.gen_cmakelists",
"conans.util.files.save",
"conan.tools.cmake.presets.load_cmake_presets",
"pytest.mark.tool_cmake",
"platform.system",
"pytest.mark.parametrize",
"conans.test.utils.tools.TestClient",
"os.path.join"
] |
[((582, 751), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""compiler, version, update, runtime"""', "[('msvc', '192', None, 'dynamic'), ('msvc', '192', '6', 'static'), ('msvc',\n '192', '8', 'static')]"], {}), "('compiler, version, update, runtime', [('msvc',\n '192', None, 'dynamic'), ('msvc', '192', '6', 'static'), ('msvc', '192',\n '8', 'static')])\n", (605, 751), False, 'import pytest\n'), ((3229, 3267), 'pytest.mark.tool_cmake', 'pytest.mark.tool_cmake', ([], {'version': '"""3.23"""'}), "(version='3.23')\n", (3251, 3267), False, 'import pytest\n'), ((3269, 3365), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""existing_user_presets"""', "[None, 'user_provided', 'conan_generated']"], {}), "('existing_user_presets', [None, 'user_provided',\n 'conan_generated'])\n", (3292, 3365), False, 'import pytest\n'), ((22706, 22744), 'pytest.mark.tool_cmake', 'pytest.mark.tool_cmake', ([], {'version': '"""3.23"""'}), "(version='3.23')\n", (22728, 22744), False, 'import pytest\n'), ((23152, 23190), 'pytest.mark.tool_cmake', 'pytest.mark.tool_cmake', ([], {'version': '"""3.23"""'}), "(version='3.23')\n", (23174, 23190), False, 'import pytest\n'), ((23600, 23638), 'pytest.mark.tool_cmake', 'pytest.mark.tool_cmake', ([], {'version': '"""3.23"""'}), "(version='3.23')\n", (23622, 23638), False, 'import pytest\n'), ((28448, 28501), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""multiconfig"""', '[True, False]'], {}), "('multiconfig', [True, False])\n", (28471, 28501), False, 'import pytest\n'), ((30748, 30786), 'pytest.mark.tool_cmake', 'pytest.mark.tool_cmake', ([], {'version': '"""3.23"""'}), "(version='3.23')\n", (30770, 30786), False, 'import pytest\n'), ((32711, 32749), 'pytest.mark.tool_cmake', 'pytest.mark.tool_cmake', ([], {'version': '"""3.23"""'}), "(version='3.23')\n", (32733, 32749), False, 'import pytest\n'), ((38654, 38692), 'pytest.mark.tool_cmake', 'pytest.mark.tool_cmake', ([], {'version': '"""3.23"""'}), 
"(version='3.23')\n", (38676, 38692), False, 'import pytest\n'), ((908, 942), 'conans.test.utils.tools.TestClient', 'TestClient', ([], {'path_with_spaces': '(False)'}), '(path_with_spaces=False)\n', (918, 942), False, 'from conans.test.utils.tools import TestClient, TurboTestClient\n'), ((1959, 1993), 'conans.test.utils.tools.TestClient', 'TestClient', ([], {'path_with_spaces': '(False)'}), '(path_with_spaces=False)\n', (1969, 1993), False, 'from conans.test.utils.tools import TestClient, TurboTestClient\n'), ((2126, 2228), 'conans.util.files.save', 'save', (['client.cache.new_config_path', '"""tools.cmake.cmaketoolchain:user_toolchain+=mytoolchain.cmake"""'], {}), "(client.cache.new_config_path,\n 'tools.cmake.cmaketoolchain:user_toolchain+=mytoolchain.cmake')\n", (2130, 2228), False, 'from conans.util.files import save, load, rmdir\n'), ((2467, 2501), 'conans.test.utils.tools.TestClient', 'TestClient', ([], {'path_with_spaces': '(False)'}), '(path_with_spaces=False)\n', (2477, 2501), False, 'from conans.test.utils.tools import TestClient, TurboTestClient\n'), ((2634, 2735), 'conans.util.files.save', 'save', (['client.cache.new_config_path', '"""tools.cmake.cmaketoolchain:toolchain_file=mytoolchain.cmake"""'], {}), "(client.cache.new_config_path,\n 'tools.cmake.cmaketoolchain:toolchain_file=mytoolchain.cmake')\n", (2638, 2735), False, 'from conans.util.files import save, load, rmdir\n'), ((2912, 2953), 'conan.tools.cmake.presets.load_cmake_presets', 'load_cmake_presets', (['client.current_folder'], {}), '(client.current_folder)\n', (2930, 2953), False, 'from conan.tools.cmake.presets import load_cmake_presets\n'), ((3788, 3800), 'conans.test.utils.tools.TestClient', 'TestClient', ([], {}), '()\n', (3798, 3800), False, 'from conans.test.utils.tools import TestClient, TurboTestClient\n'), ((3955, 4336), 'textwrap.dedent', 'textwrap.dedent', (['"""\n from conan import ConanFile\n from conan.tools.cmake import cmake_layout\n\n class Consumer(ConanFile):\n\n settings = 
"build_type", "os", "arch", "compiler"\n requires = "mylib/1.0"\n generators = "CMakeToolchain", "CMakeDeps"\n\n def layout(self):\n cmake_layout(self)\n\n """'], {}), '(\n """\n from conan import ConanFile\n from conan.tools.cmake import cmake_layout\n\n class Consumer(ConanFile):\n\n settings = "build_type", "os", "arch", "compiler"\n requires = "mylib/1.0"\n generators = "CMakeToolchain", "CMakeDeps"\n\n def layout(self):\n cmake_layout(self)\n\n """\n )\n', (3970, 4336), False, 'import textwrap\n'), ((4344, 4507), 'textwrap.dedent', 'textwrap.dedent', (['"""\n cmake_minimum_required(VERSION 3.1)\n project(PackageTest CXX)\n find_package(mylib REQUIRED CONFIG)\n """'], {}), '(\n """\n cmake_minimum_required(VERSION 3.1)\n project(PackageTest CXX)\n find_package(mylib REQUIRED CONFIG)\n """\n )\n', (4359, 4507), False, 'import textwrap\n'), ((5065, 5120), 'os.path.join', 'os.path.join', (['t.current_folder', '"""CMakeUserPresets.json"""'], {}), "(t.current_folder, 'CMakeUserPresets.json')\n", (5077, 5120), False, 'import os\n'), ((5132, 5165), 'os.path.exists', 'os.path.exists', (['user_presets_path'], {}), '(user_presets_path)\n', (5146, 5165), False, 'import os\n'), ((5684, 5696), 'conans.test.utils.tools.TestClient', 'TestClient', ([], {}), '()\n', (5694, 5696), False, 'from conans.test.utils.tools import TestClient, TurboTestClient\n'), ((5713, 6133), 'textwrap.dedent', 'textwrap.dedent', (['"""\n import os\n from conans import ConanFile\n class Pkg(ConanFile):\n exports_sources = "*"\n def package(self):\n self.copy("*")\n def package_info(self):\n f = os.path.join(self.package_folder, "mytoolchain.cmake")\n self.conf_info.append("tools.cmake.cmaketoolchain:user_toolchain", f)\n """'], {}), '(\n """\n import os\n from conans import ConanFile\n class Pkg(ConanFile):\n exports_sources = "*"\n def package(self):\n self.copy("*")\n def package_info(self):\n f = os.path.join(self.package_folder, "mytoolchain.cmake")\n 
self.conf_info.append("tools.cmake.cmaketoolchain:user_toolchain", f)\n """\n )\n', (5728, 6133), False, 'import textwrap\n'), ((6319, 6767), 'textwrap.dedent', 'textwrap.dedent', (['"""\n from conans import ConanFile\n from conan.tools.cmake import CMake\n class Pkg(ConanFile):\n settings = "os", "compiler", "arch", "build_type"\n exports_sources = "CMakeLists.txt"\n build_requires = "toolchain/0.1"\n generators = "CMakeToolchain"\n def build(self):\n cmake = CMake(self)\n cmake.configure()\n """'], {}), '(\n """\n from conans import ConanFile\n from conan.tools.cmake import CMake\n class Pkg(ConanFile):\n settings = "os", "compiler", "arch", "build_type"\n exports_sources = "CMakeLists.txt"\n build_requires = "toolchain/0.1"\n generators = "CMakeToolchain"\n def build(self):\n cmake = CMake(self)\n cmake.configure()\n """\n )\n', (6334, 6767), False, 'import textwrap\n'), ((7189, 7223), 'conans.test.utils.tools.TestClient', 'TestClient', ([], {'path_with_spaces': '(False)'}), '(path_with_spaces=False)\n', (7199, 7223), False, 'from conans.test.utils.tools import TestClient, TurboTestClient\n'), ((8101, 8113), 'conans.test.utils.tools.TestClient', 'TestClient', ([], {}), '()\n', (8111, 8113), False, 'from conans.test.utils.tools import TestClient, TurboTestClient\n'), ((8130, 8550), 'textwrap.dedent', 'textwrap.dedent', (['"""\n import os\n from conans import ConanFile\n class Pkg(ConanFile):\n exports_sources = "*"\n def package(self):\n self.copy("*")\n def package_info(self):\n f = os.path.join(self.package_folder, "mytoolchain.cmake")\n self.conf_info.append("tools.cmake.cmaketoolchain:user_toolchain", f)\n """'], {}), '(\n """\n import os\n from conans import ConanFile\n class Pkg(ConanFile):\n exports_sources = "*"\n def package(self):\n self.copy("*")\n def package_info(self):\n f = os.path.join(self.package_folder, "mytoolchain.cmake")\n self.conf_info.append("tools.cmake.cmaketoolchain:user_toolchain", f)\n """\n )\n', (8145, 8550), False, 'import 
textwrap\n'), ((8918, 9385), 'textwrap.dedent', 'textwrap.dedent', (['"""\n from conans import ConanFile\n from conan.tools.cmake import CMake\n class Pkg(ConanFile):\n settings = "os", "compiler", "arch", "build_type"\n exports_sources = "CMakeLists.txt"\n tool_requires = "toolchain1/0.1", "toolchain2/0.1"\n generators = "CMakeToolchain"\n\n def build(self):\n cmake = CMake(self)\n cmake.configure()\n """'], {}), '(\n """\n from conans import ConanFile\n from conan.tools.cmake import CMake\n class Pkg(ConanFile):\n settings = "os", "compiler", "arch", "build_type"\n exports_sources = "CMakeLists.txt"\n tool_requires = "toolchain1/0.1", "toolchain2/0.1"\n generators = "CMakeToolchain"\n\n def build(self):\n cmake = CMake(self)\n cmake.configure()\n """\n )\n', (8933, 9385), False, 'import textwrap\n'), ((9964, 9976), 'conans.test.utils.tools.TestClient', 'TestClient', ([], {}), '()\n', (9974, 9976), False, 'from conans.test.utils.tools import TestClient, TurboTestClient\n'), ((9993, 10254), 'textwrap.dedent', 'textwrap.dedent', (['"""\n from conans import ConanFile\n class Conan(ConanFile):\n settings = "os", "compiler", "arch", "build_type"\n generators = "CMakeToolchain", "CMakeDeps"\n requires = "dep/0.1"\n """'], {}), '(\n """\n from conans import ConanFile\n class Conan(ConanFile):\n settings = "os", "compiler", "arch", "build_type"\n generators = "CMakeToolchain", "CMakeDeps"\n requires = "dep/0.1"\n """\n )\n', (10008, 10254), False, 'import textwrap\n'), ((10260, 10491), 'textwrap.dedent', 'textwrap.dedent', (['"""\n cmake_minimum_required(VERSION 3.15)\n set(CMAKE_CXX_COMPILER_WORKS 1)\n set(CMAKE_CXX_ABI_COMPILED 1)\n project(MyHello CXX)\n\n find_package(dep CONFIG REQUIRED)\n """'], {}), '(\n """\n cmake_minimum_required(VERSION 3.15)\n set(CMAKE_CXX_COMPILER_WORKS 1)\n set(CMAKE_CXX_ABI_COMPILED 1)\n project(MyHello CXX)\n\n find_package(dep CONFIG REQUIRED)\n """\n )\n', (10275, 10491), False, 'import textwrap\n'), ((11267, 11306), 
'conans.model.ref.ConanFileReference.loads', 'ConanFileReference.loads', (['"""zlib/1.2.11"""'], {}), "('zlib/1.2.11')\n", (11291, 11306), False, 'from conans.model.ref import ConanFileReference\n'), ((11320, 11337), 'conans.test.utils.tools.TurboTestClient', 'TurboTestClient', ([], {}), '()\n', (11335, 11337), False, 'from conans.test.utils.tools import TestClient, TurboTestClient\n'), ((12522, 12556), 'conans.test.utils.tools.TestClient', 'TestClient', ([], {'path_with_spaces': '(False)'}), '(path_with_spaces=False)\n', (12532, 12556), False, 'from conans.test.utils.tools import TestClient, TurboTestClient\n'), ((12571, 12850), 'textwrap.dedent', 'textwrap.dedent', (['"""\n include(default)\n [conf]\n tools.build:defines+=["escape=partially \\\\"escaped\\\\""]\n tools.build:defines+=["spaces=me you"]\n tools.build:defines+=["foobar=bazbuz"]\n tools.build:defines+=["answer=42"]\n """'], {}), '(\n """\n include(default)\n [conf]\n tools.build:defines+=["escape=partially \\\\"escaped\\\\""]\n tools.build:defines+=["spaces=me you"]\n tools.build:defines+=["foobar=bazbuz"]\n tools.build:defines+=["answer=42"]\n """\n )\n', (12586, 12850), False, 'import textwrap\n'), ((12857, 14432), 'textwrap.dedent', 'textwrap.dedent', (['"""\n from conan import ConanFile\n from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout\n\n class Test(ConanFile):\n exports_sources = "CMakeLists.txt", "src/*"\n settings = "os", "compiler", "arch", "build_type"\n\n def generate(self):\n tc = CMakeToolchain(self)\n tc.preprocessor_definitions["escape2"] = "partially \\\\"escaped\\\\""\n tc.preprocessor_definitions["spaces2"] = "me you"\n tc.preprocessor_definitions["foobar2"] = "bazbuz"\n tc.preprocessor_definitions["answer2"] = 42\n tc.preprocessor_definitions.release["escape_release"] = "release partially \\\\"escaped\\\\""\n tc.preprocessor_definitions.release["spaces_release"] = "release me you"\n tc.preprocessor_definitions.release["foobar_release"] = "release bazbuz"\n 
tc.preprocessor_definitions.release["answer_release"] = 42\n\n tc.preprocessor_definitions.debug["escape_debug"] = "debug partially \\\\"escaped\\\\""\n tc.preprocessor_definitions.debug["spaces_debug"] = "debug me you"\n tc.preprocessor_definitions.debug["foobar_debug"] = "debug bazbuz"\n tc.preprocessor_definitions.debug["answer_debug"] = 21\n tc.generate()\n\n def layout(self):\n cmake_layout(self)\n\n def build(self):\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n """'], {}), '(\n """\n from conan import ConanFile\n from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout\n\n class Test(ConanFile):\n exports_sources = "CMakeLists.txt", "src/*"\n settings = "os", "compiler", "arch", "build_type"\n\n def generate(self):\n tc = CMakeToolchain(self)\n tc.preprocessor_definitions["escape2"] = "partially \\\\"escaped\\\\""\n tc.preprocessor_definitions["spaces2"] = "me you"\n tc.preprocessor_definitions["foobar2"] = "bazbuz"\n tc.preprocessor_definitions["answer2"] = 42\n tc.preprocessor_definitions.release["escape_release"] = "release partially \\\\"escaped\\\\""\n tc.preprocessor_definitions.release["spaces_release"] = "release me you"\n tc.preprocessor_definitions.release["foobar_release"] = "release bazbuz"\n tc.preprocessor_definitions.release["answer_release"] = 42\n\n tc.preprocessor_definitions.debug["escape_debug"] = "debug partially \\\\"escaped\\\\""\n tc.preprocessor_definitions.debug["spaces_debug"] = "debug me you"\n tc.preprocessor_definitions.debug["foobar_debug"] = "debug bazbuz"\n tc.preprocessor_definitions.debug["answer_debug"] = 21\n tc.generate()\n\n def layout(self):\n cmake_layout(self)\n\n def build(self):\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n """\n )\n', (12872, 14432), False, 'import textwrap\n'), ((14430, 15312), 'textwrap.dedent', 'textwrap.dedent', (['"""\n #include <stdio.h>\n #define STR(x) #x\n #define SHOW_DEFINE(x) printf("%s=%s", #x, STR(x))\n int main(int argc, char *argv[]) {\n 
SHOW_DEFINE(escape);\n SHOW_DEFINE(spaces);\n SHOW_DEFINE(foobar);\n SHOW_DEFINE(answer);\n SHOW_DEFINE(escape2);\n SHOW_DEFINE(spaces2);\n SHOW_DEFINE(foobar2);\n SHOW_DEFINE(answer2);\n #ifdef NDEBUG\n SHOW_DEFINE(escape_release);\n SHOW_DEFINE(spaces_release);\n SHOW_DEFINE(foobar_release);\n SHOW_DEFINE(answer_release);\n #else\n SHOW_DEFINE(escape_debug);\n SHOW_DEFINE(spaces_debug);\n SHOW_DEFINE(foobar_debug);\n SHOW_DEFINE(answer_debug);\n #endif\n return 0;\n }\n """'], {}), '(\n """\n #include <stdio.h>\n #define STR(x) #x\n #define SHOW_DEFINE(x) printf("%s=%s", #x, STR(x))\n int main(int argc, char *argv[]) {\n SHOW_DEFINE(escape);\n SHOW_DEFINE(spaces);\n SHOW_DEFINE(foobar);\n SHOW_DEFINE(answer);\n SHOW_DEFINE(escape2);\n SHOW_DEFINE(spaces2);\n SHOW_DEFINE(foobar2);\n SHOW_DEFINE(answer2);\n #ifdef NDEBUG\n SHOW_DEFINE(escape_release);\n SHOW_DEFINE(spaces_release);\n SHOW_DEFINE(foobar_release);\n SHOW_DEFINE(answer_release);\n #else\n SHOW_DEFINE(escape_debug);\n SHOW_DEFINE(spaces_debug);\n SHOW_DEFINE(foobar_debug);\n SHOW_DEFINE(answer_debug);\n #endif\n return 0;\n }\n """\n )\n', (14445, 15312), False, 'import textwrap\n'), ((15321, 15514), 'textwrap.dedent', 'textwrap.dedent', (['"""\n cmake_minimum_required(VERSION 3.15)\n project(Test CXX)\n set(CMAKE_CXX_STANDARD 11)\n add_executable(example src/main.cpp)\n """'], {}), '(\n """\n cmake_minimum_required(VERSION 3.15)\n project(Test CXX)\n set(CMAKE_CXX_STANDARD 11)\n add_executable(example src/main.cpp)\n """\n )\n', (15336, 15514), False, 'import textwrap\n'), ((17061, 18182), 'textwrap.dedent', 'textwrap.dedent', (['"""\n import os\n\n from conan import ConanFile\n from conan.tools.cmake import CMake, cmake_layout, CMakeDeps\n from conan.tools.build import cross_building\n\n\n class HelloTestConan(ConanFile):\n settings = "os", "compiler", "build_type", "arch"\n generators = "CMakeToolchain", "VirtualBuildEnv", "VirtualRunEnv"\n apply_env = False\n test_type = "explicit"\n\n def 
generate(self):\n deps = CMakeDeps(self)\n deps.set_interface_link_directories = True\n deps.generate()\n\n def requirements(self):\n self.requires(self.tested_reference_str)\n\n def build(self):\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def layout(self):\n cmake_layout(self)\n\n def test(self):\n if not cross_building(self):\n cmd = os.path.join(self.cpp.build.bindirs[0], "example")\n self.run(cmd, env="conanrun")\n """'], {}), '(\n """\n import os\n\n from conan import ConanFile\n from conan.tools.cmake import CMake, cmake_layout, CMakeDeps\n from conan.tools.build import cross_building\n\n\n class HelloTestConan(ConanFile):\n settings = "os", "compiler", "build_type", "arch"\n generators = "CMakeToolchain", "VirtualBuildEnv", "VirtualRunEnv"\n apply_env = False\n test_type = "explicit"\n\n def generate(self):\n deps = CMakeDeps(self)\n deps.set_interface_link_directories = True\n deps.generate()\n\n def requirements(self):\n self.requires(self.tested_reference_str)\n\n def build(self):\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def layout(self):\n cmake_layout(self)\n\n def test(self):\n if not cross_building(self):\n cmd = os.path.join(self.cpp.build.bindirs[0], "example")\n self.run(cmd, env="conanrun")\n """\n )\n', (17076, 18182), False, 'import textwrap\n'), ((21208, 21242), 'conans.test.utils.tools.TestClient', 'TestClient', ([], {'path_with_spaces': '(False)'}), '(path_with_spaces=False)\n', (21218, 21242), False, 'from conans.test.utils.tools import TestClient, TurboTestClient\n'), ((21408, 21456), 'conan.tools.microsoft.visual.vcvars_command', 'vcvars_command', ([], {'version': '"""15"""', 'architecture': '"""x64"""'}), "(version='15', architecture='x64')\n", (21422, 21456), False, 'from conan.tools.microsoft.visual import vcvars_command\n'), ((21467, 21533), 'os.path.join', 'os.path.join', (['client.current_folder', '"""build"""', '"""Debug"""', '"""hello.lib"""'], {}), "(client.current_folder, 'build', 'Debug', 
'hello.lib')\n", (21479, 21533), False, 'import os\n'), ((21836, 21870), 'conans.test.utils.tools.TestClient', 'TestClient', ([], {'path_with_spaces': '(False)'}), '(path_with_spaces=False)\n', (21846, 21870), False, 'from conans.test.utils.tools import TestClient, TurboTestClient\n'), ((22433, 22481), 'conan.tools.microsoft.visual.vcvars_command', 'vcvars_command', ([], {'version': '"""15"""', 'architecture': '"""x64"""'}), "(version='15', architecture='x64')\n", (22447, 22481), False, 'from conan.tools.microsoft.visual import vcvars_command\n'), ((22492, 22558), 'os.path.join', 'os.path.join', (['client.current_folder', '"""build"""', '"""Debug"""', '"""hello.lib"""'], {}), "(client.current_folder, 'build', 'Debug', 'hello.lib')\n", (22504, 22558), False, 'import os\n'), ((22799, 22833), 'conans.test.utils.tools.TestClient', 'TestClient', ([], {'path_with_spaces': '(False)'}), '(path_with_spaces=False)\n', (22809, 22833), False, 'from conans.test.utils.tools import TestClient, TurboTestClient\n'), ((23246, 23280), 'conans.test.utils.tools.TestClient', 'TestClient', ([], {'path_with_spaces': '(False)'}), '(path_with_spaces=False)\n', (23256, 23280), False, 'from conans.test.utils.tools import TestClient, TurboTestClient\n'), ((23710, 23744), 'conans.test.utils.tools.TestClient', 'TestClient', ([], {'path_with_spaces': '(False)'}), '(path_with_spaces=False)\n', (23720, 23744), False, 'from conans.test.utils.tools import TestClient, TurboTestClient\n'), ((24033, 24093), 'os.path.join', 'os.path.join', (['client.current_folder', '"""CMakeUserPresets.json"""'], {}), "(client.current_folder, 'CMakeUserPresets.json')\n", (24045, 24093), False, 'import os\n'), ((24551, 24584), 'os.path.exists', 'os.path.exists', (['user_presets_path'], {}), '(user_presets_path)\n', (24565, 24584), False, 'import os\n'), ((25423, 25456), 'os.path.exists', 'os.path.exists', (['user_presets_path'], {}), '(user_presets_path)\n', (25437, 25456), False, 'import os\n'), ((26763, 26796), 
'os.path.exists', 'os.path.exists', (['user_presets_path'], {}), '(user_presets_path)\n', (26777, 26796), False, 'import os\n'), ((28681, 28715), 'conans.test.utils.tools.TestClient', 'TestClient', ([], {'path_with_spaces': '(False)'}), '(path_with_spaces=False)\n', (28691, 28715), False, 'from conans.test.utils.tools import TestClient, TurboTestClient\n'), ((29209, 29301), 'os.path.join', 'os.path.join', (['client.current_folder', '"""build"""', '"""gcc-5"""', '"""generators"""', '"""CMakePresets.json"""'], {}), "(client.current_folder, 'build', 'gcc-5', 'generators',\n 'CMakePresets.json')\n", (29221, 29301), False, 'import os\n'), ((29341, 29369), 'os.path.exists', 'os.path.exists', (['presets_path'], {}), '(presets_path)\n', (29355, 29369), False, 'import os\n'), ((29565, 29599), 'conans.test.utils.tools.TestClient', 'TestClient', ([], {'path_with_spaces': '(False)'}), '(path_with_spaces=False)\n', (29575, 29599), False, 'from conans.test.utils.tools import TestClient, TurboTestClient\n'), ((30021, 30074), 'os.path.join', 'os.path.join', (['client.current_folder', '"""build"""', '"""gcc-5"""'], {}), "(client.current_folder, 'build', 'gcc-5')\n", (30033, 30074), False, 'import os\n'), ((30086, 30116), 'os.path.exists', 'os.path.exists', (['presets_path_5'], {}), '(presets_path_5)\n', (30100, 30116), False, 'import os\n'), ((30139, 30192), 'os.path.join', 'os.path.join', (['client.current_folder', '"""build"""', '"""gcc-6"""'], {}), "(client.current_folder, 'build', 'gcc-6')\n", (30151, 30192), False, 'import os\n'), ((30204, 30234), 'os.path.exists', 'os.path.exists', (['presets_path_6'], {}), '(presets_path_6)\n', (30218, 30234), False, 'import os\n'), ((30240, 30261), 'conans.util.files.rmdir', 'rmdir', (['presets_path_5'], {}), '(presets_path_5)\n', (30245, 30261), False, 'from conans.util.files import save, load, rmdir\n'), ((30455, 30515), 'os.path.join', 'os.path.join', (['client.current_folder', '"""CMakeUserPresets.json"""'], {}), 
"(client.current_folder, 'CMakeUserPresets.json')\n", (30467, 30515), False, 'import os\n'), ((30527, 30560), 'os.path.exists', 'os.path.exists', (['user_presets_path'], {}), '(user_presets_path)\n', (30541, 30560), False, 'import os\n'), ((30848, 30882), 'conans.test.utils.tools.TestClient', 'TestClient', ([], {'path_with_spaces': '(False)'}), '(path_with_spaces=False)\n', (30858, 30882), False, 'from conans.test.utils.tools import TestClient, TurboTestClient\n'), ((31962, 32022), 'os.path.join', 'os.path.join', (['client.current_folder', '"""CMakeUserPresets.json"""'], {}), "(client.current_folder, 'CMakeUserPresets.json')\n", (31974, 32022), False, 'import os\n'), ((32034, 32067), 'os.path.exists', 'os.path.exists', (['user_presets_path'], {}), '(user_presets_path)\n', (32048, 32067), False, 'import os\n'), ((32896, 32930), 'conans.test.utils.tools.TestClient', 'TestClient', ([], {'path_with_spaces': '(False)'}), '(path_with_spaces=False)\n', (32906, 32930), False, 'from conans.test.utils.tools import TestClient, TurboTestClient\n'), ((33168, 33228), 'os.path.join', 'os.path.join', (['client.current_folder', '"""CMakeUserPresets.json"""'], {}), "(client.current_folder, 'CMakeUserPresets.json')\n", (33180, 33228), False, 'import os\n'), ((33626, 33659), 'os.path.exists', 'os.path.exists', (['user_presets_path'], {}), '(user_presets_path)\n', (33640, 33659), False, 'import os\n'), ((34434, 34467), 'os.path.exists', 'os.path.exists', (['user_presets_path'], {}), '(user_presets_path)\n', (34448, 34467), False, 'import os\n'), ((35550, 35583), 'os.path.exists', 'os.path.exists', (['user_presets_path'], {}), '(user_presets_path)\n', (35564, 35583), False, 'import os\n'), ((36973, 37007), 'conans.test.utils.tools.TestClient', 'TestClient', ([], {'path_with_spaces': '(False)'}), '(path_with_spaces=False)\n', (36983, 37007), False, 'from conans.test.utils.tools import TestClient, TurboTestClient\n'), ((37025, 37576), 'textwrap.dedent', 'textwrap.dedent', (['"""\n from 
conan import ConanFile\n from conan.tools.cmake import CMakeToolchain, CMake, cmake_layout\n\n class AppConan(ConanFile):\n settings = "os", "compiler", "build_type", "arch"\n exports_sources = "CMakeLists.txt"\n\n def generate(self):\n tc = CMakeToolchain(self)\n {}\n tc.generate()\n\n def build(self):\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n """'], {}), '(\n """\n from conan import ConanFile\n from conan.tools.cmake import CMakeToolchain, CMake, cmake_layout\n\n class AppConan(ConanFile):\n settings = "os", "compiler", "build_type", "arch"\n exports_sources = "CMakeLists.txt"\n\n def generate(self):\n tc = CMakeToolchain(self)\n {}\n tc.generate()\n\n def build(self):\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n """\n )\n', (37040, 37576), False, 'import textwrap\n'), ((37584, 37838), 'textwrap.dedent', 'textwrap.dedent', (['"""\n cmake_minimum_required(VERSION 3.15)\n set(CMAKE_CXX_COMPILER_WORKS 1)\n project(app CXX)\n message("sysroot: \'${CMAKE_SYSROOT}\'")\n message("osx_sysroot: \'${CMAKE_OSX_SYSROOT}\'")\n """'], {}), '(\n """\n cmake_minimum_required(VERSION 3.15)\n set(CMAKE_CXX_COMPILER_WORKS 1)\n project(app CXX)\n message("sysroot: \'${CMAKE_SYSROOT}\'")\n message("osx_sysroot: \'${CMAKE_OSX_SYSROOT}\'")\n """\n )\n', (37599, 37838), False, 'import textwrap\n'), ((38746, 38758), 'conans.test.utils.tools.TestClient', 'TestClient', ([], {}), '()\n', (38756, 38758), False, 'from conans.test.utils.tools import TestClient, TurboTestClient\n'), ((39240, 39314), 'os.path.join', 'os.path.join', (['c.current_folder', '"""build"""', '"""generators"""', '"""CMakePresets.json"""'], {}), "(c.current_folder, 'build', 'generators', 'CMakePresets.json')\n", (39252, 39314), False, 'import os\n'), ((39326, 39354), 'os.path.exists', 'os.path.exists', (['presets_path'], {}), '(presets_path)\n', (39340, 39354), False, 'import os\n'), ((40141, 40175), 'conans.test.utils.tools.TestClient', 'TestClient', ([], {'path_with_spaces': 
'(False)'}), '(path_with_spaces=False)\n', (40151, 40175), False, 'from conans.test.utils.tools import TestClient, TurboTestClient\n'), ((522, 539), 'platform.system', 'platform.system', ([], {}), '()\n', (537, 539), False, 'import platform\n'), ((5202, 5225), 'conans.util.files.load', 'load', (['user_presets_path'], {}), '(user_presets_path)\n', (5206, 5225), False, 'from conans.util.files import save, load, rmdir\n'), ((3119, 3136), 'platform.system', 'platform.system', ([], {}), '()\n', (3134, 3136), False, 'import platform\n'), ((11631, 11660), 'os.path.join', 'os.path.join', (['p_folder', '"""lib"""'], {}), "(p_folder, 'lib')\n", (11643, 11660), False, 'import os\n'), ((12053, 12085), 'os.path.join', 'os.path.join', (['p_folder', '"""mylibs"""'], {}), "(p_folder, 'mylibs')\n", (12065, 12085), False, 'import os\n'), ((12241, 12311), 'os.path.join', 'os.path.join', (['b_folder', '"""build"""', '"""generators"""', '"""conan_toolchain.cmake"""'], {}), "(b_folder, 'build', 'generators', 'conan_toolchain.cmake')\n", (12253, 12311), False, 'import os\n'), ((18562, 18574), 'conans.test.utils.tools.TestClient', 'TestClient', ([], {}), '()\n', (18572, 18574), False, 'from conans.test.utils.tools import TestClient, TurboTestClient\n'), ((20104, 20116), 'conans.test.utils.tools.TestClient', 'TestClient', ([], {}), '()\n', (20114, 20116), False, 'from conans.test.utils.tools import TestClient, TurboTestClient\n'), ((21002, 21019), 'platform.system', 'platform.system', ([], {}), '()\n', (21017, 21019), False, 'import platform\n'), ((22106, 22159), 'os.path.join', 'os.path.join', (['client.current_folder', '"""CMakeLists.txt"""'], {}), "(client.current_folder, 'CMakeLists.txt')\n", (22118, 22159), False, 'import os\n'), ((21700, 21717), 'platform.system', 'platform.system', ([], {}), '()\n', (21715, 21717), False, 'import platform\n'), ((23089, 23147), 'os.path.join', 'os.path.join', (['client.current_folder', '"""build"""', '"""generators"""'], {}), "(client.current_folder, 
'build', 'generators')\n", (23101, 23147), False, 'import os\n'), ((23537, 23595), 'os.path.join', 'os.path.join', (['client.current_folder', '"""build"""', '"""generators"""'], {}), "(client.current_folder, 'build', 'generators')\n", (23549, 23595), False, 'import os\n'), ((24415, 24503), 'os.path.join', 'os.path.join', (['client.current_folder', '"""build"""', '"""apple-clang-12.0-gnu17"""', '"""generators"""'], {}), "(client.current_folder, 'build', 'apple-clang-12.0-gnu17',\n 'generators')\n", (24427, 24503), False, 'import os\n'), ((24615, 24638), 'conans.util.files.load', 'load', (['user_presets_path'], {}), '(user_presets_path)\n', (24619, 24638), False, 'from conans.util.files import save, load, rmdir\n'), ((24710, 24742), 'conans.util.files.load', 'load', (["user_presets['include'][0]"], {}), "(user_presets['include'][0])\n", (24714, 24742), False, 'from conans.util.files import save, load, rmdir\n'), ((25326, 25414), 'os.path.join', 'os.path.join', (['client.current_folder', '"""build"""', '"""apple-clang-12.0-gnu17"""', '"""generators"""'], {}), "(client.current_folder, 'build', 'apple-clang-12.0-gnu17',\n 'generators')\n", (25338, 25414), False, 'import os\n'), ((25487, 25510), 'conans.util.files.load', 'load', (['user_presets_path'], {}), '(user_presets_path)\n', (25491, 25510), False, 'from conans.util.files import save, load, rmdir\n'), ((25582, 25614), 'conans.util.files.load', 'load', (["user_presets['include'][0]"], {}), "(user_presets['include'][0])\n", (25586, 25614), False, 'from conans.util.files import save, load, rmdir\n'), ((26629, 26715), 'os.path.join', 'os.path.join', (['client.current_folder', '"""build"""', '"""apple-clang-13-gnu20"""', '"""generators"""'], {}), "(client.current_folder, 'build', 'apple-clang-13-gnu20',\n 'generators')\n", (26641, 26715), False, 'import os\n'), ((26827, 26850), 'conans.util.files.load', 'load', (['user_presets_path'], {}), '(user_presets_path)\n', (26831, 26850), False, 'from conans.util.files import 
save, load, rmdir\n'), ((26988, 27020), 'conans.util.files.load', 'load', (["user_presets['include'][1]"], {}), "(user_presets['include'][1])\n", (26992, 27020), False, 'from conans.util.files import save, load, rmdir\n'), ((27418, 27435), 'platform.system', 'platform.system', ([], {}), '()\n', (27433, 27435), False, 'import platform\n'), ((29396, 29414), 'conans.util.files.load', 'load', (['presets_path'], {}), '(presets_path)\n', (29400, 29414), False, 'from conans.util.files import save, load, rmdir\n'), ((30588, 30611), 'conans.util.files.load', 'load', (['user_presets_path'], {}), '(user_presets_path)\n', (30592, 30611), False, 'from conans.util.files import save, load, rmdir\n'), ((31261, 31278), 'platform.system', 'platform.system', ([], {}), '()\n', (31276, 31278), False, 'import platform\n'), ((32115, 32132), 'platform.system', 'platform.system', ([], {}), '()\n', (32130, 32132), False, 'import platform\n'), ((33541, 33613), 'os.path.join', 'os.path.join', (['client.current_folder', '"""build"""', '"""dynamic-14"""', '"""generators"""'], {}), "(client.current_folder, 'build', 'dynamic-14', 'generators')\n", (33553, 33613), False, 'import os\n'), ((33690, 33713), 'conans.util.files.load', 'load', (['user_presets_path'], {}), '(user_presets_path)\n', (33694, 33713), False, 'from conans.util.files import save, load, rmdir\n'), ((33785, 33817), 'conans.util.files.load', 'load', (["user_presets['include'][0]"], {}), "(user_presets['include'][0])\n", (33789, 33817), False, 'from conans.util.files import save, load, rmdir\n'), ((34349, 34421), 'os.path.join', 'os.path.join', (['client.current_folder', '"""build"""', '"""dynamic-14"""', '"""generators"""'], {}), "(client.current_folder, 'build', 'dynamic-14', 'generators')\n", (34361, 34421), False, 'import os\n'), ((34498, 34521), 'conans.util.files.load', 'load', (['user_presets_path'], {}), '(user_presets_path)\n', (34502, 34521), False, 'from conans.util.files import save, load, rmdir\n'), ((34593, 34625), 
'conans.util.files.load', 'load', (["user_presets['include'][0]"], {}), "(user_presets['include'][0])\n", (34597, 34625), False, 'from conans.util.files import save, load, rmdir\n'), ((35466, 35537), 'os.path.join', 'os.path.join', (['client.current_folder', '"""build"""', '"""static-17"""', '"""generators"""'], {}), "(client.current_folder, 'build', 'static-17', 'generators')\n", (35478, 35537), False, 'import os\n'), ((35614, 35637), 'conans.util.files.load', 'load', (['user_presets_path'], {}), '(user_presets_path)\n', (35618, 35637), False, 'from conans.util.files import save, load, rmdir\n'), ((35771, 35803), 'conans.util.files.load', 'load', (["user_presets['include'][1]"], {}), "(user_presets['include'][1])\n", (35775, 35803), False, 'from conans.util.files import save, load, rmdir\n'), ((32770, 32787), 'platform.system', 'platform.system', ([], {}), '()\n', (32785, 32787), False, 'import platform\n'), ((38894, 38940), 'os.path.join', 'os.path.join', (['c.current_folder', '"""conanfile.py"""'], {}), "(c.current_folder, 'conanfile.py')\n", (38906, 38940), False, 'import os\n'), ((39164, 39219), 'os.path.join', 'os.path.join', (['c.current_folder', '"""CMakeUserPresets.json"""'], {}), "(c.current_folder, 'CMakeUserPresets.json')\n", (39176, 39219), False, 'import os\n'), ((39363, 39380), 'platform.system', 'platform.system', ([], {}), '()\n', (39378, 39380), False, 'import platform\n'), ((39743, 39760), 'platform.system', 'platform.system', ([], {}), '()\n', (39758, 39760), False, 'import platform\n'), ((2836, 2896), 'os.path.join', 'os.path.join', (['client.current_folder', '"""conan_toolchain.cmake"""'], {}), "(client.current_folder, 'conan_toolchain.cmake')\n", (2848, 2896), False, 'import os\n'), ((6838, 6854), 'conans.test.assets.cmake.gen_cmakelists', 'gen_cmakelists', ([], {}), '()\n', (6852, 6854), False, 'from conans.test.assets.cmake import gen_cmakelists\n'), ((9456, 9472), 'conans.test.assets.cmake.gen_cmakelists', 'gen_cmakelists', ([], {}), 
'()\n', (9470, 9472), False, 'from conans.test.assets.cmake import gen_cmakelists\n'), ((10519, 10545), 'conans.test.assets.genconanfile.GenConanfile', 'GenConanfile', (['"""dep"""', '"""0.1"""'], {}), "('dep', '0.1')\n", (10531, 10545), False, 'from conans.test.assets.genconanfile import GenConanfile\n'), ((10744, 10761), 'platform.system', 'platform.system', ([], {}), '()\n', (10759, 10761), False, 'import platform\n'), ((11571, 11603), 'os.path.join', 'os.path.join', (['p_folder', '"""mylibs"""'], {}), "(p_folder, 'mylibs')\n", (11583, 11603), False, 'import os\n'), ((12117, 12146), 'os.path.join', 'os.path.join', (['p_folder', '"""lib"""'], {}), "(p_folder, 'lib')\n", (12129, 12146), False, 'import os\n'), ((15787, 15804), 'platform.system', 'platform.system', ([], {}), '()\n', (15802, 15804), False, 'import platform\n'), ((16627, 16644), 'platform.system', 'platform.system', ([], {}), '()\n', (16642, 16644), False, 'import platform\n'), ((18198, 18215), 'platform.system', 'platform.system', ([], {}), '()\n', (18213, 18215), False, 'import platform\n'), ((19878, 19895), 'platform.system', 'platform.system', ([], {}), '()\n', (19893, 19895), False, 'import platform\n'), ((32687, 32707), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (32701, 32707), False, 'import os\n'), ((38040, 38057), 'platform.system', 'platform.system', ([], {}), '()\n', (38055, 38057), False, 'import platform\n'), ((38971, 39078), 'textwrap.dedent', 'textwrap.dedent', (['"""\n\n [generators]\n CMakeToolchain\n\n [layout]\n cmake_layout\n\n """'], {}), '(\n """\n\n [generators]\n CMakeToolchain\n\n [layout]\n cmake_layout\n\n """\n )\n', (38986, 39078), False, 'import textwrap\n'), ((1388, 1402), 'conans.test.assets.genconanfile.GenConanfile', 'GenConanfile', ([], {}), '()\n', (1400, 1402), False, 'from conans.test.assets.genconanfile import GenConanfile\n'), ((2010, 2024), 'conans.test.assets.genconanfile.GenConanfile', 'GenConanfile', ([], {}), '()\n', (2022, 2024), 
False, 'from conans.test.assets.genconanfile import GenConanfile\n'), ((2518, 2532), 'conans.test.assets.genconanfile.GenConanfile', 'GenConanfile', ([], {}), '()\n', (2530, 2532), False, 'from conans.test.assets.genconanfile import GenConanfile\n'), ((7240, 7254), 'conans.test.assets.genconanfile.GenConanfile', 'GenConanfile', ([], {}), '()\n', (7252, 7254), False, 'from conans.test.assets.genconanfile import GenConanfile\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
########################################################################
#
# Copyright (c) 2016 Baidu.com, Inc. All Rights Reserved
#
########################################################################
"""
File: broadcast_manager.py
Author: haifeng(<EMAIL>)
Date: 2016/11/30 19:23:31
"""
import os
import json
import time
import socket
import struct
import decimal
import logging
import traceback
import threading
import functools
import rospy.core
from rospy.core import signal_shutdown
from rospy.impl.registration import Registration
from rospy.impl.registration import get_topic_manager
from rospy.impl.registration import get_service_manager
from rospy.impl.registration import get_node_handler
from rosgraph.network import parse_http_host_and_port,get_host_name
import sys
# Make the directories listed in LD_LIBRARY_PATH importable so the compiled
# `participant` extension module imported below can be located.
# BUGFIX: LD_LIBRARY_PATH may be unset, in which case os.environ.get()
# returns None and the original `env.split(':')` raised AttributeError at
# import time.  Empty segments (leading/trailing/double colons) are skipped.
env = os.environ.get('LD_LIBRARY_PATH')
if env:
    for sub_path in env.split(':'):
        if sub_path:
            sys.path.append(sub_path)
from rospy.impl import participant
# Command timeout (presumably seconds) -- TODO confirm against the callers.
CMD_TIMEOUT = 1.2
# Field names used in the JSON payloads broadcast between nodes; see
# BroadcastManager for how each key is filled in and consumed.
REQUEST_TYPE = 'request_type'
NODE_NAME = 'node_name'
XMLRPC_URI = 'xmlrpc_uri'
TIMESTAMP = 'timestamp'
NODE_TIME = "node_time"
TOPIC_NAME = "topic_name"
TOPIC_TYPE = "topic_type"
SERVICE_NAME = "service_name"
SERVICE_TYPE = "service_type"
SERVICE_URI = "service_uri"
"""
class Singleton(object):
def __new__(cls, *args, **kwargs):
if not hasattr(cls, '_instance'):
orig = super(Singleton, cls)
cls._instance = orig.__new__(cls, *args, **kwargs)
return cls._instance
"""
class Singleton(type):
    """Metaclass that caches one instance per class.

    Calling a class whose metaclass is Singleton returns the same object
    on every call; construction arguments are honoured only the first time.
    """

    # class -> its single cached instance (shared across all users).
    _instances = {}

    def __call__(cls, *args, **kwargs):
        """Return the cached instance, creating it on first use."""
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance
def byteify(input):
    """Recursively encode every text string inside *input* as UTF-8 bytes.

    Dicts and lists are rebuilt with their keys/values/elements converted;
    any other value is returned unchanged.

    :param input: arbitrary JSON-like structure (dict / list / text / other)
    :returns: the same structure with text strings replaced by byte strings

    BUGFIX/portability: the original used ``dict.iteritems`` and the bare
    name ``unicode``, both Python-2-only (NameError/AttributeError on
    Python 3).  Behaviour on Python 2 is unchanged.
    """
    # `str is bytes` holds only on Python 2, where text is `unicode`;
    # on Python 3 the `unicode` branch is never evaluated.
    text_type = unicode if str is bytes else str
    if isinstance(input, dict):
        # .items() works on both Python 2 and Python 3.
        return {byteify(key): byteify(value) for key, value in input.items()}
    if isinstance(input, list):
        return [byteify(element) for element in input]
    if isinstance(input, text_type):
        return input.encode('utf-8')
    return input
class BroadcastManager(object):
"""
BroadcastManager.
"""
__metaclass__ = Singleton
    def __init__(self, name=None, callback=None):
        """Initialise the manager and start the background receive thread.

        :param name: node name to broadcast under; ignored when a rospy node
            handler exists (its name wins), defaults to "_null_name" when
            neither is available.
        :param callback: list of request-type names this manager dispatches;
            when None a default list covering the master API is used.  Each
            entry maps to a ``_<type>Callback`` method looked up in run().
        """
        super(BroadcastManager, self).__init__()
        self._logger = logging.getLogger(__name__)
        self._logger.setLevel(logging.INFO)
        # service name -> service URI, filled by registerService().
        self._service_cache = {}
        self._stat_info = [[], [], []]
        self._node_info = []
        self._topic_info = []
        # Start time of this node in milliseconds (as a string); used to
        # resolve duplicate-name clashes between nodes in run().
        self._node_time = str(int(round(time.time()*1000)))
        if get_node_handler() is not None:
            self._name = get_node_handler().name
            # NOTE: the handler's uri may still be None here; run() waits
            # for it to be assigned before using it.
            self._uri = get_node_handler().uri
        else:
            if name is None:
                self._name = "_null_name"
            else:
                self._name = name
            self._uri = "_null_uri"
        if callback is None:
            self.callback = ["registerPublisher",
                             "unregisterPublisher",
                             "registerSubscriber",
                             "unregisterSubscriber",
                             "registerService",
                             "unregisterService",
                             "lookupService",
                             "getTopicTypes",
                             "lookupNode",
                             "registerNode",
                             ]
        else:
            self.callback = callback
        # Transport participant used to send/receive broadcast messages.
        self._participant = participant.Participant(self._name)
        self._participant.init_py()
        # Daemon thread running the receive/dispatch loop; see run().
        self._broardcast_manager_thread = threading.Thread(
            target=self.run, args=())
        self._broardcast_manager_thread.setDaemon(True)
        self._broardcast_manager_thread.start()
def run(self):
"""
brief info for: thread run method
"""
#print "starting broadcast_manager!"
self._logger.debug("starting broadcast_manager!")
if get_node_handler() is not None:
self._name = get_node_handler().name
while get_node_handler().uri is None:
time.sleep(0.0001)
self._uri = get_node_handler().uri
self._logger.debug("self.name:%s" % self._name)
self._logger.debug("self.uri:%s" % self._uri)
self._register_node()
#self.requestParam()
while True:
try:
msg = self._participant.read_msg()
if msg is None:
continue
if(len(msg) > 0):
data = self._unpack_msg(msg.strip())
if self._name == data[NODE_NAME] \
and data[REQUEST_TYPE] == "registerNode" \
and self._uri != data[XMLRPC_URI] \
and int(data[HEADER_OFFSET]) > int(self._node_time):
signal_shutdown("it has already same name node, exit it now.")
continue
self._logger.debug("recv data: %s " % data)
#print("recv data: %s " % data)
if data[REQUEST_TYPE] in self.callback:
self._registerNodeCallback(data)
cb = '_' + data[REQUEST_TYPE] + "Callback"
func = getattr(self, cb)
func(data)
pass
else:
self._logger.error("[broadcast_manager] invalid request type: %s" % data[REQUEST_TYPE])
else:
time.sleep(0.005)
except Exception as e:
self._logger.error("broadcast_manager thread error is %s" % e)
finally:
pass
def getUri(self, caller_id):
"""
getUri
"""
return 1, "", self._uri
def getPid(self, caller_id):
"""
Get the PID of this server
"""
return 1, "", os.getpid()
###Publisher/Subscirbe Service####
def registerPublisher(self, name, topic, datatype, uri):
"""
registerPublisher
"""
#data = ["registerPublisher", name, topic, datatype, uri]
data = self._set_header("registerPublisher")
data[TOPIC_NAME] = topic
data[TOPIC_TYPE] = datatype
self._send(self._pack_msg(data))
return 1, "Registered [%s] as publisher of [%s]" % (name, topic), []
def unregisterPublisher(self, name, topic, uri):
"""
unregisterPublisher
"""
data = self._set_header("unregisterPublisher")
data[TOPIC_NAME] = topic
self._send(self._pack_msg(data))
return 1, "unregisterPublisher" ,0
def registerSubscriber(self, name, topic, datatype, uri):
"""
registerSubscriber
"""
# print name, topic, datatype, uri
#data = ["registerSubscriber", name, topic, datatype, uri]
data = self._set_header("registerSubscriber")
data[TOPIC_NAME] = topic
data[TOPIC_TYPE] = datatype
self._send(self._pack_msg(data))
return 1, "Subscribed to [%s]" % topic, []
def unregisterSubscriber(self, name, topic, uri):
"""
unregisterSubscriber
"""
data = self._set_header("unregisterSubscriber")
data[TOPIC_NAME] = topic
self._send(self._pack_msg(data))
return 1, "unregisterSubscriber" ,0
def registerService(self, name, service_name, service_uri, uri):
"""
registerService
"""
self._service_cache[service_name] = service_uri
#data = ["registerService", name, service_name, service_uri, uri]
data = self._set_header("registerService")
data[SERVICE_NAME] = service_name
data[SERVICE_URI] = service_uri
self._send(self._pack_msg(data))
return 1, "Registered [%s] as provider of [%s] with service_uri[%s]" % \
(name, service_name, service_uri), 1
def unregisterService(self, name, service_name, service_uri):
"""
unregisterService
"""
data = self._set_header("unregisterService")
data[SERVICE_NAME] = service_name
data[SERVICE_URI] = service_uri
self._send(self._pack_msg(data))
return 1, "unregisterService" ,0
def lookupService(self, caller_id, service_name):
"""
lookupService
"""
if service_name in self._service_cache.keys():
return 1, "rosrpc URI: [%s]" % self._service_cache[service_name], self._service_cache[service_name]
else:
return -1, "no provider", ''
def _registerPublisherCallback(self, data):
name = data[NODE_NAME]
topic = data[TOPIC_NAME]
topic_type = data[TOPIC_TYPE]
topic_uri = data[XMLRPC_URI]
url_data = [topic_uri]
tmp = [topic, [name]]
if tmp not in self._stat_info[0]:
self._stat_info[0].append(tmp)
topic_info = [x for x in self._topic_info if x[0] == topic]
if topic_info:
topic_info[0][1] = topic_type
else:
self._topic_info.append([topic, topic_type])
tm = get_topic_manager()
try:
tm.lock.acquire()
if tm.has_subscription(topic):
self._logger.debug("I has sub topic : %s" % topic)
get_node_handler().publisherUpdate(name, topic, url_data)
except Exception as e:
self._logger.error(
"registerPublisherCallback error is %s" % e)
self._logger.error(traceback.format_exc())
finally:
tm.lock.release()
def _unregisterPublisherCallback(self, data):
name = data[NODE_NAME]
topic = data[TOPIC_NAME]
uri = data[XMLRPC_URI]
url_data = [uri]
tm = get_topic_manager()
try:
tm.lock.acquire()
if tm.has_subscription(topic):
self._logger.debug("I has sub topic, recv unregisSub: %s" % topic)
get_node_handler().reg_man.reg_removed(topic, url_data, Registration.SUB)
except Exception as e:
self._logger.error(
"unregisterPublisherCallback error is %s" % e)
self._logger.error(traceback.format_exc())
finally:
tm.lock.release()
pass
def _registerSubscriberCallback(self, data):
name = data[NODE_NAME]
topic = data[TOPIC_NAME]
datatype = data[TOPIC_TYPE]
uri = data[XMLRPC_URI]
url_data = [uri]
tmp = [topic, [name]]
if tmp not in self._stat_info[1]:
self._stat_info[1].append(tmp)
tm = get_topic_manager()
try:
tm.lock.acquire()
if tm.has_publication(topic):
self._logger.debug("I has pub topic :%s" % topic)
self.registerPublisher(
get_node_handler().name, topic, datatype, get_node_handler().uri)
except Exception as e:
self._logger.error(
"_registerSubscriberCallback error is %s" % e)
self._logger.error(traceback.format_exc())
finally:
tm.lock.release()
def _registerServiceCallback(self, data):
service_name = data[SERVICE_NAME]
service_uri = data[SERVICE_URI]
self._service_cache[service_name] = service_uri
def _unregisterServiceCallback(self, data):
service_name = data[SERVICE_NAME]
service_uri = data[SERVICE_URI]
if service_name in self._service_cache.keys():
del self._service_cache[service_name]
# def _lookupServiceCallback(self, name, service_name):
def _lookupServiceCallback(self, data):
print("deprecated API!")
name = data[NODE_NAME]
service_name = data[SERVICE_NAME]
# if self._service_cache.has_key(service_name):
if service_name in self._service_cache.keys():
#print self._uri
#v = ["registerService", get_node_handler().name, service_name,
# self._service_cache[service_name], get_node_handler().uri]
v = self._set_header("registerService")
v.append(service_name)
v.append(self._service_cache[service_name])
v.append(self._uri)
self._send(self._pack_msg(v))
######Graph Stat#######
def _registerNodeCallback(self, data):
name = data[NODE_NAME]
uri = data[XMLRPC_URI]
node_info = [x for x in self._node_info if x[0] == name]
if node_info:
node_info[0][1] = uri
else:
self._node_info.append([name, uri])
def _register_node(self):
"""
_register_node
"""
data = self._set_header("registerNode")
data[NODE_TIME] = self._node_time
self._send(self._pack_msg(data))
def lookupNode(self, caller_id, node_name):
"""
lookupNode
"""
#data = ["lookupNode", self._name, node_name]
"""
data = self._set_header("lookupNode")
data.append(node_name)
self._send(self._pack_msg(data))
# just for test
"""
node_info = [data for data in self._node_info if data[0] == node_name]
if node_info:
return 1, "node api", node_info[0][1]
else:
return -1, "unknown node" ,None
def _set_header(self, request, timestamp=None):
if timestamp is None:
nsec_time = str(int(round(time.time()*1000)))
else:
nsec_time = timestamp
header = {}
header[REQUEST_TYPE] = request
header[NODE_NAME] = self._name
header[XMLRPC_URI] = self._uri
header[TIMESTAMP] = nsec_time
return header
def _send(self, data):
"""
brief info for: Get _master_handler internal dict stuct according to dict_type
"""
self._participant.send(data)
def _recv(self, size=1024):
"""
brief info for: Get _master_handler internal dict stuct according to dict_type
"""
msg = addr = None
try:
msg, addr = self._sock.recvfrom(size)
except Exception as e:
self._logger.error("socket recv error is %s" % e)
self._logger.error(traceback.format_exc())
finally:
pass
return msg, addr
def _unpack_msg(self, msg):
try:
data = json.loads(msg, object_hook=byteify)
except Exception as e:
self._logger.error("parse json failed! %s" % e)
return data
def _pack_msg(self, data):
return json.dumps(data)
|
[
"sys.path.append",
"threading.Thread",
"rospy.core.signal_shutdown",
"os.getpid",
"rospy.impl.registration.get_topic_manager",
"json.loads",
"json.dumps",
"time.sleep",
"os.environ.get",
"time.time",
"rospy.impl.participant.Participant",
"traceback.format_exc",
"rospy.impl.registration.get_node_handler",
"logging.getLogger"
] |
[((839, 872), 'os.environ.get', 'os.environ.get', (['"""LD_LIBRARY_PATH"""'], {}), "('LD_LIBRARY_PATH')\n", (853, 872), False, 'import os\n'), ((909, 934), 'sys.path.append', 'sys.path.append', (['sub_path'], {}), '(sub_path)\n', (924, 934), False, 'import sys\n'), ((2359, 2386), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2376, 2386), False, 'import logging\n'), ((3567, 3602), 'rospy.impl.participant.Participant', 'participant.Participant', (['self._name'], {}), '(self._name)\n', (3590, 3602), False, 'from rospy.impl import participant\n'), ((3681, 3723), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.run', 'args': '()'}), '(target=self.run, args=())\n', (3697, 3723), False, 'import threading\n'), ((9333, 9352), 'rospy.impl.registration.get_topic_manager', 'get_topic_manager', ([], {}), '()\n', (9350, 9352), False, 'from rospy.impl.registration import get_topic_manager\n'), ((9992, 10011), 'rospy.impl.registration.get_topic_manager', 'get_topic_manager', ([], {}), '()\n', (10009, 10011), False, 'from rospy.impl.registration import get_topic_manager\n'), ((10861, 10880), 'rospy.impl.registration.get_topic_manager', 'get_topic_manager', ([], {}), '()\n', (10878, 10880), False, 'from rospy.impl.registration import get_topic_manager\n'), ((14849, 14865), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (14859, 14865), False, 'import json\n'), ((2635, 2653), 'rospy.impl.registration.get_node_handler', 'get_node_handler', ([], {}), '()\n', (2651, 2653), False, 'from rospy.impl.registration import get_node_handler\n'), ((4042, 4060), 'rospy.impl.registration.get_node_handler', 'get_node_handler', ([], {}), '()\n', (4058, 4060), False, 'from rospy.impl.registration import get_node_handler\n'), ((6100, 6111), 'os.getpid', 'os.getpid', ([], {}), '()\n', (6109, 6111), False, 'import os\n'), ((14654, 14690), 'json.loads', 'json.loads', (['msg'], {'object_hook': 'byteify'}), '(msg, object_hook=byteify)\n', (14664, 
14690), False, 'import json\n'), ((2692, 2710), 'rospy.impl.registration.get_node_handler', 'get_node_handler', ([], {}), '()\n', (2708, 2710), False, 'from rospy.impl.registration import get_node_handler\n'), ((2740, 2758), 'rospy.impl.registration.get_node_handler', 'get_node_handler', ([], {}), '()\n', (2756, 2758), False, 'from rospy.impl.registration import get_node_handler\n'), ((4099, 4117), 'rospy.impl.registration.get_node_handler', 'get_node_handler', ([], {}), '()\n', (4115, 4117), False, 'from rospy.impl.registration import get_node_handler\n'), ((4202, 4220), 'time.sleep', 'time.sleep', (['(0.0001)'], {}), '(0.0001)\n', (4212, 4220), False, 'import time\n'), ((4246, 4264), 'rospy.impl.registration.get_node_handler', 'get_node_handler', ([], {}), '()\n', (4262, 4264), False, 'from rospy.impl.registration import get_node_handler\n'), ((4154, 4172), 'rospy.impl.registration.get_node_handler', 'get_node_handler', ([], {}), '()\n', (4170, 4172), False, 'from rospy.impl.registration import get_node_handler\n'), ((5694, 5711), 'time.sleep', 'time.sleep', (['(0.005)'], {}), '(0.005)\n', (5704, 5711), False, 'import time\n'), ((9736, 9758), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (9756, 9758), False, 'import traceback\n'), ((10429, 10451), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (10449, 10451), False, 'import traceback\n'), ((11316, 11338), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (11336, 11338), False, 'import traceback\n'), ((14506, 14528), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (14526, 14528), False, 'import traceback\n'), ((2603, 2614), 'time.time', 'time.time', ([], {}), '()\n', (2612, 2614), False, 'import time\n'), ((5003, 5065), 'rospy.core.signal_shutdown', 'signal_shutdown', (['"""it has already same name node, exit it now."""'], {}), "('it has already same name node, exit it now.')\n", (5018, 5065), False, 'from rospy.core import 
signal_shutdown\n'), ((9522, 9540), 'rospy.impl.registration.get_node_handler', 'get_node_handler', ([], {}), '()\n', (9538, 9540), False, 'from rospy.impl.registration import get_node_handler\n'), ((11092, 11110), 'rospy.impl.registration.get_node_handler', 'get_node_handler', ([], {}), '()\n', (11108, 11110), False, 'from rospy.impl.registration import get_node_handler\n'), ((11134, 11152), 'rospy.impl.registration.get_node_handler', 'get_node_handler', ([], {}), '()\n', (11150, 11152), False, 'from rospy.impl.registration import get_node_handler\n'), ((10197, 10215), 'rospy.impl.registration.get_node_handler', 'get_node_handler', ([], {}), '()\n', (10213, 10215), False, 'from rospy.impl.registration import get_node_handler\n'), ((13703, 13714), 'time.time', 'time.time', ([], {}), '()\n', (13712, 13714), False, 'import time\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
gnpy.core.request
=================
This module contains path request functionality.
This functionality allows the user to provide a JSON request
file in accordance with a Yang model for requesting path
computations and returns path results in terms of path
and feasibility
See: draft-ietf-teas-yang-path-computation-01.txt
"""
from collections import namedtuple
from logging import getLogger, basicConfig, CRITICAL, DEBUG, INFO
from networkx import (dijkstra_path, NetworkXNoPath)
from numpy import mean
from gnpy.core.service_sheet import convert_service_sheet, Request_element, Element
from gnpy.core.elements import Transceiver, Roadm, Edfa, Fused
from gnpy.core.network import set_roadm_loss
from gnpy.core.utils import db2lin, lin2db
from gnpy.core.info import create_input_spectral_information, SpectralInformation, Channel, Power
from copy import copy, deepcopy
from csv import writer
logger = getLogger(__name__)
# All fields a path computation request carries; Path_request flattens an
# instance of this namedtuple into attributes (trx_type -> tsp,
# trx_mode -> tsp_mode).
RequestParams = namedtuple('RequestParams','request_id source destination trx_type'+
' trx_mode nodes_list loose_list spacing power nb_channel frequency format baud_rate OSNR bit_rate roll_off')
class Path_request:
    """A single path computation request, flattened from RequestParams.

    Keyword arguments must match the RequestParams fields; the transceiver
    fields are exposed under their short names (tsp, tsp_mode).
    """

    # instance attribute -> RequestParams field (identity unless renamed)
    _ATTR_TO_FIELD = {
        'request_id': 'request_id',
        'source': 'source',
        'destination': 'destination',
        'tsp': 'trx_type',
        'tsp_mode': 'trx_mode',
        'baud_rate': 'baud_rate',
        'nodes_list': 'nodes_list',
        'loose_list': 'loose_list',
        'spacing': 'spacing',
        'power': 'power',
        'nb_channel': 'nb_channel',
        'frequency': 'frequency',
        'format': 'format',
        'OSNR': 'OSNR',
        'bit_rate': 'bit_rate',
        'roll_off': 'roll_off',
    }

    def __init__(self, *args, **params):
        params = RequestParams(**params)
        # Copy every request field onto the instance, applying the
        # attribute renames declared in _ATTR_TO_FIELD.
        for attr, field in self._ATTR_TO_FIELD.items():
            setattr(self, attr, getattr(params, field))

    def __str__(self):
        summary = [f'{type(self).__name__} {self.request_id}',
                   f'source: {self.source}',
                   f'destination: {self.destination}']
        return '\n\t'.join(summary)

    def __repr__(self):
        details = [f'{type(self).__name__} {self.request_id}',
                   f'source: \t{self.source}',
                   f'destination:\t{self.destination}',
                   f'trx type:\t{self.tsp}',
                   f'trx mode:\t{self.tsp_mode}',
                   f'baud_rate:\t{self.baud_rate * 1e-9} Gbaud',
                   f'bit_rate:\t{self.bit_rate * 1e-9} Gb/s',
                   f'spacing:\t{self.spacing * 1e-9} GHz',
                   # the original concatenates a trailing newline onto
                   # the last entry; keep that exact output
                   f'power: \t{round(lin2db(self.power)+30,2)} dBm' + '\n']
        return '\n\t'.join(details)
class Result_element(Element):
    """
    Wraps one (request, computed path) pair and renders it as the
    Yang-model path computation result dict
    (draft-ietf-teas-yang-path-computation-01.txt).
    """
    def __init__(self,path_request,computed_path):
        self.path_id = path_request.request_id
        self.path_request = path_request
        self.computed_path = computed_path
        # hop label per path element: 'tsp - mode' at transceivers,
        # a placeholder everywhere else
        hop_type = []
        for e in computed_path :
            if isinstance(e, Transceiver) :
                hop_type.append(' - '.join([path_request.tsp,path_request.tsp_mode]))
            else:
                hop_type.append('not recorded')
        self.hop_type = hop_type
    uid = property(lambda self: repr(self))
    @property
    def pathresult(self):
        """
        Result dict.  An empty computed path (blocked request) reports
        every metric as the string 'None' and lists only the requested
        source and destination as hops.
        """
        if not self.computed_path:
            return {
                   'path-id': self.path_id,
                   'path-properties':{
                       'path-metric': [
                           {
                           'metric-type': 'SNR@bandwidth',
                           'accumulative-value': 'None'
                           },
                           {
                           'metric-type': 'SNR@0.1nm',
                           'accumulative-value': 'None'
                           },
                           {
                           'metric-type': 'OSNR@bandwidth',
                           'accumulative-value': 'None'
                           },
                           {
                           'metric-type': 'OSNR@0.1nm',
                           'accumulative-value': 'None'
                           },
                           {
                           'metric-type': 'reference_power',
                           'accumulative-value': self.path_request.power
                           }
                        ],
                        'path-srlgs': {
                            'usage': 'not used yet',
                            'values': 'not used yet'
                        },
                        'path-route-objects': [
                            {
                            'path-route-object': {
                                'index': 0,
                                'unnumbered-hop': {
                                    'node-id': self.path_request.source,
                                    'link-tp-id': self.path_request.source,
                                    'hop-type': ' - '.join([self.path_request.tsp, self.path_request.tsp_mode]),
                                    'direction': 'not used'
                                },
                                'label-hop': {
                                    'te-label': {
                                        'generic': 'not used yet',
                                        'direction': 'not used yet'
                                    }
                                }
                            }
                            },
                            {
                            'path-route-object': {
                                'index': 1,
                                'unnumbered-hop': {
                                    'node-id': self.path_request.destination,
                                    'link-tp-id': self.path_request.destination,
                                    'hop-type': ' - '.join([self.path_request.tsp, self.path_request.tsp_mode]),
                                    'direction': 'not used'
                                },
                                'label-hop': {
                                    'te-label': {
                                        'generic': 'not used yet',
                                        'direction': 'not used yet'
                                    }
                                }
                            }
                            }
                        ]
                    }
                }
        else:
            return {
                   'path-id': self.path_id,
                   'path-properties':{
                       'path-metric': [
                           {
                           'metric-type': 'SNR@bandwidth',
                           'accumulative-value': round(mean(self.computed_path[-1].snr),2)
                           },
                           {
                           'metric-type': 'SNR@0.1nm',
                           'accumulative-value': round(mean(self.computed_path[-1].snr+lin2db(self.path_request.baud_rate/12.5e9)),2)
                           },
                           {
                           'metric-type': 'OSNR@bandwidth',
                           'accumulative-value': round(mean(self.computed_path[-1].osnr_ase),2)
                           },
                           {
                           'metric-type': 'OSNR@0.1nm',
                           'accumulative-value': round(mean(self.computed_path[-1].osnr_ase_01nm),2)
                           },
                           {
                           'metric-type': 'reference_power',
                           'accumulative-value': self.path_request.power
                           }
                        ],
                        'path-srlgs': {
                            'usage': 'not used yet',
                            'values': 'not used yet'
                        },
                        'path-route-objects': [
                            # NOTE(review): index(n) is a linear scan per hop
                            # and returns the first occurrence; a path that
                            # visits the same element twice would repeat the
                            # first index.
                            {
                            'path-route-object': {
                                'index': self.computed_path.index(n),
                                'unnumbered-hop': {
                                    'node-id': n.uid,
                                    'link-tp-id': n.uid,
                                    'hop-type': self.hop_type[self.computed_path.index(n)],
                                    'direction': 'not used'
                                },
                                'label-hop': {
                                    'te-label': {
                                        'generic': 'not used yet',
                                        'direction': 'not used yet'
                                    }
                                }
                            }
                            } for n in self.computed_path
                        ]
                    }
                }
    @property
    def json(self):
        """Alias for pathresult."""
        return self.pathresult
def compute_constrained_path(network, req):
    """
    Build a path from req.source through each constraint in req.nodes_list
    using a Dijkstra shortest path between consecutive constraint nodes.
    A constraint may name a transceiver uid, a roadm ('roadm <n>') or an
    egress edfa ('egress edfa in <n>...').  Returns the list of network
    elements traversed, or [] when a strict (non-'loose') constraint
    cannot be reached.
    """
    trx = [n for n in network.nodes() if isinstance(n, Transceiver)]
    roadm = [n for n in network.nodes() if isinstance(n, Roadm)]
    edfa = [n for n in network.nodes() if isinstance(n, Edfa)]
    source = next(el for el in trx if el.uid == req.source)
    # start the path with its source
    # TODO : avoid loops due to constraints , guess name base on string,
    # avoid crashing if on req is not correct
    total_path = [source]
    for n in req.nodes_list:
        # print(n)
        # resolve the constraint name: trx uid first, then roadm, then edfa
        try :
            node = next(el for el in trx if el.uid == n)
        except StopIteration:
            try:
                node = next(el for el in roadm if el.uid == f'roadm {n}')
            except StopIteration:
                try:
                    node = next(el for el in edfa
                        if el.uid.startswith(f'egress edfa in {n}'))
                except StopIteration:
                    msg = f'could not find node : {n} in network topology: \
                        not a trx, roadm, edfa or fused element'
                    logger.critical(msg)
                    raise ValueError(msg)
        # extend path list without repeating source -> skip first element in the list
        try:
            total_path.extend(dijkstra_path(network, source, node)[1:])
            source = node
        except NetworkXNoPath:
            # for debug
            # print(req.loose_list)
            # print(req.nodes_list.index(n))
            if req.loose_list[req.nodes_list.index(n)] == 'loose':
                # loose constraint: skip this node; note that `source` is
                # not advanced, so the next hop starts from the previous
                # reachable node
                print(f'could not find a path from {source.uid} to loose node : {n} in network topology')
                print(f'node {n} is skipped')
            else:
                # strict constraint unreachable: report and return an
                # empty path (the request is blocked)
                msg = f'could not find a path from {source.uid} to node : {n} in network topology'
                logger.critical(msg)
                #raise ValueError(msg)
                print(msg)
                total_path = []
    # preparing disjonction feature
    # for p in all_simple_paths(network,\
    #     source=next(el for el in trx if el.uid == req.source),\
    #     target=next(el for el in trx if el.uid == req.destination)):
    #     print([e.uid for e in p if isinstance(e,Roadm)])
    return total_path
def propagate(path, req, equipment, show=False):
    """Propagate the request's spectrum through every element of *path*.

    Roadm loss is refreshed first because it may vary during a power
    sweep (power mode only).  Each element is applied in order to the
    spectral information; set *show* to print every element traversed.
    Returns the (propagated) path.
    """
    set_roadm_loss(path, equipment, lin2db(req.power * 1e3))
    spectral_info = create_input_spectral_information(
        req.frequency['min'], req.roll_off,
        req.baud_rate, req.power, req.spacing, req.nb_channel)
    for element in path:
        spectral_info = element(spectral_info)
        if show:
            print(element)
    return path
def jsontocsv(json_data, equipment, fileout):
    """Flatten a path computation JSON result into CSV rows.

    Reads a result dict compliant with the Yang model for requesting
    path computation (draft-ietf-teas-yang-path-computation-01.txt) and
    writes a header plus one row per computed path into *fileout*.

    Raises ValueError when a path's transceiver type/mode pair cannot be
    found in the equipment library.
    """
    mywriter = writer(fileout)
    mywriter.writerow(('path-id','source','destination','transponder-type',\
        'transponder-mode','baud rate (Gbaud)', 'input power (dBm)','path',\
        'OSNR@bandwidth','OSNR@0.1nm','SNR@bandwidth','SNR@0.1nm','Pass?'))
    for p in json_data['path']:
        path_id = p['path-id']
        hops = p['path-properties']['path-route-objects']
        source = hops[0]['path-route-object']['unnumbered-hop']['node-id']
        destination = hops[-1]['path-route-object']['unnumbered-hop']['node-id']
        pth = ' | '.join(e['path-route-object']['unnumbered-hop']['node-id']
                         for e in hops)
        [tsp, mode] = hops[0]['path-route-object']['unnumbered-hop']\
            ['hop-type'].split(' - ')
        # find the min acceptable OSNR and baud rate in the eqpt library
        # for this (type, format).  BUGFIX: next() on an exhausted
        # generator raises StopIteration, not IndexError, so the previous
        # handler could never fire -- and it referenced an undefined
        # `self`.  Use next()'s default instead and raise explicitly.
        mode_params = next((m for m in equipment['Transceiver'][tsp].mode
                            if m['format'] == mode), None)
        if mode_params is None:
            msg = f'could not find tsp : {tsp} with mode: {mode} in eqpt library'
            raise ValueError(msg)
        minosnr = mode_params['OSNR']
        baud_rate = mode_params['baud_rate']

        def metric(name):
            # accumulated value of the named path metric
            return next(e['accumulative-value']
                        for e in p['path-properties']['path-metric']
                        if e['metric-type'] == name)
        output_snr = metric('SNR@0.1nm')
        output_snrbandwidth = metric('SNR@bandwidth')
        output_osnr = metric('OSNR@0.1nm')
        output_osnrbandwidth = metric('OSNR@bandwidth')
        power = metric('reference_power')
        # a blocked path reports its metrics as strings ('None')
        if isinstance(output_snr, str):
            isok = ''
        else:
            isok = output_snr >= minosnr
        mywriter.writerow((path_id,
            source,
            destination,
            tsp,
            mode,
            baud_rate*1e-9,
            round(lin2db(power)+30,2),
            pth,
            output_osnrbandwidth,
            output_osnr,
            output_snrbandwidth,
            output_snr,
            isok
            ))
|
[
"csv.writer",
"gnpy.core.info.create_input_spectral_information",
"networkx.dijkstra_path",
"numpy.mean",
"collections.namedtuple",
"gnpy.core.utils.lin2db",
"logging.getLogger"
] |
[((959, 978), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (968, 978), False, 'from logging import getLogger, basicConfig, CRITICAL, DEBUG, INFO\n'), ((997, 1186), 'collections.namedtuple', 'namedtuple', (['"""RequestParams"""', "('request_id source destination trx_type' +\n ' trx_mode nodes_list loose_list spacing power nb_channel frequency format baud_rate OSNR bit_rate roll_off'\n )"], {}), "('RequestParams', 'request_id source destination trx_type' +\n ' trx_mode nodes_list loose_list spacing power nb_channel frequency format baud_rate OSNR bit_rate roll_off'\n )\n", (1007, 1186), False, 'from collections import namedtuple\n'), ((11596, 11725), 'gnpy.core.info.create_input_spectral_information', 'create_input_spectral_information', (["req.frequency['min']", 'req.roll_off', 'req.baud_rate', 'req.power', 'req.spacing', 'req.nb_channel'], {}), "(req.frequency['min'], req.roll_off, req.\n baud_rate, req.power, req.spacing, req.nb_channel)\n", (11629, 11725), False, 'from gnpy.core.info import create_input_spectral_information, SpectralInformation, Channel, Power\n'), ((12089, 12104), 'csv.writer', 'writer', (['fileout'], {}), '(fileout)\n', (12095, 12104), False, 'from csv import writer\n'), ((11564, 11590), 'gnpy.core.utils.lin2db', 'lin2db', (['(req.power * 1000.0)'], {}), '(req.power * 1000.0)\n', (11570, 11590), False, 'from gnpy.core.utils import db2lin, lin2db\n'), ((10444, 10480), 'networkx.dijkstra_path', 'dijkstra_path', (['network', 'source', 'node'], {}), '(network, source, node)\n', (10457, 10480), False, 'from networkx import dijkstra_path, NetworkXNoPath\n'), ((14545, 14558), 'gnpy.core.utils.lin2db', 'lin2db', (['power'], {}), '(power)\n', (14551, 14558), False, 'from gnpy.core.utils import db2lin, lin2db\n'), ((2769, 2787), 'gnpy.core.utils.lin2db', 'lin2db', (['self.power'], {}), '(self.power)\n', (2775, 2787), False, 'from gnpy.core.utils import db2lin, lin2db\n'), ((6928, 6960), 'numpy.mean', 'mean', 
(['self.computed_path[-1].snr'], {}), '(self.computed_path[-1].snr)\n', (6932, 6960), False, 'from numpy import mean\n'), ((7386, 7423), 'numpy.mean', 'mean', (['self.computed_path[-1].osnr_ase'], {}), '(self.computed_path[-1].osnr_ase)\n', (7390, 7423), False, 'from numpy import mean\n'), ((7597, 7639), 'numpy.mean', 'mean', (['self.computed_path[-1].osnr_ase_01nm'], {}), '(self.computed_path[-1].osnr_ase_01nm)\n', (7601, 7639), False, 'from numpy import mean\n'), ((7165, 7216), 'gnpy.core.utils.lin2db', 'lin2db', (['(self.path_request.baud_rate / 12500000000.0)'], {}), '(self.path_request.baud_rate / 12500000000.0)\n', (7171, 7216), False, 'from gnpy.core.utils import db2lin, lin2db\n')]
|
import pytest
from mock import Mock, MagicMock, patch
from dateutil.rrule import rrule, DAILY
import datetime
import trellostats
from trellostats import TrelloStats
from trellostats.settings import TOKEN_URL, LIST_URL, BOARD_URL
from trellostats.trellostats import TrelloStatsException
from requests.exceptions import ConnectionError
@pytest.fixture
def ts_obj():
    """A TrelloStats instance wired with mock credentials and board id."""
    context = {'app_key': Mock(), 'app_token': Mock(), 'board_id': Mock()}
    return TrelloStats(context)
@patch('trellostats.TrelloStats.get_lists')
def test_get_list_id_from_name_works(mock_get_lists, ts_obj):
    # A list whose name matches resolves to its id.
    mock_get_lists.return_value = [{'id': 'eh23jnd2', 'name': 'Thang'}]
    assert ts_obj.get_list_id_from_name("Thang") == 'eh23jnd2'
@patch('trellostats.TrelloStats.get_lists')
def test_get_list_id_from_name_is_none_with_nonexistent_name(mock_get_lists,
                                                             ts_obj):
    # No list carries the requested name, so the lookup is falsy.
    mock_get_lists.return_value = [{'id': 'eh23jnd2', 'name': 'Thang'}]
    assert not ts_obj.get_list_id_from_name("NotThang")
@patch('requests.get')
def test_get_lists(mock_get, ts_obj):
    # get_lists must hit the board URL built from the stored credentials.
    ts_obj.get_lists()
    expected_url = BOARD_URL.format(ts_obj.board_id,
                                    ts_obj.app_key,
                                    ts_obj.app_token)
    mock_get.assert_called_with(expected_url)
@patch('requests.get')
def test_get_noneexisitent_done_board_returns_trellostatserror(mock_get, ts_obj):
    # Arrange the failure before entering the raises context.
    mock_get.side_effect = ValueError
    with pytest.raises(TrelloStatsException):
        ts_obj.get_list_id_from_name("Thang")
@patch('requests.get')
def test_no_connection_board_returns_trellostatserror(mock_get, ts_obj):
    # A network failure is wrapped in the library's own exception.
    mock_get.side_effect = ConnectionError
    with pytest.raises(TrelloStatsException):
        ts_obj.get_list_id_from_name("Thang")
@patch('trellostats.trellostats.webbrowser')
def test_get_token_opens_browser_with_right_token(mock_web, ts_obj):
    # get_token must open the browser exactly once, on the token URL.
    ts_obj.get_token()
    expected_url = TOKEN_URL.format(ts_obj.app_key)
    mock_web.open.assert_called_once_with(expected_url)
@patch('trellostats.requests.get')
def test_get_list_data(mock_get, ts_obj):
    # get_list_data must request the list URL built from the credentials.
    ts_obj.get_list_data('listylist')
    expected_url = LIST_URL.format('listylist',
                                   ts_obj.app_key,
                                   ts_obj.app_token)
    mock_get.assert_called_with(expected_url)
@patch('trellostats.trellostats.parse')
@patch('__builtin__.sorted')
def test__get_cycle_time(mock_sorted, mock_parse, ts_obj):
    """_get_cycle_time reads the card history and date-sorts/parses it.

    BUGFIX: @patch decorators apply bottom-up, so the innermost patch
    (sorted) supplies the FIRST mock argument; the original signature had
    the two mocks swapped, so mock_parse actually held the sorted mock.
    """
    mock_card_history = MagicMock()
    mock_card_history.json.return_value = [dict(date=d) for d in xrange(10)]
    ts_obj._get_cycle_time(mock_card_history)
    assert mock_card_history.json.called
    assert mock_parse.called
    assert mock_sorted.called
@patch('trellostats.TrelloStats._get_history_for_cards')
@patch('trellostats.TrelloStats._get_cycle_time')
@patch('trellostats.trellostats.np')
def test_cycle_time_happy_path(mock_np, mock_gct, mock_ghfc, ts_obj):
    """cycle_time runs the history -> cycle-time -> mean pipeline.

    BUGFIXES vs the original:
    - renamed: a later test of the same name shadowed this one, so it was
      never collected by pytest;
    - @patch decorators apply bottom-up, so the np patch is the first
      mock argument (the original parameter order was reversed);
    - `assert c` referenced an undefined name (should be `ct`);
    - bare `mock.assert_called` attribute accesses were no-ops -- use
      the `.called` flag instead.
    """
    cards = MagicMock()
    ct = ts_obj.cycle_time(cards)
    assert mock_ghfc.called
    assert mock_gct.called
    # presumably cycle_time averages via np.mean -- confirm against impl
    assert mock_np.mean.called
    assert ct is not None
@patch('trellostats.TrelloStats._get_history_for_cards')
def test_cycle_time_wraps_attribute_error(mock_ghfc, ts_obj):
    """A history failure surfaces as TrelloStatsException.

    BUGFIX: renamed from test_cycle_time -- it shared its name with the
    test above, so only this definition was collected and the other never
    ran.  The side_effect is also arranged before the raises context.
    """
    mock_ghfc.side_effect = AttributeError
    with pytest.raises(TrelloStatsException):
        ts_obj.cycle_time(MagicMock())
@patch('trellostats.grequests.map')
def test_get_history_for_cards(mock_map, ts_obj):
    # Fetching card histories goes through grequests.map.
    fake_cards = MagicMock(spec=dict)
    ts_obj._get_history_for_cards(fake_cards)
    assert mock_map.called
def test_repr(ts_obj):
    # The repr must identify the object as a TrelloStats instance.
    text = repr(ts_obj)
    assert text.startswith('<TrelloStats')
|
[
"trellostats.TrelloStats",
"mock.patch",
"trellostats.settings.LIST_URL.format",
"pytest.raises",
"trellostats.settings.TOKEN_URL.format",
"trellostats.settings.BOARD_URL.format",
"mock.Mock",
"mock.MagicMock"
] |
[((482, 524), 'mock.patch', 'patch', (['"""trellostats.TrelloStats.get_lists"""'], {}), "('trellostats.TrelloStats.get_lists')\n", (487, 524), False, 'from mock import Mock, MagicMock, patch\n'), ((747, 789), 'mock.patch', 'patch', (['"""trellostats.TrelloStats.get_lists"""'], {}), "('trellostats.TrelloStats.get_lists')\n", (752, 789), False, 'from mock import Mock, MagicMock, patch\n'), ((1090, 1111), 'mock.patch', 'patch', (['"""requests.get"""'], {}), "('requests.get')\n", (1095, 1111), False, 'from mock import Mock, MagicMock, patch\n'), ((1392, 1413), 'mock.patch', 'patch', (['"""requests.get"""'], {}), "('requests.get')\n", (1397, 1413), False, 'from mock import Mock, MagicMock, patch\n'), ((1647, 1668), 'mock.patch', 'patch', (['"""requests.get"""'], {}), "('requests.get')\n", (1652, 1668), False, 'from mock import Mock, MagicMock, patch\n'), ((1894, 1937), 'mock.patch', 'patch', (['"""trellostats.trellostats.webbrowser"""'], {}), "('trellostats.trellostats.webbrowser')\n", (1899, 1937), False, 'from mock import Mock, MagicMock, patch\n'), ((2109, 2142), 'mock.patch', 'patch', (['"""trellostats.requests.get"""'], {}), "('trellostats.requests.get')\n", (2114, 2142), False, 'from mock import Mock, MagicMock, patch\n'), ((2435, 2473), 'mock.patch', 'patch', (['"""trellostats.trellostats.parse"""'], {}), "('trellostats.trellostats.parse')\n", (2440, 2473), False, 'from mock import Mock, MagicMock, patch\n'), ((2475, 2502), 'mock.patch', 'patch', (['"""__builtin__.sorted"""'], {}), "('__builtin__.sorted')\n", (2480, 2502), False, 'from mock import Mock, MagicMock, patch\n'), ((2836, 2891), 'mock.patch', 'patch', (['"""trellostats.TrelloStats._get_history_for_cards"""'], {}), "('trellostats.TrelloStats._get_history_for_cards')\n", (2841, 2891), False, 'from mock import Mock, MagicMock, patch\n'), ((2893, 2941), 'mock.patch', 'patch', (['"""trellostats.TrelloStats._get_cycle_time"""'], {}), "('trellostats.TrelloStats._get_cycle_time')\n", (2898, 2941), False, 'from 
mock import Mock, MagicMock, patch\n'), ((2943, 2978), 'mock.patch', 'patch', (['"""trellostats.trellostats.np"""'], {}), "('trellostats.trellostats.np')\n", (2948, 2978), False, 'from mock import Mock, MagicMock, patch\n'), ((3197, 3252), 'mock.patch', 'patch', (['"""trellostats.TrelloStats._get_history_for_cards"""'], {}), "('trellostats.TrelloStats._get_history_for_cards')\n", (3202, 3252), False, 'from mock import Mock, MagicMock, patch\n'), ((3429, 3463), 'mock.patch', 'patch', (['"""trellostats.grequests.map"""'], {}), "('trellostats.grequests.map')\n", (3434, 3463), False, 'from mock import Mock, MagicMock, patch\n'), ((454, 479), 'trellostats.TrelloStats', 'TrelloStats', (['mock_context'], {}), '(mock_context)\n', (465, 479), False, 'from trellostats import TrelloStats\n'), ((2586, 2597), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (2595, 2597), False, 'from mock import Mock, MagicMock, patch\n'), ((3050, 3061), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (3059, 3061), False, 'from mock import Mock, MagicMock, patch\n'), ((1205, 1272), 'trellostats.settings.BOARD_URL.format', 'BOARD_URL.format', (['ts_obj.board_id', 'ts_obj.app_key', 'ts_obj.app_token'], {}), '(ts_obj.board_id, ts_obj.app_key, ts_obj.app_token)\n', (1221, 1272), False, 'from trellostats.settings import TOKEN_URL, LIST_URL, BOARD_URL\n'), ((1505, 1540), 'pytest.raises', 'pytest.raises', (['TrelloStatsException'], {}), '(TrelloStatsException)\n', (1518, 1540), False, 'import pytest\n'), ((1751, 1786), 'pytest.raises', 'pytest.raises', (['TrelloStatsException'], {}), '(TrelloStatsException)\n', (1764, 1786), False, 'import pytest\n'), ((2072, 2104), 'trellostats.settings.TOKEN_URL.format', 'TOKEN_URL.format', (['ts_obj.app_key'], {}), '(ts_obj.app_key)\n', (2088, 2104), False, 'from trellostats.settings import TOKEN_URL, LIST_URL, BOARD_URL\n'), ((2255, 2317), 'trellostats.settings.LIST_URL.format', 'LIST_URL.format', (['"""listylist"""', 'ts_obj.app_key', 'ts_obj.app_token'], {}), 
"('listylist', ts_obj.app_key, ts_obj.app_token)\n", (2270, 2317), False, 'from trellostats.settings import TOKEN_URL, LIST_URL, BOARD_URL\n'), ((3302, 3337), 'pytest.raises', 'pytest.raises', (['TrelloStatsException'], {}), '(TrelloStatsException)\n', (3315, 3337), False, 'import pytest\n'), ((3546, 3566), 'mock.MagicMock', 'MagicMock', ([], {'spec': 'dict'}), '(spec=dict)\n', (3555, 3566), False, 'from mock import Mock, MagicMock, patch\n'), ((400, 406), 'mock.Mock', 'Mock', ([], {}), '()\n', (404, 406), False, 'from mock import Mock, MagicMock, patch\n'), ((418, 424), 'mock.Mock', 'Mock', ([], {}), '()\n', (422, 424), False, 'from mock import Mock, MagicMock, patch\n'), ((435, 441), 'mock.Mock', 'Mock', ([], {}), '()\n', (439, 441), False, 'from mock import Mock, MagicMock, patch\n'), ((3413, 3424), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (3422, 3424), False, 'from mock import Mock, MagicMock, patch\n')]
|
"""
----------------------------------------------------------------------
--- jumeg.jumeg_noise_reducer --------------------------------
----------------------------------------------------------------------
author : <NAME>
email : <EMAIL>
last update: 02.05.2019
version : 1.14
----------------------------------------------------------------------
Based on following publications:
----------------------------------------------------------------------
<NAME>., 'Environmental Noise Cancellation for
Biomagnetic Measurements', Advances in Biomagnetism,
Plenum Press, New York, 1989
----------------------------------------------------------------------
s'_i(t) = s_i(t) - sum(w_ij*r_j(t), j=1,nref)
where
s_i are the signal traces, i=1,nsig
r_j are the reference traces, j=1,nref after DC removal
w_ij are weights determined by minimizing
<(s'_i(t)-<s'_i>)^2> with <x> temporal mean
Typically s_i are magnetic signal channels and
r_j (selected) magnetic reference channels, but
other refs are possible.
----------------------------------------------------------------------
How to use the jumeg_noise_reducer?
----------------------------------------------------------------------
from jumeg import jumeg_noise_reducer
jumeg_noise_reducer.noise_reducer(fname_raw)
--> for further comments we refer directly to the functions
----------------------------------------------------------------------
"""
# Author: EE
# 150203/EE/
# 150619/EE/ fix for tmin/tmax-arg
# 170131/EE/ modified handling of refnotch-arg (no auto-harmonics)
# 180629/EE/ explicit spec. for reference-filter ('firwin','hann')
# 190103/EE/ fixed infosig-arg for _is_good()
# 190208/EE/ prep. f. transition to Python3
# 190502/EE/ Python3-version
#
# License: BSD (3-clause)
# cf. https://www.johndcook.com/blog/2019/01/09/projecting-unicode-to-ascii/
# for a ruggedized version of channel_indices_by_type()?
from builtins import str
from builtins import range
import sys # for sys.stdout.flush()
import os
import numpy as np
import time
import copy
import warnings
from math import floor, ceil
import mne
from mne.utils import logger
from mne.epochs import _is_good
from mne.io.pick import channel_indices_by_type
from jumeg.jumeg_utils import get_files_from_list
TINY = 1.e-38
SVD_RELCUTOFF = 1.e-08
##################################################
#
# generate plot of power spectrum before and
# after noise reduction
#
##################################################
def plot_denoising(fname_raw, fmin=0, fmax=300, tmin=0.0, tmax=60.0,
                   proj=False, n_fft=4096, color='blue',
                   stim_name=None, event_id=1,
                   tmin_stim=-0.2, tmax_stim=0.5,
                   area_mode='range', area_alpha=0.33, n_jobs=1,
                   title1='before denoising', title2='after denoising',
                   info=None, show=True, fnout=None):
    """Plot the power spectral density across channels to show denoising.

    Parameters
    ----------
    fname_raw : list or str
        List of raw files, without denoising and with for comparison.
    tmin : float
        Start time for calculations.
    tmax : float
        End time for calculations.
    fmin : float
        Start frequency to consider.
    fmax : float
        End frequency to consider.
    proj : bool
        Apply projection.
    n_fft : int
        Number of points to use in Welch FFT calculations.
    color : str | tuple
        A matplotlib-compatible color to use.
    area_mode : str | None
        Mode for plotting area. If 'std', the mean +/- 1 STD (across channels)
        will be plotted. If 'range', the min and max (across channels) will be
        plotted. Bad channels will be excluded from these calculations.
        If None, no area will be plotted.
    area_alpha : float
        Alpha for the area.
    info : bool
        Display information in the figure.
    show : bool
        Show figure.
    fnout : str
        Name of the saved output figure. If none, no figure will be saved.
    title1, title2 : str
        Title for two psd plots.
    n_jobs : int
        Number of jobs to use for parallel computation.
    stim_name : str
        Name of the stim channel. If stim_name is set, the plot of epochs
        average is also shown alongside the PSD plots.
    event_id : int
        ID of the stim event. (only when stim_name is set)

    Example Usage
    -------------
    plot_denoising(['orig-raw.fif', 'orig,nr-raw.fif'], fnout='example')
    """
    from matplotlib import gridspec as grd
    import matplotlib.pyplot as plt
    from mne.time_frequency import psd_welch

    fnraw = get_files_from_list(fname_raw)

    # ---------------------------------
    # estimate power spectrum
    # ---------------------------------
    psds_all = []
    freqs_all = []

    # loop across all filenames
    for fname in fnraw:
        # read in data
        raw = mne.io.Raw(fname, preload=True)
        picks = mne.pick_types(raw.info, meg='mag', eeg=False,
                               stim=False, eog=False, exclude='bads')
        if area_mode not in [None, 'std', 'range']:
            raise ValueError('"area_mode" must be "std", "range", or None')
        psds, freqs = psd_welch(raw, picks=picks, fmin=fmin, fmax=fmax,
                                tmin=tmin, tmax=tmax, n_fft=n_fft,
                                n_jobs=n_jobs, proj=proj)
        psds_all.append(psds)
        freqs_all.append(freqs)

    if stim_name:
        n_xplots = 2
        # get some infos
        # NOTE(review): 'raw' here is the last file read in the loop above;
        # events are assumed identical across the compared files - confirm.
        events = mne.find_events(raw, stim_channel=stim_name, consecutive=True)
    else:
        n_xplots = 1

    fig = plt.figure('denoising', figsize=(16, 6 * n_xplots))
    gs = grd.GridSpec(n_xplots, int(len(psds_all)))

    # loop across all filenames
    for idx in range(int(len(psds_all))):

        # ---------------------------------
        # plot power spectrum
        # ---------------------------------
        p1 = plt.subplot(gs[0, idx])

        # Convert PSDs to dB
        psds = 10 * np.log10(psds_all[idx])
        psd_mean = np.mean(psds, axis=0)
        if area_mode == 'std':
            psd_std = np.std(psds, axis=0)
            hyp_limits = (psd_mean - psd_std, psd_mean + psd_std)
        elif area_mode == 'range':
            hyp_limits = (np.min(psds, axis=0), np.max(psds, axis=0))
        else:  # area_mode is None
            hyp_limits = None

        p1.plot(freqs_all[idx], psd_mean, color=color)
        if hyp_limits is not None:
            p1.fill_between(freqs_all[idx], hyp_limits[0], y2=hyp_limits[1],
                            color=color, alpha=area_alpha)

        if idx == 0:
            p1.set_title(title1)
            # y-limits are fixed on the first (pre-denoising) plot and
            # reused for all later plots so the panels are comparable
            ylim = [np.min(psd_mean) - 10, np.max(psd_mean) + 10]
        else:
            p1.set_title(title2)

        p1.set_xlabel('Freq (Hz)')
        p1.set_ylabel('Power Spectral Density (dB/Hz)')
        p1.set_xlim(freqs_all[idx][0], freqs_all[idx][-1])
        p1.set_ylim(ylim[0], ylim[1])

        # ---------------------------------
        # plot signal around stimulus
        # onset
        # ---------------------------------
        if stim_name:
            raw = mne.io.Raw(fnraw[idx], preload=True)
            epochs = mne.Epochs(raw, events, event_id, proj=False,
                                tmin=tmin_stim, tmax=tmax_stim, picks=picks,
                                preload=True, baseline=(None, None))
            evoked = epochs.average()
            if idx == 0:
                # amplitude limits fixed on the first file, reused afterwards
                ymin = np.min(evoked.data)
                ymax = np.max(evoked.data)
            times = evoked.times * 1e3
            p2 = plt.subplot(gs[1, idx])
            p2.plot(times, evoked.data.T, 'blue', linewidth=0.5)
            p2.set_xlim(times[0], times[len(times) - 1])
            p2.set_ylim(1.1 * ymin, 1.1 * ymax)
            if (idx == 1) and info:
                plt.text(times[0], 0.9 * ymax, ' ICs: ' + str(info))

    # save image
    if fnout:
        fig.savefig(fnout + '.png', format='png')

    # show image if requested
    if show:
        plt.show()

    plt.close('denoising')
    plt.ion()
##################################################
#
# routine to detrend the data
#
##################################################
def perform_detrending(fname_raw, save=True):
    """Remove a linear trend from all magnetometer (and ref) channels.

    Parameters
    ----------
    fname_raw : str or list of str
        Raw-file name(s); resolved via get_files_from_list().
    save : bool
        If True, write the detrended data to '<name>,dt-raw.fif'.

    Returns
    -------
    raw : mne.io.Raw
        The detrended raw object (of the last file when a list is given).
    """
    from mne.io import Raw
    from numpy import poly1d, polyfit

    fnraw = get_files_from_list(fname_raw)

    # loop across all filenames
    for fname in fnraw:
        # read data in
        raw = Raw(fname, preload=True)
        # get channels
        picks = mne.pick_types(raw.info, meg='mag', ref_meg=True,
                               eeg=False, stim=False,
                               eog=False, exclude='bads')
        xval = np.arange(raw._data.shape[1])
        # fit and subtract a first-order polynomial per channel
        for ipick in picks:
            coeff = polyfit(xval, raw._data[ipick, :], deg=1)
            trend = poly1d(coeff)
            raw._data[ipick, :] -= trend(xval)

        # save detrended data
        if save:
            # BUGFIX: build the output name from the current file 'fname',
            # not from 'fname_raw' (which may be a list and has no rfind()).
            fnout = fname[:fname.rfind('-raw.fif')] + ',dt-raw.fif'
            raw.save(fnout, overwrite=True)

    return raw
##################################################
#
# Get indices of matching channel names from list
#
##################################################
def channel_indices_from_list(fulllist, findlist, excllist=None):
    """Get indices of matching channel names from list

    Parameters
    ----------
    fulllist: list of channel names
    findlist: list of (regexp) names to find
              regexp are resolved using mne.pick_channels_regexp()
    excllist: list of channel names to exclude,
              e.g., raw.info.get('bads')

    Returns
    -------
    chnpick: array with indices
    """
    chnpick = []
    for ir in range(len(findlist)):
        if findlist[ir].replace(' ', '').isalnum():
            # plain (non-regexp) name: exact lookup in fulllist
            try:
                chnpicktmp = ([fulllist.index(findlist[ir])])
                chnpick = np.array(np.concatenate((chnpick, chnpicktmp)), dtype=int)
            except ValueError:
                # list.index() raises ValueError when the name is absent;
                # the original bare 'except' also swallowed e.g. KeyboardInterrupt.
                print(">>>>> Channel '%s' not found." % findlist[ir])
        else:
            # name contains regexp characters: resolve via MNE
            chnpicktmp = (mne.pick_channels_regexp(fulllist, findlist[ir]))
            if len(chnpicktmp) == 0:
                print(">>>>> '%s' does not match any channel name." % findlist[ir])
            else:
                chnpick = np.array(np.concatenate((chnpick, chnpicktmp)), dtype=int)
    if len(chnpick) > 1:
        # Remove duplicates:
        chnpick = np.sort(np.array(list(set(np.sort(chnpick)))))

    if excllist is not None and len(excllist) > 0:
        exclinds = [fulllist.index(excllist[ie]) for ie in range(len(excllist))]
        chnpick = list(np.setdiff1d(chnpick, exclinds))
    return chnpick
##################################################
#
# Apply noise reduction to signal channels
# using reference channels.
#
##################################################
def noise_reducer(fname_raw, raw=None, signals=None, noiseref=None, detrending=None,
                  tmin=None, tmax=None, reflp=None, refhp=None, refnotch=None,
                  exclude_artifacts=True, checkresults=True, return_raw=False,
                  complementary_signal=False, fnout=None, verbose=False):
    """
    Apply noise reduction to signal channels using reference channels.

    Parameters
    ----------
    fname_raw : (list of) rawfile name(s)
    raw : mne Raw objects
        Allows passing of (preloaded) raw object in addition to fname_raw
        or solely (use fname_raw=None in this case).
    signals : list of string
        List of channels to compensate using noiseref.
        If empty use the meg signal channels.
    noiseref : list of string | str
        List of channels to use as noise reference.
        If empty use the magnetic reference channels (default).
        signals and noiseref may contain regexp, which are resolved
        using mne.pick_channels_regexp(). All other channels are copied.
    tmin : lower latency bound for weight-calc [start of trace]
    tmax : upper latency bound for weight-calc [ end of trace]
        Weights are calc'd for (tmin,tmax), but applied to entire data set
    refhp : high-pass frequency for reference signal filter [None]
    reflp : low-pass frequency for reference signal filter [None]
        reflp < refhp: band-stop filter
        reflp > refhp: band-pass filter
        reflp is not None, refhp is None: low-pass filter
        reflp is None, refhp is not None: high-pass filter
    refnotch : (list of) notch frequencies for reference signal filter [None]
        use raw(ref)-notched(ref) as reference signal
    exclude_artifacts: filter signal-channels thru _is_good() [True]
        (parameters are at present hard-coded!)
    return_raw : bool
        If return_raw is true, the raw object is returned and raw file
        is not written to disk unless fnout is explicitly specified.
        It is suggested that this option be used in cases where the
        noise_reducer is applied multiple times. [False]
    fnout : explicit specification for an output file name [None]
        Automatic filenames replace '-raw.fif' by ',nr-raw.fif'.
    complementary_signal : replaced signal by traces that would be
        subtracted [False]
        (can be useful for debugging)
    detrending: boolean to ctrl subtraction of linear trend from all
        magn. chans [False]
    checkresults : boolean to control internal checks and overall success
        [True]

    Outputfile
    ----------
    <wawa>,nr-raw.fif for input <wawa>-raw.fif

    Returns
    -------
    If return_raw is True, then mne.io.Raw instance is returned.

    Bugs
    ----
    - artifact checking is incomplete (and with arb. window of tstep=0.2s)
    - no accounting of channels used as signal/reference
    - non existing input file handled ungracefully
    """
    if type(complementary_signal) != bool:
        raise ValueError("Argument complementary_signal must be of type bool")

    # handle error if Raw object passed with file list
    if raw and isinstance(fname_raw, list):
        raise ValueError('List of file names cannot be combined with '
                         'one Raw object')

    # handle error if return_raw is requested with file list
    if return_raw and isinstance(fname_raw, list):
        raise ValueError('List of file names cannot be combined with return_raw. '
                         'Please pass one file at a time.')

    # handle error if Raw object is passed with detrending option
    # TODO include perform_detrending for Raw objects
    if raw and detrending:
        raise ValueError('Please perform detrending on the raw file directly. '
                         'Cannot perform detrending on the raw object')

    # Handle combinations of fname_raw and raw object:
    if fname_raw is not None:
        fnraw = get_files_from_list(fname_raw)
        have_input_file = True
    elif raw is not None:
        if 'filename' in raw.info:
            fnraw = [os.path.basename(raw.filenames[0])]
        else:
            fnraw = raw._filenames[0]
        warnings.warn('Setting file name from Raw object')
        have_input_file = False
        if fnout is None and not return_raw:
            raise ValueError('Refusing to waste resources without result')
    else:
        raise ValueError('Refusing Creatio ex nihilo')

    # loop across all filenames
    for fname in fnraw:
        if verbose:
            print("########## Read raw data:")
        # NOTE: time.clock() was removed in Python 3.8; perf_counter() is
        # the recommended monotonic CPU-side timer.
        tc0 = time.perf_counter()
        tw0 = time.time()
        if raw is None:
            if detrending:
                raw = perform_detrending(fname, save=False)
            else:
                raw = mne.io.Raw(fname, preload=True)
        else:
            # perform sanity check to make sure Raw object and file are same
            if 'filename' in raw.info:
                fnintern = [os.path.basename(raw.filenames[0])]
            else:
                fnintern = raw._filenames[0]
            if os.path.basename(fname) != os.path.basename(fnintern):
                warnings.warn('The file name within the Raw object and provided\n '
                              'fname are not the same. Please check again.')

        tc1 = time.perf_counter()
        tw1 = time.time()
        if verbose:
            print(">>> loading raw data took {:.1f} ms ({:.2f} s walltime)".format((1000. * (tc1 - tc0)), (tw1 - tw0)))

        # Time window selection
        # weights are calc'd based on [tmin,tmax], but applied to the entire data set.
        # tstep is used in artifact detection
        # tmin,tmax variables must not be changed here!
        if tmin is None:
            itmin = 0
        else:
            itmin = int(floor(tmin * raw.info['sfreq']))
        if tmax is None:
            itmax = raw.last_samp - raw.first_samp
        else:
            itmax = int(ceil(tmax * raw.info['sfreq']))

        if itmax - itmin < 2:
            raise ValueError("Time-window for noise compensation empty or too short")

        if verbose:
            print(">>> Set time-range to [%7.3f,%7.3f]" % \
                  (raw.times[itmin], raw.times[itmax]))

        if signals is None or len(signals) == 0:
            sigpick = mne.pick_types(raw.info, meg='mag', eeg=False, stim=False,
                                     eog=False, exclude='bads')
        else:
            sigpick = channel_indices_from_list(raw.info['ch_names'][:], signals,
                                                raw.info.get('bads'))
        nsig = len(sigpick)
        if nsig == 0:
            raise ValueError("No channel selected for noise compensation")

        if noiseref is None or len(noiseref) == 0:
            # References are not limited to 4D ref-chans, but can be anything,
            # incl. ECG or powerline monitor.
            if verbose:
                print(">>> Using all refchans.")
            refpick = mne.pick_types(raw.info, ref_meg=True, meg=False,
                                     eeg=False, stim=False,
                                     eog=False, exclude='bads')
        else:
            refpick = channel_indices_from_list(raw.info['ch_names'][:],
                                                noiseref, raw.info.get('bads'))
        nref = len(refpick)
        if nref == 0:
            raise ValueError("No channel selected as noise reference")

        if verbose:
            print(">>> sigpick: %3d chans, refpick: %3d chans" % (nsig, nref))
        badpick = np.intersect1d(sigpick, refpick, assume_unique=False)
        if len(badpick) > 0:
            raise Warning("Intersection of signal and reference channels not empty")

        if reflp is None and refhp is None and refnotch is None:
            use_reffilter = False
            use_refantinotch = False
        else:
            use_reffilter = True
            if verbose:
                print("########## Filter reference channels:")
            use_refantinotch = False
            if refnotch is not None:
                # BUGFIX: the original tested 'reflp' twice, so a refhp+refnotch
                # combination slipped through this sanity check.
                if reflp is not None or refhp is not None:
                    raise ValueError("Cannot specify notch- and high-/low-pass"
                                     "reference filter together")
                nyquist = (0.5 * raw.info['sfreq'])
                if isinstance(refnotch, list):
                    notchfrqs = refnotch
                else:
                    notchfrqs = [refnotch]
                notchfrqscln = []
                for nfrq in notchfrqs:
                    if not isinstance(nfrq, float) and not isinstance(nfrq, int):
                        raise ValueError("Illegal entry for notch-frequency (", nfrq, ")")
                    if nfrq >= nyquist:
                        warnings.warn('Ignoring notch frequency > 0.5*sample_rate=%.1fHz' % nyquist)
                    else:
                        notchfrqscln.append(nfrq)
                if len(notchfrqscln) == 0:
                    raise ValueError("Notch frequency list is (now) empty")
                use_refantinotch = True
                if verbose:
                    print(">>> notches at freq ", end=' ')
                    print(notchfrqscln)
            else:
                if verbose:
                    if reflp is not None:
                        print(">>> low-pass with cutoff-freq %.1f" % reflp)
                    if refhp is not None:
                        print(">>> high-pass with cutoff-freq %.1f" % refhp)

            # Adapt followg drop-chans cmd to use 'all-but-refpick'
            droplist = [raw.info['ch_names'][k] for k in range(raw.info['nchan']) if not k in refpick]
            tct = time.perf_counter()
            twt = time.time()
            fltref = raw.copy().drop_channels(droplist)
            if use_refantinotch:
                # reference = raw(ref) - notched(ref)
                rawref = raw.copy().drop_channels(droplist)
                fltref.notch_filter(notchfrqscln, fir_design='firwin',
                                    fir_window='hann', phase='zero',
                                    picks=np.array(list(range(nref))),
                                    method='fir')
                fltref._data = (rawref._data - fltref._data)
            else:
                fltref.filter(refhp, reflp, fir_design='firwin',
                              fir_window='hann', phase='zero',
                              picks=np.array(list(range(nref))),
                              method='fir')
            tc1 = time.perf_counter()
            tw1 = time.time()
            if verbose:
                print(">>> filtering ref-chans took {:.1f} ms ({:.2f} s walltime)".format((1000. * (tc1 - tct)),
                                                                                          (tw1 - twt)))

        if verbose:
            print("########## Calculating sig-ref/ref-ref-channel covariances:")
        # Calculate sig-ref/ref-ref-channel covariance:
        # (there is no need to calc inter-signal-chan cov,
        #  but there seems to be no appropriat fct available)
        # Here we copy the idea from compute_raw_data_covariance()
        # and truncate it as appropriate.
        tct = time.perf_counter()
        twt = time.time()
        # The following reject and infosig entries are only
        # used in _is_good-calls.
        # _is_good() from mne-0.9.git-py2.7.egg/mne/epochs.py seems to
        # ignore ref-channels (not covered by dict) and checks individual
        # data segments - artifacts across a buffer boundary are not found.
        reject = dict(grad=4000e-13,  # T / m (gradiometers)
                      mag=4e-12,      # T (magnetometers)
                      eeg=40e-6,      # uV (EEG channels)
                      eog=250e-6)     # uV (EOG channels)
        infosig = copy.copy(raw.info)
        infosig['chs'] = [raw.info['chs'][k] for k in sigpick]
        # the below fields are *NOT* (190103) updated automatically when 'chs' is updated
        infosig['ch_names'] = [raw.info['ch_names'][k] for k in sigpick]
        infosig['nchan'] = len(sigpick)
        idx_by_typesig = channel_indices_by_type(infosig)

        # Read data in chunks:
        tstep = 0.2
        itstep = int(ceil(tstep * raw.info['sfreq']))
        sigmean = 0
        refmean = 0
        sscovdata = 0
        srcovdata = 0
        rrcovdata = 0
        n_samples = 0

        for first in range(itmin, itmax, itstep):
            last = first + itstep
            if last >= itmax:
                last = itmax
            raw_segmentsig, times = raw[sigpick, first:last]
            if use_reffilter:
                raw_segmentref, times = fltref[:, first:last]
            else:
                raw_segmentref, times = raw[refpick, first:last]

            if not exclude_artifacts or \
               _is_good(raw_segmentsig, infosig['ch_names'], idx_by_typesig, reject, flat=None,
                        ignore_chs=raw.info['bads']):
                sigmean += raw_segmentsig.sum(axis=1)
                refmean += raw_segmentref.sum(axis=1)
                sscovdata += (raw_segmentsig * raw_segmentsig).sum(axis=1)
                srcovdata += np.dot(raw_segmentsig, raw_segmentref.T)
                rrcovdata += np.dot(raw_segmentref, raw_segmentref.T)
                n_samples += raw_segmentsig.shape[1]
            else:
                logger.info("Artefact detected in [%d, %d]" % (first, last))
        if n_samples <= 1:
            raise ValueError('Too few samples to calculate weights')
        # convert sums to (co)variances (unbiased, hence n-1)
        sigmean /= n_samples
        refmean /= n_samples
        sscovdata -= n_samples * sigmean[:] * sigmean[:]
        sscovdata /= (n_samples - 1)
        srcovdata -= n_samples * sigmean[:, None] * refmean[None, :]
        srcovdata /= (n_samples - 1)
        rrcovdata -= n_samples * refmean[:, None] * refmean[None, :]
        rrcovdata /= (n_samples - 1)
        sscovinit = np.copy(sscovdata)
        if verbose:
            print(">>> Normalize srcov...")

        rrslope = copy.copy(rrcovdata)
        for iref in range(nref):
            dtmp = rrcovdata[iref, iref]
            if dtmp > TINY:
                srcovdata[:, iref] /= dtmp
                rrslope[:, iref] /= dtmp
            else:
                srcovdata[:, iref] = 0.
                rrslope[:, iref] = 0.

        if verbose:
            print(">>> Number of samples used : %d" % n_samples)
            tc1 = time.perf_counter()
            tw1 = time.time()
            print(">>> sigrefchn covar-calc took %.1f ms (%.2f s walltime)" % (1000. * (tc1 - tct), (tw1 - twt)))

        if checkresults:
            if verbose:
                print("########## Calculated initial signal channel covariance:")
                # Calculate initial signal channel covariance:
                # (only used as quality measure)
                print(">>> initl rt(avg sig pwr) = %12.5e" % np.sqrt(np.mean(sscovdata)))
                for i in range(min(5, nsig)):
                    print(">>> initl signal-rms[%3d] = %12.5e" % (i, np.sqrt(sscovdata.flatten()[i])))
                for i in range(max(0, nsig - 5), nsig):
                    print(">>> initl signal-rms[%3d] = %12.5e" % (i, np.sqrt(sscovdata.flatten()[i])))
                print(">>>")

        U, s, V = np.linalg.svd(rrslope, full_matrices=True)
        if verbose:
            print(">>> singular values:")
            print(s)
            print(">>> Applying cutoff for smallest SVs:")

        dtmp = s.max() * SVD_RELCUTOFF
        s *= (abs(s) >= dtmp)
        sinv = [1. / s[k] if s[k] != 0. else 0. for k in range(nref)]
        if verbose:
            print(">>> singular values (after cutoff):")
            print(s)

        stat = np.allclose(rrslope, np.dot(U, np.dot(np.diag(s), V)))
        if verbose:
            print(">>> Testing svd-result: %s" % stat)
            if not stat:
                print("    (Maybe due to SV-cutoff?)")

        # Solve for inverse coefficients:
        # Set RRinv.tr=U diag(sinv) V
        RRinv = np.transpose(np.dot(U, np.dot(np.diag(sinv), V)))
        if checkresults:
            stat = np.allclose(np.identity(nref), np.dot(RRinv, rrslope))
            if stat:
                if verbose:
                    print(">>> Testing RRinv-result (should be unit-matrix): ok")
            else:
                print(">>> Testing RRinv-result (should be unit-matrix): failed")
                print(np.transpose(np.dot(RRinv, rrslope)))
                print(">>>")

        if verbose:
            print("########## Calc weight matrix...")

        # weights-matrix will be somewhat larger than necessary,
        # (to simplify indexing in compensation loop):
        weights = np.zeros((raw._data.shape[0], nref))
        for isig in range(nsig):
            for iref in range(nref):
                weights[sigpick[isig], iref] = np.dot(srcovdata[isig, :], RRinv[:, iref])

        if verbose:
            print("########## Compensating signal channels:")
            if complementary_signal:
                print(">>> Caveat: REPLACING signal by compensation signal")

        tct = time.perf_counter()
        twt = time.time()

        # Work on entire data stream:
        for isl in range(raw._data.shape[1]):
            # renamed from 'slice' - that shadowed the builtin
            dslice = np.take(raw._data, [isl], axis=1)
            if use_reffilter:
                refslice = np.take(fltref._data, [isl], axis=1)
                refarr = refslice[:].flatten() - refmean
                # refarr = fltres[:,isl]-refmean
            else:
                refarr = dslice[refpick].flatten() - refmean
            subrefarr = np.dot(weights[:], refarr)

            if not complementary_signal:
                raw._data[:, isl] -= subrefarr
            else:
                raw._data[:, isl] = subrefarr

            if (isl % 10000 == 0 or isl + 1 == raw._data.shape[1]) and verbose:
                print("\rProcessed slice %6d" % isl, end=" ")
                sys.stdout.flush()

        if verbose:
            print("\nDone.")
            tc1 = time.perf_counter()
            tw1 = time.time()
            print(">>> compensation loop took {:.1f} ms ({:.2f} s walltime)".format((1000. * (tc1 - tct)), (tw1 - twt)))

        if checkresults:
            if verbose:
                print("########## Calculating final signal channel covariance:")
            # Calculate final signal channel covariance:
            # (only used as quality measure)
            tct = time.perf_counter()
            twt = time.time()
            sigmean = 0
            sscovdata = 0
            n_samples = 0
            for first in range(itmin, itmax, itstep):
                last = first + itstep
                if last >= itmax:
                    last = itmax
                raw_segmentsig, times = raw[sigpick, first:last]
                # Artifacts found here will probably differ from pre-noisered artifacts!
                if not exclude_artifacts or \
                   _is_good(raw_segmentsig, infosig['ch_names'], idx_by_typesig, reject,
                            flat=None, ignore_chs=raw.info['bads']):
                    sigmean += raw_segmentsig.sum(axis=1)
                    sscovdata += (raw_segmentsig * raw_segmentsig).sum(axis=1)
                    n_samples += raw_segmentsig.shape[1]
            if n_samples <= 1:
                raise ValueError('Too few samples to calculate final signal channel covariance')
            sigmean /= n_samples
            sscovdata -= n_samples * sigmean[:] * sigmean[:]
            sscovdata /= (n_samples - 1)
            if verbose:
                print(">>> no channel got worse: %s" % str(np.all(np.less_equal(sscovdata, sscovinit))))
                print(">>> final rt(avg sig pwr) = %12.5e" % np.sqrt(np.mean(sscovdata)))
                for i in range(min(5, nsig)):
                    print(">>> final signal-rms[%3d] = %12.5e" % (i, np.sqrt(sscovdata.flatten()[i])))
                # for i in range(min(5,nsig),max(0,nsig-5)):
                #     print(">>> final signal-rms[%3d] = %12.5e" % (i, np.sqrt(sscovdata.flatten()[i])))
                for i in range(max(0, nsig - 5), nsig):
                    print(">>> final signal-rms[%3d] = %12.5e" % (i, np.sqrt(sscovdata.flatten()[i])))
                tc1 = time.perf_counter()
                tw1 = time.time()
                print(">>> signal covar-calc took {:.1f} ms ({:.2f} s walltime)".format((1000. * (tc1 - tct)),
                                                                                        (tw1 - twt)))
                print(">>>")

        if fnout is not None:
            fnoutloc = fnout
        elif return_raw:
            fnoutloc = None
        elif have_input_file:
            fnoutloc = fname[:fname.rfind('-raw.fif')] + ',nr-raw.fif'
        else:
            fnoutloc = None

        if fnoutloc is not None:
            if verbose:
                print(">>> Saving '%s'..." % fnoutloc)
            raw.save(fnoutloc, overwrite=True)

        tc1 = time.perf_counter()
        tw1 = time.time()
        if verbose:
            print(">>> Total run took {:.1f} ms ({:.2f} s walltime)".format((1000. * (tc1 - tc0)), (tw1 - tw0)))

        if return_raw:
            if verbose:
                print(">>> Returning raw object...")
            return raw
##################################################
#
# routine to test if the noise reducer is
# working properly
#
##################################################
def test_noise_reducer():
    """Exercise noise_reducer() on an empty-room file and cross-check the math.

    Runs the library noise_reducer() once, then re-implements the whole
    weight computation inline twice — once with plain accumulation and once
    with Welford's numerically-robust update — compares the two covariance
    estimates, applies the compensation weights to the raw data, and saves
    the result next to the input file.

    Requires the environment variables SUBJECTS_DIR and SUBJECT and the
    hard-coded empty-room FIF file below; raises KeyError / IOError if they
    are missing.  Prints diagnostics throughout; returns None.
    """
    data_path = os.environ['SUBJECTS_DIR']
    subject = os.environ['SUBJECT']  # required env var; read kept so a missing SUBJECT fails early
    dname = data_path + '/' + 'empty_room_files' + '/109925_empty_room_file-raw.fif'
    #
    checkresults = True
    exclart = False
    use_reffilter = True
    refflt_lpfreq = 52.
    refflt_hpfreq = 48.
    print("########## before of noisereducer call ##########")
    sigchanlist = ['MEG ..1', 'MEG ..3', 'MEG ..5', 'MEG ..7', 'MEG ..9']
    # sigchanlist = None
    refchanlist = ['RFM 001', 'RFM 003', 'RFM 005', 'RFG ...']
    tmin = 15.
    # Warm read of the input file; result intentionally unused but kept so a
    # broken file fails here rather than inside noise_reducer().
    inraw = mne.io.Raw(dname, preload=True)
    noise_reducer(dname, raw=None, signals=sigchanlist, noiseref=refchanlist, tmin=tmin,
                  reflp=refflt_lpfreq, refhp=refflt_hpfreq, fnout=None,
                  exclude_artifacts=exclart, verbose=True, return_raw=False)
    print("########## behind of noisereducer call ##########")

    print("########## Read raw data:")
    # time.clock() was removed in Python 3.8; perf_counter() is the
    # documented replacement for interval timing.
    tc0 = time.perf_counter()
    tw0 = time.time()
    raw = mne.io.Raw(dname, preload=True)
    tc1 = time.perf_counter()
    tw1 = time.time()
    print("loading raw data took %.1f ms (%.2f s walltime)" % (1000. * (tc1 - tc0), (tw1 - tw0)))

    # Time window selection
    # weights are calc'd based on [tmin,tmax], but applied to the entire data set.
    # tstep is used in artifact detection
    tmax = raw.times[raw.last_samp]
    tstep = 0.2
    itmin = int(floor(tmin * raw.info['sfreq']))
    itmax = int(ceil(tmax * raw.info['sfreq']))
    itstep = int(ceil(tstep * raw.info['sfreq']))
    print(">>> Set time-range to [%7.3f,%7.3f]" % (tmin, tmax))

    # Resolve signal channels (either all magnetometers or an explicit list).
    if sigchanlist is None:
        sigpick = mne.pick_types(raw.info, meg='mag', eeg=False, stim=False, eog=False, exclude='bads')
    else:
        sigpick = channel_indices_from_list(raw.info['ch_names'][:], sigchanlist)
    nsig = len(sigpick)
    print("sigpick: %3d chans" % nsig)
    if nsig == 0:
        raise ValueError("No channel selected for noise compensation")

    # Resolve reference channels.
    if refchanlist is None:
        # References are not limited to 4D ref-chans, but can be anything,
        # incl. ECG or powerline monitor.
        print(">>> Using all refchans.")
        refexclude = "bads"
        refpick = mne.pick_types(raw.info, ref_meg=True, meg=False, eeg=False,
                                 stim=False, eog=False, exclude=refexclude)
    else:
        refpick = channel_indices_from_list(raw.info['ch_names'][:], refchanlist)
        print("refpick = '%s'" % refpick)
    nref = len(refpick)
    print("refpick: %3d chans" % nref)
    if nref == 0:
        raise ValueError("No channel selected as noise reference")

    print("########## Refchan geo data:")
    # This is just for info to locate special 4D-refs.
    for iref in refpick:
        print(raw.info['chs'][iref]['ch_name'], raw.info['chs'][iref]['loc'][0:3])
    print("")

    if use_reffilter:
        # Band-limit the reference channels before estimating covariances.
        print("########## Filter reference channels:")
        if refflt_lpfreq is not None:
            print(" low-pass with cutoff-freq %.1f" % refflt_lpfreq)
        if refflt_hpfreq is not None:
            print("high-pass with cutoff-freq %.1f" % refflt_hpfreq)
        # Adapt followg drop-chans cmd to use 'all-but-refpick'
        droplist = [raw.info['ch_names'][k] for k in range(raw.info['nchan']) if not k in refpick]
        fltref = raw.copy().drop_channels(droplist)
        tct = time.perf_counter()
        twt = time.time()
        fltref.filter(refflt_hpfreq, refflt_lpfreq, picks=np.array(list(range(nref))), method='fft')
        tc1 = time.perf_counter()
        tw1 = time.time()
        print("filtering ref-chans took %.1f ms (%.2f s walltime)" % (1000. * (tc1 - tct), (tw1 - twt)))

    print("########## Calculating sig-ref/ref-ref-channel covariances:")
    # Calculate sig-ref/ref-ref-channel covariance:
    # (there is no need to calc inter-signal-chan cov,
    #  but there seems to be no appropriat fct available)
    # Here we copy the idea from compute_raw_data_covariance()
    # and truncate it as appropriate.
    tct = time.perf_counter()
    twt = time.time()
    # The following reject and info{sig,ref} entries are only
    # used in _is_good-calls.
    # _is_good() from mne-0.9.git-py2.7.egg/mne/epochs.py seems to
    # ignore ref-channels (not covered by dict) and checks individual
    # data segments - artifacts across a buffer boundary are not found.
    reject = dict(grad=4000e-13,  # T / m (gradiometers)
                  mag=4e-12,  # T (magnetometers)
                  eeg=40e-6,  # uV (EEG channels)
                  eog=250e-6)  # uV (EOG channels)
    infosig = copy.copy(raw.info)
    infosig['chs'] = [raw.info['chs'][k] for k in sigpick]
    # the below fields are *NOT* (190103) updated automatically when 'chs' is updated
    infosig['ch_names'] = [raw.info['ch_names'][k] for k in sigpick]
    infosig['nchan'] = len(sigpick)
    idx_by_typesig = channel_indices_by_type(infosig)
    # inforef not good w/ filtering, but anyway useless
    inforef = copy.copy(raw.info)
    inforef['chs'] = [raw.info['chs'][k] for k in refpick]
    # 'ch_names' and 'nchan' updated automatically when 'chs' is updated
    idx_by_typeref = channel_indices_by_type(inforef)

    # Read data in chunks and accumulate plain (one-pass) statistics:
    sigmean = 0
    refmean = 0
    sscovdata = 0
    srcovdata = 0
    rrcovdata = 0
    n_samples = 0
    for first in range(itmin, itmax, itstep):
        last = first + itstep
        if last >= itmax:
            last = itmax
        raw_segmentsig, times = raw[sigpick, first:last]
        if use_reffilter:
            raw_segmentref, times = fltref[:, first:last]
        else:
            raw_segmentref, times = raw[refpick, first:last]
        # if True:
        # if _is_good(raw_segmentsig, infosig['ch_names'], idx_by_typesig, reject, flat=None,
        #            ignore_chs=raw.info['bads']) and _is_good(raw_segmentref,
        #              inforef['ch_names'], idx_by_typeref, reject, flat=None,
        #                ignore_chs=raw.info['bads']):
        if not exclart or \
           _is_good(raw_segmentsig, infosig['ch_names'], idx_by_typesig, reject,
                    flat=None, ignore_chs=raw.info['bads']):
            sigmean += raw_segmentsig.sum(axis=1)
            refmean += raw_segmentref.sum(axis=1)
            sscovdata += (raw_segmentsig * raw_segmentsig).sum(axis=1)
            srcovdata += np.dot(raw_segmentsig, raw_segmentref.T)
            rrcovdata += np.dot(raw_segmentref, raw_segmentref.T)
            n_samples += raw_segmentsig.shape[1]
        else:
            logger.info("Artefact detected in [%d, %d]" % (first, last))

    # _check_n_samples(n_samples, len(picks))
    if n_samples <= 1:
        raise ValueError('Too few samples to calculate covariances')
    # Convert raw sums into (co)variances (sample, i.e. n-1, normalization).
    sigmean /= n_samples
    refmean /= n_samples
    sscovdata -= n_samples * sigmean[:] * sigmean[:]
    sscovdata /= (n_samples - 1)
    srcovdata -= n_samples * sigmean[:, None] * refmean[None, :]
    srcovdata /= (n_samples - 1)
    rrcovdata -= n_samples * refmean[:, None] * refmean[None, :]
    rrcovdata /= (n_samples - 1)
    sscovinit = sscovdata  # kept for the "no channel got worse" check at the end
    print("Normalize srcov...")
    # Column-normalize by the ref-channel variances (skip near-zero columns).
    rrslopedata = copy.copy(rrcovdata)
    for iref in range(nref):
        dtmp = rrcovdata[iref][iref]
        if dtmp > TINY:
            for isig in range(nsig):
                srcovdata[isig][iref] /= dtmp
            for jref in range(nref):
                rrslopedata[jref][iref] /= dtmp
        else:
            for isig in range(nsig):
                srcovdata[isig][iref] = 0.
            for jref in range(nref):
                rrslopedata[jref][iref] = 0.
    logger.info("Number of samples used : %d" % n_samples)
    tc1 = time.perf_counter()
    tw1 = time.time()
    print("sigrefchn covar-calc took %.1f ms (%.2f s walltime)" % (1000. * (tc1 - tct), (tw1 - twt)))

    print("########## Calculating sig-ref/ref-ref-channel covariances (robust):")
    # Calculate sig-ref/ref-ref-channel covariance:
    # (usg B.P.Welford, "Note on a method for calculating corrected sums
    #                   of squares and products", Technometrics4 (1962) 419-420)
    # (there is no need to calc inter-signal-chan cov,
    #  but there seems to be no appropriat fct available)
    # Here we copy the idea from compute_raw_data_covariance()
    # and truncate it as appropriate.
    tct = time.perf_counter()
    twt = time.time()
    # The following reject and info{sig,ref} entries are only
    # used in _is_good-calls.
    # _is_good() from mne-0.9.git-py2.7.egg/mne/epochs.py seems to
    # ignore ref-channels (not covered by dict) and checks individual
    # data segments - artifacts across a buffer boundary are not found.
    reject = dict(grad=4000e-13,  # T / m (gradiometers)
                  mag=4e-12,  # T (magnetometers)
                  eeg=40e-6,  # uV (EEG channels)
                  eog=250e-6)  # uV (EOG channels)
    infosig = copy.copy(raw.info)
    infosig['chs'] = [raw.info['chs'][k] for k in sigpick]
    # the below fields are *NOT* (190103) updated automatically when 'chs' is updated
    infosig['ch_names'] = [raw.info['ch_names'][k] for k in sigpick]
    infosig['nchan'] = len(sigpick)
    idx_by_typesig = channel_indices_by_type(infosig)
    # inforef not good w/ filtering, but anyway useless
    inforef = copy.copy(raw.info)
    inforef['chs'] = [raw.info['chs'][k] for k in refpick]
    # 'ch_names' and 'nchan' updated automatically when 'chs' is updated
    idx_by_typeref = channel_indices_by_type(inforef)

    # Read data in chunks, Welford running means/covariances:
    smean = np.zeros(nsig)
    smold = np.zeros(nsig)
    rmean = np.zeros(nref)
    rmold = np.zeros(nref)
    sscov = 0
    rrcov = np.zeros((nref, nref))
    srcov = np.zeros((nsig, nref))
    n_samples = 0
    for first in range(itmin, itmax, itstep):
        last = first + itstep
        if last >= itmax:
            last = itmax
        raw_segmentsig, times = raw[sigpick, first:last]
        if use_reffilter:
            raw_segmentref, times = fltref[:, first:last]
        else:
            raw_segmentref, times = raw[refpick, first:last]
        # if True:
        # if _is_good(raw_segmentsig, infosig['ch_names'], idx_by_typesig, reject, flat=None,
        #            ignore_chs=raw.info['bads']) and _is_good(raw_segmentref,
        #              inforef['ch_names'], idx_by_typeref, reject, flat=None,
        #                ignore_chs=raw.info['bads']):
        if not exclart or \
           _is_good(raw_segmentsig, infosig['ch_names'], idx_by_typesig, reject,
                    flat=None, ignore_chs=raw.info['bads']):
            for isl in range(raw_segmentsig.shape[1]):
                nsl = isl + n_samples + 1
                cnslm1dnsl = float((nsl - 1)) / float(nsl)
                sslsubmean = (raw_segmentsig[:, isl] - smold)
                rslsubmean = (raw_segmentref[:, isl] - rmold)
                smean = smold + sslsubmean / float(nsl)
                rmean = rmold + rslsubmean / float(nsl)
                sscov += sslsubmean * (raw_segmentsig[:, isl] - smean)
                srcov += cnslm1dnsl * np.dot(sslsubmean.reshape((nsig, 1)), rslsubmean.reshape((1, nref)))
                rrcov += cnslm1dnsl * np.dot(rslsubmean.reshape((nref, 1)), rslsubmean.reshape((1, nref)))
                smold = smean
                rmold = rmean
            n_samples += raw_segmentsig.shape[1]
        else:
            logger.info("Artefact detected in [%d, %d]" % (first, last))

    # _check_n_samples(n_samples, len(picks))
    if n_samples <= 1:
        raise ValueError('Too few samples to calculate covariances')
    sscov /= (n_samples - 1)
    srcov /= (n_samples - 1)
    rrcov /= (n_samples - 1)
    print("Normalize srcov...")
    # Same column normalization as above, on the robust estimates.
    rrslope = copy.copy(rrcov)
    for iref in range(nref):
        dtmp = rrcov[iref][iref]
        if dtmp > TINY:
            srcov[:, iref] /= dtmp
            rrslope[:, iref] /= dtmp
        else:
            srcov[:, iref] = 0.
            rrslope[:, iref] = 0.
    logger.info("Number of samples used : %d" % n_samples)

    # Cross-check plain vs. Welford results.
    print("Compare results with 'standard' values:")
    print("cmp(sigmean,smean):", np.allclose(smean, sigmean, atol=0.))
    print("cmp(refmean,rmean):", np.allclose(rmean, refmean, atol=0.))
    print("cmp(sscovdata,sscov):", np.allclose(sscov, sscovdata, atol=0.))
    print("cmp(srcovdata,srcov):", np.allclose(srcov, srcovdata, atol=0.))
    print("cmp(rrcovdata,rrcov):", np.allclose(rrcov, rrcovdata, atol=0.))
    tc1 = time.perf_counter()
    tw1 = time.time()
    print("sigrefchn covar-calc took %.1f ms (%.2f s walltime)" % (1000. * (tc1 - tct), (tw1 - twt)))

    if checkresults:
        print("########## Calculated initial signal channel covariance:")
        # Calculate initial signal channel covariance:
        # (only used as quality measure)
        print("initl rt(avg sig pwr) = %12.5e" % np.sqrt(np.mean(sscov)))
        for i in range(min(5, nsig)):
            print("initl signal-rms[%3d] = %12.5e" % (i, np.sqrt(sscov.flatten()[i])))
        print(" ")
    if nref < 6:
        print("rrslope-entries:")
        for i in range(nref):
            print(rrslope[i][:])

    # Invert rrslope via SVD with a relative cutoff on small singular values.
    U, s, V = np.linalg.svd(rrslope, full_matrices=True)
    print(s)

    print("Applying cutoff for smallest SVs:")
    dtmp = s.max() * SVD_RELCUTOFF
    sinv = np.zeros(nref)
    for i in range(nref):
        if abs(s[i]) >= dtmp:
            sinv[i] = 1. / s[i]
        else:
            s[i] = 0.
    # s *= (abs(s)>=dtmp)
    # sinv = ???
    print(s)
    stat = np.allclose(rrslope, np.dot(U, np.dot(np.diag(s), V)))
    print(">>> Testing svd-result: %s" % stat)
    if not stat:
        print(" (Maybe due to SV-cutoff?)")

    # Solve for inverse coefficients:
    print(">>> Setting RRinvtr=U diag(sinv) V")
    RRinvtr = np.dot(U, np.dot(np.diag(sinv), V))
    if checkresults:
        # print(">>> RRinvtr-result:")
        # print(RRinvtr)
        stat = np.allclose(np.identity(nref), np.dot(rrslope.transpose(), RRinvtr))
        if stat:
            print(">>> Testing RRinvtr-result (shld be unit-matrix): ok")
        else:
            print(">>> Testing RRinvtr-result (shld be unit-matrix): failed")
            print(np.dot(rrslope.transpose(), RRinvtr))
            # np.less_equal(np.abs(np.dot(rrslope.transpose(),RRinvtr)-np.identity(nref)),0.01*np.ones((nref,nref)))
    print("")

    print("########## Calc weight matrix...")
    # weights-matrix will be somewhat larger than necessary,
    # (to simplify indexing in compensation loop):
    weights = np.zeros((raw._data.shape[0], nref))
    for isig in range(nsig):
        for iref in range(nref):
            weights[sigpick[isig]][iref] = np.dot(srcov[isig][:], RRinvtr[iref][:])

    if np.allclose(np.zeros(weights.shape), np.abs(weights), atol=1.e-8):
        print(">>> all weights are small (<=1.e-8).")
    else:
        print(">>> largest weight %12.5e" % np.max(np.abs(weights)))
        wlrg = np.where(np.abs(weights) >= 0.99 * np.max(np.abs(weights)))
        for iwlrg in range(len(wlrg[0])):
            print(">>> weights[%3d,%2d] = %12.5e" % (wlrg[0][iwlrg], wlrg[1][iwlrg],
                                                     weights[wlrg[0][iwlrg], wlrg[1][iwlrg]]))
    if nref < 5:
        print("weights-entries for first sigchans:")
        for i in range(min(5, nsig)):
            print('weights[sp(%2d)][r]=[' % i + ' '.join([' %+10.7f' % val for val in weights[sigpick[i]][:]]) + ']')

    print("########## Compensating signal channels:")
    tct = time.perf_counter()
    twt = time.time()
    # data,times = raw[:,raw.time_as_index(tmin)[0]:raw.time_as_index(tmax)[0]:]
    # Work on entire data stream:
    for isl in range(raw._data.shape[1]):
        # 'dat_slice' instead of 'slice' to avoid shadowing the builtin.
        dat_slice = np.take(raw._data, [isl], axis=1)
        if use_reffilter:
            refslice = np.take(fltref._data, [isl], axis=1)
            refarr = refslice[:].flatten() - rmean
            # refarr = fltres[:,isl]-rmean
        else:
            refarr = dat_slice[refpick].flatten() - rmean
        subrefarr = np.dot(weights[:], refarr)
        # data[:,isl] -= subrefarr   will not modify raw._data?
        raw._data[:, isl] -= subrefarr
        if isl % 10000 == 0:
            print("\rProcessed slice %6d" % isl)
    print("\nDone.")
    tc1 = time.perf_counter()
    tw1 = time.time()
    print("compensation loop took %.1f ms (%.2f s walltime)" % (1000. * (tc1 - tct), (tw1 - twt)))

    if checkresults:
        print("########## Calculating final signal channel covariance:")
        # Calculate final signal channel covariance:
        # (only used as quality measure)
        tct = time.perf_counter()
        twt = time.time()
        sigmean = 0
        sscovdata = 0
        n_samples = 0
        for first in range(itmin, itmax, itstep):
            last = first + itstep
            if last >= itmax:
                last = itmax
            raw_segmentsig, times = raw[sigpick, first:last]
            # Artifacts found here will probably differ from pre-noisered artifacts!
            if not exclart or \
               _is_good(raw_segmentsig, infosig['ch_names'], idx_by_typesig, reject,
                        flat=None, ignore_chs=raw.info['bads']):
                sigmean += raw_segmentsig.sum(axis=1)
                sscovdata += (raw_segmentsig * raw_segmentsig).sum(axis=1)
                n_samples += raw_segmentsig.shape[1]
        if n_samples <= 1:
            raise ValueError('Too few samples to calculate final signal channel covariances')
        sigmean /= n_samples
        sscovdata -= n_samples * sigmean[:] * sigmean[:]
        sscovdata /= (n_samples - 1)
        print(">>> no channel got worse: ", np.all(np.less_equal(sscovdata, sscovinit)))
        print("final rt(avg sig pwr) = %12.5e" % np.sqrt(np.mean(sscovdata)))
        for i in range(min(5, nsig)):
            print("final signal-rms[%3d] = %12.5e" % (i, np.sqrt(sscovdata.flatten()[i])))
        tc1 = time.perf_counter()
        tw1 = time.time()
        print("signal covar-calc took %.1f ms (%.2f s walltime)" % (1000. * (tc1 - tct), (tw1 - twt)))
        print(" ")

    nrname = dname[:dname.rfind('-raw.fif')] + ',nold-raw.fif'
    print("Saving '%s'..." % nrname)
    raw.save(nrname, overwrite=True)
    tc1 = time.perf_counter()
    tw1 = time.time()
    print("Total run took %.1f ms (%.2f s walltime)" % (1000. * (tc1 - tc0), (tw1 - tw0)))
|
[
"numpy.abs",
"numpy.polyfit",
"mne.pick_types",
"mne.io.Raw",
"numpy.allclose",
"mne.epochs._is_good",
"mne.find_events",
"matplotlib.pyplot.figure",
"numpy.linalg.svd",
"numpy.mean",
"numpy.arange",
"sys.stdout.flush",
"numpy.diag",
"builtins.range",
"mne.utils.logger.info",
"numpy.copy",
"numpy.std",
"matplotlib.pyplot.close",
"numpy.identity",
"time.clock",
"numpy.max",
"numpy.intersect1d",
"numpy.log10",
"numpy.less_equal",
"matplotlib.pyplot.show",
"math.ceil",
"os.path.basename",
"jumeg.jumeg_utils.get_files_from_list",
"matplotlib.pyplot.ion",
"numpy.min",
"numpy.sort",
"numpy.dot",
"builtins.str",
"mne.io.pick.channel_indices_by_type",
"numpy.concatenate",
"matplotlib.pyplot.subplot",
"numpy.poly1d",
"mne.time_frequency.psd_welch",
"copy.copy",
"numpy.zeros",
"math.floor",
"time.time",
"numpy.setdiff1d",
"mne.Epochs",
"numpy.take",
"mne.pick_channels_regexp",
"warnings.warn"
] |
[((4687, 4717), 'jumeg.jumeg_utils.get_files_from_list', 'get_files_from_list', (['fname_raw'], {}), '(fname_raw)\n', (4706, 4717), False, 'from jumeg.jumeg_utils import get_files_from_list\n'), ((5704, 5755), 'matplotlib.pyplot.figure', 'plt.figure', (['"""denoising"""'], {'figsize': '(16, 6 * n_xplots)'}), "('denoising', figsize=(16, 6 * n_xplots))\n", (5714, 5755), True, 'import matplotlib.pyplot as plt\n'), ((8138, 8160), 'matplotlib.pyplot.close', 'plt.close', (['"""denoising"""'], {}), "('denoising')\n", (8147, 8160), True, 'import matplotlib.pyplot as plt\n'), ((8165, 8174), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (8172, 8174), True, 'import matplotlib.pyplot as plt\n'), ((8438, 8468), 'jumeg.jumeg_utils.get_files_from_list', 'get_files_from_list', (['fname_raw'], {}), '(fname_raw)\n', (8457, 8468), False, 'from jumeg.jumeg_utils import get_files_from_list\n'), ((33164, 33195), 'mne.io.Raw', 'mne.io.Raw', (['dname'], {'preload': '(True)'}), '(dname, preload=True)\n', (33174, 33195), False, 'import mne\n'), ((33677, 33689), 'time.clock', 'time.clock', ([], {}), '()\n', (33687, 33689), False, 'import time\n'), ((33700, 33711), 'time.time', 'time.time', ([], {}), '()\n', (33709, 33711), False, 'import time\n'), ((33722, 33753), 'mne.io.Raw', 'mne.io.Raw', (['dname'], {'preload': '(True)'}), '(dname, preload=True)\n', (33732, 33753), False, 'import mne\n'), ((33764, 33776), 'time.clock', 'time.clock', ([], {}), '()\n', (33774, 33776), False, 'import time\n'), ((33787, 33798), 'time.time', 'time.time', ([], {}), '()\n', (33796, 33798), False, 'import time\n'), ((36734, 36746), 'time.clock', 'time.clock', ([], {}), '()\n', (36744, 36746), False, 'import time\n'), ((36757, 36768), 'time.time', 'time.time', ([], {}), '()\n', (36766, 36768), False, 'import time\n'), ((37293, 37312), 'copy.copy', 'copy.copy', (['raw.info'], {}), '(raw.info)\n', (37302, 37312), False, 'import copy\n'), ((37584, 37616), 'mne.io.pick.channel_indices_by_type', 
'channel_indices_by_type', (['infosig'], {}), '(infosig)\n', (37607, 37616), False, 'from mne.io.pick import channel_indices_by_type\n'), ((37688, 37707), 'copy.copy', 'copy.copy', (['raw.info'], {}), '(raw.info)\n', (37697, 37707), False, 'import copy\n'), ((37861, 37893), 'mne.io.pick.channel_indices_by_type', 'channel_indices_by_type', (['inforef'], {}), '(inforef)\n', (37884, 37893), False, 'from mne.io.pick import channel_indices_by_type\n'), ((38043, 38070), 'builtins.range', 'range', (['itmin', 'itmax', 'itstep'], {}), '(itmin, itmax, itstep)\n', (38048, 38070), False, 'from builtins import range\n'), ((39861, 39881), 'copy.copy', 'copy.copy', (['rrcovdata'], {}), '(rrcovdata)\n', (39870, 39881), False, 'import copy\n'), ((39898, 39909), 'builtins.range', 'range', (['nref'], {}), '(nref)\n', (39903, 39909), False, 'from builtins import range\n'), ((40320, 40374), 'mne.utils.logger.info', 'logger.info', (["('Number of samples used : %d' % n_samples)"], {}), "('Number of samples used : %d' % n_samples)\n", (40331, 40374), False, 'from mne.utils import logger\n'), ((40385, 40397), 'time.clock', 'time.clock', ([], {}), '()\n', (40395, 40397), False, 'import time\n'), ((40408, 40419), 'time.time', 'time.time', ([], {}), '()\n', (40417, 40419), False, 'import time\n'), ((41035, 41047), 'time.clock', 'time.clock', ([], {}), '()\n', (41045, 41047), False, 'import time\n'), ((41058, 41069), 'time.time', 'time.time', ([], {}), '()\n', (41067, 41069), False, 'import time\n'), ((41594, 41613), 'copy.copy', 'copy.copy', (['raw.info'], {}), '(raw.info)\n', (41603, 41613), False, 'import copy\n'), ((41885, 41917), 'mne.io.pick.channel_indices_by_type', 'channel_indices_by_type', (['infosig'], {}), '(infosig)\n', (41908, 41917), False, 'from mne.io.pick import channel_indices_by_type\n'), ((41989, 42008), 'copy.copy', 'copy.copy', (['raw.info'], {}), '(raw.info)\n', (41998, 42008), False, 'import copy\n'), ((42162, 42194), 'mne.io.pick.channel_indices_by_type', 
'channel_indices_by_type', (['inforef'], {}), '(inforef)\n', (42185, 42194), False, 'from mne.io.pick import channel_indices_by_type\n'), ((42235, 42249), 'numpy.zeros', 'np.zeros', (['nsig'], {}), '(nsig)\n', (42243, 42249), True, 'import numpy as np\n'), ((42262, 42276), 'numpy.zeros', 'np.zeros', (['nsig'], {}), '(nsig)\n', (42270, 42276), True, 'import numpy as np\n'), ((42289, 42303), 'numpy.zeros', 'np.zeros', (['nref'], {}), '(nref)\n', (42297, 42303), True, 'import numpy as np\n'), ((42316, 42330), 'numpy.zeros', 'np.zeros', (['nref'], {}), '(nref)\n', (42324, 42330), True, 'import numpy as np\n'), ((42371, 42393), 'numpy.zeros', 'np.zeros', (['(nref, nref)'], {}), '((nref, nref))\n', (42379, 42393), True, 'import numpy as np\n'), ((42406, 42428), 'numpy.zeros', 'np.zeros', (['(nsig, nref)'], {}), '((nsig, nref))\n', (42414, 42428), True, 'import numpy as np\n'), ((42464, 42491), 'builtins.range', 'range', (['itmin', 'itmax', 'itstep'], {}), '(itmin, itmax, itstep)\n', (42469, 42491), False, 'from builtins import range\n'), ((44441, 44457), 'copy.copy', 'copy.copy', (['rrcov'], {}), '(rrcov)\n', (44450, 44457), False, 'import copy\n'), ((44474, 44485), 'builtins.range', 'range', (['nref'], {}), '(nref)\n', (44479, 44485), False, 'from builtins import range\n'), ((44700, 44754), 'mne.utils.logger.info', 'logger.info', (["('Number of samples used : %d' % n_samples)"], {}), "('Number of samples used : %d' % n_samples)\n", (44711, 44754), False, 'from mne.utils import logger\n'), ((45185, 45197), 'time.clock', 'time.clock', ([], {}), '()\n', (45195, 45197), False, 'import time\n'), ((45208, 45219), 'time.time', 'time.time', ([], {}), '()\n', (45217, 45219), False, 'import time\n'), ((45861, 45903), 'numpy.linalg.svd', 'np.linalg.svd', (['rrslope'], {'full_matrices': '(True)'}), '(rrslope, full_matrices=True)\n', (45874, 45903), True, 'import numpy as np\n'), ((46011, 46025), 'numpy.zeros', 'np.zeros', (['nref'], {}), '(nref)\n', (46019, 46025), True, 'import 
numpy as np\n'), ((46039, 46050), 'builtins.range', 'range', (['nref'], {}), '(nref)\n', (46044, 46050), False, 'from builtins import range\n'), ((46484, 46506), 'numpy.zeros', 'np.zeros', (['(nref, nref)'], {}), '((nref, nref))\n', (46492, 46506), True, 'import numpy as np\n'), ((47273, 47309), 'numpy.zeros', 'np.zeros', (['(raw._data.shape[0], nref)'], {}), '((raw._data.shape[0], nref))\n', (47281, 47309), True, 'import numpy as np\n'), ((47326, 47337), 'builtins.range', 'range', (['nsig'], {}), '(nsig)\n', (47331, 47337), False, 'from builtins import range\n'), ((48253, 48265), 'time.clock', 'time.clock', ([], {}), '()\n', (48263, 48265), False, 'import time\n'), ((48276, 48287), 'time.time', 'time.time', ([], {}), '()\n', (48285, 48287), False, 'import time\n'), ((48418, 48443), 'builtins.range', 'range', (['raw._data.shape[1]'], {}), '(raw._data.shape[1])\n', (48423, 48443), False, 'from builtins import range\n'), ((49002, 49014), 'time.clock', 'time.clock', ([], {}), '()\n', (49012, 49014), False, 'import time\n'), ((49025, 49036), 'time.time', 'time.time', ([], {}), '()\n', (49034, 49036), False, 'import time\n'), ((50968, 50980), 'time.clock', 'time.clock', ([], {}), '()\n', (50978, 50980), False, 'import time\n'), ((50991, 51002), 'time.time', 'time.time', ([], {}), '()\n', (51000, 51002), False, 'import time\n'), ((4961, 4992), 'mne.io.Raw', 'mne.io.Raw', (['fname'], {'preload': '(True)'}), '(fname, preload=True)\n', (4971, 4992), False, 'import mne\n'), ((5009, 5098), 'mne.pick_types', 'mne.pick_types', (['raw.info'], {'meg': '"""mag"""', 'eeg': '(False)', 'stim': '(False)', 'eog': '(False)', 'exclude': '"""bads"""'}), "(raw.info, meg='mag', eeg=False, stim=False, eog=False,\n exclude='bads')\n", (5023, 5098), False, 'import mne\n'), ((5278, 5392), 'mne.time_frequency.psd_welch', 'psd_welch', (['raw'], {'picks': 'picks', 'fmin': 'fmin', 'fmax': 'fmax', 'tmin': 'tmin', 'tmax': 'tmax', 'n_fft': 'n_fft', 'n_jobs': 'n_jobs', 'proj': 'proj'}), '(raw, 
picks=picks, fmin=fmin, fmax=fmax, tmin=tmin, tmax=tmax,\n n_fft=n_fft, n_jobs=n_jobs, proj=proj)\n', (5287, 5392), False, 'from mne.time_frequency import psd_welch\n'), ((5598, 5660), 'mne.find_events', 'mne.find_events', (['raw'], {'stim_channel': 'stim_name', 'consecutive': '(True)'}), '(raw, stim_channel=stim_name, consecutive=True)\n', (5613, 5660), False, 'import mne\n'), ((6015, 6038), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0, idx]'], {}), '(gs[0, idx])\n', (6026, 6038), True, 'import matplotlib.pyplot as plt\n'), ((6132, 6153), 'numpy.mean', 'np.mean', (['psds'], {'axis': '(0)'}), '(psds, axis=0)\n', (6139, 6153), True, 'import numpy as np\n'), ((8122, 8132), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8130, 8132), True, 'import matplotlib.pyplot as plt\n'), ((8564, 8588), 'mne.io.Raw', 'Raw', (['fname'], {'preload': '(True)'}), '(fname, preload=True)\n', (8567, 8588), False, 'from mne.io import Raw\n'), ((8629, 8732), 'mne.pick_types', 'mne.pick_types', (['raw.info'], {'meg': '"""mag"""', 'ref_meg': '(True)', 'eeg': '(False)', 'stim': '(False)', 'eog': '(False)', 'exclude': '"""bads"""'}), "(raw.info, meg='mag', ref_meg=True, eeg=False, stim=False,\n eog=False, exclude='bads')\n", (8643, 8732), False, 'import mne\n'), ((8806, 8835), 'numpy.arange', 'np.arange', (['raw._data.shape[1]'], {}), '(raw._data.shape[1])\n', (8815, 8835), True, 'import numpy as np\n'), ((15040, 15070), 'jumeg.jumeg_utils.get_files_from_list', 'get_files_from_list', (['fname_raw'], {}), '(fname_raw)\n', (15059, 15070), False, 'from jumeg.jumeg_utils import get_files_from_list\n'), ((15688, 15700), 'time.clock', 'time.clock', ([], {}), '()\n', (15698, 15700), False, 'import time\n'), ((15715, 15726), 'time.time', 'time.time', ([], {}), '()\n', (15724, 15726), False, 'import time\n'), ((16416, 16428), 'time.clock', 'time.clock', ([], {}), '()\n', (16426, 16428), False, 'import time\n'), ((16443, 16454), 'time.time', 'time.time', ([], {}), '()\n', (16452, 
16454), False, 'import time\n'), ((18706, 18759), 'numpy.intersect1d', 'np.intersect1d', (['sigpick', 'refpick'], {'assume_unique': '(False)'}), '(sigpick, refpick, assume_unique=False)\n', (18720, 18759), True, 'import numpy as np\n'), ((22328, 22340), 'time.clock', 'time.clock', ([], {}), '()\n', (22338, 22340), False, 'import time\n'), ((22355, 22366), 'time.time', 'time.time', ([], {}), '()\n', (22364, 22366), False, 'import time\n'), ((22925, 22944), 'copy.copy', 'copy.copy', (['raw.info'], {}), '(raw.info)\n', (22934, 22944), False, 'import copy\n'), ((23236, 23268), 'mne.io.pick.channel_indices_by_type', 'channel_indices_by_type', (['infosig'], {}), '(infosig)\n', (23259, 23268), False, 'from mne.io.pick import channel_indices_by_type\n'), ((23525, 23552), 'builtins.range', 'range', (['itmin', 'itmax', 'itstep'], {}), '(itmin, itmax, itstep)\n', (23530, 23552), False, 'from builtins import range\n'), ((25037, 25055), 'numpy.copy', 'np.copy', (['sscovdata'], {}), '(sscovdata)\n', (25044, 25055), True, 'import numpy as np\n'), ((25139, 25159), 'copy.copy', 'copy.copy', (['rrcovdata'], {}), '(rrcovdata)\n', (25148, 25159), False, 'import copy\n'), ((25180, 25191), 'builtins.range', 'range', (['nref'], {}), '(nref)\n', (25185, 25191), False, 'from builtins import range\n'), ((26393, 26435), 'numpy.linalg.svd', 'np.linalg.svd', (['rrslope'], {'full_matrices': '(True)'}), '(rrslope, full_matrices=True)\n', (26406, 26435), True, 'import numpy as np\n'), ((27822, 27858), 'numpy.zeros', 'np.zeros', (['(raw._data.shape[0], nref)'], {}), '((raw._data.shape[0], nref))\n', (27830, 27858), True, 'import numpy as np\n'), ((27879, 27890), 'builtins.range', 'range', (['nsig'], {}), '(nsig)\n', (27884, 27890), False, 'from builtins import range\n'), ((28231, 28243), 'time.clock', 'time.clock', ([], {}), '()\n', (28241, 28243), False, 'import time\n'), ((28258, 28269), 'time.time', 'time.time', ([], {}), '()\n', (28267, 28269), False, 'import time\n'), ((28328, 28353), 
'builtins.range', 'range', (['raw._data.shape[1]'], {}), '(raw._data.shape[1])\n', (28333, 28353), False, 'from builtins import range\n'), ((32089, 32101), 'time.clock', 'time.clock', ([], {}), '()\n', (32099, 32101), False, 'import time\n'), ((32116, 32127), 'time.time', 'time.time', ([], {}), '()\n', (32125, 32127), False, 'import time\n'), ((34120, 34151), 'math.floor', 'floor', (["(tmin * raw.info['sfreq'])"], {}), "(tmin * raw.info['sfreq'])\n", (34125, 34151), False, 'from math import floor, ceil\n'), ((34169, 34199), 'math.ceil', 'ceil', (["(tmax * raw.info['sfreq'])"], {}), "(tmax * raw.info['sfreq'])\n", (34173, 34199), False, 'from math import floor, ceil\n'), ((34218, 34249), 'math.ceil', 'ceil', (["(tstep * raw.info['sfreq'])"], {}), "(tstep * raw.info['sfreq'])\n", (34222, 34249), False, 'from math import floor, ceil\n'), ((34362, 34451), 'mne.pick_types', 'mne.pick_types', (['raw.info'], {'meg': '"""mag"""', 'eeg': '(False)', 'stim': '(False)', 'eog': '(False)', 'exclude': '"""bads"""'}), "(raw.info, meg='mag', eeg=False, stim=False, eog=False,\n exclude='bads')\n", (34376, 34451), False, 'import mne\n'), ((34925, 35032), 'mne.pick_types', 'mne.pick_types', (['raw.info'], {'ref_meg': '(True)', 'meg': '(False)', 'eeg': '(False)', 'stim': '(False)', 'eog': '(False)', 'exclude': 'refexclude'}), '(raw.info, ref_meg=True, meg=False, eeg=False, stim=False,\n eog=False, exclude=refexclude)\n', (34939, 35032), False, 'import mne\n'), ((36085, 36097), 'time.clock', 'time.clock', ([], {}), '()\n', (36095, 36097), False, 'import time\n'), ((36112, 36123), 'time.time', 'time.time', ([], {}), '()\n', (36121, 36123), False, 'import time\n'), ((36239, 36251), 'time.clock', 'time.clock', ([], {}), '()\n', (36249, 36251), False, 'import time\n'), ((36266, 36277), 'time.time', 'time.time', ([], {}), '()\n', (36275, 36277), False, 'import time\n'), ((44841, 44878), 'numpy.allclose', 'np.allclose', (['smean', 'sigmean'], {'atol': '(0.0)'}), '(smean, sigmean, 
atol=0.0)\n', (44852, 44878), True, 'import numpy as np\n'), ((44912, 44949), 'numpy.allclose', 'np.allclose', (['rmean', 'refmean'], {'atol': '(0.0)'}), '(rmean, refmean, atol=0.0)\n', (44923, 44949), True, 'import numpy as np\n'), ((44985, 45024), 'numpy.allclose', 'np.allclose', (['sscov', 'sscovdata'], {'atol': '(0.0)'}), '(sscov, sscovdata, atol=0.0)\n', (44996, 45024), True, 'import numpy as np\n'), ((45060, 45099), 'numpy.allclose', 'np.allclose', (['srcov', 'srcovdata'], {'atol': '(0.0)'}), '(srcov, srcovdata, atol=0.0)\n', (45071, 45099), True, 'import numpy as np\n'), ((45135, 45174), 'numpy.allclose', 'np.allclose', (['rrcov', 'rrcovdata'], {'atol': '(0.0)'}), '(rrcov, rrcovdata, atol=0.0)\n', (45146, 45174), True, 'import numpy as np\n'), ((45800, 45811), 'builtins.range', 'range', (['nref'], {}), '(nref)\n', (45805, 45811), False, 'from builtins import range\n'), ((47359, 47370), 'builtins.range', 'range', (['nref'], {}), '(nref)\n', (47364, 47370), False, 'from builtins import range\n'), ((47476, 47499), 'numpy.zeros', 'np.zeros', (['weights.shape'], {}), '(weights.shape)\n', (47484, 47499), True, 'import numpy as np\n'), ((47501, 47516), 'numpy.abs', 'np.abs', (['weights'], {}), '(weights)\n', (47507, 47516), True, 'import numpy as np\n'), ((48461, 48494), 'numpy.take', 'np.take', (['raw._data', '[isl]'], {'axis': '(1)'}), '(raw._data, [isl], axis=1)\n', (48468, 48494), True, 'import numpy as np\n'), ((48763, 48789), 'numpy.dot', 'np.dot', (['weights[:]', 'refarr'], {}), '(weights[:], refarr)\n', (48769, 48789), True, 'import numpy as np\n'), ((49339, 49351), 'time.clock', 'time.clock', ([], {}), '()\n', (49349, 49351), False, 'import time\n'), ((49366, 49377), 'time.time', 'time.time', ([], {}), '()\n', (49375, 49377), False, 'import time\n'), ((49463, 49490), 'builtins.range', 'range', (['itmin', 'itmax', 'itstep'], {}), '(itmin, itmax, itstep)\n', (49468, 49490), False, 'from builtins import range\n'), ((50659, 50671), 'time.clock', 'time.clock', 
([], {}), '()\n', (50669, 50671), False, 'import time\n'), ((50686, 50697), 'time.time', 'time.time', ([], {}), '()\n', (50695, 50697), False, 'import time\n'), ((6089, 6112), 'numpy.log10', 'np.log10', (['psds_all[idx]'], {}), '(psds_all[idx])\n', (6097, 6112), True, 'import numpy as np\n'), ((6207, 6227), 'numpy.std', 'np.std', (['psds'], {'axis': '(0)'}), '(psds, axis=0)\n', (6213, 6227), True, 'import numpy as np\n'), ((7231, 7267), 'mne.io.Raw', 'mne.io.Raw', (['fnraw[idx]'], {'preload': '(True)'}), '(fnraw[idx], preload=True)\n', (7241, 7267), False, 'import mne\n'), ((7289, 7421), 'mne.Epochs', 'mne.Epochs', (['raw', 'events', 'event_id'], {'proj': '(False)', 'tmin': 'tmin_stim', 'tmax': 'tmax_stim', 'picks': 'picks', 'preload': '(True)', 'baseline': '(None, None)'}), '(raw, events, event_id, proj=False, tmin=tmin_stim, tmax=\n tmax_stim, picks=picks, preload=True, baseline=(None, None))\n', (7299, 7421), False, 'import mne\n'), ((7687, 7710), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1, idx]'], {}), '(gs[1, idx])\n', (7698, 7710), True, 'import matplotlib.pyplot as plt\n'), ((8918, 8959), 'numpy.polyfit', 'polyfit', (['xval', 'raw._data[ipick, :]'], {'deg': '(1)'}), '(xval, raw._data[ipick, :], deg=1)\n', (8925, 8959), False, 'from numpy import poly1d, polyfit\n'), ((8980, 8993), 'numpy.poly1d', 'poly1d', (['coeff'], {}), '(coeff)\n', (8986, 8993), False, 'from numpy import poly1d, polyfit\n'), ((10221, 10269), 'mne.pick_channels_regexp', 'mne.pick_channels_regexp', (['fulllist', 'findlist[ir]'], {}), '(fulllist, findlist[ir])\n', (10245, 10269), False, 'import mne\n'), ((10770, 10801), 'numpy.setdiff1d', 'np.setdiff1d', (['chnpick', 'exclinds'], {}), '(chnpick, exclinds)\n', (10782, 10801), True, 'import numpy as np\n'), ((15280, 15330), 'warnings.warn', 'warnings.warn', (['"""Setting file name from Raw object"""'], {}), "('Setting file name from Raw object')\n", (15293, 15330), False, 'import warnings\n'), ((17408, 17497), 'mne.pick_types', 
'mne.pick_types', (['raw.info'], {'meg': '"""mag"""', 'eeg': '(False)', 'stim': '(False)', 'eog': '(False)', 'exclude': '"""bads"""'}), "(raw.info, meg='mag', eeg=False, stim=False, eog=False,\n exclude='bads')\n", (17422, 17497), False, 'import mne\n'), ((18126, 18229), 'mne.pick_types', 'mne.pick_types', (['raw.info'], {'ref_meg': '(True)', 'meg': '(False)', 'eeg': '(False)', 'stim': '(False)', 'eog': '(False)', 'exclude': '"""bads"""'}), "(raw.info, ref_meg=True, meg=False, eeg=False, stim=False,\n eog=False, exclude='bads')\n", (18140, 18229), False, 'import mne\n'), ((20853, 20865), 'time.clock', 'time.clock', ([], {}), '()\n', (20863, 20865), False, 'import time\n'), ((20884, 20895), 'time.time', 'time.time', ([], {}), '()\n', (20893, 20895), False, 'import time\n'), ((21640, 21652), 'time.clock', 'time.clock', ([], {}), '()\n', (21650, 21652), False, 'import time\n'), ((21671, 21682), 'time.time', 'time.time', ([], {}), '()\n', (21680, 21682), False, 'import time\n'), ((23342, 23373), 'math.ceil', 'ceil', (["(tstep * raw.info['sfreq'])"], {}), "(tstep * raw.info['sfreq'])\n", (23346, 23373), False, 'from math import floor, ceil\n'), ((25546, 25558), 'time.clock', 'time.clock', ([], {}), '()\n', (25556, 25558), False, 'import time\n'), ((25577, 25588), 'time.time', 'time.time', ([], {}), '()\n', (25586, 25588), False, 'import time\n'), ((27916, 27927), 'builtins.range', 'range', (['nref'], {}), '(nref)\n', (27921, 27927), False, 'from builtins import range\n'), ((28375, 28408), 'numpy.take', 'np.take', (['raw._data', '[isl]'], {'axis': '(1)'}), '(raw._data, [isl], axis=1)\n', (28382, 28408), True, 'import numpy as np\n'), ((28711, 28737), 'numpy.dot', 'np.dot', (['weights[:]', 'refarr'], {}), '(weights[:], refarr)\n', (28717, 28737), True, 'import numpy as np\n'), ((29137, 29149), 'time.clock', 'time.clock', ([], {}), '()\n', (29147, 29149), False, 'import time\n'), ((29168, 29179), 'time.time', 'time.time', ([], {}), '()\n', (29177, 29179), False, 'import 
time\n'), ((29552, 29564), 'time.clock', 'time.clock', ([], {}), '()\n', (29562, 29564), False, 'import time\n'), ((29583, 29594), 'time.time', 'time.time', ([], {}), '()\n', (29592, 29594), False, 'import time\n'), ((29696, 29723), 'builtins.range', 'range', (['itmin', 'itmax', 'itstep'], {}), '(itmin, itmax, itstep)\n', (29701, 29723), False, 'from builtins import range\n'), ((38739, 38853), 'mne.epochs._is_good', '_is_good', (['raw_segmentsig', "infosig['ch_names']", 'idx_by_typesig', 'reject'], {'flat': 'None', 'ignore_chs': "raw.info['bads']"}), "(raw_segmentsig, infosig['ch_names'], idx_by_typesig, reject, flat=\n None, ignore_chs=raw.info['bads'])\n", (38747, 38853), False, 'from mne.epochs import _is_good\n'), ((39071, 39111), 'numpy.dot', 'np.dot', (['raw_segmentsig', 'raw_segmentref.T'], {}), '(raw_segmentsig, raw_segmentref.T)\n', (39077, 39111), True, 'import numpy as np\n'), ((39137, 39177), 'numpy.dot', 'np.dot', (['raw_segmentref', 'raw_segmentref.T'], {}), '(raw_segmentref, raw_segmentref.T)\n', (39143, 39177), True, 'import numpy as np\n'), ((39253, 39313), 'mne.utils.logger.info', 'logger.info', (["('Artefact detected in [%d, %d]' % (first, last))"], {}), "('Artefact detected in [%d, %d]' % (first, last))\n", (39264, 39313), False, 'from mne.utils import logger\n'), ((39996, 40007), 'builtins.range', 'range', (['nsig'], {}), '(nsig)\n', (40001, 40007), False, 'from builtins import range\n'), ((40079, 40090), 'builtins.range', 'range', (['nref'], {}), '(nref)\n', (40084, 40090), False, 'from builtins import range\n'), ((40178, 40189), 'builtins.range', 'range', (['nsig'], {}), '(nsig)\n', (40183, 40189), False, 'from builtins import range\n'), ((40258, 40269), 'builtins.range', 'range', (['nref'], {}), '(nref)\n', (40263, 40269), False, 'from builtins import range\n'), ((43160, 43274), 'mne.epochs._is_good', '_is_good', (['raw_segmentsig', "infosig['ch_names']", 'idx_by_typesig', 'reject'], {'flat': 'None', 'ignore_chs': "raw.info['bads']"}), 
"(raw_segmentsig, infosig['ch_names'], idx_by_typesig, reject, flat=\n None, ignore_chs=raw.info['bads'])\n", (43168, 43274), False, 'from mne.epochs import _is_good\n'), ((43319, 43349), 'builtins.range', 'range', (['raw_segmentsig.shape[1]'], {}), '(raw_segmentsig.shape[1])\n', (43324, 43349), False, 'from builtins import range\n'), ((44108, 44168), 'mne.utils.logger.info', 'logger.info', (["('Artefact detected in [%d, %d]' % (first, last))"], {}), "('Artefact detected in [%d, %d]' % (first, last))\n", (44119, 44168), False, 'from mne.utils import logger\n'), ((46538, 46551), 'numpy.diag', 'np.diag', (['sinv'], {}), '(sinv)\n', (46545, 46551), True, 'import numpy as np\n'), ((46669, 46686), 'numpy.identity', 'np.identity', (['nref'], {}), '(nref)\n', (46680, 46686), True, 'import numpy as np\n'), ((47415, 47455), 'numpy.dot', 'np.dot', (['srcov[isig][:]', 'RRinvtr[iref][:]'], {}), '(srcov[isig][:], RRinvtr[iref][:])\n', (47421, 47455), True, 'import numpy as np\n'), ((48544, 48580), 'numpy.take', 'np.take', (['fltref._data', '[isl]'], {'axis': '(1)'}), '(fltref._data, [isl], axis=1)\n', (48551, 48580), True, 'import numpy as np\n'), ((7567, 7586), 'numpy.min', 'np.min', (['evoked.data'], {}), '(evoked.data)\n', (7573, 7586), True, 'import numpy as np\n'), ((7610, 7629), 'numpy.max', 'np.max', (['evoked.data'], {}), '(evoked.data)\n', (7616, 7629), True, 'import numpy as np\n'), ((15879, 15910), 'mne.io.Raw', 'mne.io.Raw', (['fname'], {'preload': '(True)'}), '(fname, preload=True)\n', (15889, 15910), False, 'import mne\n'), ((16183, 16206), 'os.path.basename', 'os.path.basename', (['fname'], {}), '(fname)\n', (16199, 16206), False, 'import os\n'), ((16210, 16236), 'os.path.basename', 'os.path.basename', (['fnintern'], {}), '(fnintern)\n', (16226, 16236), False, 'import os\n'), ((16254, 16380), 'warnings.warn', 'warnings.warn', (['"""The file name within the Raw object and provided\n fname are not the same. 
Please check again."""'], {}), '(\n """The file name within the Raw object and provided\n fname are not the same. Please check again."""\n )\n', (16267, 16380), False, 'import warnings\n'), ((16903, 16934), 'math.floor', 'floor', (["(tmin * raw.info['sfreq'])"], {}), "(tmin * raw.info['sfreq'])\n", (16908, 16934), False, 'from math import floor, ceil\n'), ((17050, 17080), 'math.ceil', 'ceil', (["(tmax * raw.info['sfreq'])"], {}), "(tmax * raw.info['sfreq'])\n", (17054, 17080), False, 'from math import floor, ceil\n'), ((23946, 24060), 'mne.epochs._is_good', '_is_good', (['raw_segmentsig', "infosig['ch_names']", 'idx_by_typesig', 'reject'], {'flat': 'None', 'ignore_chs': "raw.info['bads']"}), "(raw_segmentsig, infosig['ch_names'], idx_by_typesig, reject, flat=\n None, ignore_chs=raw.info['bads'])\n", (23954, 24060), False, 'from mne.epochs import _is_good\n'), ((24298, 24338), 'numpy.dot', 'np.dot', (['raw_segmentsig', 'raw_segmentref.T'], {}), '(raw_segmentsig, raw_segmentref.T)\n', (24304, 24338), True, 'import numpy as np\n'), ((24368, 24408), 'numpy.dot', 'np.dot', (['raw_segmentref', 'raw_segmentref.T'], {}), '(raw_segmentref, raw_segmentref.T)\n', (24374, 24408), True, 'import numpy as np\n'), ((24496, 24556), 'mne.utils.logger.info', 'logger.info', (["('Artefact detected in [%d, %d]' % (first, last))"], {}), "('Artefact detected in [%d, %d]' % (first, last))\n", (24507, 24556), False, 'from mne.utils import logger\n'), ((26705, 26716), 'builtins.range', 'range', (['nref'], {}), '(nref)\n', (26710, 26716), False, 'from builtins import range\n'), ((27245, 27262), 'numpy.identity', 'np.identity', (['nref'], {}), '(nref)\n', (27256, 27262), True, 'import numpy as np\n'), ((27264, 27286), 'numpy.dot', 'np.dot', (['RRinv', 'rrslope'], {}), '(RRinv, rrslope)\n', (27270, 27286), True, 'import numpy as np\n'), ((27976, 28018), 'numpy.dot', 'np.dot', (['srcovdata[isig, :]', 'RRinv[:, iref]'], {}), '(srcovdata[isig, :], RRinv[:, iref])\n', (27982, 28018), True, 'import 
numpy as np\n'), ((28466, 28502), 'numpy.take', 'np.take', (['fltref._data', '[isl]'], {'axis': '(1)'}), '(fltref._data, [isl], axis=1)\n', (28473, 28502), True, 'import numpy as np\n'), ((29050, 29068), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (29066, 29068), False, 'import sys\n'), ((31369, 31381), 'time.clock', 'time.clock', ([], {}), '()\n', (31379, 31381), False, 'import time\n'), ((31404, 31415), 'time.time', 'time.time', ([], {}), '()\n', (31413, 31415), False, 'import time\n'), ((35973, 35997), 'builtins.range', 'range', (["raw.info['nchan']"], {}), "(raw.info['nchan'])\n", (35978, 35997), False, 'from builtins import range\n'), ((46255, 46265), 'numpy.diag', 'np.diag', (['s'], {}), '(s)\n', (46262, 46265), True, 'import numpy as np\n'), ((47688, 47703), 'numpy.abs', 'np.abs', (['weights'], {}), '(weights)\n', (47694, 47703), True, 'import numpy as np\n'), ((49783, 49897), 'mne.epochs._is_good', '_is_good', (['raw_segmentsig', "infosig['ch_names']", 'idx_by_typesig', 'reject'], {'flat': 'None', 'ignore_chs': "raw.info['bads']"}), "(raw_segmentsig, infosig['ch_names'], idx_by_typesig, reject, flat=\n None, ignore_chs=raw.info['bads'])\n", (49791, 49897), False, 'from mne.epochs import _is_good\n'), ((50400, 50435), 'numpy.less_equal', 'np.less_equal', (['sscovdata', 'sscovinit'], {}), '(sscovdata, sscovinit)\n', (50413, 50435), True, 'import numpy as np\n'), ((6355, 6375), 'numpy.min', 'np.min', (['psds'], {'axis': '(0)'}), '(psds, axis=0)\n', (6361, 6375), True, 'import numpy as np\n'), ((6377, 6397), 'numpy.max', 'np.max', (['psds'], {'axis': '(0)'}), '(psds, axis=0)\n', (6383, 6397), True, 'import numpy as np\n'), ((6766, 6782), 'numpy.min', 'np.min', (['psd_mean'], {}), '(psd_mean)\n', (6772, 6782), True, 'import numpy as np\n'), ((6789, 6805), 'numpy.max', 'np.max', (['psd_mean'], {}), '(psd_mean)\n', (6795, 6805), True, 'import numpy as np\n'), ((10041, 10078), 'numpy.concatenate', 'np.concatenate', (['(chnpick, chnpicktmp)'], {}), 
'((chnpick, chnpicktmp))\n', (10055, 10078), True, 'import numpy as np\n'), ((10445, 10482), 'numpy.concatenate', 'np.concatenate', (['(chnpick, chnpicktmp)'], {}), '((chnpick, chnpicktmp))\n', (10459, 10482), True, 'import numpy as np\n'), ((15184, 15218), 'os.path.basename', 'os.path.basename', (['raw.filenames[0]'], {}), '(raw.filenames[0])\n', (15200, 15218), False, 'import os\n'), ((16069, 16103), 'os.path.basename', 'os.path.basename', (['raw.filenames[0]'], {}), '(raw.filenames[0])\n', (16085, 16103), False, 'import os\n'), ((20789, 20813), 'builtins.range', 'range', (["raw.info['nchan']"], {}), "(raw.info['nchan'])\n", (20794, 20813), False, 'from builtins import range\n'), ((26870, 26880), 'numpy.diag', 'np.diag', (['s'], {}), '(s)\n', (26877, 26880), True, 'import numpy as np\n'), ((27169, 27182), 'numpy.diag', 'np.diag', (['sinv'], {}), '(sinv)\n', (27176, 27182), True, 'import numpy as np\n'), ((30054, 30168), 'mne.epochs._is_good', '_is_good', (['raw_segmentsig', "infosig['ch_names']", 'idx_by_typesig', 'reject'], {'flat': 'None', 'ignore_chs': "raw.info['bads']"}), "(raw_segmentsig, infosig['ch_names'], idx_by_typesig, reject, flat=\n None, ignore_chs=raw.info['bads'])\n", (30062, 30168), False, 'from mne.epochs import _is_good\n'), ((45571, 45585), 'numpy.mean', 'np.mean', (['sscov'], {}), '(sscov)\n', (45578, 45585), True, 'import numpy as np\n'), ((47646, 47661), 'numpy.abs', 'np.abs', (['weights'], {}), '(weights)\n', (47652, 47661), True, 'import numpy as np\n'), ((50495, 50513), 'numpy.mean', 'np.mean', (['sscovdata'], {}), '(sscovdata)\n', (50502, 50513), True, 'import numpy as np\n'), ((7977, 7986), 'builtins.str', 'str', (['info'], {}), '(info)\n', (7980, 7986), False, 'from builtins import str\n'), ((10593, 10609), 'numpy.sort', 'np.sort', (['chnpick'], {}), '(chnpick)\n', (10600, 10609), True, 'import numpy as np\n'), ((19940, 20016), 'warnings.warn', 'warnings.warn', (["('Ignoring notch frequency > 0.5*sample_rate=%.1fHz' % nyquist)"], 
{}), "('Ignoring notch frequency > 0.5*sample_rate=%.1fHz' % nyquist)\n", (19953, 20016), False, 'import warnings\n'), ((27554, 27576), 'numpy.dot', 'np.dot', (['RRinv', 'rrslope'], {}), '(RRinv, rrslope)\n', (27560, 27576), True, 'import numpy as np\n'), ((36196, 36207), 'builtins.range', 'range', (['nref'], {}), '(nref)\n', (36201, 36207), False, 'from builtins import range\n'), ((47721, 47736), 'numpy.abs', 'np.abs', (['weights'], {}), '(weights)\n', (47727, 47736), True, 'import numpy as np\n'), ((26016, 26034), 'numpy.mean', 'np.mean', (['sscovdata'], {}), '(sscovdata)\n', (26023, 26034), True, 'import numpy as np\n'), ((30853, 30871), 'numpy.mean', 'np.mean', (['sscovdata'], {}), '(sscovdata)\n', (30860, 30871), True, 'import numpy as np\n'), ((21241, 21252), 'builtins.range', 'range', (['nref'], {}), '(nref)\n', (21246, 21252), False, 'from builtins import range\n'), ((21563, 21574), 'builtins.range', 'range', (['nref'], {}), '(nref)\n', (21568, 21574), False, 'from builtins import range\n'), ((30745, 30780), 'numpy.less_equal', 'np.less_equal', (['sscovdata', 'sscovinit'], {}), '(sscovdata, sscovinit)\n', (30758, 30780), True, 'import numpy as np\n')]
|
import argparse
import os
import logging
import string
import sys
import json
import numpy as np
import pandas as pd
import tensorflow as tf
from sqlalchemy import create_engine
from .alphabet import ALPHABET_DNA
from .model import (
conv1d_densenet_regression_model,
compile_regression_model,
DenormalizedMAE,
)
from .load_sequences import (
TrainingSequence,
TestingSequence,
load_growth_temperatures,
assign_weight_to_batch_values,
compute_inverse_probability_weights,
)
from .utilities import (
SaveModelCallback,
generate_random_run_id,
)
from .validation import validate_model_on_test_set
DB_PATH = 'data/db/seq.db'
logger = logging.getLogger(__name__)
def main() -> None:
    """CLI entry point: train a conv1d DenseNet regression model on DNA
    sequences stored in a SQLite database, then validate it on the test set.

    Artifacts for a run are written under ``saved_models/<run_id>/``
    (``model.h5``, ``metadata.json``, ``validation.csv``); TensorBoard
    summaries go to ``summary_log/<run_id>``.  With ``--resume``, the
    hyper-parameters recorded in ``metadata.json`` are reused and training
    continues from the saved weights.
    """
    logging.basicConfig(level=logging.INFO, format="%(asctime)s (%(levelname)s) %(message)s")
    parser = argparse.ArgumentParser()
    parser.add_argument('--run_id', type=str, default=None)
    parser.add_argument('--resume', action='store_true')
    parser.add_argument('--learning_rate', type=float, default=1e-4)
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--n_epochs', type=int, default=10)
    parser.add_argument('--db_path', type=str, default=None)
    parser.add_argument('--verbose', type=int, default=1)
    parser.add_argument('--max_queue_size', type=int, default=50)
    parser.add_argument('--max_sequence_length', type=int, default=5001)
    parser.add_argument('--dtype', type=str, default='float32')
    args = parser.parse_args()
    run_id = args.run_id
    resume = args.resume
    learning_rate = args.learning_rate
    batch_size = args.batch_size
    n_epochs = args.n_epochs
    db_path = args.db_path
    verbose = args.verbose
    max_queue_size = args.max_queue_size
    max_sequence_length = args.max_sequence_length
    dtype = args.dtype
    # Resuming requires an explicit run id; a fresh run gets a random one.
    if run_id is None and resume:
        logger.error('Specify --run_id to resume run')
        sys.exit(1)
    elif run_id is None and not resume:
        run_id = generate_random_run_id()
    if db_path is None:
        db_path = os.path.join(os.getcwd(), DB_PATH)
    engine = create_engine(f'sqlite+pysqlite:///{db_path}')
    logger.info(f'Run {run_id}')
    # Output locations for this run's artifacts.
    output_folder = os.path.join(os.getcwd(), f'saved_models/{run_id}/')
    model_path = os.path.join(output_folder, 'model.h5')
    metadata_path = os.path.join(output_folder, 'metadata.json')
    validation_output_path = os.path.join(output_folder, 'validation.csv')
    log_dir = os.path.join(os.getcwd(), f'summary_log/{run_id}')
    try:
        os.makedirs(output_folder)
    except FileExistsError:
        pass
    if resume:
        # Reuse the architecture / hyper-parameters recorded by the earlier run.
        with open(metadata_path, 'r') as f:
            metadata = json.load(f)
    else:
        # Fresh run: fix the architecture here and record it for reproducibility.
        initial_epoch = 0
        dropout_rate = 0.5
        seed = np.random.randint(0, 9999)
        encoding_size = 20
        decoder_n_hidden = 100
        growth_rate = 15
        kernel_sizes = [3] + [5] * 9
        strides = None
        dilation_rates = None
        n_layers = len(kernel_sizes)
        l2_reg = 1e-5
        metadata = {
            'run_id': run_id,
            'alphabet': ALPHABET_DNA,
            'learning_rate': learning_rate,
            'batch_size': batch_size,
            'encoding_size': encoding_size,
            'decoder_n_hidden': decoder_n_hidden,
            'growth_rate': growth_rate,
            'n_layers': n_layers,
            'kernel_sizes': kernel_sizes,
            'strides': strides,
            'dilation_rates': dilation_rates,
            'l2_reg': l2_reg,
            'dropout': dropout_rate,
            'n_epochs': initial_epoch,
            'max_sequence_length': max_sequence_length,
            'seed': seed,
        }
    logger.info('Loading data')
    tmps, mean, std = load_growth_temperatures(engine)
    # Metadata wins over the CLI flag so resumed runs keep their geometry.
    max_sequence_length = metadata['max_sequence_length']
    training_sequence = TrainingSequence(
        engine,
        batch_size=batch_size,
        temperatures=tmps,
        mean=mean,
        std=std,
        dtype=dtype,
        alphabet=metadata['alphabet'],
        max_sequence_length=max_sequence_length,
        random_seed=metadata['seed'],
    )
    testing_sequence = TestingSequence(
        engine,
        batch_size=batch_size,
        temperatures=tmps,
        mean=mean,
        std=std,
        dtype=dtype,
        alphabet=metadata['alphabet'],
        max_sequence_length=max_sequence_length,
        random_seed=metadata['seed'],
    )
    model = conv1d_densenet_regression_model(
        alphabet_size=len(metadata['alphabet']),
        growth_rate=metadata['growth_rate'],
        n_layers=metadata['n_layers'],
        kernel_sizes=metadata['kernel_sizes'],
        strides=metadata.get('strides'),
        dilation_rates=metadata.get('dilation_rates'),
        l2_reg=metadata['l2_reg'],
        dropout=metadata['dropout'],
        masking=True,
    )
    compile_regression_model(model, learning_rate)
    if resume:
        logger.info(f'Resuming from {model_path}')
        model.load_weights(model_path)
    # Shift the epoch window when resuming so logs/checkpoints line up.
    initial_epoch = 0
    epochs = n_epochs
    if resume:
        initial_epoch = metadata['n_epochs']
        epochs += initial_epoch
    logger.info(f'Training run {run_id}')
    model.fit(
        training_sequence,
        validation_data=testing_sequence,
        max_queue_size=max_queue_size,
        epochs=epochs,
        initial_epoch=initial_epoch,
        verbose=verbose,
        callbacks=[
            tf.keras.callbacks.TensorBoard(
                log_dir=log_dir,
                histogram_freq=0,
                write_graph=False,
                update_freq=1000,
                embeddings_freq=0,
                profile_batch=(2, 100),
            ),
            SaveModelCallback(
                model_path=model_path,
                metadata_path=metadata_path,
                metadata=metadata,
            ),
        ],
    )
    logger.info('Training completed')
    logger.info('Validating on test set')
    validation_df = validate_model_on_test_set(
        engine,
        model,
        batch_size=batch_size,
        max_queue_size=max_queue_size,
        max_sequence_length=max_sequence_length,
    )
    validation_df.to_csv(validation_output_path)
    logger.info('DONE')
if __name__ == '__main__':
    main()
|
[
"json.load",
"argparse.ArgumentParser",
"logging.basicConfig",
"os.makedirs",
"os.getcwd",
"numpy.random.randint",
"sqlalchemy.create_engine",
"sys.exit",
"tensorflow.keras.callbacks.TensorBoard",
"os.path.join",
"logging.getLogger"
] |
[((677, 704), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (694, 704), False, 'import logging\n'), ((723, 817), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s (%(levelname)s) %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s (%(levelname)s) %(message)s')\n", (742, 817), False, 'import logging\n'), ((831, 856), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (854, 856), False, 'import argparse\n'), ((2123, 2169), 'sqlalchemy.create_engine', 'create_engine', (['f"""sqlite+pysqlite:///{db_path}"""'], {}), "(f'sqlite+pysqlite:///{db_path}')\n", (2136, 2169), False, 'from sqlalchemy import create_engine\n'), ((2295, 2334), 'os.path.join', 'os.path.join', (['output_folder', '"""model.h5"""'], {}), "(output_folder, 'model.h5')\n", (2307, 2334), False, 'import os\n'), ((2355, 2399), 'os.path.join', 'os.path.join', (['output_folder', '"""metadata.json"""'], {}), "(output_folder, 'metadata.json')\n", (2367, 2399), False, 'import os\n'), ((2429, 2474), 'os.path.join', 'os.path.join', (['output_folder', '"""validation.csv"""'], {}), "(output_folder, 'validation.csv')\n", (2441, 2474), False, 'import os\n'), ((1937, 1948), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1945, 1948), False, 'import sys\n'), ((2238, 2249), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2247, 2249), False, 'import os\n'), ((2502, 2513), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2511, 2513), False, 'import os\n'), ((2558, 2584), 'os.makedirs', 'os.makedirs', (['output_folder'], {}), '(output_folder)\n', (2569, 2584), False, 'import os\n'), ((2800, 2826), 'numpy.random.randint', 'np.random.randint', (['(0)', '(9999)'], {}), '(0, 9999)\n', (2817, 2826), True, 'import numpy as np\n'), ((2087, 2098), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2096, 2098), False, 'import os\n'), ((2709, 2721), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2718, 2721), False, 'import 
json\n'), ((5479, 5633), 'tensorflow.keras.callbacks.TensorBoard', 'tf.keras.callbacks.TensorBoard', ([], {'log_dir': 'log_dir', 'histogram_freq': '(0)', 'write_graph': '(False)', 'update_freq': '(1000)', 'embeddings_freq': '(0)', 'profile_batch': '(2, 100)'}), '(log_dir=log_dir, histogram_freq=0,\n write_graph=False, update_freq=1000, embeddings_freq=0, profile_batch=(\n 2, 100))\n', (5509, 5633), True, 'import tensorflow as tf\n')]
|
import numpy
import xraylib
def transfocator_guess_configuration(focal_f_target, deltas=[0.999998], radii=[500e-4],
                                     initial_focal_distance=None, verbose=0):
    """Pick the lens in/out combination whose focal distance best matches a target.

    Every lens ``i`` (refraction-index decrement ``deltas[i]``, curvature
    radius ``radii[i]``) can independently be inserted in or removed from
    the beam, so all ``2**len(radii)`` configurations are scanned.  In the
    thin-lens approximation inverse focal distances add, each inserted lens
    contributing ``2*delta/radius``.

    Parameters
    ----------
    focal_f_target : float
        Desired focal distance [m].
    deltas : sequence of float
        Refraction-index decrement (1 - n) of each lens; same length as
        ``radii``.
    radii : sequence of float
        Radius of curvature of each lens [m].
    initial_focal_distance : float, optional
        Focal distance of focusing already present upstream; its inverse is
        added to every combination.
    verbose : int
        If non-zero, print the selected configuration and equivalent radii.

    Returns
    -------
    float
        The achievable focal distance closest to ``focal_f_target``.  When a
        combination applies no focusing at all, the sentinel value ``1e5``
        stands in for an infinite focal distance.
    """
    nn = len(radii)
    ncombinations = 2 ** nn
    Farray = numpy.zeros(ncombinations)
    for i in range(ncombinations):
        # Bit j of the combination index says whether lens j is in the beam.
        str1 = numpy.binary_repr(i, width=nn)
        if initial_focal_distance is None:
            invF = 0
        else:
            invF = 1.0 / initial_focal_distance
        for j in range(nn):
            invF += 2 * deltas[j] / radii[j] * float(str1[j])
        if invF != 0:
            Farray[i] = 1.0 / invF
        else:
            Farray[i] = 1e5  # sentinel: no lenses in and no upstream focusing
    iarg = numpy.argmin(numpy.abs(focal_f_target - Farray))
    if verbose:
        print(">>>> optimum for f=%g (idx %d): " % (focal_f_target, iarg), numpy.binary_repr(iarg, width=nn), Farray[iarg] )
        print("     cumulated radius: wanted R=%g found R=%g: " % (
            1e6*_transfocator_calculate_radius(delta=deltas[0], focal_distance=focal_f_target),
            1e6*_transfocator_calculate_radius(delta=deltas[0], focal_distance=Farray[iarg]),
            ))
        print("     Initial focal distance: ", initial_focal_distance)
    return Farray[iarg]
def _transfocator_calculate_focal_distance(deltas=[0.999998],nlenses=[1],radii=[500e-4]):
inverse_focal_distance = 0.0
for i,nlensesi in enumerate(nlenses):
if nlensesi > 0:
focal_distance_i = radii[i] / (2.*nlensesi*deltas[i])
inverse_focal_distance += 1.0/focal_distance_i
if inverse_focal_distance == 0:
return 99999999999999999999999999.
else:
return 1.0/inverse_focal_distance
def _transfocator_calculate_radius(delta=0.999998,focal_distance=10):
radius = focal_distance * (2.*delta)
return radius
if __name__ == "__main__":
    # Demo: compute achievable focal distances of the ESRF-style Be
    # transfocators (TF1 / TF2, vertical and horizontal) at 7 keV and
    # compare them against the reference values from the paper.
    symbol = "Be"
    density = 1.845
    photon_energy_ev = 7000.0
    # delta = 1 - Re(n) from xraylib (energy is passed in keV).
    delta = 1.0 - xraylib.Refractive_Index_Re(symbol,photon_energy_ev*1e-3,density)
    print("delta: %g" % delta)
    # f1 in 15-85
    # focal_f_target = 30.0
    fwanted = numpy.linspace(2,85,50)
    # fwanted = numpy.array([2])
    # fpaper_tf1v = numpy.array([15.0, 42.2, 85.2, 42.2])
    # fpaper_tf1h = numpy.array([46.1, 25.1, 46.1, 25.1])
    #
    # fpaper_tf2v = numpy.array([22.2, 55.6, 27.8, 55.7])
    # fpaper_tf2h = numpy.array([26.5, 21.3, 31.8, 20.7])
    # Reference focal distances taken from the paper (V = vertical, H = horizontal).
    fpaper_tf1v = numpy.array([ 42.2 ])
    fpaper_tf1h = numpy.array([ 25.1 ])
    fpaper_tf2v = numpy.array([ 55.7 ])
    fpaper_tf2h = numpy.array([ 20.7 ])
    # ## TRANSFOCATOR @ 65
    # Transfocator 2D with 7 axis and 11 lenses
    # - 1×Be lenses, r=5000.0 μm, D=1.0 mm (2R_0=4405 μm)
    # - 1×Be lenses, r=2000.0 μm, D=1.0 mm (2R_0=2786 μm)
    # - 1×Be lenses, r=1000.0 μm, D=1.0 mm (2R_0=1970 μm)
    # - 1×Be lenses, r=500.0 μm, D=1.0 mm (2R_0=1393 μm)
    # - 1×Be lenses, r=200.0 μm, D=1.0 mm (2R_0=881 μm)
    # - 2×Be lenses, r=200.0 μm, D=1.0 mm (2R_0=881 μm)
    # - 4×Be lenses, r=200.0 μm, D=1.0 mm (2R_0=881 μm)
    # ## TRANSFOCATOR @ 65
    # Transfocator 1DH with 6 axis and 7 lenses
    # - 1×Be lenses, r=5000.0 μm, D=1.0 mm (2R_0=4405 μm)
    # - 1×Be lenses, r=2000.0 μm, D=1.0 mm (2R_0=2786 μm)
    # - 1×Be lenses, r=1000.0 μm, D=1.0 mm (2R_0=1970 μm)
    # - 1×Be lenses, r=500.0 μm, D=1.0 mm (2R_0=1393 μm)
    # - 1×Be lenses, r=200.0 μm, D=1.0 mm (2R_0=881 μm)
    # - 2×Be lenses, r=200.0 μm, D=1.0 mm (2R_0=881 μm)
    # Each entry is one axis; N identical lenses on an axis act as r/N.
    radii_tf1v = [5000e-6, 2000e-6, 1000e-6, 500e-6, 200e-6, 200e-6/2, 200e-6/4]
    radii_tf1h = [5000e-6, 2000e-6, 1000e-6, 500e-6, 200e-6, 200e-6/2]
    # ## TRANSFOCATOR 2D @ 170
    # Transfocator 2D with 9 axis and 20 lenses
    # - 1×Be lenses, r=5000.0 μm, D=1.0 mm (2R_0=4405 μm)
    # - 1×Be lenses, r=2000.0 μm, D=1.0 mm (2R_0=2786 μm)
    # - 1×Be lenses, r=1000.0 μm, D=1.0 mm (2R_0=1970 μm)
    # - 1×Be lenses, r=500.0 μm, D=1.0 mm (2R_0=1393 μm)
    # - 1×Be lenses, r=300.0 μm, D=1.0 mm (2R_0=1079 μm)
    # - 1×Be lenses, r=200.0 μm, D=1.0 mm (2R_0=881 μm)
    # - 2×Be lenses, r=200.0 μm, D=1.0 mm (2R_0=881 μm)
    # - 4×Be lenses, r=200.0 μm, D=1.0 mm (2R_0=881 μm)
    # - 8×Be lenses, r=200.0 μm, D=1.0 mm (2R_0=881 μm)
    #
    # ## TRANSFOCATOR 1DH @ 170
    # Transfocator with 4 axis and 4 lenses
    # - 1×Be lenses, r=5000.0 μm, D=1.0 mm (2R_0=4405 μm)
    # - 1×Be lenses, r=2000.0 μm, D=1.0 mm (2R_0=2786 μm)
    # - 1×Be lenses, r=1000.0 μm, D=1.0 mm (2R_0=1970 μm)
    # - 1×Be lenses, r=500.0 μm, D=1.0 mm (2R_0=1393 μm)
    radii_tf2v = [5000e-6, 2000e-6, 1000e-6, 500e-6, 300e-6, 200e-6, 200e-6/2, 200e-6/4, 200e-6/8]
    radii_tf2h = [5000e-6, 2000e-6, 1000e-6, 500e-6]
    # Disabled exploratory scan + plot over the whole fwanted range.
    if False:
        ffound = numpy.zeros_like(fwanted)
        for ii,focal_f_target in enumerate(fwanted):
            a = transfocator_guess_configuration(focal_f_target, deltas=[delta]*len(radii_tf1v), radii=radii_tf1v)
            ffound[ii] = a
        # print(ffound)
        ffound2 = numpy.zeros_like(fwanted)
        for ii,focal_f_target in enumerate(fwanted):
            a = transfocator_guess_configuration(focal_f_target,
                                                 deltas=[delta]*len(radii_tf2v), radii=radii_tf2v)
            ffound2[ii] = a
        # print(ffound2)
        #
        # plot
        #
        from srxraylib.plot.gol import plot, set_qt
        set_qt()
        plot(fwanted, fwanted,
             fwanted, ffound,
             fwanted,ffound2,
             fpaper_tf1v, fpaper_tf1v,
             fpaper_tf2v, fpaper_tf2v,
             fpaper_tf1h, fpaper_tf1h,
             fpaper_tf2h, fpaper_tf2h,
             xtitle="f wanted [m]", ytitle="f found [m]",
             legend=["ideal","TF1","TF2","f wanted TF1 V","f wanted TF2 V","f wanted TF1 H","f wanted TF2 H"],
             linestyle=[":",None,None,"","","",""],
             marker=[None,None,None,'+','+','x','x'],
             title="2D focusing")
    #
    # TF1
    #
    # 2D stage first (limited by the larger of the H/V targets), then the
    # 1D horizontal stage on top of the 2D result.
    fwanted_2d = numpy.zeros_like(fpaper_tf1h)
    ffound_2d = numpy.zeros_like(fpaper_tf1h)
    for i in range(fwanted_2d.size):
        fwanted_2d[i] = numpy.max( (fpaper_tf1h[i], fpaper_tf1v[i]))
        tmp = transfocator_guess_configuration(fwanted_2d[i], deltas=[delta]*len(radii_tf1v),
                                                radii=radii_tf1v, verbose=1)
        ffound_2d[i] = tmp
    fwanted_1d = numpy.zeros_like(fpaper_tf1h)
    ffound_1d = numpy.zeros_like(fpaper_tf1h)
    for i in range(fwanted_1d.size):
        fwanted_1d[i] = fpaper_tf1h[i]
        tmp = transfocator_guess_configuration(fwanted_1d[i], deltas=[delta]*len(radii_tf1h),
                                                radii=radii_tf1h, verbose=1, initial_focal_distance=ffound_2d[i])
        ffound_1d[i] = tmp
    print("TF1 V 2D f wanted, f found: ", fpaper_tf1v,ffound_2d)
    print("TF1 H 1D f wanted, f found: ", fpaper_tf1h,ffound_1d)
    #
    # TF2
    #
    fwanted_2d = numpy.zeros_like(fpaper_tf2h)
    ffound_2d = numpy.zeros_like( fpaper_tf2h)
    for i in range(fwanted_2d.size):
        fwanted_2d[i] = numpy.max( (fpaper_tf2h[i], fpaper_tf2v[i]))
        tmp = transfocator_guess_configuration(fwanted_2d[i], deltas=[delta]*len(radii_tf2v),
                                                radii=radii_tf2v, verbose=1)
        ffound_2d[i] = tmp
    fwanted_1d = numpy.zeros_like(fpaper_tf2h)
    ffound_1d = numpy.zeros_like(fpaper_tf2h)
    for i in range(fwanted_1d.size):
        fwanted_1d[i] = fpaper_tf2h[i]
        tmp = transfocator_guess_configuration(fwanted_1d[i], deltas=[delta]*len(radii_tf2h),
                                                radii=radii_tf2h, verbose=1, initial_focal_distance=ffound_2d[i])
        ffound_1d[i] = tmp
    print("TF2 V 2D f wanted, f found: ", fpaper_tf2v,ffound_2d)
    print("TF2 H 1D f wanted, f found: ", fpaper_tf2h,ffound_1d)
    print(1.0 / (1/5000 + 1/1000 + 1/5000 + 1/500))
|
[
"numpy.binary_repr",
"numpy.zeros_like",
"numpy.abs",
"numpy.zeros",
"numpy.max",
"numpy.array",
"numpy.linspace",
"srxraylib.plot.gol.set_qt",
"srxraylib.plot.gol.plot",
"xraylib.Refractive_Index_Re"
] |
[((258, 284), 'numpy.zeros', 'numpy.zeros', (['ncombinations'], {}), '(ncombinations)\n', (269, 284), False, 'import numpy\n'), ((2747, 2772), 'numpy.linspace', 'numpy.linspace', (['(2)', '(85)', '(50)'], {}), '(2, 85, 50)\n', (2761, 2772), False, 'import numpy\n'), ((3063, 3082), 'numpy.array', 'numpy.array', (['[42.2]'], {}), '([42.2])\n', (3074, 3082), False, 'import numpy\n'), ((3103, 3122), 'numpy.array', 'numpy.array', (['[25.1]'], {}), '([25.1])\n', (3114, 3122), False, 'import numpy\n'), ((3143, 3162), 'numpy.array', 'numpy.array', (['[55.7]'], {}), '([55.7])\n', (3154, 3162), False, 'import numpy\n'), ((3183, 3202), 'numpy.array', 'numpy.array', (['[20.7]'], {}), '([20.7])\n', (3194, 3202), False, 'import numpy\n'), ((6493, 6522), 'numpy.zeros_like', 'numpy.zeros_like', (['fpaper_tf1h'], {}), '(fpaper_tf1h)\n', (6509, 6522), False, 'import numpy\n'), ((6539, 6568), 'numpy.zeros_like', 'numpy.zeros_like', (['fpaper_tf1h'], {}), '(fpaper_tf1h)\n', (6555, 6568), False, 'import numpy\n'), ((6891, 6920), 'numpy.zeros_like', 'numpy.zeros_like', (['fpaper_tf1h'], {}), '(fpaper_tf1h)\n', (6907, 6920), False, 'import numpy\n'), ((6937, 6966), 'numpy.zeros_like', 'numpy.zeros_like', (['fpaper_tf1h'], {}), '(fpaper_tf1h)\n', (6953, 6966), False, 'import numpy\n'), ((7449, 7478), 'numpy.zeros_like', 'numpy.zeros_like', (['fpaper_tf2h'], {}), '(fpaper_tf2h)\n', (7465, 7478), False, 'import numpy\n'), ((7495, 7524), 'numpy.zeros_like', 'numpy.zeros_like', (['fpaper_tf2h'], {}), '(fpaper_tf2h)\n', (7511, 7524), False, 'import numpy\n'), ((7848, 7877), 'numpy.zeros_like', 'numpy.zeros_like', (['fpaper_tf2h'], {}), '(fpaper_tf2h)\n', (7864, 7877), False, 'import numpy\n'), ((7895, 7924), 'numpy.zeros_like', 'numpy.zeros_like', (['fpaper_tf2h'], {}), '(fpaper_tf2h)\n', (7911, 7924), False, 'import numpy\n'), ((378, 408), 'numpy.binary_repr', 'numpy.binary_repr', (['i'], {'width': 'nn'}), '(i, width=nn)\n', (395, 408), False, 'import numpy\n'), ((1243, 1277), 'numpy.abs', 
'numpy.abs', (['(focal_f_target - Farray)'], {}), '(focal_f_target - Farray)\n', (1252, 1277), False, 'import numpy\n'), ((2588, 2658), 'xraylib.Refractive_Index_Re', 'xraylib.Refractive_Index_Re', (['symbol', '(photon_energy_ev * 0.001)', 'density'], {}), '(symbol, photon_energy_ev * 0.001, density)\n', (2615, 2658), False, 'import xraylib\n'), ((5229, 5254), 'numpy.zeros_like', 'numpy.zeros_like', (['fwanted'], {}), '(fwanted)\n', (5245, 5254), False, 'import numpy\n'), ((5493, 5518), 'numpy.zeros_like', 'numpy.zeros_like', (['fwanted'], {}), '(fwanted)\n', (5509, 5518), False, 'import numpy\n'), ((5887, 5895), 'srxraylib.plot.gol.set_qt', 'set_qt', ([], {}), '()\n', (5893, 5895), False, 'from srxraylib.plot.gol import plot, set_qt\n'), ((5904, 6352), 'srxraylib.plot.gol.plot', 'plot', (['fwanted', 'fwanted', 'fwanted', 'ffound', 'fwanted', 'ffound2', 'fpaper_tf1v', 'fpaper_tf1v', 'fpaper_tf2v', 'fpaper_tf2v', 'fpaper_tf1h', 'fpaper_tf1h', 'fpaper_tf2h', 'fpaper_tf2h'], {'xtitle': '"""f wanted [m]"""', 'ytitle': '"""f found [m]"""', 'legend': "['ideal', 'TF1', 'TF2', 'f wanted TF1 V', 'f wanted TF2 V',\n 'f wanted TF1 H', 'f wanted TF2 H']", 'linestyle': "[':', None, None, '', '', '', '']", 'marker': "[None, None, None, '+', '+', 'x', 'x']", 'title': '"""2D focusing"""'}), "(fwanted, fwanted, fwanted, ffound, fwanted, ffound2, fpaper_tf1v,\n fpaper_tf1v, fpaper_tf2v, fpaper_tf2v, fpaper_tf1h, fpaper_tf1h,\n fpaper_tf2h, fpaper_tf2h, xtitle='f wanted [m]', ytitle='f found [m]',\n legend=['ideal', 'TF1', 'TF2', 'f wanted TF1 V', 'f wanted TF2 V',\n 'f wanted TF1 H', 'f wanted TF2 H'], linestyle=[':', None, None, '', '',\n '', ''], marker=[None, None, None, '+', '+', 'x', 'x'], title='2D focusing'\n )\n", (5908, 6352), False, 'from srxraylib.plot.gol import plot, set_qt\n'), ((6630, 6673), 'numpy.max', 'numpy.max', (['(fpaper_tf1h[i], fpaper_tf1v[i])'], {}), '((fpaper_tf1h[i], fpaper_tf1v[i]))\n', (6639, 6673), False, 'import numpy\n'), ((7587, 7630), 'numpy.max', 
'numpy.max', (['(fpaper_tf2h[i], fpaper_tf2v[i])'], {}), '((fpaper_tf2h[i], fpaper_tf2v[i]))\n', (7596, 7630), False, 'import numpy\n'), ((1412, 1445), 'numpy.binary_repr', 'numpy.binary_repr', (['iarg'], {'width': 'nn'}), '(iarg, width=nn)\n', (1429, 1445), False, 'import numpy\n')]
|
#!usr/bin/env python
# -*- coding: utf-8 -*-
import os, argparse
from simple_file_user.File import File
from importlib import import_module, invalidate_caches
from . import testRunner
class Lens:
def __init__(self, testNames: list, functionsNames: list, resultsAndIterations: list) -> None:
self.testLen = max([len(name) for name in testNames])
self.functionLen = max([len(name) for name in functionsNames])
self.coloumnLens = [max(iter) for iter in resultsAndIterations]
self.tableLen = self.testLen + self.functionLen + sum(self.coloumnLens) + 3
class main:
possibleTests = ["runtime", "memory"]
testingFunctions = []
def __init__(self, tests: list, iters: int) -> None:
if not isinstance(tests, list):
raise TypeError("Invalid test given.")
for test in tests:
if not test in self.possibleTests:
raise ValueError(f"Invalid test given: {test}")
self.tests = tests
self.iters = iters
def showRes(self) -> None:
self.__createHTable()
self.__showHTable()
def importScript(self, pathToScript: str) -> None:
programFolder = os.path.abspath(os.path.dirname(__file__))
pathToScript = os.path.abspath(pathToScript)
script = File(pathToScript)
scriptContent = script.read()
scriptName = script.getName()
if scriptName in os.listdir(programFolder) and scriptName.endswith(".py"):
invalidate_caches()
self.script = import_module(os.path.splitext(scriptName)[0])
else:
newScriptName = self.__makeNewScriptName(scriptName)
newScript = File(os.path.join(programFolder, newScriptName), new = True)
newScript.rewrite(scriptContent)
moduleName = os.path.splitext(os.path.split(newScriptName)[1])[0]
invalidate_caches()
self.script = import_module(moduleName)
newScript.remove()
for name in dir(self.script):
if isinstance(getattr(self.script, name), testRunner):
self.testingFunctions.append(getattr(self.script, name))
def startTesting(self) -> None:
self.res = {test: [[func(test) for i in range(self.iters)] for func in self.testingFunctions] for test in self.tests}
self.showRes()
def __makeNewScriptName(self, scriptName: str) -> str:
scriptName = os.path.splitext(scriptName)[0]
return scriptName + ".py"
def __createHTable(self) -> None:
self.functionNames = [func.name for func in self.testingFunctions]
self.iters_ = [f"Iteration {i}|" for i in range(1, self.iters + 1)]
lens = self.__setLens()
heading = f"|%+{lens.testLen}s|%+{lens.functionLen}s|" % ("Tests.", "Functions.") + ''.join(
[f"%+{lens.coloumnLens[i]}s" % self.iters_[i] for i in range(self.iters)]
)
content = self.__createContentForHTable(lens.testLen, lens.functionLen, lens.coloumnLens, lens.tableLen)
self.hTable = ["-" * lens.tableLen + "\n", *heading, "\n" + "-" * lens.tableLen + "\n", *content]
def __showHTable(self) -> None:
print("".join(self.hTable))
def __setLens(self) -> Lens:
tests = ["Tests.", *[test for test in self.tests]]
functions = ["Functions.", *[name for name in self.functionNames]]
resultColumns = []
for iterIndex in range(self.iters):
column = [len(self.iters_[iterIndex])]
for funcs in self.res.values():
for results in funcs:
column.append(len(str(results[iterIndex])))
resultColumns.append(column)
lens = Lens(tests, functions, resultColumns)
return lens
def __createContentForHTable(self, test_len, func_len, res_lens: list, tableLen: int) -> list:
strs = []
for test, funcs in self.res.items():
strs.append( f"|%+{test_len}s|" % test)
i = 0
for func, name in zip(funcs, [func.name for func in self.testingFunctions]):
if i:
strs.append(f"|{' ' * test_len}|")
strs.append(f"%+{func_len}s|" % name)
for iter, iter_res in enumerate(func):
strs.append(f"%+{res_lens[iter] - 1}s|" % str(iter_res))
i += 1
strs.append("\n")
if not i:
strs.extend([" " * func_len + "|", *[" " * (len_ - 1) + "|" for len_ in res_lens], "\n"])
strs.append("-" * tableLen + "\n")
return strs
if __name__ == "__main__":
argparser = argparse.ArgumentParser(description = "Program for testing python modules.", prog = "tester")
argparser.add_argument("module", type = str, help = "Given module for testing.")
argparser.add_argument("iters", type = int, help = "How many times module will be tested.")
argparser.add_argument("tests", nargs = "+", choices = ["runtime", "memory"], help = "Tests those program should do with given module.")
args = argparser.parse_args()
tester = main(args.tests, args.iters)
tester.importScript(args.module)
tester.startTesting()
|
[
"os.path.abspath",
"importlib.invalidate_caches",
"argparse.ArgumentParser",
"importlib.import_module",
"os.path.dirname",
"simple_file_user.File.File",
"os.path.splitext",
"os.path.split",
"os.path.join",
"os.listdir"
] |
[((4681, 4774), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Program for testing python modules."""', 'prog': '"""tester"""'}), "(description='Program for testing python modules.',\n prog='tester')\n", (4704, 4774), False, 'import os, argparse\n'), ((1245, 1274), 'os.path.abspath', 'os.path.abspath', (['pathToScript'], {}), '(pathToScript)\n', (1260, 1274), False, 'import os, argparse\n'), ((1293, 1311), 'simple_file_user.File.File', 'File', (['pathToScript'], {}), '(pathToScript)\n', (1297, 1311), False, 'from simple_file_user.File import File\n'), ((1195, 1220), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1210, 1220), False, 'import os, argparse\n'), ((1484, 1503), 'importlib.invalidate_caches', 'invalidate_caches', ([], {}), '()\n', (1501, 1503), False, 'from importlib import import_module, invalidate_caches\n'), ((1879, 1898), 'importlib.invalidate_caches', 'invalidate_caches', ([], {}), '()\n', (1896, 1898), False, 'from importlib import import_module, invalidate_caches\n'), ((1925, 1950), 'importlib.import_module', 'import_module', (['moduleName'], {}), '(moduleName)\n', (1938, 1950), False, 'from importlib import import_module, invalidate_caches\n'), ((2438, 2466), 'os.path.splitext', 'os.path.splitext', (['scriptName'], {}), '(scriptName)\n', (2454, 2466), False, 'import os, argparse\n'), ((1414, 1439), 'os.listdir', 'os.listdir', (['programFolder'], {}), '(programFolder)\n', (1424, 1439), False, 'import os, argparse\n'), ((1686, 1728), 'os.path.join', 'os.path.join', (['programFolder', 'newScriptName'], {}), '(programFolder, newScriptName)\n', (1698, 1728), False, 'import os, argparse\n'), ((1544, 1572), 'os.path.splitext', 'os.path.splitext', (['scriptName'], {}), '(scriptName)\n', (1560, 1572), False, 'import os, argparse\n'), ((1830, 1858), 'os.path.split', 'os.path.split', (['newScriptName'], {}), '(newScriptName)\n', (1843, 1858), False, 'import os, argparse\n')]
|
from pathlib import Path
import os
from PIL import Image, ImageDraw
import nml
SPRITE_SIZE = (20, 45)
NUMBERS_SHEET = Image.open('numbers.png')
NUMBERS = [NUMBERS_SHEET.crop((i * 11, 0, i * 11 + 11, 11)) for i in range(20)]
NUMBERS_MASK = [Image.eval(img, (lambda a: 255 if a == 1 else 1)).convert('1') for img in NUMBERS]
class TreeSprite(nml.BaseSprite):
def __init__(self, sprite_id, index, stage, climate):
self.index = index
self.stage = stage
self.climate = climate
super().__init__(sprite_id, *SPRITE_SIZE)
self.ofs_x = -self.w // 2
self.ofs_y = -self.h
def draw(self, img):
x, y, w, h = self.x, self.y, self.w, self.h
green_colors = [0x1c, 0x24, 0x57, 0xd0, 0x5c, 0x1d, 0x63, 0xd0] # min 8 (arctic)
blue_colors = [0x84, 0x8d, 0x97, 0x9e, 0xac, 0xcd, 0xd6, 0xa1] # min 8 (arctic-snow)
misc_colors = [0xb6, 0xbe, 0x39, 0x41, 0x3f, 0x4d, 0x77, 0xa5, 0xad, 0x15] # min 10 (tropic)
color = {
'temperate': green_colors + blue_colors[:4] + misc_colors,
'arctic': green_colors,
'arctic-snow': blue_colors,
'tropic': green_colors + misc_colors,
'toyland': misc_colors,
}[self.climate][self.index]
# color = {"arctic": 0x8C, "arctic-snow": 0x98, "temperate": 0x57, "tropic": 0x43, "toyland": 0xA5}[self.climate]
s = 12
px = (w - s) // 2
imd = ImageDraw.Draw(img)
imd.rectangle((x, y, x + w, y + h), fill=0)
imd.rectangle((x + px, y + h - s - 1, x + w - px, y + h - 1), fill=color, outline=1)
for i in range(self.stage - 1):
imd.rectangle((x + px + 1 + i, y + h - s - i * 3 - 4, x + w - px - 1 - i, y + h - s - 1 - i * 3), fill=color, outline=1)
img.paste(NUMBERS[self.index], (x + px + 1, y + h - s), mask=NUMBERS_MASK[self.index])
# img.paste(NUMBERS[self.index], (x + px + 1, y + h - s))
def get_nml(self):
return (f'replace ({self.sprite_id}, "{self.file}") {{ [{self.x}, {self.y}, {self.w}, {self.h}, {self.ofs_x}, {self.ofs_y}] }}'
f' // {self.climate} #{self.index} stage:{self.stage}')
base_path = Path(__file__).parent.absolute()
build_dir = Path("build")
build_dir.mkdir(parents=True, exist_ok=True)
os.chdir(build_dir)
trees = [TreeSprite(1576 + x, x // 7, x % 7 + 1, "temperate") for x in range(7 * 19)]
trees += [TreeSprite(1709 + x, x // 7, x % 7 + 1, "arctic") for x in range(7 * 8)]
trees += [TreeSprite(1765 + x, x // 7, x % 7 + 1, "arctic-snow") for x in range(7 * 8)]
trees += [TreeSprite(1821 + x, x // 7, x % 7 + 1, "tropic") for x in range(7 * 18)]
trees += [TreeSprite(1947 + x, x // 7, x % 7 + 1, "toyland") for x in range(7 * 9)]
trees_ss = nml.SpriteSheet(trees)
trees_ss.make_image("debug-trees.png", columns=7)
lang_dir = Path("lang")
lang_dir.mkdir(exist_ok=True)
with open(lang_dir / "english.lng", "w") as f:
f.write("##grflangid 0x01\n")
f.write("STR_GRF_NAME :Debug Trees\n")
f.write("STR_GRF_DESCRIPTION :Trees for debugging\n")
with open('debug-trees.nml', 'w') as f:
f.write('''\
grf {
grfid: "CMDT";
name: string(STR_GRF_NAME);
desc: string(STR_GRF_DESCRIPTION);
version: 2;
min_compatible_version: 1;
}
''')
trees_ss.write_nml(f)
|
[
"nml.SpriteSheet",
"PIL.Image.open",
"pathlib.Path",
"PIL.ImageDraw.Draw",
"os.chdir",
"PIL.Image.eval"
] |
[((120, 145), 'PIL.Image.open', 'Image.open', (['"""numbers.png"""'], {}), "('numbers.png')\n", (130, 145), False, 'from PIL import Image, ImageDraw\n'), ((2237, 2250), 'pathlib.Path', 'Path', (['"""build"""'], {}), "('build')\n", (2241, 2250), False, 'from pathlib import Path\n'), ((2296, 2315), 'os.chdir', 'os.chdir', (['build_dir'], {}), '(build_dir)\n', (2304, 2315), False, 'import os\n'), ((2753, 2775), 'nml.SpriteSheet', 'nml.SpriteSheet', (['trees'], {}), '(trees)\n', (2768, 2775), False, 'import nml\n'), ((2838, 2850), 'pathlib.Path', 'Path', (['"""lang"""'], {}), "('lang')\n", (2842, 2850), False, 'from pathlib import Path\n'), ((1445, 1464), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img'], {}), '(img)\n', (1459, 1464), False, 'from PIL import Image, ImageDraw\n'), ((242, 289), 'PIL.Image.eval', 'Image.eval', (['img', '(lambda a: 255 if a == 1 else 1)'], {}), '(img, lambda a: 255 if a == 1 else 1)\n', (252, 289), False, 'from PIL import Image, ImageDraw\n'), ((2191, 2205), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (2195, 2205), False, 'from pathlib import Path\n')]
|
"""1-100随机数字,猜数字游戏
程序产生 1 个,1 到 100 之间的随机数。
让玩家重复猜测,直到猜对为止。
每次提示:大了、小了、恭喜猜对了,总共猜了多少次。
效果:
请输入要猜的数字:50
大了
请输入要猜的数字:25
小了
请输入要猜的数字:35
大了
请输入要猜的数字:30
小了
请输入要猜的数字:32
恭喜猜对啦,总共猜了 5 次"""
sum=0
import random
number= random.randint (1,100)
while sum<8:
n1=int(input("请输入数字:"))
sum += 1
if number<n1:
print("大了")
elif number>n1:
print("小了")
else:
print("你真牛逼!花了"+str(sum)+"次就猜对了!")
break
else:
print("游戏失败")
|
[
"random.randint"
] |
[((208, 230), 'random.randint', 'random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (222, 230), False, 'import random\n')]
|
import RPi.GPIO as GPIO
from time import sleep
#GPIO.setmode(GPIO.BOARD)
class GpioMotor():
def __init__(self, in1,in2,en,freq=50,duty=100):
self.in1=in1
self.in2=in2
self.en=en
self.freq = freq
self.duty = duty
GPIO.setup(self.in1, GPIO.OUT)
GPIO.setup(self.in2, GPIO.OUT)
GPIO.setup(self.en, GPIO.OUT)
self.pwm=GPIO.PWM(self.en,freq)
def changeDutyCycle(self,duty):
self.duty = duty
self.pwm.ChangeDutyCycle(self.duty)
def stop(self):
self.pwm.stop()
def forward(self):
GPIO.output(self.in2, GPIO.HIGH)
GPIO.output(self.in1, GPIO.LOW)
self.pwm.start(self.duty)
def reverse(self):
GPIO.output(self.in1, GPIO.HIGH)
GPIO.output(self.in2, GPIO.LOW)
self.pwm.start(self.duty)
def __str__(self):
return "Motor(in1=%d,in2=%d,en=%d)"%(self.in1,self.in2,self.en)
#GPIO.cleanup()
|
[
"RPi.GPIO.setup",
"RPi.GPIO.output",
"RPi.GPIO.PWM"
] |
[((246, 276), 'RPi.GPIO.setup', 'GPIO.setup', (['self.in1', 'GPIO.OUT'], {}), '(self.in1, GPIO.OUT)\n', (256, 276), True, 'import RPi.GPIO as GPIO\n'), ((281, 311), 'RPi.GPIO.setup', 'GPIO.setup', (['self.in2', 'GPIO.OUT'], {}), '(self.in2, GPIO.OUT)\n', (291, 311), True, 'import RPi.GPIO as GPIO\n'), ((316, 345), 'RPi.GPIO.setup', 'GPIO.setup', (['self.en', 'GPIO.OUT'], {}), '(self.en, GPIO.OUT)\n', (326, 345), True, 'import RPi.GPIO as GPIO\n'), ((364, 387), 'RPi.GPIO.PWM', 'GPIO.PWM', (['self.en', 'freq'], {}), '(self.en, freq)\n', (372, 387), True, 'import RPi.GPIO as GPIO\n'), ((559, 591), 'RPi.GPIO.output', 'GPIO.output', (['self.in2', 'GPIO.HIGH'], {}), '(self.in2, GPIO.HIGH)\n', (570, 591), True, 'import RPi.GPIO as GPIO\n'), ((596, 627), 'RPi.GPIO.output', 'GPIO.output', (['self.in1', 'GPIO.LOW'], {}), '(self.in1, GPIO.LOW)\n', (607, 627), True, 'import RPi.GPIO as GPIO\n'), ((684, 716), 'RPi.GPIO.output', 'GPIO.output', (['self.in1', 'GPIO.HIGH'], {}), '(self.in1, GPIO.HIGH)\n', (695, 716), True, 'import RPi.GPIO as GPIO\n'), ((721, 752), 'RPi.GPIO.output', 'GPIO.output', (['self.in2', 'GPIO.LOW'], {}), '(self.in2, GPIO.LOW)\n', (732, 752), True, 'import RPi.GPIO as GPIO\n')]
|